parquet-converter committed
Commit 08d4f02
Parent: 1d3664a

Update parquet files (step 72 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Advanced SystemCare Ultimate 12.1.0.120 Crack with License Code The Ultimate Solution for PC Problems.md +0 -127
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Essentials (EasyRE) Pro - Windows XP Vista 10 Full Version The Best Alternative to Windows System Restore.md +0 -122
  3. spaces/1gistliPinn/ChatGPT4/Examples/7 Days To Die Crack Download PC Free Steam Key [Updated] !!EXCLUSIVE!!.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Borang Nikah Selangor Pdf BETTER Download.md +0 -40
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bleach VS Naruto 3.8 - The Best Anime Fighting Game for PC and Mac with Online Multiplayer.md +0 -151
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car For Sale Simulator 2023 APK A Realistic and Fun Car Simulation Game.md +0 -115
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo jugar a Dragon Z Quest Action RPG con mod apk gua para descargar y instalar.md +0 -112
  8. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Abo Mvelo (feat. Mellow Amp Sleazy Amp M.j) Mixed __TOP__.md +0 -67
  9. spaces/1phancelerku/anime-remove-background/Facebook Messenger APK Old Version Features Benefits and Download Links.md +0 -131
  10. spaces/232labs/VToonify/vtoonify/model/raft/train.py +0 -247
  11. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/pann_model.py +0 -543
  12. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aibn.py +0 -52
  13. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/api.py +0 -175
  14. spaces/Aditya9790/yolo7-object-tracking/detect_or_track.py +0 -285
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetTotalColumnProportions.js +0 -13
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetExpandedChildWidth.js +0 -16
  17. spaces/Alcedo/yunmedia/resources/chatgpt-plugin/css/app.4dc5e420.css +0 -22
  18. spaces/Alexxggs/ggvpnewen/constants.py +0 -7
  19. spaces/Arnx/MusicGenXvAKN/audiocraft/modules/transformer.py +0 -747
  20. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/initialise.py +0 -121
  21. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/config.py +0 -265
  22. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/matcher.py +0 -127
  23. spaces/B10915003/B10915003-autotrain-jimmy-test-face-identification-53251125423/app.py +0 -3
  24. spaces/Bart92/RVC_HF/infer/modules/vc/pipeline.py +0 -655
  25. spaces/Benson/text-generation/Examples/Descargar Entre Nosotros 3.29.md +0 -122
  26. spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/losses/segmentation.py +0 -22
  27. spaces/BetterAPI/BetterChat/src/routes/conversation/[id]/share/+server.ts +0 -54
  28. spaces/BetterAPI/BetterChat_new/src/routes/r/[id]/+page.server.ts +0 -18
  29. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/sjisprober.py +0 -105
  30. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/common.py +0 -424
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_securetransport/__init__.py +0 -0
  32. spaces/Boadiwaa/Recipes/openai/validators.py +0 -860
  33. spaces/BramVanroy/spacey_conll/app.py +0 -129
  34. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/distance.h +0 -43
  35. spaces/CVPR/Text2Human/Text2Human/utils/options.py +0 -129
  36. spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp +0 -39
  37. spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/visualizer.py +0 -318
  38. spaces/CofAI/CalculatorUI/style.css +0 -28
  39. spaces/CofAI/chat.b4/g4f/Provider/Providers/DeepAi.py +0 -46
  40. spaces/Cpp4App/Cpp4App/CDM/input_examples/README.md +0 -80
  41. spaces/Cvandi/remake/tests/test_utils.py +0 -87
  42. spaces/DEEMOSTECH/ChatAvatar/static/js/main.d852ae94.js +0 -0
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/zip.py +0 -127
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/utils.py +0 -1020
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_hf_folder.py +0 -102
  46. spaces/ECCV2022/PSG/OpenPSG/configs/_base_/schedules/schedule_3x.py +0 -10
  47. spaces/ECCV2022/bytetrack/exps/example/mot/yolox_l_mix_det.py +0 -138
  48. spaces/EPFL-VILAB/MultiMAE/utils/semseg_metrics.py +0 -231
  49. spaces/EasyEasy/EasyProxy/greeting.md +0 -51
  50. spaces/EcoCy/LoRA-DreamBooth-Training-UI/utils.py +0 -59
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Advanced SystemCare Ultimate 12.1.0.120 Crack with License Code The Ultimate Solution for PC Problems.md DELETED
@@ -1,127 +0,0 @@
-
- <h1>Advanced SystemCare Ultimate 12.1.0.120 Crack with License Code</h1>
- <p>If you are looking for a powerful and comprehensive PC security and performance utility, you might want to check out <strong>Advanced SystemCare Ultimate 12.1.0.120 Crack</strong>. This software is designed to protect your PC from viruses, ransomware, and other threats, as well as to optimize your system for faster and smoother performance. In this article, we will tell you everything you need to know about Advanced SystemCare Ultimate Crack, including what it is, why you need it, how to download and install it, how to use it, and what are its pros and cons.</p>
- <h2>What is Advanced SystemCare Ultimate?</h2>
- <p>Advanced SystemCare Ultimate is a product of IObit, a well-known developer of PC optimization and security tools. It is a combination of Advanced SystemCare Pro, which is a PC tune-up utility, and Bitdefender, which is a powerful anti-virus engine. With Advanced SystemCare Ultimate, you can enjoy both the benefits of PC optimization and virus protection in one software.</p>
- <h2>Advanced SystemCare Ultimate 12.1.0.120 Crack with License Code</h2><br /><p><b><b>Download Zip</b> &#9745; <a href="https://byltly.com/2uKx7O">https://byltly.com/2uKx7O</a></b></p><br /><br />
- <p>Some of the features of Advanced SystemCare Ultimate are:</p>
- <ul>
- <li>Real-time protection against viruses, ransomware, spyware, phishing, and other online threats</li>
- <li>One-click scan to detect and remove junk files, registry errors, privacy traces, and security holes</li>
- <li>Turbo boost to speed up your PC by disabling unnecessary services and processes</li>
- <li>Deep optimization to improve your system performance by optimizing disk, RAM, startup items, internet settings, etc.</li>
- <li>Browser protection to block malicious websites, ads, trackers, and cryptocurrency mining scripts</li>
- <li>FaceID to unlock your PC with your face and prevent unauthorized access</li>
- <li>Data shredder to permanently delete sensitive files and folders</li>
- <li>Firewall control to manage your network permissions and prevent unauthorized connections</li>
- <li>And many more...</li>
- </ul>
- <h2>Why do you need Advanced SystemCare Ultimate Crack?</h2>
- <p>Advanced SystemCare Ultimate is not a free software. You need to pay $49.99 per year for one PC or $69.99 per year for three PCs to use it. However, if you don't want to spend that much money on a PC utility software, you can use <strong>Advanced SystemCare Ultimate Crack</strong>. This is a modified version of the software that allows you to use it for free without any limitations or restrictions.</p>
- <p>Some of the benefits of using Advanced SystemCare Ultimate Crack are:</p>
- <p>Advanced SystemCare Ultimate 12.1.0.120 Serial Key<br />
- Advanced SystemCare Ultimate 12.1.0.120 Activation Code<br />
- Advanced SystemCare Ultimate 12.1.0.120 Keygen<br />
- Advanced SystemCare Ultimate 12.1.0.120 License Key 2019<br />
- Advanced SystemCare Ultimate 12.1.0.120 Crack Download<br />
- Advanced SystemCare Ultimate 12.1.0.120 Full Version<br />
- Advanced SystemCare Ultimate 12.1.0.120 Free Download<br />
- Advanced SystemCare Ultimate 12.1.0.120 Torrent<br />
- Advanced SystemCare Ultimate 12 Pro License Key<br />
- Advanced SystemCare Ultimate 12 Pro Crack<br />
- Advanced SystemCare Ultimate 12 Pro Serial Number<br />
- Advanced SystemCare Ultimate 12 Pro Activation Key<br />
- Advanced SystemCare Ultimate 12 Pro Keygen Download<br />
- Advanced SystemCare Ultimate 12 Pro Full Crack<br />
- Advanced SystemCare Ultimate 12 Pro Free License Code<br />
- Advanced SystemCare Ultimate 12 Pro Torrent Download<br />
- Advanced SystemCare Ultimate Crack with License Code<br />
- Advanced SystemCare Ultimate Serial Key 2019<br />
- Advanced SystemCare Ultimate Activation Code Free<br />
- Advanced SystemCare Ultimate Keygen Full Version<br />
- Advanced SystemCare Ultimate License Key Download<br />
- Advanced SystemCare Ultimate Crack Full Version<br />
- Advanced SystemCare Ultimate Free Download with Crack<br />
- Advanced SystemCare Ultimate Torrent with License Code<br />
- Advanced SystemCare Pro 13 Crack with License Code<br />
- Advanced SystemCare Pro 13 Serial Key Free Download<br />
- Advanced SystemCare Pro 13 Activation Code Full Version<br />
- Advanced SystemCare Pro 13 Keygen Download Free<br />
- Advanced SystemCare Pro 13 License Key Crack<br />
- Advanced SystemCare Pro 13 Full Version with Crack<br />
- Advanced SystemCare Pro 13 Free Download with License Code<br />
- Advanced SystemCare Pro 13 Torrent with Crack<br />
- IObit Advanced SystemCare Ultimate Crack with License Code<br />
- IObit Advanced SystemCare Ultimate Serial Key Free Download<br />
- IObit Advanced SystemCare Ultimate Activation Code Full Version<br />
- IObit Advanced SystemCare Ultimate Keygen Download Free<br />
- IObit Advanced SystemCare Ultimate License Key Crack<br />
- IObit Advanced SystemCare Ultimate Full Version with Crack<br />
- IObit Advanced SystemCare Ultimate Free Download with License Code<br />
- IObit Advanced SystemCare Ultimate Torrent with Crack<br />
- IObit Advanced SystemCare Pro Crack with License Code<br />
- IObit Advanced SystemCare Pro Serial Key Free Download<br />
- IObit Advanced SystemCare Pro Activation Code Full Version<br />
- IObit Advanced SystemCare Pro Keygen Download Free<br />
- IObit Advanced SystemCare Pro License Key Crack<br />
- IObit Advanced SystemCare Pro Full Version with Crack<br />
- IObit Advanced SystemCare Pro Free Download with License Code<br />
- IObit Advanced SystemCare Pro Torrent with Crack</p>
- <ul>
- <li>You can save money by not paying for the subscription fee</li>
- <li>You can enjoy all the features and functions of the software without any limitations or restrictions</li>
- <li>You can update the software regularly without any problems or errors</li>
- <li>You can activate the software easily with a license code that is provided by the crack</li>
- <li>You can use the software on any PC without any compatibility issues</li>
- </ul>
- <h2>How to download and install Advanced SystemCare Ultimate Crack?</h2>
- <p>If you want to download and install Advanced SystemCare Ultimate Crack on your PC, you need to follow these steps:</p>
- <ol>
- <li>Download the setup file of Advanced SystemCare Ultimate 12.1.0.120 from <a href="https://www.iobit.com/en/advancedsystemcareultimate.php">the official website</a></li>
- <li>Run the setup file and follow the instructions to install the software on your PC</li>
- <li>Download the crack file of Advanced SystemCare Ultimate 12.1.0.120 from <a href="https://zenkikiba.tistory.com/43">this website</a></li>
- <li>Extract the crack file and copy the license code that is given in it</li>
- <li>Open the software and click on "Enter Code" in the main menu</li>
- <li>Paste the license code in the box and click on "Register Now"</li>
- <li>Congratulations! You have successfully activated Advanced SystemCare Ultimate Crack on your PC</li>
- </ol>
- <h2>How to use Advanced SystemCare Ultimate Crack?</h2>
- <p>Using Advanced SystemCare Ultimate Crack is very easy and simple. You just need to follow these steps:</p>
- <ol>
- <li>Open the software and click on "Scan" in the main interface</li>
- <li>The software will scan your PC for viruses, junk files, registry errors, privacy traces, security holes, etc.</li>
- <li>After the scan is completed, click on "Fix All" to remove all the detected issues and optimize your PC</li>
- <li>If you want to speed up your PC further, click on "Turbo Boost" in the toolbox menu</li>
- <li>Select a mode (Work Mode or Game Mode) according to your needs and click on "Turn On"</li>
- <li>The software will disable unnecessary services and processes to boost your PC performance</li>
- <li>If you want to protect your browser from malicious websites, ads, trackers, etc., click on "Browser Protection" in the toolbox menu</li>
- <li>Select a browser (Chrome, Firefox, Edge, etc.) that you want to protect and click on "Enable"</li>
- <li>The software will block all unwanted elements from your browser and enhance your online security</li>
- </ol>
- <h2>What are the pros and cons of Advanced SystemCare Ultimate Crack?</h2>
- <p>As with any software, there are some pros and cons of using Advanced SystemCare Ultimate Crack. Here are some of them:</p>
- <table style="border-collapse: collapse; width: 100%;">
- <tbody><tr style="height: 23px;">
- <td style="width: 50%; height: 23px;"><strong>Pros</strong></td><td style="width: 50%; height: 23px;"><strong>Cons</strong></td></tr><tr style="height: 23px;">
- <td style="width: 50%; height: 23px;">It is free to use without any limitations or restrictions</td><td style="width: 50%; height: 23px;">It may be illegal or unethical to use a cracked version of a paid software</td></tr><tr style="height: 23px;">
- <td style="width: 50%; height: 23px;">It offers both PC optimization and virus protection in one software</td><td style="width: 50%; height: 23px;">It may not be compatible with some other security or optimization tools on your PC</td></tr><tr style="height: 23px;">
- <td style="width: 50%; height: 23px;">It has a user-friendly interface and easy-to-use features</td><td style="width: 50%; height: 23px;">It may cause some false positives or false negatives when scanning your PC for viruses or issues</td></tr><tr style="height: 23px;">
- <td style="width: 50%; height: 23px;">It supports multiple languages and platforms</td><td style="width: 50%; height: 23px;">It may contain some bugs or errors that affect its performance or stability</td></tr></tbody></table>
- <h2>Conclusion</h2>
- <p>In conclusion, Advanced SystemCare Ultimate Crack is a great option for anyone who wants to protect their PC from viruses, ransomware, and other threats, as well as optimize their system for faster and smoother performance. It is free to use without any limitations or restrictions, and it offers both PC optimization and virus protection in one software. However, it may also have some drawbacks such as being illegal or unethical to use a cracked version of a paid software, being incompatible with some other security or optimization tools on your PC, causing some false positives or false negatives when scanning your PC for viruses or issues, or containing some bugs or errors that affect its performance <h3>FAQs</h3>
- <p>Here are some frequently asked questions and answers about Advanced SystemCare Ultimate Crack.</p>
- <ul>
- <li><strong>Q: Is Advanced SystemCare Ultimate Crack safe to use?</strong></li>
- <li>A: Advanced SystemCare Ultimate Crack is generally safe to use, as long as you download it from a reliable source and scan it with a reputable anti-virus software. However, there is always a risk of malware or virus infection when using cracked software, so use it at your own discretion and responsibility.</li>
- <li><strong>Q: Does Advanced SystemCare Ultimate Crack work with Windows 10?</strong></li>
- <li>A: Yes, Advanced SystemCare Ultimate Crack works with Windows 10, as well as Windows 8/8.1, Windows 7, Windows Vista, and Windows XP.</li>
- <li><strong>Q: How long does the license code of Advanced SystemCare Ultimate Crack last?</strong></li>
- <li>A: The license code of Advanced SystemCare Ultimate Crack lasts for one year. You can renew it by downloading a new crack file and entering a new license code.</li>
- <li><strong>Q: Can I use Advanced SystemCare Ultimate Crack on multiple PCs?</strong></li>
- <li>A: Yes, you can use Advanced SystemCare Ultimate Crack on multiple PCs, as long as you have the crack file and the license code for each PC.</li>
- <li><strong>Q: What is the difference between Advanced SystemCare Ultimate and Advanced SystemCare Pro?</strong></li>
- <li>A: The main difference between Advanced SystemCare Ultimate and Advanced SystemCare Pro is that the former has an anti-virus feature powered by Bitdefender, while the latter does not. Both software have PC optimization and protection features, but Advanced SystemCare Ultimate offers more comprehensive and advanced security against viruses, ransomware, and other threats.</li>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Easy Recovery Essentials (EasyRE) Pro - Windows XP Vista 10 Full Version The Best Alternative to Windows System Restore.md DELETED
@@ -1,122 +0,0 @@
- <br />
- <h1>Easy Recovery Essentials (EasyRE) Pro - Windows XP Vista 10 Full Version</h1>
- <p>Have you ever encountered a situation where your Windows PC won't boot or crashes frequently? If so, you know how frustrating and stressful it can be. You may lose access to your important files, settings, and applications, and you may not know how to fix the problem or recover your data.</p>
- <p>Fortunately, there is a solution that can help you repair and restore your Windows PC in a fast and easy way. It's called <strong>Easy Recovery Essentials</strong>, or <strong>EasyRE</strong> for short. In this article, we will introduce you to this powerful tool and show you how to use it to fix your PC problems.</p>
- <h2>Easy Recovery Essentials (EasyRE) Pro - Windows XP Vista 10 Full Version</h2><br /><p><b><b>DOWNLOAD</b> &#8250;&#8250;&#8250; <a href="https://byltly.com/2uKA0o">https://byltly.com/2uKA0o</a></b></p><br /><br />
- <h2>What is EasyRE and why do you need it?</h2>
- <p>Easy Recovery Essentials for Windows (EasyRE) is a bootable repair and recovery software from NeoSmart Technologies that can be used to repair non-booting/crashed laptops and PCs. It supports all Windows versions, including Windows XP, Vista, 7, 8, 10, and 11, as well as Windows Server editions.</p>
- <p>EasyRE is different from other repair solutions because it uses a scientific approach to understand how your PC works and where things go wrong. It tests and validates each component in your system, simulating the boot process and identifying the errors. It then automatically fixes the problems and restores your PC to a working state.</p>
- <h3>How does EasyRE work?</h3>
- <p>EasyRE works by booting from a CD or USB that you create using another working PC. You don't need to install anything on your PC or access Windows. Once you boot from the CD or USB, EasyRE will scan your PC and display a list of recovery options. All you have to do is click 'begin'.</p>
- <p>EasyRE will then perform a comprehensive analysis of your system, starting with your hardware and making its way up. It will check your memory, CPU, disk, motherboard, BIOS, bootloader, registry, drivers, services, files, and more. It will also detect any viruses, malware, or rootkits that may be affecting your PC.</p>
- <p>Based on the results of the analysis, EasyRE will apply various repair methods and workarounds to fix the errors and restore your PC. It will also optimize your system settings and performance for better stability and speed.</p>
- <h3>What are the benefits of EasyRE?</h3>
- <p>Some of the benefits of using Easy Recovery Essentials are:</p>
- <p>How to use EasyRE Pro for Windows XP recovery<br />
- EasyRE Pro features and benefits for Windows Vista users<br />
- Download EasyRE Pro full version for Windows 10<br />
- EasyRE Pro reviews and testimonials from satisfied customers<br />
- EasyRE Pro vs other Windows recovery tools comparison<br />
- EasyRE Pro discount code and coupon for saving money<br />
- EasyRE Pro system requirements and compatibility for Windows XP Vista 10<br />
- EasyRE Pro tutorial and guide for beginners<br />
- EasyRE Pro free trial and demo download link<br />
- EasyRE Pro customer support and contact information<br />
- EasyRE Pro license key and activation instructions<br />
- EasyRE Pro refund policy and guarantee terms<br />
- EasyRE Pro FAQs and troubleshooting tips<br />
- EasyRE Pro alternatives and competitors analysis<br />
- EasyRE Pro affiliate program and commission rates<br />
- How to backup and restore Windows XP Vista 10 with EasyRE Pro<br />
- How to fix Windows boot errors and blue screen with EasyRE Pro<br />
- How to recover deleted files and partitions with EasyRE Pro<br />
- How to optimize and speed up Windows XP Vista 10 with EasyRE Pro<br />
- How to repair Windows registry and system files with EasyRE Pro<br />
- How to create a bootable USB or CD with EasyRE Pro<br />
- How to clone and migrate Windows XP Vista 10 with EasyRE Pro<br />
- How to update and upgrade Windows XP Vista 10 with EasyRE Pro<br />
- How to clean and wipe Windows XP Vista 10 with EasyRE Pro<br />
- How to diagnose and test Windows XP Vista 10 with EasyRE Pro<br />
- How to install and uninstall EasyRE Pro on Windows XP Vista 10<br />
- How to customize and configure EasyRE Pro settings and options<br />
- How to access and use EasyRE Pro advanced tools and features<br />
- How to secure and protect Windows XP Vista 10 with EasyRE Pro<br />
- How to recover from ransomware and malware attacks with EasyRE Pro<br />
- How to recover Windows password and user account with EasyRE Pro<br />
- How to recover data from external hard drive or USB with EasyRE Pro<br />
- How to recover data from corrupted or formatted drive with EasyRE Pro<br />
- How to recover data from RAID or NAS with EasyRE Pro<br />
- How to recover data from SSD or HDD with EasyRE Pro<br />
- How to recover data from laptop or desktop with EasyRE Pro<br />
- How to recover data from different file systems with EasyRE Pro<br />
- How to recover data from different brands of computers with EasyRE Pro<br />
- How to recover data from different versions of Windows with EasyRE Pro<br />
- How to recover data from different scenarios with EasyRE Pro<br />
- What is the difference between Easy Recovery Essentials (Easy RE) Home, Professional, Server editions?<br />
- What are the advantages of using Easy Recovery Essentials (Easy RE) over Windows built-in recovery tools?<br />
- What are the common problems that can be solved by using Easy Recovery Essentials (Easy RE)?<br />
- What are the best practices for using Easy Recovery Essentials (Easy RE) effectively?<br />
- What are the latest updates and news about Easy Recovery Essentials (Easy RE)?<br />
- What are the customer feedbacks and ratings about Easy Recovery Essentials (Easy RE)?<br />
- What are the special offers and promotions for buying Easy Recovery Essentials (Easy RE)?<br />
- What are the sources of information and help for using Easy Recovery Essentials (Easy RE)?<br />
- What are the risks and limitations of using Easy Recovery Essentials (Easy RE)?</p>
- <ul>
- <li>It's easy to use. You don't need any technical skills or knowledge to use it. Just follow the simple instructions and let it do the work for you.</li>
- <li>It's fast and effective. It can fix most PC problems in minutes, saving you time and money.</li>
- <li>It's compatible with all PCs and laptops. It works with any brand, model, or configuration of Windows PCs and laptops.</li>
- <li>It's up-to-date and reliable. It supports the latest Windows versions and technologies, such as UEFI, GPT, SSDs, RAID arrays, etc.</li>
- <li>It's safe for your data. It doesn't erase or overwrite any data from your PC. You can also use it to backup or restore your files if needed.</li>
- </ul>
- <h2>How to use EasyRE to repair and recover your Windows PC?</h2>
- <p>In this section, we will show you how to use Easy Recovery Essentials to fix your Windows PC. You will need a blank CD or USB drive, a working PC with internet access, and the PC that needs fixing.</p>
- <h3>Downloading EasyRE</h3>
- <p>The first step is to download Easy Recovery Essentials from the official website: https://neosmart.net/EasyRE/. You can choose the version that matches your Windows version (XP/Vista/7/8/10/11) or the technicians' edition that supports all Windows versions.</p>
- <p>The download size is about 120 MB. After downloading the file, you will get an ISO image that contains the software.</p>
- <h3>Burning EasyRE to a CD or USB</h3>
- <p>The next step is to burn the ISO image to a CD or USB drive using another working PC. You can use any burning software that supports ISO images, such as ImgBurn, Rufus, PowerISO, etc.</p>
- <p>If you are using a CD, insert a blank CD into the CD/DVD drive of the working PC. Then open the burning software and select the ISO image as the source file and the CD/DVD drive as the destination. Start the burning process and wait until it finishes.</p>
- <p>If you are using a USB drive, insert a blank USB drive into a USB port of the working PC. Then open the burning software and select the ISO image as the source file and the USB drive as the destination. Start the burning process and wait until it finishes.</p>
- <p>You now have a bootable media with Easy Recovery Essentials on it.</p>
- <h3>Booting from EasyRE</h3>
- <p>The third step is to boot your PC that needs fixing from the CD or USB drive that contains Easy Recovery Essentials. To do this, you need to change the boot order in your BIOS or UEFI settings.</p>
- <p>To access the BIOS or UEFI settings, restart your PC and press a specific key (usually F2, F10, F12, Del, Esc) when you see the manufacturer's logo on the screen. This will take you to a menu where you can change various settings for your PC.</p>
- <p>Look for an option that says 'Boot' or 'Boot order' or something similar. Select it and change it so that the CD/DVD drive or USB drive is at the top of the list. This will make your PC boot from that device first instead of your hard drive.</p>
- <p>Save the changes and exit the BIOS or UEFI settings. Your PC will restart again and boot from the CD or USB drive.</p>
- <h3>Running EasyRE</h3>
- <p>The final step is to run Easy Recovery Essentials on your PC. When you boot from the CD or USB drive, you will see a welcome screen with some options. Choose 'Automated Repair' if you want EasyRE to automatically scan and fix your PC problems.</p>
- <p>You will then see a screen where you can select your Windows installation (if you have more than one). Choose the one that corresponds to your current Windows version (XP/Vista/7/8/10/11) or select 'All' if you are not sure.</p>
- <p>You will then see a screen where you can choose which tests and repairs you want EasyRE to perform on your PC. The default settings are recommended for most cases. Click 'Begin' when you are ready.</p>
- <p>Easy Recovery Essentials will then start scanning and repairing your PC using its scientific approach. You will see a progress bar and some messages on the screen indicating what it is doing. The process may take several minutes depending on your PC condition.</p>
- <p>When it finishes, you will see a screen with # Article with HTML formatting (continued) <p>a summary of the repair results and a message that says 'Repair complete'. You can click 'View details' to see more information about what EasyRE did.</p>
- <p>Now you can click 'Restart' to reboot your PC and check if it works normally. If not, you can try running EasyRE again with different options or contact the support team for help.</p>
- <h2>Frequently asked questions about EasyRE</h2>
- <p>In this section, we will answer some of the common questions that users may have about Easy Recovery Essentials.</p>
- <h3>Is EasyRE free?</h3>
- <p>No, EasyRE is not free. It is a paid software that costs $19.75 for a single license. However, you can download a free trial version that lets you scan your PC and see if EasyRE can fix it. The trial version does not perform any repairs, but it gives you an idea of what EasyRE can do for you.</p>
- <h3>What are the system requirements for EasyRE?</h3>
- <p>To use EasyRE, you need a PC or laptop that runs Windows XP, Vista, 7, 8, 10, or 11, or Windows Server editions. You also need a blank CD or USB drive with at least 256 MB of space, and another working PC with internet access to download and burn EasyRE.</p>
- <h3>Does EasyRE support UEFI and GPT?</h3>
- <p>Yes, EasyRE supports UEFI and GPT. It is the only repair solution that natively supports UEFI, the modern replacement for BIOS. It can also work with GPT disks, which are the new standard for disk partitioning. EasyRE can handle any PC configuration and technology.</p>
- <h3>Does EasyRE erase any data from my PC?</h3>
- <p>No, EasyRE does not erase any data from your PC. It only repairs the system files and settings that are causing your PC problems. It does not touch your personal files, documents, photos, music, videos, etc. However, if you want to backup or restore your data, you can use the 'Browse/Backup Files' option in EasyRE.</p>
- <h3>How can I contact the support team for EasyRE?</h3>
- <p>If you have any issues or questions about EasyRE, you can contact the support team via email at [email protected]. They will respond to your queries as soon as possible and help you with troubleshooting your PC problems. Please note that there is no phone support for EasyRE at this time.</p>
- # Conclusion <p>Easy Recovery Essentials (EasyRE) Pro is a powerful tool that can help you repair and recover your Windows PC in a fast and easy way. It supports all Windows versions and PCs and laptops, and uses a scientific approach to fix your PC problems. It is easy to use, fast and effective, compatible and reliable, and safe for your data.</p>
- <p>If you want to try EasyRE Pro for yourself, you can download it from the official website: https://neosmart.net/EasyRE/. You can also watch a demo video of how it works here: https://www.youtube.com/watch?v=0QIg9k50T-w.</p>
- <p>We hope this article has given you a clear overview of what EasyRE Pro is and how to use it. If you have any feedback or questions, please feel free to leave a comment below or contact us via email.</p>
- # FAQs <ul>
- <li>Q: Can I use EasyRE on more than one PC?</li>
- <li>A: Yes, you can use EasyRE on any number of PCs with the same Windows version as long as you have a valid license. You can also purchase a technicians' edition that supports all Windows versions.</li>
- <li>Q: How long does it take to repair my PC with EasyRE?</li>
- <li>A: The repair time depends on various factors such as your PC condition, configuration, and hardware. Generally speaking, it takes about 10 to 15 minutes to scan and fix your PC with EasyRE.</li>
- <li>Q: What if EasyRE cannot fix my PC?</li>
- <li>A: If EasyRE cannot fix your PC after trying different options and methods, you can contact the support team for help via email at [email protected]. They will try their best to assist you with troubleshooting your PC problems.</li>
- <li>Q: How can I update EasyRE to the latest version?</li>
- <li>A: You can update EasyRE by downloading the latest version from the official website: https://neosmart.net/EasyRE/. You will need to burn it to a new CD or USB drive and use it on your PC.</li>
- <li>Q: How can I get a refund for EasyRE?</li>
- <li>A: If you are not satisfied with EasyRE for any reason, you can request a refund within 30 days of purchase by contacting the support team via email at [email protected]. They will process your refund as soon as possible.</li>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/7 Days To Die Crack Download PC Free Steam Key [Updated] !!EXCLUSIVE!!.md DELETED
@@ -1,6 +0,0 @@
- <h2>7 Days To Die Crack Download PC Free Steam Key [Updated]</h2><br /><p><b><b>Download Zip</b> &#8250;&#8250;&#8250;&#8250;&#8250; <a href="https://imgfil.com/2uxX7P">https://imgfil.com/2uxX7P</a></b></p><br /><br />
-
- 7 Days To Die ARK Arma 3 Atlas Blackwake Conan Exiles Counter ... Free, secure and Rust Code Lock Cracker Coupons, Promo Codes ... back fo r Rust Codelock Hack because we update all the latest Jan 02, ... 0: 19th June 2004 07:16 PM Download the new hack for Robocraft! ... Rust, cracked color. 1fdad05405<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Borang Nikah Selangor Pdf BETTER Download.md DELETED
@@ -1,40 +0,0 @@
- <h2>Borang Nikah Selangor Pdf Download</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733; <a href="https://imgfil.com/2uy155">https://imgfil.com/2uy155</a></b></p><br /><br />
-
- iphone, nikah selangor pdf iphone indonesiaStake It or Pull It? Holds Sign Stirs Debate
-
- As The Washington Times reported last week, a group of pro-gun activists in Ohio staged an event at a Cincinnati shopping mall called "Stake It or Pull It?"
-
- The gathering included weapons ranging from a.22-caliber pistol to a semi-automatic rifle and a shotgun.
-
- "A lot of people look at this as just something to be seen," said Carl Cain, 50, a retired electrical engineer, as he loaded a.357-caliber revolver into a suitcase.
-
- "For my generation, it was a rite of passage," Mr. Cain, who has fired his gun several times, added. "I grew up with a rifle in my hands."
-
- The phrase "stake it or pull it" has been used for years in a line from an old country song:
-
- My daddy said that you ought to live and let live;
-
- But if you don't, and you get caught,
-
- You'll have to stake it or pull it.
-
- The phrase has been employed in political rhetoric, sometimes as a taunt to a perceived adversary.
-
- One example was a 1992 speech given by then-President Bill Clinton, in which he said, "You know, some people stake it, or pull it, or cheat it out of you."
-
- Stake It or Pull It?
-
- The phrase has been applied in other ways.
-
- "Stake it or pull it" is used in a line in the 1967 British film, "The Knack... And How to Get It," with a scene of a man standing over a corpse after he shoots it in the head.
-
- And a magazine published in the mid-1980s by the National Rifle Association had a picture of a man with a drawn gun over the headline "Stake It or Pull It? Should the 9th Commandment Apply to Firearms?"
-
- The display of the guns in Ohio, along with posters encouraging attendees to "Be Armed" and "Stake it or Pull It?" were organized by the Second Amendment Defense organization, founded by Alan Gottlieb, who has said that the organization aims to legalize "the manufacture and lawful ownership of fully automatic weapons."
-
- The group did not respond to repeated requests for comment.
-
- Mr. Gottlieb recently said on his radio program that the gun activists were 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bleach VS Naruto 3.8 - The Best Anime Fighting Game for PC and Mac with Online Multiplayer.md DELETED
@@ -1,151 +0,0 @@
-
- <h1>Bleach vs Naruto APK 3.8: A Review of the Latest Sports Version of the Anime Fighting Game</h1>
- <p>If you are a fan of anime and fighting games, you might have heard of <strong>Bleach vs Naruto</strong>, a popular fan-made game that features characters from two famous anime series, <em>Bleach</em> and <em>Naruto</em>. The game has been updated regularly with new versions, adding more characters, stages, modes, and features.</p>
- <p>In this article, we will review the latest sports version of the game, <strong>Bleach vs Naruto APK 3.8</strong>, which was released in September 2022. We will cover what this version is, what are its features, how to download and install it on PC and Mac, how to play it, what are its pros and cons, and where to find more information about it.</p>
- <h2>bleach vs naruto apk 3.8</h2><br /><p><b><b>DOWNLOAD</b> &#10001; &#10001; &#10001; <a href="https://urlin.us/2uT25D">https://urlin.us/2uT25D</a></b></p><br /><br />
- <h2>Introduction</h2>
- <h3>What is Bleach vs Naruto APK 3.8?</h3>
- <p><strong>Bleach vs Naruto APK 3.8</strong> is a sports version of the original <strong>Bleach vs Naruto</strong> game, which is a fan-made fighting game based on the <em>M.U.G.E.N</em> engine. The game allows you to choose from over 90 characters and 50 assists from both <em>Bleach</em> and <em>Naruto</em> anime series, as well as some other anime and manga franchises, such as <em>One Piece</em>, <em>Fairy Tail</em>, <em>Hunter x Hunter</em>, <em>Dragon Ball</em>, and more.</p>
- <p>The sports version is made by the original author team of <strong>Bleach vs Naruto</strong>, with contributions from some modders. The sports version is different from the official version in that it has more balanced characters and assists, more game modes, more features, bug fixes, and optimizations. The sports version is also used in matches in China instead of the official version, because it is more competitive and fair.</p>
- <h3>What are the features of Bleach vs Naruto APK 3.8?</h3>
- <p>Some of the features of <strong>Bleach vs Naruto APK 3.8</strong> are:</p>
- <ul>
- <li>New game modes, such as Team Battle, Survival, Training, Watch Mode, and more.</li>
- <li>New characters and assists, such as Ichigo Kurosaki (Bankai), Sasuke Uchiha (Rinnegan), Monkey D. Luffy (Gear Fourth), Natsu Dragneel (Dragon Force), Gon Freecss (Adult), Vegeta (Super Saiyan Blue), and more.</li>
- <li>New stages, such as Soul Society , Konoha Village, Marineford, Tartaros, Dark Continent, Tournament of Power, and more.</li>
- <li>New music and sound effects, such as the original anime soundtracks and voice clips.</li>
- <li>New graphics and animations, such as the improved character sprites and effects.</li>
- <li>New options and settings, such as the difficulty level, the time limit, the life gauge, the damage ratio, the combo system, the screen resolution, the keyboard configuration, and more.</li>
- </ul>
- <h2>Gameplay</h2>
- <h3>How to download and install Bleach vs Naruto APK 3.8 on PC and Mac?</h3>
- <p>To download and install <strong>Bleach vs Naruto APK 3.8</strong> on PC and Mac, you need to follow these steps:</p>
- <ol>
- <li>Download the <strong>Bleach vs Naruto APK 3.8</strong> file from the official website or from a trusted source. The file size is about 1.5 GB.</li>
- <li>Extract the file using a file extractor program, such as WinRAR or 7-Zip. You will get a folder named <em>Bleach vs Naruto 3.8</em>.</li>
- <li>Open the folder and double-click on the <em>Bvn.exe</em> file to launch the game.</li>
- <li>Enjoy playing <strong>Bleach vs Naruto APK 3.8</strong> on your PC or Mac.</li>
- </ol>
- <h3>How to play Bleach vs Naruto APK 3.8 on PC and Mac?</h3>
- <p>To play <strong>Bleach vs Naruto APK 3.8</strong> on PC and Mac, you need to know the basic controls and commands of the game. Here are some of them:</p>
- <table>
- <tr><th>Key</th><th>Function</th></tr>
- <tr><td>O</td><td>Select/Confirm/Attack</td></tr>
- <tr><td>P</td><td>Back/Cancel/Defend</td></tr>
- <tr><td>K</td><td>Jump</td></tr>
- <tr><td>L</td><td>Dash/Sprint</td></tr>
- <tr><td>I</td><td>Assist/Tag</td></tr>
- <tr><td>U</td><td>Skill 1/Transformation</td></tr>
- <tr><td>J</td><td>Skill 2/Special Attack</td></tr>
- <tr><td>A/D/S/W</td><td>Move Left/Right/Down/Up</td></tr>
- <tr><td>F1/F2/F12</td><td>Pause/Restart/Quit Game</td></tr>
- <tr><td>F11/F10/F9/F8/F7/F6/F5/F4/F3/F2/F1/Esc/Q/W/A/S/D/Z/X/C/V/B/N/M/,/.//Shift/Ctrl/Alt/Spacebar/Enter/Backspace/Delete/Home/End/Page Up/Page Down/Up Arrow/Down Arrow/Left Arrow/Right Arrow/Num Lock/*/-/+//0-9/Numpad 0-Numpad 9/Numpad /-Numpad +/Numpad Enter/Numpad . </td><td>Customizable Keys (You can change them in the Options menu)</td></tr>
- </table>
- <h3>What are the game modes and characters in Bleach vs Naruto APK 3.8?</h3>
- <p><strong>Bleach vs Naruto APK 3.8</strong> has several game modes that you can choose from, such as:</p>
- <ul>
- <li><strong>Single Player Mode:</strong> You can play against the computer in various difficulty levels, from easy to nightmare. You can also choose from different sub-modes, such as Arcade, Versus, Survival, Training, Watch Mode, and more.</li>
- <li><strong>Multiplayer Mode:</strong> You can play with or against other players online or offline. You can either join a room or create your own room with your own rules and settings. You can also chat with other players in the lobby or in the game.</li>
- <li><strong>Tournament Mode:</strong> You can participate in a tournament with up to 16 players. You can either join an existing tournament or create your own tournament with your own rules and settings. You can also watch other players' matches in the spectator mode.</li>
- <li><strong>Mission Mode:</strong> You can complete various missions with different objectives and challenges. You can also unlock new characters and assists by completing certain missions.</li>
- <li><strong>Achievement Mode:</strong> You can earn achievements by performing various feats and tasks in the game. You can also view your statistics and records in this mode.</li>
- <li><strong>Cheat Mode:</ Nami, Sanji, Nico Robin, Franky, Brook, Tony Tony Chopper, Natsu Dragneel, Lucy Heartfilia, Gray Fullbuster, Erza Scarlet, Happy, Wendy Marvell, Carla, Gajeel Redfox, Levy McGarden, Laxus Dreyar, Gon Freecss, Killua Zoldyck, Kurapika, Leorio Paradinight, Hisoka Morow, Chrollo Lucilfer, Illumi Zoldyck, Vegeta, Son Goku, Son Gohan, Piccolo, Krillin, Trunks, Frieza, Cell, Majin Buu, and more.</li>
- <li><strong>Assists:</strong> Kon (Bleach), Zabimaru (Bleach), Nemu Kurotsuchi (Bleach), Shino Aburame (Naruto), Kiba Inuzuka (Naruto), Akamaru (Naruto), Sai (Naruto), Yamato (Naruto), Jiraiya (Naruto), Tsunade (Naruto), Orochimaru (Naruto), Kabuto Yakushi (Naruto), Usopp (One Piece), Boa Hancock (One Piece), Trafalgar Law (One Piece), Sabo (One Piece), Portgas D. Ace (One Piece), Marco (One Piece), Juvia Lockser (Fairy Tail), Mirajane Strauss (Fairy Tail), Gildarts Clive (Fairy Tail), Makarov Dreyar (Fairy Tail), Zeref Dragneel (Fairy Tail), Biscuit Krueger (Hunter x Hunter), Kite (Hunter x Hunter), Knuckle Bine (Hunter x Hunter), Shoot McMahon (Hunter x Hunter), Morel Mackernasey (Hunter x Hunter), Bulma (Dragon Ball), Chi-Chi (Dragon Ball), Android 18 (Dragon Ball), Master Roshi (Dragon Ball), Beerus (Dragon Ball), Whis (Dragon Ball), and more.</li>
- </ul>
- <h2>Pros and Cons</h2>
- <h3>What are the advantages of Bleach vs Naruto APK 3.8?</h3>
- <p>Some of the advantages of <strong>Bleach vs Naruto APK 3.8</strong> are:</p>
- <p>bleach vs naruto android game download<br />
- bleach vs naruto apk mod<br />
- bleach vs naruto apk offline<br />
- bleach vs naruto apk latest version<br />
- bleach vs naruto apk 2023<br />
- bleach vs naruto apk free download<br />
- bleach vs naruto apk full version<br />
- bleach vs naruto apk no ads<br />
- bleach vs naruto apk unlimited money<br />
- bleach vs naruto apk update<br />
- bleach vs naruto game online<br />
- bleach vs naruto game for pc<br />
- bleach vs naruto game play store<br />
- bleach vs naruto game review<br />
- bleach vs naruto game cheats<br />
- bleach vs naruto game tips<br />
- bleach vs naruto game guide<br />
- bleach vs naruto game hack<br />
- bleach vs naruto game best characters<br />
- bleach vs naruto game modes<br />
- bleach vs naruto 3.8 download for android<br />
- bleach vs naruto 3.8 apk obb<br />
- bleach vs naruto 3.8 apk data<br />
- bleach vs naruto 3.8 apk revdl<br />
- bleach vs naruto 3.8 apk pure<br />
- bleach vs naruto 3.8 apk mirror<br />
- bleach vs naruto 3.8 apk uptodown<br />
- bleach vs naruto 3.8 apk rexdl<br />
- bleach vs naruto 3.8 apk mob.org<br />
- bleach vs naruto 3.8 apk apkpure.com<br />
- how to install bleach vs naruto apk on android<br />
- how to play bleach vs naruto apk on android<br />
- how to update bleach vs naruto apk on android<br />
- how to uninstall bleach vs naruto apk on android<br />
- how to fix bleach vs naruto apk not working on android<br />
- how to unlock all characters in bleach vs naruto apk on android<br />
- how to use special moves in bleach vs naruto apk on android<br />
- how to change language in bleach vs naruto apk on android<br />
- how to customize controls in bleach vs naruto apk on android<br />
- how to connect controller in bleach vs naruto apk on android<br />
- best settings for bleach vs naruto apk on android<br />
- best team for bleach vs naruto apk on android<br />
- best strategy for bleach vs naruto apk on android<br />
- best skills for bleach vs naruto apk on android<br />
- best combos for bleach vs naruto apk on android<br />
- best maps for bleach vs naruto apk on android<br />
- best difficulty for bleach vs naruto apk on android<br />
- best graphics for bleach vs naruto apk on android<br />
- best sound for bleach vs naruto apk on android</p>
- <ul>
- <li>It is free to download and play.</li>
- <li>It has a large roster of characters and assists from various anime and manga franchises.</li>
- <li>It has a variety of game modes and options to suit different preferences and play styles.</li>
- <li>It has a smooth and fast gameplay with responsive controls and commands.</li>
- <li>It has a high-quality graphics and sound with detailed animations and effects.</li>
- <li>It has a balanced and competitive gameplay with no overpowered or underpowered characters or assists.</li>
- <li>It has a loyal and active fan base that supports the game and its development.</li>
- </ul>
- <h3>What are the disadvantages of Bleach vs Naruto APK 3.8?</h3>
- <p>Some of the disadvantages of <strong>Bleach vs Naruto APK 3.8</strong> are:</p>
- <ul>
- <li>It is not an official game and it is not endorsed by the original creators of the anime and manga series.</li>
- <li>It may have some bugs and glitches that affect the gameplay or the performance.</li>
- <li>It may have some compatibility issues with some devices or operating systems.</li>
- <li>It may have some language barriers or cultural differences that make it hard to understand or enjoy for some players.</li>
- <li>It may have some legal or ethical issues that make it risky or controversial to play or distribute.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>Is Bleach vs Naruto APK 3.8 worth playing?</h3>
- <p>In conclusion, <strong>Bleach vs Naruto APK 3.8</strong> is a fun and exciting game that offers a lot of entertainment and challenge for anime and fighting game fans. It is a fan-made game that pays tribute to two of the most popular anime series in the world, <em>Bleach</em> and <em>Naruto</em>, as well as other anime and manga franchises. It has a large roster of characters and assists, a variety of game modes and options, a smooth and fast gameplay, a high-quality graphics and sound, a balanced and competitive gameplay, and a loyal and active fan base. It is free to download and play on PC and Mac.</p>
- <p>However, it is not an official game and it is not endorsed by the original creators of the anime and manga series. It may have some bugs and glitches that affect the gameplay or the performance. It may have some compatibility issues with some devices or operating systems. It may have some language barriers or cultural differences that make it hard to understand or enjoy for some players. It may have some legal or ethical issues that make it risky or controversial to play or distribute.</p>
- <p>Therefore, whether <strong>Bleach vs Naruto APK 3.8</strong> is worth playing or not depends on your personal preference and situation. If you are a fan of anime and fighting games, and you don't mind the drawbacks of the game, you might find it enjoyable and satisfying. However, if you are looking for an official and authentic game, or you have concerns about the quality or the legality of the game, you might want to avoid it or look for alternatives.</p>
- <h3>Where can I find more information about Bleach vs Naruto APK 3.8?</h3>
- <p>If you want to find more information about <strong>Bleach vs Naruto APK 3.8</strong>, you can visit the following sources:</p>
- <ul>
- <li>The official website of <strong>Bleach vs Naruto</strong>, where you can download the latest version of the game, as well as other versions and mods. You can also find news, updates, tutorials, videos, and more. The website is in Chinese, but you can use a translator tool to read it in English or other languages. The website is: <a href="">http://bvn.mugenchina.com/</a>.</li>
- <li>The official YouTube channel of <strong>Bleach vs Naruto</strong>, where you can watch gameplay videos, trailers, previews, reviews, and more. You can also subscribe to the channel and get notifications when new videos are uploaded. The channel is: <a href="">https://www.youtube.com/channel/UC4x1Z0w7j9WQq4fFyVZ2z6w</a>.</li>
- <li>The official Facebook page of <strong>Bleach vs Naruto</strong>, where you can follow the latest news, updates, events, and more. You can also interact with other fans and players, share your opinions and feedback, and ask questions. The page is: <a href="">https://www.facebook.com/Bleach-vs-Naruto-1502399393365639/</a>.</li>
- <li>The official Discord server of <strong>Bleach vs Naruto</strong>, where you can chat with other fans and players, join voice channels, play games together, and more. You can also get support and help from the moderators and admins. The server is: <a href="">https://discord.gg/6YnYXNk</a>.</li>
- <li>The official Reddit community of <strong>Bleach vs Naruto</strong>, where you can post and comment about anything related to the game. You can also find tips, guides, memes, fan art, and more. The community is: <a href="">https://www.reddit.com/r/BleachVsNaruto/</a>.</li>
- </ul>
- <h2>FAQs</h2>
- <h4>Q1: Is Bleach vs Naruto APK 3.8 available for Android?</h4>
- <p>A1: Yes, <strong>Bleach vs Naruto APK 3.8</strong> is available for Android devices. You can download the APK file from the official website or from a trusted source. However, you need to enable the installation of unknown sources in your device settings before installing the APK file.</p>
- <h4>Q2: Who is the author of the sports version of Bleach vs Naruto APK 3.8?</h4>
- <p>A2: The author of the sports version of <strong>Bleach vs Naruto APK 3.8</strong> is <em>5Dplay</em>, which is also the original author team of <strong>Bleach vs Naruto</strong>. They are a group of Chinese developers who create fan-made games based on anime and manga series.</p>
- <h4>Q3: What is the difference between the sports version and the official version of Bleach vs Naruto?</h4>
- <p>A3: The difference between the sports version and the official version of <strong>Bleach vs Naruto</strong> is that the sports version has more balanced characters and assists, more game modes, more features, bug fixes, and optimizations. The sports version is also used in matches in China instead of the official version, because it is more competitive and fair.</p>
- <h4>Q4: How to fix the big buttons error in Bleach vs Naruto APK 3.8?</h4>
- <p>A4: The big buttons error in <strong>Bleach vs Naruto APK 3.8</strong> is a common problem that occurs when the game is not compatible with some devices or operating systems. To fix this error, you need to do the following steps: - Go to the Options menu in the game and select the Screen option. - Change the screen resolution to a lower value, such as 800x600 or 640x480. - Save the changes and restart the game. This should solve the big buttons error and make the game run normally on your device.</p>
- <h4>Q5: How to learn all the moves and combos in Bleach vs Naruto APK 3.8?</h4>
- <p>A5: To learn all the moves and combos in <strong>Bleach vs Naruto APK 3.8</strong>, you need to practice and experiment with different characters and assists. You can also use the Training mode in the game to practice your skills and learn new techniques. You can also watch gameplay videos and tutorials online to get some tips and tricks from other players.</p> 197e85843d<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Car For Sale Simulator 2023 APK A Realistic and Fun Car Simulation Game.md DELETED
@@ -1,115 +0,0 @@
1
-
2
- <h1>Car For Sale Simulator 2023: A Game for Car Lovers</h1>
3
- <p>If you are a fan of cars and simulation games, you might want to check out Car For Sale Simulator 2023, a game that lets you buy, repair, customize, and sell cars in a realistic and fun way. In this game, you can explore different locations, find various car models, negotiate with sellers, fix and upgrade your vehicles, and make a profit by selling them. You can also manage your own garage and workshop, expand your business, and become the best car dealer in town. In this article, we will tell you more about this game, its features, and how to download and install it on your Android device.</p>
4
- <h2>Introduction</h2>
5
- <h3>What is Car For Sale Simulator 2023?</h3>
6
- <p>Car For Sale Simulator 2023 is a car simulation game developed by Digital Melody Games. It is available for Android devices and can be downloaded from the Google Play Store or from other sources as an APK file. The game was released in August 2021 and has received positive reviews from players and critics. The game has over 500,000 downloads and a rating of 4.1 out of 5 stars on the Google Play Store.</p>
7
- <h2>car for sale simulator 2023 apk indir</h2><br /><p><b><b>DOWNLOAD</b> &#128504;&#128504;&#128504; <a href="https://urlin.us/2uSZpC">https://urlin.us/2uSZpC</a></b></p><br /><br />
8
- <h3>Why should you play Car For Sale Simulator 2023?</h3>
9
- <p>Car For Sale Simulator 2023 is a game that will appeal to anyone who loves cars and wants to experience the thrill of buying, repairing, and selling them. The game offers a realistic and immersive gameplay that will make you feel like a real car mechanic and dealer. You can choose from over 50 different car models and countless combinations of customization and modification options. You can also interact with various characters, such as sellers, buyers, experts, bankers, and tax officers. You can also enjoy the stunning graphics, sound effects, and music that enhance the atmosphere of the game.</p>
10
- <h2>Features of Car For Sale Simulator 2023</h2>
11
- <h3>Buy, repair, and sell cars</h3>
12
- <p>The main feature of Car For Sale Simulator 2023 is the ability to buy, repair, and sell cars. You can explore different locations, such as car markets, neighborhoods, junkyards, and auctions, where you can find various car models for sale. You can compare prices, negotiate with sellers, or even haggle for a better deal. You can also inspect the cars for any damages or defects before buying them.</p>
13
- <h4>How to buy cars</h4>
14
- <p>To buy a car in Car For Sale Simulator 2023, you need to have enough money in your account. You can earn money by selling cars or by taking loans from the bank. You can also get discounts by using coupons or by completing achievements. Once you have enough money, you can go to any location where cars are sold and select the car you want to buy. You can then check the details of the car, such as its model, year, mileage, condition, price, and seller's rating. You can also use the expert system to get a more accurate evaluation of the car's value and condition. If you are satisfied with the car, you can click on the buy button and confirm your purchase.</p>
15
- <h4>How to repair cars</h4>
16
- <p>After buying a car in Car For Sale Simulator 2023, you need to repair it if it has any damages or defects. You can do this by going to your garage or workshop and select the car you want to repair. You can then use various tools and parts to fix the car's body, engine, suspension, brakes, tires, and interior. You can also use the diagnostic system to detect any hidden problems or faults in the car. You can see the progress of your repair by checking the car's condition bar and the repair cost. You can also test drive the car to see how it performs on the road. Once you are done with the repair, you can click on the finish button and save your changes.</p>
17
- <h4>How to sell cars</h4>
18
- <p>When you have a car that is ready to be sold in Car For Sale Simulator 2023, you can go to the showroom and select the car you want to sell. You can then see the details of the car, such as its model, year, mileage, condition, price, and profit. You can also use the marketing system to increase the demand and value of your car by advertising it on various platforms, such as newspapers, magazines, radio, TV, or online. You can also use the pricing system to adjust the price of your car according to the market trends and your desired profit margin. Once you have set the price of your car, you can wait for potential buyers to contact you. You can then negotiate with them or accept their offers. You can also reject their offers if they are too low or if you change your mind. Once you agree on a deal, you can click on the sell button and confirm your sale.</p>
19
- <h3>Customize and upgrade cars</h3>
20
- <p>Another feature of Car For Sale Simulator 2023 is the ability to customize and upgrade cars. You can do this by going to your garage or workshop and selecting the car you want to modify. You can then use various options and accessories to change the appearance and performance of your car. You can customize your car's color, paint, decals, wheels, tires, windows, lights, spoilers, bumpers, hoods, grills, mirrors, exhausts, and license plates. You can also upgrade your car's engine, transmission, turbo, nitro, suspension, brakes, steering, battery, and fuel tank.</p>
70
- <h4>How to customize cars</h4>
71
- <p>To customize a car in Car For Sale Simulator 2023, you again need enough money in your account, which you can earn from car sales, bank loans, coupons, or achievements. Go to your garage or workshop, select the car you want to customize, click on the customize button, and choose from the options and accessories available for your car model. You can preview the result by checking the car's appearance bar and the customization cost, and test drive the car to see how it looks on the road. Once you are done, click on the finish button and save your changes.</p>
72
- <h4>How to upgrade cars</h4>
73
- <p>Upgrading works the same way: with enough money in your account, go to your garage or workshop, select the car you want to upgrade, click on the upgrade button, and choose from the options available for your car model. You can preview the result by checking the car's performance bar and the upgrade cost, and test drive the car to see how it performs on the road. Once you are done, click on the finish button and save your changes.</p>
74
- <h3>Manage your garage and workshop</h3>
75
- <p>The last feature of Car For Sale Simulator 2023 is the ability to manage your own garage and workshop. You can do this by going to your garage or workshop and selecting the manage button. You can then use various options to expand your business and monitor your finances. You can expand your garage by buying more space, equipment, tools, and staff. You can also monitor your finances by checking your income, expenses, taxes, loans, and balance. You can also use the statistics system to track your progress and achievements in the game.</p>
76
- <h4>How to expand your garage</h4>
77
- <p>To expand your garage in Car For Sale Simulator 2023, you also need enough money in your account. Go to your garage or workshop, select the manage button, click on the expand button, and choose from the options available for your garage. You can buy more space, equipment, tools, and staff that will help you repair and customize more cars faster and better. You can preview the expansion by checking the garage's capacity bar and the expansion cost. Once you are done, click on the finish button and save your changes.</p>
78
- <h4>How to monitor your finances</h4>
79
- <p>To monitor your finances in Car For Sale Simulator 2023, you need to go to your garage or workshop and select the manage button. You can then click on the finance button and see various information about your income, expenses, taxes, loans, and balance. You can also use the graph system to see the trends and fluctuations of your finances over time. You can also use the report system to see a detailed breakdown of your finances by category, such as car sales, car purchases, car repairs, car customizations, car upgrades, garage expansions, marketing costs, staff salaries, bank interests, tax payments, etc. You can also use the budget system to set a limit for your spending and saving goals.</p>
80
- <h2>How to download and install Car For Sale Simulator 2023 APK</h2>
81
- <h3>Requirements for downloading and installing Car For Sale Simulator 2023 APK</h3>
82
- <p>If you want to download and install Car For Sale Simulator 2023 APK on your Android device, you need to meet some requirements first. These are:</p>
83
- <ul>
84
- <li>Your Android device must have an operating system of version 5.0 or higher.</li>
85
- <li>Your Android device must have at least 2 GB of RAM and 500 MB of free storage space.</li>
86
- <li>Your Android device must have a stable internet connection.</li>
87
- <li>Your Android device must allow installation of apps from unknown sources. You can enable this option by going to Settings > Security > Unknown Sources and turning it on.</li>
88
- </ul>
89
- <h3>Steps for downloading and installing Car For Sale Simulator 2023 APK</h3>
90
- <p>Once you have met the requirements for downloading and installing Car For Sale Simulator 2023 APK on your Android device, you can follow these steps:</p>
91
- <ol>
92
- <li>Go to a trusted website that provides Car For Sale Simulator 2023 APK file for download. For example, you can go to [this link] where you can find the latest version of Car For Sale Simulator 2023 APK file.</li>
93
- <li>Click on the download button and wait for the APK file to be downloaded on your Android device.</li>
94
- <li>Locate the downloaded APK file on your Android device using a file manager app or by going to Downloads folder.</li>
95
- <li>Tap on the APK file and follow the instructions on the screen to install Car For Sale Simulator 2023 APK on your Android device.</li>
96
- <li>Launch Car For Sale Simulator 2023 APK from your app drawer or home screen and enjoy playing it.</li>
97
- </ol>
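- <p>If you would rather sideload from a computer than tap the file on the device, the same install can be scripted. The sketch below is illustrative only: it assumes Android platform-tools (adb) are on your PATH, USB debugging is enabled on the device, and the APK filename is hypothetical.</p>
- <pre><code>import subprocess
- from pathlib import Path
-
- apk = Path("car-for-sale-simulator-2023.apk")  # hypothetical filename
- if not apk.is_file():
-     raise SystemExit(f"APK not found: {apk}")
-
- # 'adb install -r' installs (or reinstalls) the package on the connected,
- # USB-debugging-enabled device, and fails loudly if no device is attached.
- subprocess.run(["adb", "install", "-r", str(apk)], check=True)
- </code></pre>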
98
- <h2>Conclusion</h2>
99
- <p>Car For Sale Simulator 2023 is a game that will satisfy your passion for cars and simulation games. It is a game that will let you buy, repair, customize, and sell cars in a realistic and fun way. It is a game that will let you manage your own garage and workshop, expand your business, and become the best car dealer in town. It is a game that will challenge your skills and knowledge as a car mechanic and dealer. It is a game that will provide you with hours of entertainment and enjoyment. If you are interested in playing this game, you can download and install Car For Sale Simulator 2023 APK on your Android device by following the steps we have provided in this article. We hope you have found this article helpful and informative. Thank you for reading and happy gaming!</p>
100
- <h2>FAQs</h2>
101
- <p>Here are some frequently asked questions about Car For Sale Simulator 2023 APK:</p>
102
- <ul>
103
- <li><b>Is Car For Sale Simulator 2023 APK safe to download and install?</b></li>
104
- <p>Yes, Car For Sale Simulator 2023 APK is safe to download and install as long as you get it from a trusted website that provides the original and unmodified APK file. You should also scan the APK file with an antivirus app before installing it on your Android device.</p>
105
- <li><b>Is Car For Sale Simulator 2023 APK free to play?</b></li>
106
- <p>Yes, Car For Sale Simulator 2023 APK is free to play, but it may contain some in-app purchases and ads that can enhance your gaming experience or support the developers.</p>
107
- <li><b>How can I update Car For Sale Simulator 2023 APK?</b></li>
108
- <p>You can update Car For Sale Simulator 2023 APK by downloading and installing the latest version of the APK file from the same website where you got the previous version. You can also check for updates within the game by going to Settings > About > Check for Updates.</p>
109
- <li><b>How can I contact the developers of Car For Sale Simulator 2023 APK?</b></li>
110
- <p>You can contact the developers of Car For Sale Simulator 2023 APK by sending them an email at [email protected] or by visiting their website at https://digitalmelodygames.com/.</p>
111
- <li><b>How can I share my feedback or suggestions for Car For Sale Simulator 2023 APK?</b></li>
112
- <p>You can share your feedback or suggestions for Car For Sale Simulator 2023 APK by leaving a review on the Google Play Store or by sending them an email at [email protected].</p>
113
- </ul>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cmo jugar a Dragon Z Quest Action RPG con mod apk gua para descargar y instalar.md DELETED
@@ -1,112 +0,0 @@
1
-
2
- <h1>Descargar Dragon Z Quest Action RPG Mod APK: A Guide for DB Anime Fans</h1>
3
- <p>If you are a fan of DB anime and you love platform action RPG games, then you should definitely try Dragon Z Quest Action RPG. This game is an epic adventure that lets you summon dozens of different ssj heroes and fight your way through various enemies and bosses. You can also explore a fantasy world and collect dragon star balls to rescue the world from dark forces. In this guide, we will show you how to download and install Dragon Z Quest Action RPG mod apk, which gives you unlimited resources, unlocks all heroes, removes ads, and works offline. We will also share some tips and tricks to help you master the game.</p>
4
- <h2>descargar dragon z quest action rpg mod apk</h2><br /><p><b><b>DOWNLOAD</b> &gt; <a href="https://urlin.us/2uSUPs">https://urlin.us/2uSUPs</a></b></p><br /><br />
5
- <h2>What is Dragon Z Quest Action RPG?</h2>
6
- <h3>A Platform Action RPG Game Based on DB Anime</h3>
7
- <p>Dragon Z Quest Action RPG is a game developed by DHZ SOFT, a studio that specializes in creating anime-inspired games. It is based on the popular DB anime series, which features characters with superhuman abilities and transformations. The game blends platform action with RPG mechanics: you control your heroes with simple inputs and perform actions such as jumping, running, flying, attacking, dodging, and using special skills. Its stunning 2D graphics capture the essence of the anime style.</p>
8
- <h3>Features of Dragon Z Quest Action RPG</h3>
9
- <p>Some of the features of Dragon Z Quest Action RPG are:</p>
10
- <ul>
11
- <li>You can summon from dozens of different ssj heroes, each with their own unique skills and abilities, and customize them with different outfits and accessories.</li>
12
- <li>You can fight your way through multiple gameplay styles, such as platform action, boss battles, survival mode, time attack mode, and more.</li>
13
- <li>You can explore a fantasy world with various locations, such as forests, deserts, mountains, volcanoes, islands, temples, and more.</li>
14
- <li>You can collect dragon star balls and use them to summon the dragon god and make your wishes come true.</li>
15
- <li>You can compete with other players in PvP mode and prove who has the best team in the world.</li>
16
- </ul>
17
- <h2>Why Download Dragon Z Quest Action RPG Mod APK?</h2>
18
- <h3>Unlimited Resources and Unlock All Heroes</h3>
19
- <p>One of the reasons why you should download Dragon Z Quest Action RPG mod apk is that it gives you unlimited resources, such as coins, gems, energy, and tickets. You can use these resources to buy items, upgrade your teams, summon more heroes, and more. You can also unlock all heroes in the game without spending any money or waiting for hours. You can enjoy playing with any hero you want and experiment with different combinations.</p>
20
- <h3>No Ads and No Internet Connection Required</h3>
21
- <p>Another reason why you should download Dragon Z Quest Action RPG mod apk is that it removes all ads from the game. You can play the game without any interruptions or distractions from annoying ads. You can also play the game offline without any internet connection required. You can play the game anytime and anywhere you want without worrying about data usage or network issues.</p>
22
- <h2>How to Download and Install Dragon Z Quest Action RPG Mod APK?</h2>
23
- <h3>Step 1: Download the Mod APK File from a Trusted Source</h3>
24
- <p>The first step to download and install Dragon Z Quest Action RPG mod apk is to download the mod apk file from a trusted source. You can find many websites that offer mod apk files for various games, but you need to be careful and choose a reliable one. Some websites may contain malware or viruses that can harm your device or steal your personal information. You can use the following link to download the Dragon Z Quest Action RPG mod apk file safely and securely: [Download Dragon Z Quest Action RPG Mod APK].</p>
73
- <h3>Step 2: Enable Unknown Sources on Your Device</h3>
74
- <p>The second step to install Dragon Z Quest Action RPG mod apk is to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the official Google Play Store. However, since you are installing a mod apk file, you need to enable this option temporarily. To do this, follow these steps:</p>
75
- <ol>
76
- <li>Go to your device's settings and look for the security or privacy option.</li>
77
- <li>Tap on it and find the unknown sources option.</li>
78
- <li>Toggle it on and confirm your choice.</li>
79
- </ol>
80
- <p>Once you have enabled unknown sources, you can proceed to the next step.</p>
81
- <h3>Step 3: Install the Mod APK File and Enjoy the Game</h3>
82
- <p>The third and final step to install Dragon Z Quest Action RPG mod apk is to install the mod apk file and enjoy the game. To do this, follow these steps:</p>
83
- <ol>
84
- <li>Locate the downloaded mod apk file on your device's storage. You can use a file manager app to help you with this.</li>
85
- <li>Tap on the file and follow the installation instructions on your screen.</li>
86
- <li>Wait for the installation to finish and launch the game from your app drawer or home screen.</li>
87
- </ol>
88
- <p>Congratulations! You have successfully installed Dragon Z Quest Action RPG mod apk on your device. You can now enjoy playing the game with unlimited resources, all heroes unlocked, no ads, and offline mode.</p>
89
- <h2>Tips and Tricks for Playing Dragon Z Quest Action RPG</h2>
90
- <h3>Choose the Right Heroes for Each Battle</h3>
91
- <p>One of the tips for playing Dragon Z Quest Action RPG is to choose the right heroes for each battle. The game has dozens of different ssj heroes, each with their own strengths and weaknesses. You can summon up to four heroes at a time and switch between them during the battle. You should choose heroes that complement each other and match the enemy's type and level. For example, if you are facing a fire-type enemy, you should use water-type heroes or heroes with high defense. You should also consider the terrain and obstacles in each stage and choose heroes that can overcome them easily.</p>
92
- <h3>Upgrade Your Teams and Items Regularly</h3>
93
- <p>Another tip for playing Dragon Z Quest Action RPG is to upgrade your teams and items regularly. The game has a system of leveling up, evolving, and enhancing your heroes and items. You can use coins, gems, energy, tickets, and other resources to improve your heroes' stats, skills, outfits, and accessories. You can also use items such as potions, scrolls, bombs, and more to boost your performance in battle. You should upgrade your teams and items as much as possible to increase your chances of winning and progressing in the game.</p>
94
- <h3>Use Special Skills and Combos Wisely</h3>
95
- <p>A final tip for playing Dragon Z Quest Action RPG is to use special skills and combos wisely. The game has a system of special skills and combos that allow you to unleash powerful attacks on your enemies. Each hero has their own special skill that can be activated by filling up a gauge during the battle. You can also perform combos by tapping on different buttons in sequence or by switching between heroes at the right time. You should use special skills and combos strategically to deal maximum damage, stun enemies, heal allies, or escape danger.</p>
96
- <h2>Conclusion</h2>
97
- <p>Dragon Z Quest Action RPG is a game that will appeal to fans of DB anime and platform action RPG games alike. The game has stunning graphics, addictive gameplay, diverse characters, and an engaging story. By downloading Dragon Z Quest Action RPG mod apk, you can enjoy the game with unlimited resources, all heroes unlocked, no ads, and offline mode. You can also follow our tips and tricks to master the game and become a legend in the world of Dragon Z Quest Action RPG.</p>
98
- <h2>FAQs</h2>
99
- <ul>
100
- <li><b>Q: Is Dragon Z Quest Action RPG mod apk safe to download?</b></li>
101
- <li>A: Yes, Dragon Z Quest Action RPG mod apk is safe to download as long as you use a trusted source like the one we provided in this guide. However, you should always scan any file you download with a reliable antivirus software before installing it on your device.</li>
102
- <li><b>Q: How can I get more resources in Dragon Z Quest Action RPG?</b></li>
103
- <li>A: You can get more resources in Dragon Z Quest Action RPG by playing the game regularly, completing missions and achievements, participating in events and tournaments, and watching ads. You can also use the Dragon Z Quest Action RPG mod apk to get unlimited resources without spending any money or time.</li>
104
- <li><b>Q: How can I unlock more heroes in Dragon Z Quest Action RPG?</b></li>
105
- <li>A: You can unlock more heroes in Dragon Z Quest Action RPG by summoning them with gems or tickets, which you can earn by playing the game or buying them with real money. You can also use the Dragon Z Quest Action RPG mod apk to unlock all heroes instantly without any cost or waiting.</li>
106
- <li><b>Q: How can I play Dragon Z Quest Action RPG offline?</b></li>
107
- <li>A: You can play Dragon Z Quest Action RPG offline by downloading the Dragon Z Quest Action RPG mod apk, which works without any internet connection required. You can also play the game offline by turning off your device's Wi-Fi or mobile data before launching the game. However, you may not be able to access some features or modes that require an online connection, such as PvP mode, events, tournaments, and updates.</li>
108
- <li><b>Q: How can I contact the developer of Dragon Z Quest Action RPG?</b></li>
109
- <li>A: You can contact the developer of Dragon Z Quest Action RPG by sending an email to [email protected] or by visiting their Facebook page at https://www.facebook.com/DHZSOFT. You can also leave a review or a comment on the Google Play Store or the App Store to share your feedback, suggestions, or questions.</li>
110
- </ul>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Abo Mvelo (feat. Mellow Amp Sleazy Amp M.j) Mixed __TOP__.md DELETED
@@ -1,67 +0,0 @@
2
- <h1>How to Download Abo Mvelo (feat. Mellow & Sleazy & M.J) Mixed</h1>
3
- <p>If you are a fan of South African amapiano music, you might have heard of the song Abo Mvelo by Daliwonga featuring Mellow, Sleazy, and M.J. This song is one of the top amapiano songs of 2022, with a catchy beat, smooth vocals, and rap-inspired lyrics. In this article, we will tell you everything you need to know about this song, why you should download it, and how to download it easily and legally.</p>
4
- <h2>What is Abo Mvelo?</h2>
5
- <p>Abo Mvelo is a song by Daliwonga, a South African singer and songwriter who specializes in amapiano, a genre of house music that originated in South Africa in the early 2010s. Amapiano is characterized by piano melodies, basslines, drums, and percussions, often mixed with vocals from other genres like jazz, soul, R&B, and kwaito.</p>
6
- <h2>download abo mvelo (feat. mellow amp; sleazy amp; m.j) mixed</h2><br /><p><b><b>Download</b> &raquo; <a href="https://urlin.us/2uSVuS">https://urlin.us/2uSVuS</a></b></p><br /><br />
7
- <h3>The origin and meaning of the song</h3>
8
- <p>The song Abo Mvelo was released on March 18, 2022, as part of Daliwonga's single album of the same name. The title of the song means "those who are alive" in Zulu, one of the official languages of South Africa. The song is a celebration of life and gratitude for being alive despite the challenges and hardships that people face. The song also encourages people to enjoy themselves and have fun with their friends and loved ones.</p>
9
- <h3>The artists behind the song</h3>
10
- <p>Daliwonga is not the only artist who contributed to the song. He collaborated with three other talented musicians: Mellow, Sleazy, and M.J. Mellow and Sleazy are a duo of producers and DJs who are known for their amapiano hits like 'Sjepa', 'Pele Pele', and 'Tabela Hape'. They have also worked with other famous artists like Focalistic, Kabza De Small, Mr JazziQ, and Young Stunna. M.J is a rapper and singer who adds some spice to the song with his fast-paced verses and witty wordplay.</p>
11
- <h3>The popularity and reception of the song</h3>
12
- <p>Abo Mvelo has been well-received by both fans and critics since its release. The song has amassed over 800 thousand views on YouTube, over 600 thousand streams on Spotify, and over 500 thousand Shazams. The song has also been featured on various playlists and charts on Apple Music, Deezer, Audiomack, and other music platforms. The song has been praised for its catchy melody, uplifting message, and impressive collaboration.</p>
13
- <h2>Why download Abo Mvelo?</h2>
14
- <p>Now that you know more about Abo Mvelo, you might be wondering why you should download it instead of just streaming it online. There are several reasons why downloading music is a good idea, especially if you love amapiano music.</p>
15
- <h3>The benefits of downloading music</h3>
16
- <p>Downloading music has many advantages over streaming music online. Some of them are:</p>
18
- <ul> <li>Offline access: Downloading music allows you to listen to your favorite songs anytime and anywhere, even without an internet connection. This is especially useful if you travel a lot, have a limited data plan, or live in an area with poor network coverage.</li>
19
- <li>Better quality: Downloading music also gives you the option to choose the format and quality of the audio file. You can download high-quality MP3, WAV, FLAC, or other formats that offer better sound quality than streaming. This can enhance your listening experience and make you appreciate the music more.</li>
20
- <li>More control: Downloading music also gives you more control over your music library. You can organize your songs by genre, artist, album, or mood. You can also create your own playlists, edit the metadata, and add cover art. You can also transfer your music to other devices, such as your phone, tablet, laptop, or USB drive.</li>
21
- </ul>
22
- <h3>The legal and ethical issues of downloading music</h3>
23
- <p>However, downloading music is not always a straightforward process. There are some legal and ethical issues that you need to be aware of before you download any song. Some of them are:</p>
24
- <ul>
25
- <li>Copyright infringement: Downloading music from unauthorized sources or without paying for it can be considered as piracy, which is illegal and punishable by law. Piracy can also harm the artists and the music industry by depriving them of their rightful income and recognition.</li>
26
- <li>Malware infection: Downloading music from untrustworthy websites or apps can also expose your device to malware, viruses, spyware, or ransomware. These malicious programs can damage your device, steal your personal information, or extort money from you.</li>
27
- <li>Poor quality: Downloading music from low-quality sources or with low-quality settings can also result in poor sound quality, distorted audio, missing parts, or corrupted files. These can ruin your listening experience and waste your time and storage space.</li>
28
- </ul>
29
- <h3>The best sources and platforms to download music</h3>
30
- <p>Therefore, it is important to download music from reliable and reputable sources and platforms that offer legal, safe, and high-quality downloads. Some of the best sources and platforms to download Abo Mvelo and other amapiano songs are:</p>
31
- <table>
32
- <tr><th>Source/Platform</th><th>Description</th><th>Price</th></tr>
33
- <tr><td>[Apple Music]</td><td>A popular music streaming service that also allows you to download songs for offline listening. It has a large catalog of songs from various genres and regions, including amapiano. It also has curated playlists, radio stations, and podcasts.</td><td>$9.99 per month for individual plan; $14.99 per month for family plan; $4.99 per month for student plan; free trial available</td></tr>
34
- <tr><td>[Spotify]</td><td>A leading music streaming service that also lets you download songs for offline listening. It has a huge library of songs from different genres and artists, including amapiano. It also has personalized recommendations, playlists, podcasts, and social features.</td><td>$9.99 per month for premium plan; $14.99 per month for family plan; $4.99 per month for student plan; free version available with ads and limited features</td></tr>
35
- <tr><td>[Deezer]</td><td>A global music streaming service that also enables you to download songs for offline listening. It has a diverse collection of songs from various genres and countries, including amapiano. It also has smart playlists, radio channels, podcasts, and lyrics.</td><td>$9.99 per month for premium plan; $14.99 per month for family plan; $4.99 per month for student plan; free version available with ads and limited features</td></tr>
36
- <tr><td>[Audiomack]</td><td>A free music streaming and downloading platform that focuses on emerging artists and genres. It has a lot of amapiano songs from South African artists like Daliwonga, Mellow, Sleazy, M.J., and others. It also has trending charts, playlists, and podcasts.</td><td>Free; optional subscription for $4.99 per month to remove ads and unlock additional features</td></tr>
37
- <tr><td>[Fakaza](https://fakaza.com/)</td><td>A South African website that specializes in amapiano and other local genres like kwaito, gqom, afro house, and hip hop. It has a lot of amapiano songs from various artists like Daliwonga, Mellow, Sleazy, M.J., and others. It also has news, videos, albums, and mixtapes.</td><td>Free; optional donation to support the website and the artists</td></tr>
39
- <h2>How to download Abo Mvelo?</h2>
40
- <p>Now that you know the best sources and platforms to download Abo Mvelo, you might be wondering how to actually download the song. The process is not very complicated, but it may vary depending on the source or platform you choose. Here are the general steps that you can follow to download Abo Mvelo:</p>
41
- <h3>Step 1: Choose a reliable and reputable website or app</h3>
42
- <p>The first step is to choose a website or app that offers legal, safe, and high-quality downloads of Abo Mvelo. You can use any of the sources or platforms that we mentioned above, or you can do your own research and find other options. Make sure that the website or app is trustworthy, secure, and has good reviews from other users. Avoid websites or apps that look suspicious, have pop-up ads, or ask for your personal or financial information.</p>
43
- <h3>Step 2: Search for the song by title, artist, or album</h3>
44
- <p>The next step is to search for the song by typing its title, artist, or album in the search bar of the website or app. You can also browse through the categories, genres, playlists, or charts to find the song. Once you find the song, click on it to see more details, such as the duration, release date, genre, and lyrics.</p>
45
- <h3>Step 3: Select the format and quality of the download</h3>
46
- <p>The third step is to select the format and quality of the download that you want. Depending on the website or app, you may have different options for the format and quality of the audio file. Some of the common formats are MP3, WAV, FLAC, and AAC. Some of the common qualities are 128 kbps, 192 kbps, 320 kbps, and lossless. The format and quality of the download will affect the sound quality, file size, and compatibility of the song. Generally, lossless formats and higher bitrates offer better sound quality, but at the cost of larger files and, in some cases, lower compatibility.</p>
47
- <h3>Step 4: Confirm the purchase or download for free</h3>
48
- <p>The fourth step is to confirm the purchase or download for free of the song. Depending on the website or app, you may have to pay a fee or subscribe to a plan to download the song. If so, you will have to enter your payment details and confirm your purchase. If not, you can simply click on the download button and start downloading the song for free. Some websites or apps may also ask you to create an account or sign in with your email or social media account before downloading.</p>
49
- <h3>Step 5: Enjoy the music offline or transfer it to other devices</h3>
50
- <p>The final step is to enjoy the music offline or transfer it to other devices. Once you have downloaded the song, you can listen to it offline on your device without an internet connection. You can also transfer it to other devices, such as your phone, tablet, laptop, or USB drive. To do so, you will need a USB cable or a wireless connection and follow the instructions of your device.</p>
51
- <h2>Conclusion</h2>
52
- <p>Abo Mvelo is a great song that deserves to be downloaded and enjoyed by all amapiano lovers. It is a song that celebrates life and encourages people to have fun with their friends and loved ones. It is also a song that showcases the talent and creativity of South African artists like Daliwonga, Mellow, Sleazy, and M.J. In this article, we have shown you what Abo Mvelo is, why you should download it, and how to download it easily and legally. We hope that this article has been helpful and informative for you. Now go ahead and download Abo Mvelo and enjoy its catchy beat, smooth vocals, and rap-inspired lyrics.</p>
53
- <h2>FAQs</h2>
54
- <ul>
55
- <li><b>What does Abo Mvelo mean?</b></li>
56
- <li>Abo Mvelo means "those who are alive" in Zulu.</li>
57
- <li><b>Who are the artists behind Abo Mvelo?</b></li>
58
- <li>Abo Mvelo is a song by Daliwonga featuring Mellow, Sleazy, and M.J.</li>
59
- <li><b>What genre is Abo Mvelo?</b></li>
60
- <li>Abo Mvelo is an amapiano song.</li>
61
- <li><b>Where can I download Abo Mvelo?</b></li>
62
- <li>You can download Abo Mvelo from various sources and platforms like Apple Music, Spotify, Deezer, Audiomack, or Fakaza.</li>
63
- <li><b>How much does it cost to download Abo Mvelo?</b></li>
64
- <li>It depends on the source or platform you choose. Some of them offer free downloads, while others require a fee or a subscription.</li>
65
- </ul>
 
spaces/1phancelerku/anime-remove-background/Facebook Messenger APK Old Version Features Benefits and Download Links.md DELETED
@@ -1,131 +0,0 @@
2
- <h1>How to Download Messenger Facebook APK Old Version</h1>
3
- <p>If you are a Facebook user who likes to chat with your friends and family on your mobile device, you might be familiar with Messenger Facebook APK. This is the official messaging app for Facebook users that allows you to send and receive text messages, voice calls, video calls, stickers, emojis, GIFs, and more. But what if you are not happy with the latest version of the app and want to go back to an older version? In this article, we will show you how to download Messenger Facebook APK old version and why you might need it.</p>
4
- <h2>What is Messenger Facebook APK?</h2>
5
- <h3>The official messaging app for Facebook users</h3>
6
- <p>Messenger Facebook APK is an Android application package (APK) file that contains the installation files for Messenger, the official messaging app for Facebook users. You can use Messenger to chat with your Facebook friends and contacts, as well as anyone who has your phone number. You can also create group chats, send money, play games, share your location, and access other apps and services within Messenger.</p>
7
- <h2>messenger facebook apk old version</h2><br /><p><b><b>Download File</b> &#10001; &#10001; &#10001; <a href="https://jinyurl.com/2uNOJm">https://jinyurl.com/2uNOJm</a></b></p><br /><br />
8
- <h3>The features and benefits of Messenger Facebook APK</h3>
9
- <p>Messenger Facebook APK has many features and benefits that make it a popular and convenient app for communication. Some of them are:</p>
10
- <ul>
11
- <li>You can chat with anyone on any device, even if they don't have a Facebook account.</li>
12
- <li>You can use end-to-end encryption for your messages and calls, ensuring your privacy and security.</li>
13
- <li>You can customize your chats with colors, themes, nicknames, and emojis.</li>
14
- <li>You can express yourself with stickers, GIFs, filters, masks, and effects.</li>
15
- <li>You can make free voice and video calls over Wi-Fi or cellular data.</li>
16
- <li>You can send and receive money securely with Facebook Pay.</li>
17
- <li>You can watch videos together with your friends using Watch Together.</li>
18
- <li>You can discover new apps and services within Messenger using App Center.</li>
19
- <li>You can access your Instagram messages and stories within Messenger using Accounts Center.</li>
20
- </ul>
21
- <h2>Why would you need Messenger Facebook APK old version?</h2>
22
- <h3>The compatibility issues with newer versions of the app</h3>
23
- <p>One of the reasons why you might need Messenger Facebook APK old version is that the newer versions of the app might not be compatible with your device or operating system. For example, if you have an older Android device or a device with low memory or storage space, you might not be able to run the latest version of Messenger smoothly or at all. In that case, you might want to download an older version of the app that works better with your device specifications.</p>
24
- <h3>The preference for older features and design of the app</h3>
25
- <p>Another reason why you might need Messenger Facebook APK old version is that you might prefer the older features and design of the app over the newer ones. For example, if you like the simplicity and functionality of the old interface, you might not like the changes and updates that Facebook has made over time, such as the new logo, the new chat bubbles, the new swipe gestures, or new features that you don't use or need. In that case, you might want to download an older version of the app that suits your taste and preferences better.</p>
26
- <h2>How to find and download Messenger Facebook APK old version?</h2>
27
- <h3>The sources and websites that offer old versions of the app</h3>
28
- <p>If you want to download Messenger Facebook APK old version, you need to find a reliable and trustworthy source that offers old versions of the app. You cannot download old versions of the app from the official Google Play Store or the Facebook website, as they only provide the latest version of the app. However, there are some websites and platforms that archive and host old versions of Android apps, including Messenger Facebook APK. Some of them are:</p>
29
- <ul>
30
- <li><a href="">APKMirror</a>: This is one of the most popular and reputable websites that offer old versions of Android apps. You can browse and download different versions of Messenger Facebook APK from this website, as well as check the release date, size, and signature of each version.</li>
31
- <li><a href="">Uptodown</a>: This is another well-known and trusted website that offer old versions of Android apps. You can find and download various versions of Messenger Facebook APK from this website, as well as read user reviews, ratings, and screenshots of each version.</li>
32
- <li><a href="">APKPure</a>: This is a third-party app store that offer old versions of Android apps. You can search and download different versions of Messenger Facebook APK from this platform, as well as see the changelog, permissions, and requirements of each version.</li>
33
- </ul>
34
- <h3>The steps and precautions to download and install the app</h3>
35
- <p>Once you have found a source and a version of Messenger Facebook APK that you want to download, you need to follow some steps and precautions to download and install the app on your device. Here are some of them:</p>
36
- <ol>
37
- <li>Before you download any APK file from a third-party source, you need to make sure that your device allows installation from unknown sources. To do this, go to your device settings, then security, then enable unknown sources.</li>
38
- <li>After you have enabled unknown sources, you can download the APK file from the source website or platform. You can either use your device browser or a file manager app to download the file.</li>
39
- <li>Once you have downloaded the APK file, you need to locate it on your device storage and tap on it to start the installation process. You might see some warnings or prompts asking for your permission to install the app. You need to grant those permissions and follow the instructions on the screen.</li>
40
- <li>After you have installed the app, you can launch it from your app drawer or home screen and sign in with your Facebook account or phone number. You can then enjoy using Messenger Facebook APK old version on your device.</li>
41
- </ol>
42
- <p>However, you also need to be aware of some risks and drawbacks of downloading and installing Messenger Facebook APK old version. Some of them are:</p>
92
- <ul>
93
- <li>You might not be able to access some features or functions that are only available on newer versions of the app.</li>
94
- <li>You might experience some bugs or glitches that have been fixed on newer versions of the app.</li>
95
- <li>You might expose your device and data to malware or viruses that might be embedded in some APK files from unverified sources.</li>
96
- <li>You might violate some terms and conditions of Facebook by using an outdated version of their app.</li>
97
- </ul>
98
- <h2>Conclusion</h2>
99
- <p>Messenger Facebook APK is a great app for communication with your Facebook friends and contacts, as well as anyone who has your phone number. However, if you are not satisfied with the latest version of the app and want to use an older version instead, you need to know how to find and download Messenger Facebook APK old version from a reliable source. You also need to follow some steps and precautions to install the app on your device safely and securely. However, you also need to be aware of some risks and drawbacks of using an outdated version of the app.</p>
100
- <p>If you found this article helpful, please share it with your friends and family who might be interested in downloading Messenger Facebook APK old version. Also, feel free to leave a comment below if you have any questions or feedback about this topic. Thank you for reading!</p>
101
- <h2>FAQs</h2>
102
- <h3>Q1: Is Messenger Facebook APK safe to download?</h3>
103
- <p>A1: It depends on where you download it from. If you download it from a reputable and trustworthy source that verifies and scans the APK files for malware and viruses, such as APKMirror, Uptodown, or APKPure, then it is safe to download. However, if you download it from an unknown or unverified source that might have tampered with or modified the APK files, then it is not safe to download. You should always check the source website, the file size, the file signature, and the user reviews before downloading any APK file from a third-party source.</p>
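- <p>As a rough illustration of the file check mentioned above, the sketch below compares a downloaded APK against a checksum published by the source. It assumes the source actually publishes a SHA-256 value; the filename and the placeholder hash are hypothetical.</p>
- <pre><code>import hashlib
- from pathlib import Path
-
- apk = Path("messenger-old-version.apk")  # hypothetical filename
- expected = "paste-the-published-sha256-here"  # from the source website
-
- # Hash in 1 MiB chunks so a large APK never has to fit in memory.
- digest = hashlib.sha256()
- with apk.open("rb") as f:
-     for chunk in iter(lambda: f.read(1 << 20), b""):
-         digest.update(chunk)
-
- print("OK" if digest.hexdigest() == expected else "Checksum mismatch!")
- </code></pre>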
104
- <h3>Q2: How can I update Messenger Facebook APK if I want to?</h3>
105
- <p>A2: If you want to update Messenger Facebook APK to the latest version, you have two options. One option is to uninstall the old version of the app from your device and then download and install the latest version of the app from the official Google Play Store or the Facebook website. Another option is to download and install the latest version of the app from a third-party source that offers updated versions of Messenger Facebook APK, such as APKMirror, Uptodown, or APKPure. However, you should be careful and cautious when downloading and installing any APK file from a third-party source, as there might be some risks and drawbacks involved.</p>
106
- <h3>Q3: What are the advantages and disadvantages of using Messenger Facebook APK old version?</h3>
107
- <p>A3: There are some advantages and disadvantages of using Messenger Facebook APK old version. Some of the advantages are:</p>
108
- <ul>
109
- <li>You can use an older version of the app that is compatible with your device or operating system.</li>
110
- <li>You can use an older version of the app that has features and design that you like or prefer.</li>
111
- <li>You can use an older version of the app that consumes less memory or storage space on your device.</li>
112
- </ul>
113
- <p>Some of the disadvantages are:</p>
114
- <ul>
115
- <li>You might not be able to access some features or functions that are only available on newer versions of the app.</li>
116
- <li>You might experience some bugs or glitches that have been fixed on newer versions of the app.</li>
117
- <li>You might expose your device and data to malware or viruses that might be embedded in some APK files from unverified sources.</li>
118
- <li>You might violate some terms and conditions of Facebook by using an outdated version of their app.</li>
119
- </ul>
120
- <h3>Q4: How can I backup my messages and data on Messenger Facebook APK?</h3>
121
- <p>A4: If you want to backup your messages and data on Messenger Facebook APK, you have two options. One option is to use the built-in backup feature of Messenger that allows you to sync your messages and data with your Facebook account. To do this, go to your Messenger settings, then account settings, then sync contacts. Another option is to use a third-party app or tool that allows you to backup your messages and data on Messenger Facebook APK, such as Backup Text for FB Messenger, SMS Backup & Restore, or Titanium Backup. However, you should be careful and cautious when using any third-party app or tool, as there might be some risks and drawbacks involved.</p>
122
- <h3>Q5: How can I contact Facebook support if I have any issues with Messenger Facebook APK?</h3>
123
- <p>A5: If you have any issues with Messenger Facebook APK, such as installation problems, login errors, performance issues, or feature requests, you can contact Facebook support through various channels. Some of them are:</p>
124
- <ul>
125
- <li>You can visit the official help center of Messenger that provides answers and solutions to common questions and problems related to Messenger.</li>
126
- <li>You can visit the official community forum of Messenger that allows you to post your questions and issues and get responses from other users and experts.</li>
127
- <li>You can visit the official feedback page of Messenger that allows you to submit your feedback and suggestions about Messenger.</li>
128
- <li>You can visit the official social media pages of Messenger that allows you to send direct messages or comments to Messenger's team.</li>
129
- </ul>
 
spaces/232labs/VToonify/vtoonify/model/raft/train.py DELETED
@@ -1,247 +0,0 @@
1
- from __future__ import print_function, division
2
- import sys
3
- sys.path.append('core')
4
-
5
- import argparse
6
- import os
7
- import cv2
8
- import time
9
- import numpy as np
10
- import matplotlib.pyplot as plt
11
-
12
- import torch
13
- import torch.nn as nn
14
- import torch.optim as optim
15
- import torch.nn.functional as F
16
-
17
- from torch.utils.data import DataLoader
18
- from raft import RAFT
19
- import evaluate
20
- import datasets
21
-
22
- from torch.utils.tensorboard import SummaryWriter
23
-
24
- try:
25
- from torch.cuda.amp import GradScaler
26
- except ImportError:
27
- # dummy GradScaler for PyTorch < 1.6
28
- class GradScaler:
29
- def __init__(self, enabled=False):  # accept the 'enabled' kwarg so GradScaler(enabled=...) below also works on old PyTorch
30
- pass
31
- def scale(self, loss):
32
- return loss
33
- def unscale_(self, optimizer):
34
- pass
35
- def step(self, optimizer):
36
- optimizer.step()
37
- def update(self):
38
- pass
39
-
40
-
41
- # exclude extremly large displacements
42
- MAX_FLOW = 400
43
- SUM_FREQ = 100
44
- VAL_FREQ = 5000
45
-
46
-
47
- def sequence_loss(flow_preds, flow_gt, valid, gamma=0.8, max_flow=MAX_FLOW):
48
- """ Loss function defined over sequence of flow predictions """
49
-
50
- n_predictions = len(flow_preds)
51
- flow_loss = 0.0
52
-
53
- # exclude invalid pixels and extremely large displacements
54
- mag = torch.sum(flow_gt**2, dim=1).sqrt()
55
- valid = (valid >= 0.5) & (mag < max_flow)
56
-
57
- for i in range(n_predictions):
58
- i_weight = gamma**(n_predictions - i - 1)
59
- i_loss = (flow_preds[i] - flow_gt).abs()
60
- flow_loss += i_weight * (valid[:, None] * i_loss).mean()
61
-
62
- epe = torch.sum((flow_preds[-1] - flow_gt)**2, dim=1).sqrt()
63
- epe = epe.view(-1)[valid.view(-1)]
64
-
65
- metrics = {
66
- 'epe': epe.mean().item(),
67
- '1px': (epe < 1).float().mean().item(),
68
- '3px': (epe < 3).float().mean().item(),
69
- '5px': (epe < 5).float().mean().item(),
70
- }
71
-
72
- return flow_loss, metrics
73
-
74
-
75
- def count_parameters(model):
76
- return sum(p.numel() for p in model.parameters() if p.requires_grad)
77
-
78
-
79
- def fetch_optimizer(args, model):
80
- """ Create the optimizer and learning rate scheduler """
81
- optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
82
-
83
- scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
84
- pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
85
-
86
- return optimizer, scheduler
87
-
88
-
89
- class Logger:
90
- def __init__(self, model, scheduler):
91
- self.model = model
92
- self.scheduler = scheduler
93
- self.total_steps = 0
94
- self.running_loss = {}
95
- self.writer = None
96
-
97
- def _print_training_status(self):
98
- metrics_data = [self.running_loss[k]/SUM_FREQ for k in sorted(self.running_loss.keys())]
99
- training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, self.scheduler.get_last_lr()[0])
100
- metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data)
101
-
102
- # print the training status
103
- print(training_str + metrics_str)
104
-
105
- if self.writer is None:
106
- self.writer = SummaryWriter()
107
-
108
- for k in self.running_loss:
109
- self.writer.add_scalar(k, self.running_loss[k]/SUM_FREQ, self.total_steps)
110
- self.running_loss[k] = 0.0
111
-
112
- def push(self, metrics):
113
- self.total_steps += 1
114
-
115
- for key in metrics:
116
- if key not in self.running_loss:
117
- self.running_loss[key] = 0.0
118
-
119
- self.running_loss[key] += metrics[key]
120
-
121
- if self.total_steps % SUM_FREQ == SUM_FREQ-1:
122
- self._print_training_status()
123
- self.running_loss = {}
124
-
125
- def write_dict(self, results):
126
- if self.writer is None:
127
- self.writer = SummaryWriter()
128
-
129
- for key in results:
130
- self.writer.add_scalar(key, results[key], self.total_steps)
131
-
132
- def close(self):
133
- self.writer.close()
134
-
135
-
136
- def train(args):
137
-
138
- model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
139
- print("Parameter Count: %d" % count_parameters(model))
140
-
141
- if args.restore_ckpt is not None:
142
- model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
143
-
144
- model.cuda()
145
- model.train()
146
-
147
- if args.stage != 'chairs':
148
- model.module.freeze_bn()
149
-
150
- train_loader = datasets.fetch_dataloader(args)
151
- optimizer, scheduler = fetch_optimizer(args, model)
152
-
153
- total_steps = 0
154
- scaler = GradScaler(enabled=args.mixed_precision)
155
- logger = Logger(model, scheduler)
156
-
157
- VAL_FREQ = 5000
158
- add_noise = True
159
-
160
- should_keep_training = True
161
- while should_keep_training:
162
-
163
- for i_batch, data_blob in enumerate(train_loader):
164
- optimizer.zero_grad()
165
- image1, image2, flow, valid = [x.cuda() for x in data_blob]
166
-
167
- if args.add_noise:
168
- stdv = np.random.uniform(0.0, 5.0)
169
- image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
170
- image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
171
-
172
- flow_predictions = model(image1, image2, iters=args.iters)
173
-
174
- loss, metrics = sequence_loss(flow_predictions, flow, valid, args.gamma)
175
- scaler.scale(loss).backward()
176
- scaler.unscale_(optimizer)
177
- torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
178
-
179
- scaler.step(optimizer)
180
- scheduler.step()
181
- scaler.update()
182
-
183
- logger.push(metrics)
184
-
185
- if total_steps % VAL_FREQ == VAL_FREQ - 1:
186
- PATH = 'checkpoints/%d_%s.pth' % (total_steps+1, args.name)
187
- torch.save(model.state_dict(), PATH)
188
-
189
- results = {}
190
- for val_dataset in args.validation:
191
- if val_dataset == 'chairs':
192
- results.update(evaluate.validate_chairs(model.module))
193
- elif val_dataset == 'sintel':
194
- results.update(evaluate.validate_sintel(model.module))
195
- elif val_dataset == 'kitti':
196
- results.update(evaluate.validate_kitti(model.module))
197
-
198
- logger.write_dict(results)
199
-
200
- model.train()
201
- if args.stage != 'chairs':
202
- model.module.freeze_bn()
203
-
204
- total_steps += 1
205
-
206
- if total_steps > args.num_steps:
207
- should_keep_training = False
208
- break
209
-
210
- logger.close()
211
- PATH = 'checkpoints/%s.pth' % args.name
212
- torch.save(model.state_dict(), PATH)
213
-
214
- return PATH
215
-
216
-
217
- if __name__ == '__main__':
218
- parser = argparse.ArgumentParser()
219
- parser.add_argument('--name', default='raft', help="name your experiment")
220
- parser.add_argument('--stage', help="determines which dataset to use for training")
221
- parser.add_argument('--restore_ckpt', help="restore checkpoint")
222
- parser.add_argument('--small', action='store_true', help='use small model')
223
- parser.add_argument('--validation', type=str, nargs='+')
224
-
225
- parser.add_argument('--lr', type=float, default=0.00002)
226
- parser.add_argument('--num_steps', type=int, default=100000)
227
- parser.add_argument('--batch_size', type=int, default=6)
228
- parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
229
- parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
230
- parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
231
-
232
- parser.add_argument('--iters', type=int, default=12)
233
- parser.add_argument('--wdecay', type=float, default=.00005)
234
- parser.add_argument('--epsilon', type=float, default=1e-8)
235
- parser.add_argument('--clip', type=float, default=1.0)
236
- parser.add_argument('--dropout', type=float, default=0.0)
237
- parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
238
- parser.add_argument('--add_noise', action='store_true')
239
- args = parser.parse_args()
240
-
241
- torch.manual_seed(1234)
242
- np.random.seed(1234)
243
-
244
- if not os.path.isdir('checkpoints'):
245
- os.mkdir('checkpoints')
246
-
247
- train(args)
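
As a quick sanity check on the loss above: with gamma = 0.8 the i-th of n predictions is weighted gamma**(n - i - 1), so later refinement iterations dominate. A minimal standalone sketch (PyTorch; shapes and values are illustrative, not taken from the repo):

import torch

# Toy check of the exponential weighting in sequence_loss:
# for n_predictions = 3 and gamma = 0.8 the weights are [0.64, 0.8, 1.0].
gamma, n_predictions = 0.8, 3
weights = [gamma ** (n_predictions - i - 1) for i in range(n_predictions)]
print(weights)  # [0.6400000000000001, 0.8, 1.0]

# Applying it to dummy flow predictions of shape (batch, 2, H, W):
flow_gt = torch.zeros(1, 2, 8, 8)
flow_preds = [flow_gt + 0.1 * (i + 1) for i in range(n_predictions)]
valid = torch.ones(1, 8, 8)
loss = sum(w * (valid[:, None] * (p - flow_gt).abs()).mean()
           for w, p in zip(weights, flow_preds))
print(loss)  # weighted L1 over the prediction sequence
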
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/pann_model.py DELETED
@@ -1,543 +0,0 @@
- # PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition
- # Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn
- # Some layers are re-designed for CLAP
- import os
- os.environ['NUMBA_CACHE_DIR'] = '/tmp/'
- 
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from torchlibrosa.stft import Spectrogram, LogmelFilterBank
- from torchlibrosa.augmentation import SpecAugmentation
- 
- from .utils import do_mixup, interpolate, pad_framewise_output
- from .feature_fusion import iAFF, AFF, DAF
- 
- 
- def init_layer(layer):
-     """Initialize a Linear or Convolutional layer. """
-     nn.init.xavier_uniform_(layer.weight)
- 
-     if hasattr(layer, 'bias'):
-         if layer.bias is not None:
-             layer.bias.data.fill_(0.)
- 
- 
- def init_bn(bn):
-     """Initialize a Batchnorm layer. """
-     bn.bias.data.fill_(0.)
-     bn.weight.data.fill_(1.)
- 
- 
- class ConvBlock(nn.Module):
-     def __init__(self, in_channels, out_channels):
- 
-         super(ConvBlock, self).__init__()
- 
-         self.conv1 = nn.Conv2d(in_channels=in_channels,
-                                out_channels=out_channels,
-                                kernel_size=(3, 3), stride=(1, 1),
-                                padding=(1, 1), bias=False)
- 
-         self.conv2 = nn.Conv2d(in_channels=out_channels,
-                                out_channels=out_channels,
-                                kernel_size=(3, 3), stride=(1, 1),
-                                padding=(1, 1), bias=False)
- 
-         self.bn1 = nn.BatchNorm2d(out_channels)
-         self.bn2 = nn.BatchNorm2d(out_channels)
- 
-         self.init_weight()
- 
-     def init_weight(self):
-         init_layer(self.conv1)
-         init_layer(self.conv2)
-         init_bn(self.bn1)
-         init_bn(self.bn2)
- 
-     def forward(self, input, pool_size=(2, 2), pool_type='avg'):
- 
-         x = input
-         x = F.relu_(self.bn1(self.conv1(x)))
-         x = F.relu_(self.bn2(self.conv2(x)))
-         if pool_type == 'max':
-             x = F.max_pool2d(x, kernel_size=pool_size)
-         elif pool_type == 'avg':
-             x = F.avg_pool2d(x, kernel_size=pool_size)
-         elif pool_type == 'avg+max':
-             x1 = F.avg_pool2d(x, kernel_size=pool_size)
-             x2 = F.max_pool2d(x, kernel_size=pool_size)
-             x = x1 + x2
-         else:
-             raise Exception('Incorrect argument!')
- 
-         return x
- 
- 
- class ConvBlock5x5(nn.Module):
-     def __init__(self, in_channels, out_channels):
- 
-         super(ConvBlock5x5, self).__init__()
- 
-         self.conv1 = nn.Conv2d(in_channels=in_channels,
-                                out_channels=out_channels,
-                                kernel_size=(5, 5), stride=(1, 1),
-                                padding=(2, 2), bias=False)
- 
-         self.bn1 = nn.BatchNorm2d(out_channels)
- 
-         self.init_weight()
- 
-     def init_weight(self):
-         init_layer(self.conv1)
-         init_bn(self.bn1)
- 
-     def forward(self, input, pool_size=(2, 2), pool_type='avg'):
- 
-         x = input
-         x = F.relu_(self.bn1(self.conv1(x)))
-         if pool_type == 'max':
-             x = F.max_pool2d(x, kernel_size=pool_size)
-         elif pool_type == 'avg':
-             x = F.avg_pool2d(x, kernel_size=pool_size)
-         elif pool_type == 'avg+max':
-             x1 = F.avg_pool2d(x, kernel_size=pool_size)
-             x2 = F.max_pool2d(x, kernel_size=pool_size)
-             x = x1 + x2
-         else:
-             raise Exception('Incorrect argument!')
- 
-         return x
- 
- 
- class AttBlock(nn.Module):
-     def __init__(self, n_in, n_out, activation='linear', temperature=1.):
-         super(AttBlock, self).__init__()
- 
-         self.activation = activation
-         self.temperature = temperature
-         self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
-         self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
- 
-         self.bn_att = nn.BatchNorm1d(n_out)
-         self.init_weights()
- 
-     def init_weights(self):
-         init_layer(self.att)
-         init_layer(self.cla)
-         init_bn(self.bn_att)
- 
-     def forward(self, x):
-         # x: (n_samples, n_in, n_time)
-         norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
-         cla = self.nonlinear_transform(self.cla(x))
-         x = torch.sum(norm_att * cla, dim=2)
-         return x, norm_att, cla
- 
-     def nonlinear_transform(self, x):
-         if self.activation == 'linear':
-             return x
-         elif self.activation == 'sigmoid':
-             return torch.sigmoid(x)
- 
- 
- class Cnn14(nn.Module):
-     def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
-                  fmax, classes_num, enable_fusion=False, fusion_type='None'):
- 
-         super(Cnn14, self).__init__()
- 
-         window = 'hann'
-         center = True
-         pad_mode = 'reflect'
-         ref = 1.0
-         amin = 1e-10
-         top_db = None
- 
-         self.enable_fusion = enable_fusion
-         self.fusion_type = fusion_type
- 
-         # Spectrogram extractor
-         self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
-                                                  win_length=window_size, window=window, center=center, pad_mode=pad_mode,
-                                                  freeze_parameters=True)
- 
-         # Logmel feature extractor
-         self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
-                                                  n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
-                                                  freeze_parameters=True)
- 
-         # Spec augmenter
-         self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
-                                                freq_drop_width=8, freq_stripes_num=2)
- 
-         self.bn0 = nn.BatchNorm2d(64)
- 
-         if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
-             self.conv_block1 = ConvBlock(in_channels=4, out_channels=64)
-         else:
-             self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
-         self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
-         self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
-         self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
-         self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
-         self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
- 
-         self.fc1 = nn.Linear(2048, 2048, bias=True)
-         self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
- 
-         if (self.enable_fusion) and (self.fusion_type in ['daf_1d', 'aff_1d', 'iaff_1d']):
-             self.mel_conv1d = nn.Sequential(
-                 nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
-                 nn.BatchNorm1d(64)  # No Relu
-             )
-             if self.fusion_type == 'daf_1d':
-                 self.fusion_model = DAF()
-             elif self.fusion_type == 'aff_1d':
-                 self.fusion_model = AFF(channels=64, type='1D')
-             elif self.fusion_type == 'iaff_1d':
-                 self.fusion_model = iAFF(channels=64, type='1D')
- 
-         if (self.enable_fusion) and (self.fusion_type in ['daf_2d', 'aff_2d', 'iaff_2d']):
-             self.mel_conv2d = nn.Sequential(
-                 nn.Conv2d(1, 64, kernel_size=(5, 5), stride=(6, 2), padding=(2, 2)),
-                 nn.BatchNorm2d(64),
-                 nn.ReLU(inplace=True)
-             )
- 
-             if self.fusion_type == 'daf_2d':
-                 self.fusion_model = DAF()
-             elif self.fusion_type == 'aff_2d':
-                 self.fusion_model = AFF(channels=64, type='2D')
-             elif self.fusion_type == 'iaff_2d':
-                 self.fusion_model = iAFF(channels=64, type='2D')
-         self.init_weight()
- 
-     def init_weight(self):
-         init_bn(self.bn0)
-         init_layer(self.fc1)
-         init_layer(self.fc_audioset)
- 
-     def forward(self, input, mixup_lambda=None, device=None):
-         """
-         Input: (batch_size, data_length)"""
- 
-         if self.enable_fusion and input["longer"].sum() == 0:
-             # if no audio is longer than 10s, then randomly select one audio to be longer
-             input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True
- 
-         if not self.enable_fusion:
-             x = self.spectrogram_extractor(input['waveform'].to(device=device, non_blocking=True))  # (batch_size, 1, time_steps, freq_bins)
-             x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
- 
-             x = x.transpose(1, 3)
-             x = self.bn0(x)
-             x = x.transpose(1, 3)
-         else:
-             longer_list = input["longer"].to(device=device, non_blocking=True)
-             x = input["mel_fusion"].to(device=device, non_blocking=True)
-             longer_list_idx = torch.where(longer_list)[0]
-             x = x.transpose(1, 3)
-             x = self.bn0(x)
-             x = x.transpose(1, 3)
-             if self.fusion_type in ['daf_1d', 'aff_1d', 'iaff_1d']:
-                 new_x = x[:, 0:1, :, :].clone().contiguous()
-                 # local processing
-                 if len(longer_list_idx) > 0:
-                     fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
-                     FB, FC, FT, FF = fusion_x_local.size()
-                     fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
-                     fusion_x_local = torch.permute(fusion_x_local, (0, 2, 1)).contiguous()
-                     fusion_x_local = self.mel_conv1d(fusion_x_local)
-                     fusion_x_local = fusion_x_local.view(FB, FC, FF, fusion_x_local.size(-1))
-                     fusion_x_local = torch.permute(fusion_x_local, (0, 2, 1, 3)).contiguous().flatten(2)
-                     if fusion_x_local.size(-1) < FT:
-                         fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB, FF, FT - fusion_x_local.size(-1)), device=device)], dim=-1)
-                     else:
-                         fusion_x_local = fusion_x_local[:, :, :FT]
-                     # 1D fusion
-                     new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
-                     new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local)
-                     x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
-                 else:
-                     x = new_x
-             elif self.fusion_type in ['daf_2d', 'aff_2d', 'iaff_2d', 'channel_map']:
-                 x = x  # no change
- 
-         if self.training:
-             x = self.spec_augmenter(x)
-         # Mixup on spectrogram
-         if self.training and mixup_lambda is not None:
-             x = do_mixup(x, mixup_lambda)
-         if (self.enable_fusion) and (self.fusion_type in ['daf_2d', 'aff_2d', 'iaff_2d']):
-             global_x = x[:, 0:1, :, :]
- 
-             # global processing
-             B, C, H, W = global_x.shape
-             global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type='avg')
-             if len(longer_list_idx) > 0:
-                 local_x = x[longer_list_idx, 1:, :, :].contiguous()
-                 TH = global_x.size(-2)
-                 # local processing
-                 B, C, H, W = local_x.shape
-                 local_x = local_x.view(B * C, 1, H, W)
-                 local_x = self.mel_conv2d(local_x)
-                 local_x = local_x.view(B, C, local_x.size(1), local_x.size(2), local_x.size(3))
-                 local_x = local_x.permute((0, 2, 1, 3, 4)).contiguous().flatten(2, 3)
-                 TB, TC, _, TW = local_x.size()
-                 if local_x.size(-2) < TH:
-                     local_x = torch.cat([local_x, torch.zeros((TB, TC, TH - local_x.size(-2), TW), device=global_x.device)], dim=-2)
-                 else:
-                     local_x = local_x[:, :, :TH, :]
- 
-                 global_x[longer_list_idx] = self.fusion_model(global_x[longer_list_idx], local_x)
-             x = global_x
-         else:
-             x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
- 
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = torch.mean(x, dim=3)
- 
-         latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
-         latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
-         latent_x = latent_x1 + latent_x2
-         latent_x = latent_x.transpose(1, 2)
-         latent_x = F.relu_(self.fc1(latent_x))
-         latent_output = interpolate(latent_x, 32)
- 
-         (x1, _) = torch.max(x, dim=2)
-         x2 = torch.mean(x, dim=2)
-         x = x1 + x2
-         x = F.dropout(x, p=0.5, training=self.training)
-         x = F.relu_(self.fc1(x))
-         embedding = F.dropout(x, p=0.5, training=self.training)
-         clipwise_output = torch.sigmoid(self.fc_audioset(x))
- 
-         output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
-         return output_dict
- 
- 
- class Cnn6(nn.Module):
-     def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
-                  fmax, classes_num, enable_fusion=False, fusion_type='None'):
- 
-         super(Cnn6, self).__init__()
- 
-         window = 'hann'
-         center = True
-         pad_mode = 'reflect'
-         ref = 1.0
-         amin = 1e-10
-         top_db = None
- 
-         self.enable_fusion = enable_fusion
-         self.fusion_type = fusion_type
- 
-         # Spectrogram extractor
-         self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
-                                                  win_length=window_size, window=window, center=center, pad_mode=pad_mode,
-                                                  freeze_parameters=True)
- 
-         # Logmel feature extractor
-         self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
-                                                  n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
-                                                  freeze_parameters=True)
- 
-         # Spec augmenter
-         self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
-                                                freq_drop_width=8, freq_stripes_num=2)
- 
-         self.bn0 = nn.BatchNorm2d(64)
- 
-         self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
-         self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
-         self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
-         self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)
- 
-         self.fc1 = nn.Linear(512, 512, bias=True)
-         self.fc_audioset = nn.Linear(512, classes_num, bias=True)
- 
-         self.init_weight()
- 
-     def init_weight(self):
-         init_bn(self.bn0)
-         init_layer(self.fc1)
-         init_layer(self.fc_audioset)
- 
-     def forward(self, input, mixup_lambda=None, device=None):
-         """
-         Input: (batch_size, data_length)"""
- 
-         x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
-         x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
- 
-         x = x.transpose(1, 3)
-         x = self.bn0(x)
-         x = x.transpose(1, 3)
- 
-         if self.training:
-             x = self.spec_augmenter(x)
- 
-         # Mixup on spectrogram
-         if self.training and mixup_lambda is not None:
-             x = do_mixup(x, mixup_lambda)
- 
-         x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = torch.mean(x, dim=3)
- 
-         latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
-         latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
-         latent_x = latent_x1 + latent_x2
-         latent_x = latent_x.transpose(1, 2)
-         latent_x = F.relu_(self.fc1(latent_x))
-         latent_output = interpolate(latent_x, 16)
- 
-         (x1, _) = torch.max(x, dim=2)
-         x2 = torch.mean(x, dim=2)
-         x = x1 + x2
-         x = F.dropout(x, p=0.5, training=self.training)
-         x = F.relu_(self.fc1(x))
-         embedding = F.dropout(x, p=0.5, training=self.training)
-         clipwise_output = torch.sigmoid(self.fc_audioset(x))
- 
-         output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
- 
-         return output_dict
- 
- 
- class Cnn10(nn.Module):
-     def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
-                  fmax, classes_num, enable_fusion=False, fusion_type='None'):
- 
-         super(Cnn10, self).__init__()
- 
-         window = 'hann'
-         center = True
-         pad_mode = 'reflect'
-         ref = 1.0
-         amin = 1e-10
-         top_db = None
- 
-         self.enable_fusion = enable_fusion
-         self.fusion_type = fusion_type
- 
-         # Spectrogram extractor
-         self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
-                                                  win_length=window_size, window=window, center=center, pad_mode=pad_mode,
-                                                  freeze_parameters=True)
- 
-         # Logmel feature extractor
-         self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
-                                                  n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
-                                                  freeze_parameters=True)
- 
-         # Spec augmenter
-         self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
-                                                freq_drop_width=8, freq_stripes_num=2)
- 
-         self.bn0 = nn.BatchNorm2d(64)
- 
-         self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
-         self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
-         self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
-         self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
-         self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
- 
-         self.fc1 = nn.Linear(1024, 1024, bias=True)
-         self.fc_audioset = nn.Linear(1024, classes_num, bias=True)
- 
-         self.init_weight()
- 
-     def init_weight(self):
-         init_bn(self.bn0)
-         init_layer(self.fc1)
-         init_layer(self.fc_audioset)
- 
-     def forward(self, input, mixup_lambda=None, device=None):
-         """
-         Input: (batch_size, data_length)"""
- 
-         x = self.spectrogram_extractor(input)  # (batch_size, 1, time_steps, freq_bins)
-         x = self.logmel_extractor(x)  # (batch_size, 1, time_steps, mel_bins)
- 
-         x = x.transpose(1, 3)
-         x = self.bn0(x)
-         x = x.transpose(1, 3)
- 
-         if self.training:
-             x = self.spec_augmenter(x)
- 
-         # Mixup on spectrogram
-         if self.training and mixup_lambda is not None:
-             x = do_mixup(x, mixup_lambda)
- 
-         x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
-         x = F.dropout(x, p=0.2, training=self.training)
-         x = torch.mean(x, dim=3)
- 
-         latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
-         latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
-         latent_x = latent_x1 + latent_x2
-         latent_x = latent_x.transpose(1, 2)
-         latent_x = F.relu_(self.fc1(latent_x))
-         latent_output = interpolate(latent_x, 32)
- 
-         (x1, _) = torch.max(x, dim=2)
-         x2 = torch.mean(x, dim=2)
-         x = x1 + x2
-         x = F.dropout(x, p=0.5, training=self.training)
-         x = F.relu_(self.fc1(x))
-         embedding = F.dropout(x, p=0.5, training=self.training)
-         clipwise_output = torch.sigmoid(self.fc_audioset(x))
- 
-         output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
- 
-         return output_dict
- 
- 
- def create_pann_model(audio_cfg, enable_fusion=False, fusion_type='None'):
-     try:
-         ModelProto = eval(audio_cfg.model_name)
-         model = ModelProto(
-             sample_rate=audio_cfg.sample_rate,
-             window_size=audio_cfg.window_size,
-             hop_size=audio_cfg.hop_size,
-             mel_bins=audio_cfg.mel_bins,
-             fmin=audio_cfg.fmin,
-             fmax=audio_cfg.fmax,
-             classes_num=audio_cfg.class_num,
-             enable_fusion=enable_fusion,
-             fusion_type=fusion_type
-         )
-         return model
-     except:
-         raise RuntimeError(f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.')
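
For orientation, a minimal sketch of how create_pann_model is typically driven. The audio_cfg field values below are illustrative placeholders, not taken from this repo's configs; real values come from the CLAP model config files:

from types import SimpleNamespace
import torch

# Hypothetical config; create_pann_model resolves model_name via eval().
audio_cfg = SimpleNamespace(
    model_name='Cnn14',
    sample_rate=48000, window_size=1024, hop_size=480,
    mel_bins=64, fmin=50, fmax=14000, class_num=527,
)
model = create_pann_model(audio_cfg, enable_fusion=False)
model.eval()

# Without fusion, Cnn14.forward() expects a dict with a raw waveform batch.
batch = {'waveform': torch.randn(2, 48000)}  # 2 clips of 1 s at 48 kHz
with torch.no_grad():
    out = model(batch)
print(out['embedding'].shape)  # (2, 2048) for Cnn14
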
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Aibn.py DELETED
@@ -1,52 +0,0 @@
- from __future__ import annotations
- 
- import time
- import hashlib
- 
- from ..typing import AsyncGenerator
- from ..requests import StreamSession
- from .base_provider import AsyncGeneratorProvider
- 
- 
- class Aibn(AsyncGeneratorProvider):
-     url = "https://aibn.cc"
-     supports_gpt_35_turbo = True
-     working = True
- 
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         timeout: int = 30,
-         **kwargs
-     ) -> AsyncGenerator:
-         async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
-             timestamp = int(time.time())
-             data = {
-                 "messages": messages,
-                 "pass": None,
-                 "sign": generate_signature(timestamp, messages[-1]["content"]),
-                 "time": timestamp
-             }
-             async with session.post(f"{cls.url}/api/generate", json=data) as response:
-                 response.raise_for_status()
-                 async for chunk in response.iter_content():
-                     yield chunk.decode()
- 
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-             ("temperature", "float"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
- 
- 
- def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
-     data = f"{timestamp}:{message}:{secret}"
-     return hashlib.sha256(data.encode()).hexdigest()
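
The request signing above is just a SHA-256 over "timestamp:message:secret"; a standalone check using only the standard library:

import time
import hashlib

def generate_signature(timestamp: int, message: str, secret: str = "undefined") -> str:
    # Same scheme as the provider above: sha256("timestamp:message:secret")
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()

ts = int(time.time())
print(generate_signature(ts, "hello"))  # 64-character hex digest
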
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/midas/api.py DELETED
@@ -1,175 +0,0 @@
- # based on https://github.com/isl-org/MiDaS
- import os
- 
- import cv2
- import torch
- import torch.nn as nn
- from torchvision.transforms import Compose
- 
- from ldm.modules.extra_condition.midas.midas.dpt_depth import DPTDepthModel
- from ldm.modules.extra_condition.midas.midas.midas_net import MidasNet
- from ldm.modules.extra_condition.midas.midas.midas_net_custom import MidasNet_small
- from ldm.modules.extra_condition.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
- 
- 
- ISL_PATHS = {
-     "dpt_large": "models/dpt_large-midas-2f21e586.pt",
-     "dpt_hybrid": "models/dpt_hybrid-midas-501f0c75.pt",
-     "midas_v21": "",
-     "midas_v21_small": "",
- }
- 
- remote_model_path = "https://github.com/intel-isl/DPT/releases/download/1_0/dpt_hybrid-midas-501f0c75.pt"
- 
- 
- def disabled_train(self, mode=True):
-     """Overwrite model.train with this function to make sure train/eval mode
-     does not change anymore."""
-     return self
- 
- 
- def load_midas_transform(model_type):
-     # https://github.com/isl-org/MiDaS/blob/master/run.py
-     # load transform only
-     if model_type == "dpt_large":  # DPT-Large
-         net_w, net_h = 384, 384
-         resize_mode = "minimal"
-         normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
- 
-     elif model_type == "dpt_hybrid":  # DPT-Hybrid
-         net_w, net_h = 384, 384
-         resize_mode = "minimal"
-         normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
- 
-     elif model_type == "midas_v21":
-         net_w, net_h = 384, 384
-         resize_mode = "upper_bound"
-         normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- 
-     elif model_type == "midas_v21_small":
-         net_w, net_h = 256, 256
-         resize_mode = "upper_bound"
-         normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
- 
-     else:
-         assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
- 
-     transform = Compose(
-         [
-             Resize(
-                 net_w,
-                 net_h,
-                 resize_target=None,
-                 keep_aspect_ratio=True,
-                 ensure_multiple_of=32,
-                 resize_method=resize_mode,
-                 image_interpolation_method=cv2.INTER_CUBIC,
-             ),
-             normalization,
-             PrepareForNet(),
-         ]
-     )
- 
-     return transform
- 
- 
- def load_model(model_type):
-     # https://github.com/isl-org/MiDaS/blob/master/run.py
-     # load network
-     model_path = ISL_PATHS[model_type]
-     if model_type == "dpt_large":  # DPT-Large
-         model = DPTDepthModel(
-             path=model_path,
-             backbone="vitl16_384",
-             non_negative=True,
-         )
-         net_w, net_h = 384, 384
-         resize_mode = "minimal"
-         normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
- 
-     elif model_type == "dpt_hybrid":  # DPT-Hybrid
-         if not os.path.exists(model_path):
-             from basicsr.utils.download_util import load_file_from_url
-             load_file_from_url(remote_model_path, model_dir='models')
- 
-         model = DPTDepthModel(
-             path=model_path,
-             backbone="vitb_rn50_384",
-             non_negative=True,
-         )
-         net_w, net_h = 384, 384
-         resize_mode = "minimal"
-         normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
- 
-     elif model_type == "midas_v21":
-         model = MidasNet(model_path, non_negative=True)
-         net_w, net_h = 384, 384
-         resize_mode = "upper_bound"
-         normalization = NormalizeImage(
-             mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-         )
- 
-     elif model_type == "midas_v21_small":
-         model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
-                                non_negative=True, blocks={'expand': True})
-         net_w, net_h = 256, 256
-         resize_mode = "upper_bound"
-         normalization = NormalizeImage(
-             mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-         )
- 
-     else:
-         print(f"model_type '{model_type}' not implemented, use: --model_type large")
-         assert False
- 
-     transform = Compose(
-         [
-             Resize(
-                 net_w,
-                 net_h,
-                 resize_target=None,
-                 keep_aspect_ratio=True,
-                 ensure_multiple_of=32,
-                 resize_method=resize_mode,
-                 image_interpolation_method=cv2.INTER_CUBIC,
-             ),
-             normalization,
-             PrepareForNet(),
-         ]
-     )
- 
-     return model.eval(), transform
- 
- 
- class MiDaSInference(nn.Module):
-     MODEL_TYPES_TORCH_HUB = [
-         "DPT_Large",
-         "DPT_Hybrid",
-         "MiDaS_small"
-     ]
-     MODEL_TYPES_ISL = [
-         "dpt_large",
-         "dpt_hybrid",
-         "midas_v21",
-         "midas_v21_small",
-     ]
- 
-     def __init__(self, model_type):
-         super().__init__()
-         assert (model_type in self.MODEL_TYPES_ISL)
-         model, _ = load_model(model_type)
-         self.model = model
-         self.model.train = disabled_train
- 
-     def forward(self, x):
-         # x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
-         # NOTE: we expect that the correct transform has been called during dataloading.
-         with torch.no_grad():
-             prediction = self.model(x)
-             prediction = torch.nn.functional.interpolate(
-                 prediction.unsqueeze(1),
-                 size=x.shape[2:],
-                 mode="bicubic",
-                 align_corners=False,
-             )
-         assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
-         return prediction
spaces/Aditya9790/yolo7-object-tracking/detect_or_track.py DELETED
@@ -1,285 +0,0 @@
- import argparse
- import time
- from pathlib import Path
- import cv2
- import numpy as np
- import torch
- import torch.backends.cudnn as cudnn
- from numpy import random
- 
- from models.experimental import attempt_load
- from utils.datasets import LoadStreams, LoadImages
- from utils.general import check_img_size, check_requirements, \
-     check_imshow, non_max_suppression, apply_classifier, \
-     scale_coords, xyxy2xywh, strip_optimizer, set_logging, \
-     increment_path
- from utils.plots import plot_one_box
- from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
- 
- from sort import *
- 
- 
- """Function to Draw Bounding boxes"""
- def draw_boxes(img, bbox, identities=None, categories=None, confidences=None, names=None, colors=None):
-     for i, box in enumerate(bbox):
-         x1, y1, x2, y2 = [int(i) for i in box]
-         tl = opt.thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
- 
-         cat = int(categories[i]) if categories is not None else 0
-         id = int(identities[i]) if identities is not None else 0
-         # conf = confidences[i] if confidences is not None else 0
- 
-         color = colors[cat]
- 
-         if not opt.nobbox:
-             cv2.rectangle(img, (x1, y1), (x2, y2), color, tl)
- 
-         if not opt.nolabel:
-             label = str(id) + ":" + names[cat] if identities is not None else f'{names[cat]} {confidences[i]:.2f}'
-             tf = max(tl - 1, 1)  # font thickness
-             t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
-             c2 = x1 + t_size[0], y1 - t_size[1] - 3
-             cv2.rectangle(img, (x1, y1), c2, color, -1, cv2.LINE_AA)  # filled
-             cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
- 
-     return img
- 
- 
- def detect(save_img=False):
-     source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
-     save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
-     webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
-         ('rtsp://', 'rtmp://', 'http://', 'https://'))
-     save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
-     if not opt.nosave:
-         (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
- 
-     # Initialize
-     set_logging()
-     device = select_device(opt.device)
-     half = device.type != 'cpu'  # half precision only supported on CUDA
- 
-     # Load model
-     model = attempt_load(weights, map_location=device)  # load FP32 model
-     stride = int(model.stride.max())  # model stride
-     imgsz = check_img_size(imgsz, s=stride)  # check img_size
- 
-     if trace:
-         model = TracedModel(model, device, opt.img_size)
- 
-     if half:
-         model.half()  # to FP16
- 
-     # Second-stage classifier
-     classify = False
-     if classify:
-         modelc = load_classifier(name='resnet101', n=2)  # initialize
-         modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
-         modelc.to(device).eval()
- 
-     # Set Dataloader
-     vid_path, vid_writer = None, None
-     if webcam:
-         view_img = check_imshow()
-         cudnn.benchmark = True  # set True to speed up constant image size inference
-         dataset = LoadStreams(source, img_size=imgsz, stride=stride)
-     else:
-         dataset = LoadImages(source, img_size=imgsz, stride=stride)
- 
-     # Get names and colors
-     names = model.module.names if hasattr(model, 'module') else model.names
-     colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
- 
-     # Run inference
-     if device.type != 'cpu':
-         model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
-     old_img_w = old_img_h = imgsz
-     old_img_b = 1
- 
-     t0 = time.time()
-     startTime = 0
-     for path, img, im0s, vid_cap in dataset:
-         img = torch.from_numpy(img).to(device)
-         img = img.half() if half else img.float()  # uint8 to fp16/32
-         img /= 255.0  # 0 - 255 to 0.0 - 1.0
-         if img.ndimension() == 3:
-             img = img.unsqueeze(0)
- 
-         # Warmup
-         if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
-             old_img_b = img.shape[0]
-             old_img_h = img.shape[2]
-             old_img_w = img.shape[3]
-             for i in range(3):
-                 model(img, augment=opt.augment)[0]
- 
-         # Inference
-         t1 = time_synchronized()
-         pred = model(img, augment=opt.augment)[0]
-         t2 = time_synchronized()
- 
-         # Apply NMS
-         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
-         t3 = time_synchronized()
- 
-         # Apply Classifier
-         if classify:
-             pred = apply_classifier(pred, modelc, img, im0s)
- 
-         # Process detections
-         for i, det in enumerate(pred):  # detections per image
-             if webcam:  # batch_size >= 1
-                 p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
-             else:
-                 p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
- 
-             p = Path(p)  # to Path
-             save_path = str(save_dir / p.name)  # img.jpg
-             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
-             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
-             if len(det):
-                 # Rescale boxes from img_size to im0 size
-                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
- 
-                 # Print results
-                 for c in det[:, -1].unique():
-                     n = (det[:, -1] == c).sum()  # detections per class
-                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
- 
-                 dets_to_sort = np.empty((0, 6))
-                 # NOTE: We send in detected object class too
-                 for x1, y1, x2, y2, conf, detclass in det.cpu().detach().numpy():
-                     dets_to_sort = np.vstack((dets_to_sort,
-                                               np.array([x1, y1, x2, y2, conf, detclass])))
- 
-                 if opt.track:
- 
-                     tracked_dets = sort_tracker.update(dets_to_sort, opt.unique_track_color)
-                     tracks = sort_tracker.getTrackers()
- 
-                     # draw boxes for visualization
-                     if len(tracked_dets) > 0:
-                         bbox_xyxy = tracked_dets[:, :4]
-                         identities = tracked_dets[:, 8]
-                         categories = tracked_dets[:, 4]
-                         confidences = None
- 
-                         if opt.show_track:
-                             # loop over tracks
-                             for t, track in enumerate(tracks):
- 
-                                 track_color = colors[int(track.detclass)] if not opt.unique_track_color else sort_tracker.color_list[t]
- 
-                                 [cv2.line(im0, (int(track.centroidarr[i][0]),
-                                                 int(track.centroidarr[i][1])),
-                                           (int(track.centroidarr[i + 1][0]),
-                                            int(track.centroidarr[i + 1][1])),
-                                           track_color, thickness=opt.thickness)
-                                  for i, _ in enumerate(track.centroidarr)
-                                  if i < len(track.centroidarr) - 1]
-                 else:
-                     bbox_xyxy = dets_to_sort[:, :4]
-                     identities = None
-                     categories = dets_to_sort[:, 5]
-                     confidences = dets_to_sort[:, 4]
- 
-                 im0 = draw_boxes(im0, bbox_xyxy, identities, categories, confidences, names, colors)
- 
-             # Print time (inference + NMS)
-             print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
- 
-             # Stream results
-             if dataset.mode != 'image' and opt.show_fps:
-                 currentTime = time.time()
- 
-                 fps = 1 / (currentTime - startTime)
-                 startTime = currentTime
-                 cv2.putText(im0, "FPS: " + str(int(fps)), (20, 70), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
- 
-             if view_img:
-                 cv2.imshow(str(p), im0)
-                 cv2.waitKey(1)  # 1 millisecond
- 
-             # Save results (image with detections)
-             if save_img:
-                 if dataset.mode == 'image':
-                     cv2.imwrite(save_path, im0)
-                     print(f" The image with the result is saved in: {save_path}")
-                 else:  # 'video' or 'stream'
-                     if vid_path != save_path:  # new video
-                         vid_path = save_path
-                         if isinstance(vid_writer, cv2.VideoWriter):
-                             vid_writer.release()  # release previous video writer
-                         if vid_cap:  # video
-                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
-                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                         else:  # stream
-                             fps, w, h = 30, im0.shape[1], im0.shape[0]
-                             save_path += '.mp4'
-                         vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
-                     vid_writer.write(im0)
- 
-     if save_txt or save_img:
-         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
-         # print(f"Results saved to {save_dir}{s}")
- 
-     print(f'Done. ({time.time() - t0:.3f}s)')
- 
- 
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)')
-     parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
-     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
-     parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
-     parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--view-img', action='store_true', help='display results')
-     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
-     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
-     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
-     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
-     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
-     parser.add_argument('--augment', action='store_true', help='augmented inference')
-     parser.add_argument('--update', action='store_true', help='update all models')
-     parser.add_argument('--project', default='runs/detect', help='save results to project/name')
-     parser.add_argument('--name', default='exp', help='save results to project/name')
-     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-     parser.add_argument('--no-trace', action='store_true', help="don't trace model")
- 
-     parser.add_argument('--track', action='store_true', help='run tracking')
-     parser.add_argument('--show-track', action='store_true', help='show tracked path')
-     parser.add_argument('--show-fps', action='store_true', help='show fps')
-     parser.add_argument('--thickness', type=int, default=2, help='bounding box and font size thickness')
-     parser.add_argument('--seed', type=int, default=1, help='random seed to control bbox colors')
-     parser.add_argument('--nobbox', action='store_true', help="don't show bounding box")
-     parser.add_argument('--nolabel', action='store_true', help="don't show label")
-     parser.add_argument('--unique-track-color', action='store_true', help='show each track in unique color')
- 
-     opt = parser.parse_args()
-     print(opt)
-     np.random.seed(opt.seed)
- 
-     sort_tracker = Sort(max_age=5,
-                         min_hits=2,
-                         iou_threshold=0.2)
- 
-     # check_requirements(exclude=('pycocotools', 'thop'))
- 
-     with torch.no_grad():
-         if opt.update:  # update all models (to fix SourceChangeWarning)
-             for opt.weights in ['yolov7.pt']:
-                 detect()
-                 strip_optimizer(opt.weights)
-         else:
-             detect()
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetTotalColumnProportions.js DELETED
@@ -1,13 +0,0 @@
- var GetTotalColumnProportions = function () {
-     var result = 0,
-         proportion;
-     for (var i = 0; i < this.columnCount; i++) {
-         proportion = this.columnProportions[i];
-         if (proportion > 0) {
-             result += proportion;
-         }
-     }
-     return result;
- }
- 
- export default GetTotalColumnProportions;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/GetExpandedChildWidth.js DELETED
@@ -1,16 +0,0 @@
- var GetExpandedChildWidth = function (child, parentWidth) {
-     if (parentWidth === undefined) {
-         parentWidth = this.width;
-     }
- 
-     var childWidth;
-     var childConfig = child.rexSizer;
-     if (childConfig.expandWidth) {
-         var innerWidth = parentWidth - this.space.left - this.space.right;
-         var padding = childConfig.padding;
-         childWidth = innerWidth - padding.left - padding.right;
-     }
-     return childWidth;
- }
- 
- export default GetExpandedChildWidth;
spaces/Alcedo/yunmedia/resources/chatgpt-plugin/css/app.4dc5e420.css DELETED
@@ -1,22 +0,0 @@
- /*!
- 
- =========================================================
- * Vue Notus - v1.1.0 based on Tailwind Starter Kit by Creative Tim
- =========================================================
- 
- * Product Page: https://www.creative-tim.com/product/vue-notus
- * Copyright 2021 Creative Tim (https://www.creative-tim.com)
- * Licensed under MIT (https://github.com/creativetimofficial/vue-notus/blob/main/LICENSE.md)
- 
- * Tailwind Starter Kit Page: https://www.creative-tim.com/learning-lab/tailwind-starter-kit/presentation
- 
- * Coded by Creative Tim
- 
- =========================================================
- 
- * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
- 
- */
- /*
- ! tailwindcss v3.3.1 | MIT License | https://tailwindcss.com
- */*,:after,:before{-webkit-box-sizing:border-box;box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}:after,:before{--tw-content:""}html{line-height:1.5;-webkit-text-size-adjust:100%;-moz-tab-size:4;-o-tab-size:4;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;-webkit-font-feature-settings:normal;font-feature-settings:normal;font-variation-settings:normal}body{margin:0;line-height:inherit}hr{height:0;color:inherit;border-top-width:1px}abbr:where([title]){-webkit-text-decoration:underline dotted;text-decoration:underline dotted}h1,h2,h3,h4,h5,h6{font-size:inherit;font-weight:inherit}a{color:inherit;text-decoration:inherit}b,strong{font-weight:bolder}code,kbd,pre,samp{font-family:ui-monospace,SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,monospace;font-size:1em}small{font-size:80%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}table{text-indent:0;border-color:inherit;border-collapse:collapse}button,input,optgroup,select,textarea{font-family:inherit;font-size:100%;font-weight:inherit;line-height:inherit;color:inherit;margin:0;padding:0}button,select{text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button;background-color:transparent;background-image:none}:-moz-focusring{outline:auto}:-moz-ui-invalid{box-shadow:none}progress{vertical-align:baseline}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}summary{display:list-item}blockquote,dd,dl,figure,h1,h2,h3,h4,h5,h6,hr,p,pre{margin:0}fieldset{margin:0}fieldset,legend{padding:0}menu,ol,ul{list-style:none;margin:0;padding:0}textarea{resize:vertical}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#9ca3af}input::-moz-placeholder,textarea::-moz-placeholder{color:#9ca3af}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#9ca3af}input::-ms-input-placeholder,textarea::-ms-input-placeholder{color:#9ca3af}input::placeholder,textarea::placeholder{color:#9ca3af}[role=button],button{cursor:pointer}:disabled{cursor:default}audio,canvas,embed,iframe,img,object,svg,video{display:block;vertical-align:middle}img,video{max-width:100%;height:auto}[hidden]{display:none}[multiple],[type=date],[type=datetime-local],[type=email],[type=month],[type=number],[type=password],[type=search],[type=tel],[type=text],[type=time],[type=url],[type=week],select,textarea{-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:#fff;border-color:#6b7280;border-width:1px;border-radius:0;padding-top:.5rem;padding-right:.75rem;padding-bottom:.5rem;padding-left:.75rem;font-size:1rem;line-height:1.5rem;--tw-shadow:0 0 #0000}[multiple]:focus,[type=date]:focus,[type=datetime-local]:focus,[type=email]:focus,[type=month]:focus,[type=number]:focus,[type=password]:focus,[type=search]:focus,[type=tel]:focus,[type=text]:focus,[type=time]:focus,[type=url]:focus,[type=week]:focus,select:focus,textarea:focus{outline:2px solid transparent;outline-offset:2px;--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 
var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);border-color:#2563eb}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#6b7280;opacity:1}input::-moz-placeholder,textarea::-moz-placeholder{color:#6b7280;opacity:1}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#6b7280;opacity:1}input::-ms-input-placeholder,textarea::-ms-input-placeholder{color:#6b7280;opacity:1}input::placeholder,textarea::placeholder{color:#6b7280;opacity:1}::-webkit-datetime-edit-fields-wrapper{padding:0}::-webkit-date-and-time-value{min-height:1.5em}::-webkit-datetime-edit,::-webkit-datetime-edit-day-field,::-webkit-datetime-edit-hour-field,::-webkit-datetime-edit-meridiem-field,::-webkit-datetime-edit-millisecond-field,::-webkit-datetime-edit-minute-field,::-webkit-datetime-edit-month-field,::-webkit-datetime-edit-second-field,::-webkit-datetime-edit-year-field{padding-top:0;padding-bottom:0}select{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3E%3Cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='m6 8 4 4 4-4'/%3E%3C/svg%3E");background-position:right .5rem center;background-repeat:no-repeat;background-size:1.5em 1.5em;padding-right:2.5rem;-webkit-print-color-adjust:exact;print-color-adjust:exact}[multiple]{background-image:none;background-position:0 0;background-repeat:unset;background-size:initial;padding-right:.75rem;-webkit-print-color-adjust:unset;print-color-adjust:unset}[type=checkbox],[type=radio]{-webkit-appearance:none;-moz-appearance:none;appearance:none;padding:0;-webkit-print-color-adjust:exact;print-color-adjust:exact;display:inline-block;vertical-align:middle;background-origin:border-box;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-ms-flex-negative:0;flex-shrink:0;height:1rem;width:1rem;color:#2563eb;background-color:#fff;border-color:#6b7280;border-width:1px;--tw-shadow:0 0 #0000}[type=checkbox]{border-radius:0}[type=radio]{border-radius:100%}[type=checkbox]:focus,[type=radio]:focus{outline:2px solid transparent;outline-offset:2px;--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:2px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}[type=checkbox]:checked,[type=radio]:checked{border-color:transparent;background-color:currentColor;background-size:100% 100%;background-position:50%;background-repeat:no-repeat}[type=checkbox]:checked{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.207 4.793a1 1 0 0 1 0 1.414l-5 5a1 1 0 0 1-1.414 0l-2-2a1 1 0 0 1 1.414-1.414L6.5 9.086l4.293-4.293a1 1 0 0 1 1.414 0z'/%3E%3C/svg%3E")}[type=radio]:checked{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='8' cy='8' r='3'/%3E%3C/svg%3E")}[type=checkbox]:checked:focus,[type=checkbox]:checked:hover,[type=radio]:checked:focus,[type=radio]:checked:hover{border-color:transparent;background-color:currentColor}[type=checkbox]:indeterminate{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 16'%3E%3Cpath stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M4 8h8'/%3E%3C/svg%3E");border-color:transparent;background-color:currentColor;background-size:100% 100%;background-position:50%;background-repeat:no-repeat}[type=checkbox]:indeterminate:focus,[type=checkbox]:indeterminate:hover{border-color:transparent;background-color:currentColor}[type=file]{background:unset;border-color:inherit;border-width:0;border-radius:0;padding:0;font-size:unset;line-height:inherit}[type=file]:focus{outline:1px solid ButtonText;outline:1px auto -webkit-focus-ring-color}*,:after,:before{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::-webkit-backdrop{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: ;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }::backdrop{--tw-border-spacing-x:0;--tw-border-spacing-y:0;--tw-translate-x:0;--tw-translate-y:0;--tw-rotate:0;--tw-skew-x:0;--tw-skew-y:0;--tw-scale-x:1;--tw-scale-y:1;--tw-pan-x: ;--tw-pan-y: ;--tw-pinch-zoom: ;--tw-scroll-snap-strictness:proximity;--tw-ordinal: ;--tw-slashed-zero: ;--tw-numeric-figure: ;--tw-numeric-spacing: ;--tw-numeric-fraction: ;--tw-ring-inset: ;--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:rgba(59,130,246,.5);--tw-ring-offset-shadow:0 0 #0000;--tw-ring-shadow:0 0 #0000;--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;--tw-blur: ;--tw-brightness: ;--tw-contrast: ;--tw-grayscale: ;--tw-hue-rotate: 
;--tw-invert: ;--tw-saturate: ;--tw-sepia: ;--tw-drop-shadow: ;--tw-backdrop-blur: ;--tw-backdrop-brightness: ;--tw-backdrop-contrast: ;--tw-backdrop-grayscale: ;--tw-backdrop-hue-rotate: ;--tw-backdrop-invert: ;--tw-backdrop-opacity: ;--tw-backdrop-saturate: ;--tw-backdrop-sepia: }@media (min-width:1536px){.\!container{max-width:1536px!important}.container{max-width:1536px}}.form-input,.form-multiselect,.form-select,.form-textarea{-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:#fff;border-color:#6b7280;border-width:1px;border-radius:0;padding-top:.5rem;padding-right:.75rem;padding-bottom:.5rem;padding-left:.75rem;font-size:1rem;line-height:1.5rem;--tw-shadow:0 0 #0000}.form-input:focus,.form-multiselect:focus,.form-select:focus,.form-textarea:focus{outline:2px solid transparent;outline-offset:2px;--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);border-color:#2563eb}.form-input::-webkit-input-placeholder,.form-textarea::-webkit-input-placeholder{color:#6b7280;opacity:1}.form-input::-moz-placeholder,.form-textarea::-moz-placeholder{color:#6b7280;opacity:1}.form-input:-ms-input-placeholder,.form-textarea:-ms-input-placeholder{color:#6b7280;opacity:1}.form-input::-ms-input-placeholder,.form-textarea::-ms-input-placeholder{color:#6b7280;opacity:1}.form-input::placeholder,.form-textarea::placeholder{color:#6b7280;opacity:1}.form-input::-webkit-datetime-edit-fields-wrapper{padding:0}.form-input::-webkit-date-and-time-value{min-height:1.5em}.form-input::-webkit-datetime-edit,.form-input::-webkit-datetime-edit-day-field,.form-input::-webkit-datetime-edit-hour-field,.form-input::-webkit-datetime-edit-meridiem-field,.form-input::-webkit-datetime-edit-millisecond-field,.form-input::-webkit-datetime-edit-minute-field,.form-input::-webkit-datetime-edit-month-field,.form-input::-webkit-datetime-edit-second-field,.form-input::-webkit-datetime-edit-year-field{padding-top:0;padding-bottom:0}.form-select{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3E%3Cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='m6 8 4 4 4-4'/%3E%3C/svg%3E");background-position:right .5rem center;background-repeat:no-repeat;background-size:1.5em 1.5em;padding-right:2.5rem}.form-checkbox,.form-radio,.form-select{-webkit-print-color-adjust:exact;print-color-adjust:exact}.form-checkbox,.form-radio{-webkit-appearance:none;-moz-appearance:none;appearance:none;padding:0;display:inline-block;vertical-align:middle;background-origin:border-box;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-ms-flex-negative:0;flex-shrink:0;height:1rem;width:1rem;color:#2563eb;background-color:#fff;border-color:#6b7280;border-width:1px;--tw-shadow:0 0 #0000}.form-checkbox{border-radius:0}.form-radio{border-radius:100%}.form-checkbox:focus,.form-radio:focus{outline:2px solid transparent;outline-offset:2px;--tw-ring-inset:var(--tw-empty,/*!*/ 
/*!*/);--tw-ring-offset-width:2px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow)}.form-checkbox:checked,.form-radio:checked{border-color:transparent;background-color:currentColor;background-size:100% 100%;background-position:50%;background-repeat:no-repeat}.form-checkbox:checked{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath d='M12.207 4.793a1 1 0 0 1 0 1.414l-5 5a1 1 0 0 1-1.414 0l-2-2a1 1 0 0 1 1.414-1.414L6.5 9.086l4.293-4.293a1 1 0 0 1 1.414 0z'/%3E%3C/svg%3E")}.form-radio:checked{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 16 16' fill='%23fff' xmlns='http://www.w3.org/2000/svg'%3E%3Ccircle cx='8' cy='8' r='3'/%3E%3C/svg%3E")}.form-checkbox:checked:focus,.form-checkbox:checked:hover,.form-radio:checked:focus,.form-radio:checked:hover{border-color:transparent;background-color:currentColor}.form-checkbox:indeterminate{background-image:url("data:image/svg+xml;charset=utf-8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 16'%3E%3Cpath stroke='%23fff' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M4 8h8'/%3E%3C/svg%3E");border-color:transparent;background-color:currentColor;background-size:100% 100%;background-position:50%;background-repeat:no-repeat}.form-checkbox:indeterminate:focus,.form-checkbox:indeterminate:hover{border-color:transparent;background-color:currentColor}.\!container{width:100%!important}.container{width:100%}@media (min-width:640px){.\!container{max-width:640px!important}.container{max-width:640px}}@media (min-width:768px){.\!container{max-width:768px!important}.container{max-width:768px}}@media (min-width:1024px){.\!container{max-width:1024px!important}.container{max-width:1024px}}@media (min-width:1280px){.\!container{max-width:1280px!important}.container{max-width:1280px}}@media 
(min-width:1536px){.\!container{max-width:1280px!important}.container{max-width:1280px}}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border-width:0}.not-sr-only{position:static;width:auto;height:auto;padding:0;margin:0;overflow:visible;clip:auto;white-space:normal}.pointer-events-none{pointer-events:none}.pointer-events-auto{pointer-events:auto}.\!visible{visibility:visible!important}.visible{visibility:visible}.invisible{visibility:hidden}.collapse{visibility:collapse}.static{position:static}.\!fixed{position:fixed!important}.fixed{position:fixed}.absolute{position:absolute}.\!relative{position:relative!important}.relative{position:relative}.sticky{position:sticky}.-inset-1{inset:-.25rem}.bottom-0{bottom:0}.bottom-auto{bottom:auto}.end-1{inset-inline-end:.25rem}.end-2{inset-inline-end:.5rem}.end-7{inset-inline-end:1.75rem}.left-0{left:0}.right-0{right:0}.start-1{inset-inline-start:.25rem}.top-0{top:0}.top-auto{top:auto}.isolate{isolation:isolate}.isolation-auto{isolation:auto}.z-10{z-index:10}.z-2{z-index:2}.z-40{z-index:40}.z-50{z-index:50}.order-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.float-right{float:right}.float-left{float:left}.float-none{float:none}.clear-left{clear:left}.clear-right{clear:right}.clear-both{clear:both}.clear-none{clear:none}.-m-16{margin:-4rem}.-m-24{margin:-6rem}.m-1{margin:.25rem}.m-10{margin:2.5rem}.m-2{margin:.5rem}.m-4{margin:1rem}.m-5{margin:1.25rem}.m-6{margin:1.5rem}.m-8{margin:2rem}.mx-1{margin-left:.25rem;margin-right:.25rem}.mx-4{margin-left:1rem;margin-right:1rem}.mx-auto{margin-left:auto;margin-right:auto}.my-2{margin-top:.5rem;margin-bottom:.5rem}.my-4{margin-top:1rem;margin-bottom:1rem}.my-6{margin-top:1.5rem;margin-bottom:1.5rem}.-mb-px{margin-bottom:-1px}.-ml-20{margin-left:-5rem}.-ml-4{margin-left:-1rem}.-ml-px{margin-left:-1px}.-mr-px{margin-right:-1px}.-mt-20{margin-top:-5rem}.-mt-24{margin-top:-6rem}.-mt-48{margin-top:-12rem}.-mt-64{margin-top:-16rem}.mb-0{margin-bottom:0}.mb-1{margin-bottom:.25rem}.mb-12{margin-bottom:3rem}.mb-2{margin-bottom:.5rem}.mb-3{margin-bottom:.75rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}.mb-6{margin-bottom:1.5rem}.ml-1{margin-left:.25rem}.ml-2{margin-left:.5rem}.ml-3{margin-left:.75rem}.ml-auto{margin-left:auto}.mr-0{margin-right:0}.mr-1{margin-right:.25rem}.mr-2{margin-right:.5rem}.mr-3{margin-right:.75rem}.mr-4{margin-right:1rem}.mr-5{margin-right:1.25rem}.mr-8{margin-right:2rem}.mr-auto{margin-right:auto}.mt-0{margin-top:0}.mt-1{margin-top:.25rem}.mt-10{margin-top:2.5rem}.mt-12{margin-top:3rem}.mt-16{margin-top:4rem}.mt-2{margin-top:.5rem}.mt-20{margin-top:5rem}.mt-24{margin-top:6rem}.mt-3{margin-top:.75rem}.mt-32{margin-top:8rem}.mt-4{margin-top:1rem}.mt-48{margin-top:12rem}.mt-6{margin-top:1.5rem}.mt-8{margin-top:2rem}.box-border{-webkit-box-sizing:border-box;box-sizing:border-box}.box-content{-webkit-box-sizing:content-box;box-sizing:content-box}.line-clamp-none{overflow:visible;display:block;-webkit-box-orient:horizontal;-webkit-line-clamp:none}.\!block{display:block!important}.block{display:block}.inline-block{display:inline-block}.\!inline{display:inline!important}.inline{display:inline}.flex{display:-webkit-box;display:-ms-flexbox;display:flex}.inline-flex{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex}.\!table{display:table!important}.table{display:table}.inline-table{display:inline-table}.table-caption{display:table-caption}.table-cell{display:table-cell}.table-column{display:table-column}.table-column-group{display:table-column-group}.table-footer-group{display:table-footer-group}.table-header-group{display:table-header-group}.table-row-group{display:table-row-group}.table-row{display:table-row}.flow-root{display:flow-root}.\!grid{display:grid!important}.grid{display:grid}.inline-grid{display:inline-grid}.\!contents{display:contents!important}.contents{display:contents}.list-item{display:list-item}.\!hidden{display:none!important}.hidden{display:none}.h-0{height:0}.h-1{height:.25rem}.h-10{height:2.5rem}.h-12{height:3rem}.h-16{height:4rem}.h-2{height:.5rem}.h-20{height:5rem}.h-24{height:6rem}.h-350-px{height:350px}.h-40{height:10rem}.h-5{height:1.25rem}.h-600-px{height:600px}.h-70-px{height:70px}.h-8{height:2rem}.h-auto{height:auto}.h-full{height:100%}.h-screen{height:100vh}.max-h-860-px{max-height:860px}.min-h-screen{min-height:100vh}.w-1{width:.25rem}.w-1\/2{width:50%}.w-10{width:2.5rem}.w-10\/12{width:83.333333%}.w-12{width:3rem}.w-16{width:4rem}.w-5{width:1.25rem}.w-6\/12{width:50%}.w-8{width:2rem}.w-auto{width:auto}.w-full{width:100%}.min-w-0{min-width:0}.min-w-140-px{min-width:140px}.min-w-48{min-width:12rem}.max-w-100-px{max-width:100px}.max-w-150-px{max-width:150px}.max-w-4xl{max-width:56rem}.max-w-full{max-width:100%}.max-w-md{max-width:28rem}.max-w-xl{max-width:36rem}.max-w-xs{max-width:20rem}.flex-1{-webkit-box-flex:1;-ms-flex:1 1 0%;flex:1 1 0%}.flex-auto{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto}.flex-initial{-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto}.flex-shrink,.shrink{-ms-flex-negative:1;flex-shrink:1}.flex-grow,.grow{-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1}.table-auto{table-layout:auto}.table-fixed{table-layout:fixed}.caption-top{caption-side:top}.caption-bottom{caption-side:bottom}.border-collapse{border-collapse:collapse}.border-separate{border-collapse:separate}.rotate-180{--tw-rotate:180deg}.rotate-180,.rotate-90{-webkit-transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.rotate-90{--tw-rotate:90deg}.\!transform{-webkit-transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))!important;transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))!important}.transform,.transform-cpu{-webkit-transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));transform:translate(var(--tw-translate-x),var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.transform-gpu{-webkit-transform:translate3d(var(--tw-translate-x),var(--tw-translate-y),0) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));transform:translate3d(var(--tw-translate-x),var(--tw-translate-y),0) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) 
scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y))}.transform-none{-webkit-transform:none;transform:none}.cursor-pointer{cursor:pointer}.touch-auto{-ms-touch-action:auto;touch-action:auto}.touch-none{-ms-touch-action:none;touch-action:none}.touch-pan-x{--tw-pan-x:pan-x}.touch-pan-left,.touch-pan-x{-ms-touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom);touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom)}.touch-pan-left{--tw-pan-x:pan-left}.touch-pan-right{--tw-pan-x:pan-right}.touch-pan-right,.touch-pan-y{-ms-touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom);touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom)}.touch-pan-y{--tw-pan-y:pan-y}.touch-pan-up{--tw-pan-y:pan-up}.touch-pan-down,.touch-pan-up{-ms-touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom);touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom)}.touch-pan-down{--tw-pan-y:pan-down}.touch-pinch-zoom{--tw-pinch-zoom:pinch-zoom;-ms-touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom);touch-action:var(--tw-pan-x) var(--tw-pan-y) var(--tw-pinch-zoom)}.touch-manipulation{-ms-touch-action:manipulation;touch-action:manipulation}.select-none{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.select-text{-webkit-user-select:text;-moz-user-select:text;-ms-user-select:text;user-select:text}.select-all{-webkit-user-select:all;-moz-user-select:all;user-select:all}.select-auto{-webkit-user-select:auto;-moz-user-select:auto;-ms-user-select:auto;user-select:auto}.resize-none{resize:none}.resize-y{resize:vertical}.resize-x{resize:horizontal}.resize{resize:both}.snap-none{-ms-scroll-snap-type:none;scroll-snap-type:none}.snap-x{-ms-scroll-snap-type:x var(--tw-scroll-snap-strictness);scroll-snap-type:x var(--tw-scroll-snap-strictness)}.snap-y{-ms-scroll-snap-type:y var(--tw-scroll-snap-strictness);scroll-snap-type:y var(--tw-scroll-snap-strictness)}.snap-both{-ms-scroll-snap-type:both var(--tw-scroll-snap-strictness);scroll-snap-type:both 
var(--tw-scroll-snap-strictness)}.snap-mandatory{--tw-scroll-snap-strictness:mandatory}.snap-proximity{--tw-scroll-snap-strictness:proximity}.snap-start{scroll-snap-align:start}.snap-end{scroll-snap-align:end}.snap-center{scroll-snap-align:center}.snap-align-none{scroll-snap-align:none}.snap-normal{scroll-snap-stop:normal}.snap-always{scroll-snap-stop:always}.list-inside{list-style-position:inside}.list-outside{list-style-position:outside}.list-none{list-style-type:none}.appearance-none{-webkit-appearance:none;-moz-appearance:none;appearance:none}.break-before-auto{-webkit-column-break-before:auto;-moz-column-break-before:auto;break-before:auto}.break-before-avoid{-webkit-column-break-before:avoid;-moz-column-break-before:avoid;break-before:avoid}.break-before-all{-webkit-column-break-before:all;-moz-column-break-before:all;break-before:all}.break-before-avoid-page{-webkit-column-break-before:avoid;-moz-column-break-before:avoid;break-before:avoid-page}.break-before-page{-webkit-column-break-before:page;-moz-column-break-before:page;break-before:page}.break-before-left{-webkit-column-break-before:left;-moz-column-break-before:left;break-before:left}.break-before-right{-webkit-column-break-before:right;-moz-column-break-before:right;break-before:right}.break-before-column{-webkit-column-break-before:column;-moz-column-break-before:column;break-before:column}.break-inside-auto{-webkit-column-break-inside:auto;-moz-column-break-inside:auto;break-inside:auto}.break-inside-avoid{-webkit-column-break-inside:avoid;-moz-column-break-inside:avoid;break-inside:avoid}.break-inside-avoid-page{break-inside:avoid-page}.break-inside-avoid-column{-webkit-column-break-inside:avoid;-moz-column-break-inside:avoid;break-inside:avoid-column}.break-after-auto{-webkit-column-break-after:auto;-moz-column-break-after:auto;break-after:auto}.break-after-avoid{-webkit-column-break-after:avoid;-moz-column-break-after:avoid;break-after:avoid}.break-after-all{-webkit-column-break-after:all;-moz-column-break-after:all;break-after:all}.break-after-avoid-page{-webkit-column-break-after:avoid;-moz-column-break-after:avoid;break-after:avoid-page}.break-after-page{-webkit-column-break-after:page;-moz-column-break-after:page;break-after:page}.break-after-left{-webkit-column-break-after:left;-moz-column-break-after:left;break-after:left}.break-after-right{-webkit-column-break-after:right;-moz-column-break-after:right;break-after:right}.break-after-column{-webkit-column-break-after:column;-moz-column-break-after:column;break-after:column}.grid-flow-row{grid-auto-flow:row}.grid-flow-col{grid-auto-flow:column}.grid-flow-dense{grid-auto-flow:dense}.grid-flow-row-dense{grid-auto-flow:row dense}.grid-flow-col-dense{grid-auto-flow:column 
dense}.grid-cols-1{grid-template-columns:repeat(1,minmax(0,1fr))}.grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.flex-row{-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.flex-row,.flex-row-reverse{-webkit-box-orient:horizontal}.flex-row-reverse{-webkit-box-direction:reverse;-ms-flex-direction:row-reverse;flex-direction:row-reverse}.flex-col{-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.flex-col-reverse{-webkit-box-orient:vertical;-webkit-box-direction:reverse;-ms-flex-direction:column-reverse;flex-direction:column-reverse}.flex-wrap{-ms-flex-wrap:wrap;flex-wrap:wrap}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse;flex-wrap:wrap-reverse}.flex-nowrap{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.place-content-center{place-content:center}.place-content-start{place-content:start}.place-content-end{place-content:end}.place-content-between{place-content:space-between}.place-content-around{place-content:space-around}.place-content-evenly{place-content:space-evenly}.place-content-baseline{place-content:baseline}.place-content-stretch{place-content:stretch}.place-items-start{place-items:start}.place-items-end{place-items:end}.place-items-center{place-items:center}.place-items-baseline{place-items:baseline}.place-items-stretch{place-items:stretch}.content-normal{-ms-flex-line-pack:normal;align-content:normal}.content-center{-ms-flex-line-pack:center;align-content:center}.content-start{-ms-flex-line-pack:start;align-content:flex-start}.content-end{-ms-flex-line-pack:end;align-content:flex-end}.content-between{-ms-flex-line-pack:justify;align-content:space-between}.content-around{-ms-flex-line-pack:distribute;align-content:space-around}.content-evenly{-ms-flex-line-pack:space-evenly;align-content:space-evenly}.content-baseline{-ms-flex-line-pack:baseline;align-content:baseline}.content-stretch{-ms-flex-line-pack:stretch;align-content:stretch}.items-start{-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start}.items-end{-webkit-box-align:end;-ms-flex-align:end;align-items:flex-end}.items-center{-webkit-box-align:center;-ms-flex-align:center;align-items:center}.items-baseline{-webkit-box-align:baseline;-ms-flex-align:baseline;align-items:baseline}.items-stretch{-webkit-box-align:stretch;-ms-flex-align:stretch;align-items:stretch}.justify-normal{-webkit-box-pack:normal;-ms-flex-pack:normal;justify-content:normal}.justify-start{-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.justify-end{-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end}.justify-center{-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.justify-between{-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between}.justify-around{-ms-flex-pack:distribute;justify-content:space-around}.justify-evenly{-webkit-box-pack:space-evenly;-ms-flex-pack:space-evenly;justify-content:space-evenly}.justify-stretch{-webkit-box-pack:stretch;-ms-flex-pack:stretch;justify-content:stretch}.justify-items-start{justify-items:start}.justify-items-end{justify-items:end}.justify-items-center{justify-items:center}.justify-items-stretch{justify-items:stretch}.gap-6{gap:1.5rem}.space-x-2>:not([hidden])~:not([hidden]){--tw-space-x-reverse:0;margin-right:calc(.5rem*var(--tw-space-x-reverse));margin-left:calc(.5rem*(1 - var(--tw-space-x-reverse)))}.space-x-4>:not([hidden])~:not([hidden]){--tw-space-x-reverse:0;margin-right:calc(1rem*var(--tw-space-x-reverse));margin-left:calc(1rem*(1 - 
var(--tw-space-x-reverse)))}.space-y-reverse>:not([hidden])~:not([hidden]){--tw-space-y-reverse:1}.space-x-reverse>:not([hidden])~:not([hidden]){--tw-space-x-reverse:1}.divide-x>:not([hidden])~:not([hidden]){--tw-divide-x-reverse:0;border-right-width:calc(1px*var(--tw-divide-x-reverse));border-left-width:calc(1px*(1 - var(--tw-divide-x-reverse)))}.divide-y>:not([hidden])~:not([hidden]){--tw-divide-y-reverse:0;border-top-width:calc(1px*(1 - var(--tw-divide-y-reverse)));border-bottom-width:calc(1px*var(--tw-divide-y-reverse))}.divide-y-reverse>:not([hidden])~:not([hidden]){--tw-divide-y-reverse:1}.divide-x-reverse>:not([hidden])~:not([hidden]){--tw-divide-x-reverse:1}.divide-solid>:not([hidden])~:not([hidden]){border-style:solid}.divide-dashed>:not([hidden])~:not([hidden]){border-style:dashed}.divide-dotted>:not([hidden])~:not([hidden]){border-style:dotted}.divide-double>:not([hidden])~:not([hidden]){border-style:double}.divide-none>:not([hidden])~:not([hidden]){border-style:none}.place-self-auto{place-self:auto}.place-self-start{place-self:start}.place-self-end{place-self:end}.place-self-center{place-self:center}.place-self-stretch{place-self:stretch}.self-auto{-ms-flex-item-align:auto;align-self:auto}.self-start{-ms-flex-item-align:start;align-self:flex-start}.self-end{-ms-flex-item-align:end;align-self:flex-end}.self-center{-ms-flex-item-align:center;align-self:center}.self-stretch{-ms-flex-item-align:stretch;align-self:stretch}.self-baseline{-ms-flex-item-align:baseline;align-self:baseline}.justify-self-auto{justify-self:auto}.justify-self-start{justify-self:start}.justify-self-end{justify-self:end}.justify-self-center{justify-self:center}.justify-self-stretch{justify-self:stretch}.overflow-auto{overflow:auto}.overflow-hidden{overflow:hidden}.overflow-clip{overflow:clip}.overflow-visible{overflow:visible}.overflow-scroll{overflow:scroll}.overflow-x-auto{overflow-x:auto}.overflow-y-auto{overflow-y:auto}.overflow-x-hidden{overflow-x:hidden}.overflow-y-hidden{overflow-y:hidden}.overflow-x-clip{overflow-x:clip}.overflow-y-clip{overflow-y:clip}.overflow-x-visible{overflow-x:visible}.overflow-y-visible{overflow-y:visible}.overflow-x-scroll{overflow-x:scroll}.overflow-y-scroll{overflow-y:scroll}.overscroll-auto{-ms-scroll-chaining:chained;overscroll-behavior:auto}.overscroll-contain{-ms-scroll-chaining:none;overscroll-behavior:contain}.overscroll-none{-ms-scroll-chaining:none;overscroll-behavior:none}.overscroll-y-auto{overscroll-behavior-y:auto}.overscroll-y-contain{overscroll-behavior-y:contain}.overscroll-y-none{overscroll-behavior-y:none}.overscroll-x-auto{overscroll-behavior-x:auto}.overscroll-x-contain{overscroll-behavior-x:contain}.overscroll-x-none{overscroll-behavior-x:none}.scroll-auto{scroll-behavior:auto}.scroll-smooth{scroll-behavior:smooth}.truncate{overflow:hidden;white-space:nowrap}.overflow-ellipsis,.text-ellipsis,.truncate{text-overflow:ellipsis}.text-clip{text-overflow:clip}.hyphens-none{-webkit-hyphens:none;-ms-hyphens:none;hyphens:none}.hyphens-manual{-webkit-hyphens:manual;-ms-hyphens:manual;hyphens:manual}.hyphens-auto{-webkit-hyphens:auto;-ms-hyphens:auto;hyphens:auto}.whitespace-normal{white-space:normal}.whitespace-nowrap{white-space:nowrap}.whitespace-pre{white-space:pre}.whitespace-pre-line{white-space:pre-line}.whitespace-pre-wrap{white-space:pre-wrap}.whitespace-break-spaces{white-space:break-spaces}.break-normal{overflow-wrap:normal;word-break:normal}.break-words{overflow-wrap:break-word}.break-all{word-break:break-all}.break-keep{word-break:keep-all}.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}.rounded-lg{border-radius:.5rem}.rounded-md{border-radius:.375rem}.rounded-b{border-bottom-right-radius:.25rem;border-bottom-left-radius:.25rem}.rounded-e{border-start-end-radius:.25rem;border-end-end-radius:.25rem}.rounded-l{border-top-left-radius:.25rem;border-bottom-left-radius:.25rem}.rounded-r{border-top-right-radius:.25rem;border-bottom-right-radius:.25rem}.rounded-s{border-start-start-radius:.25rem;border-end-start-radius:.25rem}.rounded-t{border-top-left-radius:.25rem;border-top-right-radius:.25rem}.rounded-t-lg{border-top-left-radius:.5rem;border-top-right-radius:.5rem}.rounded-bl{border-bottom-left-radius:.25rem}.rounded-br{border-bottom-right-radius:.25rem}.rounded-ee{border-end-end-radius:.25rem}.rounded-es{border-end-start-radius:.25rem}.rounded-se{border-start-end-radius:.25rem}.rounded-ss{border-start-start-radius:.25rem}.rounded-tl{border-top-left-radius:.25rem}.rounded-tr{border-top-right-radius:.25rem}.\!border{border-width:1px!important}.border{border-width:1px}.border-0{border-width:0}.border-2{border-width:2px}.border-x{border-left-width:1px;border-right-width:1px}.border-y{border-top-width:1px}.border-b,.border-y{border-bottom-width:1px}.border-b-2{border-bottom-width:2px}.border-e{border-inline-end-width:1px}.border-l{border-left-width:1px}.border-l-0{border-left-width:0}.border-r{border-right-width:1px}.border-r-0{border-right-width:0}.border-s{border-inline-start-width:1px}.border-t{border-top-width:1px}.border-t-0{border-top-width:0}.border-solid{border-style:solid}.border-dashed{border-style:dashed}.border-dotted{border-style:dotted}.border-double{border-style:double}.border-hidden{border-style:hidden}.border-none{border-style:none}.border-blueGray-100{--tw-border-opacity:1;border-color:rgb(241 245 249/var(--tw-border-opacity))}.border-blueGray-200{--tw-border-opacity:1;border-color:rgb(226 232 240/var(--tw-border-opacity))}.border-blueGray-300{--tw-border-opacity:1;border-color:rgb(203 213 225/var(--tw-border-opacity))}.border-blueGray-50{--tw-border-opacity:1;border-color:rgb(248 250 252/var(--tw-border-opacity))}.border-blueGray-500{--tw-border-opacity:1;border-color:rgb(100 116 139/var(--tw-border-opacity))}.border-blueGray-600{--tw-border-opacity:1;border-color:rgb(71 85 105/var(--tw-border-opacity))}.border-emerald-700{--tw-border-opacity:1;border-color:rgb(4 120 87/var(--tw-border-opacity))}.border-gray-200{--tw-border-opacity:1;border-color:rgb(229 231 235/var(--tw-border-opacity))}.border-gray-300{--tw-border-opacity:1;border-color:rgb(209 213 219/var(--tw-border-opacity))}.border-sky-500{--tw-border-opacity:1;border-color:rgb(14 165 233/var(--tw-border-opacity))}.border-slate-100{--tw-border-opacity:1;border-color:rgb(241 245 249/var(--tw-border-opacity))}.border-transparent{border-color:transparent}.bg-\[rgb\(255\2c 0\2c 0\)\]{--tw-bg-opacity:1;background-color:rgb(255 0 0/var(--tw-bg-opacity))}.bg-black{--tw-bg-opacity:1;background-color:rgb(0 0 0/var(--tw-bg-opacity))}.bg-blueGray-100{--tw-bg-opacity:1;background-color:rgb(241 245 249/var(--tw-bg-opacity))}.bg-blueGray-200{--tw-bg-opacity:1;background-color:rgb(226 232 240/var(--tw-bg-opacity))}.bg-blueGray-50{--tw-bg-opacity:1;background-color:rgb(248 250 252/var(--tw-bg-opacity))}.bg-blueGray-600{--tw-bg-opacity:1;background-color:rgb(71 85 105/var(--tw-bg-opacity))}.bg-blueGray-700{--tw-bg-opacity:1;background-color:rgb(51 65 85/var(--tw-bg-opacity))}.bg-blueGray-800{--tw-bg-opacity:1;background-color:rgb(30 41 
59/var(--tw-bg-opacity))}.bg-emerald-200{--tw-bg-opacity:1;background-color:rgb(167 243 208/var(--tw-bg-opacity))}.bg-emerald-300{--tw-bg-opacity:1;background-color:rgb(110 231 183/var(--tw-bg-opacity))}.bg-emerald-500{--tw-bg-opacity:1;background-color:rgb(16 185 129/var(--tw-bg-opacity))}.bg-emerald-600{--tw-bg-opacity:1;background-color:rgb(5 150 105/var(--tw-bg-opacity))}.bg-emerald-800{--tw-bg-opacity:1;background-color:rgb(6 95 70/var(--tw-bg-opacity))}.bg-emerald-900{--tw-bg-opacity:1;background-color:rgb(6 78 59/var(--tw-bg-opacity))}.bg-gray-100{--tw-bg-opacity:1;background-color:rgb(243 244 246/var(--tw-bg-opacity))}.bg-gray-200{--tw-bg-opacity:1;background-color:rgb(229 231 235/var(--tw-bg-opacity))}.bg-indigo-500{--tw-bg-opacity:1;background-color:rgb(99 102 241/var(--tw-bg-opacity))}.bg-lightBlue-200{--tw-bg-opacity:1;background-color:rgb(186 230 253/var(--tw-bg-opacity))}.bg-lightBlue-400{--tw-bg-opacity:1;background-color:rgb(56 189 248/var(--tw-bg-opacity))}.bg-lightBlue-500{--tw-bg-opacity:1;background-color:rgb(14 165 233/var(--tw-bg-opacity))}.bg-orange-200{--tw-bg-opacity:1;background-color:rgb(254 215 170/var(--tw-bg-opacity))}.bg-orange-500{--tw-bg-opacity:1;background-color:rgb(249 115 22/var(--tw-bg-opacity))}.bg-pink-500{--tw-bg-opacity:1;background-color:rgb(236 72 153/var(--tw-bg-opacity))}.bg-pink-600{--tw-bg-opacity:1;background-color:rgb(219 39 119/var(--tw-bg-opacity))}.bg-purple-200{--tw-bg-opacity:1;background-color:rgb(233 213 255/var(--tw-bg-opacity))}.bg-purple-500{--tw-bg-opacity:1;background-color:rgb(168 85 247/var(--tw-bg-opacity))}.bg-red-200{--tw-bg-opacity:1;background-color:rgb(254 202 202/var(--tw-bg-opacity))}.bg-red-400{--tw-bg-opacity:1;background-color:rgb(248 113 113/var(--tw-bg-opacity))}.bg-red-500{--tw-bg-opacity:1;background-color:rgb(239 68 68/var(--tw-bg-opacity))}.bg-teal-200{--tw-bg-opacity:1;background-color:rgb(153 246 228/var(--tw-bg-opacity))}.bg-teal-500{--tw-bg-opacity:1;background-color:rgb(20 184 166/var(--tw-bg-opacity))}.bg-transparent{background-color:transparent}.bg-white{--tw-bg-opacity:1;background-color:rgb(255 255 
255/var(--tw-bg-opacity))}.bg-opacity-0{--tw-bg-opacity:0}.bg-none{background-image:none}.decoration-slice{-webkit-box-decoration-break:slice;box-decoration-break:slice}.decoration-clone{-webkit-box-decoration-break:clone;box-decoration-break:clone}.box-decoration-slice{-webkit-box-decoration-break:slice;box-decoration-break:slice}.box-decoration-clone{-webkit-box-decoration-break:clone;box-decoration-break:clone}.bg-cover{background-size:cover}.bg-full{background-size:100%}.bg-fixed{background-attachment:fixed}.bg-local{background-attachment:local}.bg-scroll{background-attachment:scroll}.bg-clip-border{background-clip:border-box}.bg-clip-padding{background-clip:padding-box}.bg-clip-content{background-clip:content-box}.bg-clip-text{-webkit-background-clip:text;background-clip:text}.bg-center{background-position:50%}.bg-repeat{background-repeat:repeat}.bg-no-repeat{background-repeat:no-repeat}.bg-repeat-x{background-repeat:repeat-x}.bg-repeat-y{background-repeat:repeat-y}.bg-repeat-round{background-repeat:round}.bg-repeat-space{background-repeat:space}.bg-origin-border{background-origin:border-box}.bg-origin-padding{background-origin:padding-box}.bg-origin-content{background-origin:content-box}.fill-current{fill:currentColor}.object-contain{-o-object-fit:contain;object-fit:contain}.object-cover{-o-object-fit:cover;object-fit:cover}.object-fill{-o-object-fit:fill;object-fit:fill}.object-none{-o-object-fit:none;object-fit:none}.object-scale-down{-o-object-fit:scale-down;object-fit:scale-down}.p-0{padding:0}.p-1{padding:.25rem}.p-14{padding:3.5rem}.p-16{padding:4rem}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-4{padding:1rem}.p-48{padding:12rem}.p-8{padding:2rem}.px-0{padding-left:0;padding-right:0}.px-0\.5{padding-left:.125rem;padding-right:.125rem}.px-12{padding-left:3rem;padding-right:3rem}.px-2{padding-left:.5rem;padding-right:.5rem}.px-3{padding-left:.75rem;padding-right:.75rem}.px-4{padding-left:1rem;padding-right:1rem}.px-5{padding-left:1.25rem;padding-right:1.25rem}.px-6{padding-left:1.5rem;padding-right:1.5rem}.py-1{padding-top:.25rem;padding-bottom:.25rem}.py-10{padding-top:2.5rem;padding-bottom:2.5rem}.py-12{padding-top:3rem;padding-bottom:3rem}.py-16{padding-top:4rem;padding-bottom:4rem}.py-2{padding-top:.5rem;padding-bottom:.5rem}.py-20{padding-top:5rem;padding-bottom:5rem}.py-3{padding-top:.75rem;padding-bottom:.75rem}.py-4{padding-top:1rem;padding-bottom:1rem}.py-40{padding-top:10rem;padding-bottom:10rem}.py-5{padding-top:1.25rem;padding-bottom:1.25rem}.py-6{padding-top:1.5rem;padding-bottom:1.5rem}.py-8{padding-top:2rem;padding-bottom:2rem}.pb-0{padding-bottom:0}.pb-16{padding-bottom:4rem}.pb-2{padding-bottom:.5rem}.pb-20{padding-bottom:5rem}.pb-3{padding-bottom:.75rem}.pb-32{padding-bottom:8rem}.pb-4{padding-bottom:1rem}.pb-40{padding-bottom:10rem}.pb-6{padding-bottom:1.5rem}.pl-0{padding-left:0}.pl-4{padding-left:1rem}.pr-12{padding-right:3rem}.pr-3{padding-right:.75rem}.pr-4{padding-right:1rem}.pt-0{padding-top:0}.pt-1{padding-top:.25rem}.pt-12{padding-top:3rem}.pt-16{padding-top:4rem}.pt-2{padding-top:.5rem}.pt-20{padding-top:5rem}.pt-3{padding-top:.75rem}.pt-32{padding-top:8rem}.pt-6{padding-top:1.5rem}.pt-8{padding-top:2rem}.text-left{text-align:left}.text-center{text-align:center}.text-right{text-align:right}.text-justify{text-align:justify}.text-start{text-align:start}.text-end{text-align:end}.align-baseline{vertical-align:baseline}.align-top{vertical-align:top}.align-middle{vertical-align:middle}.align-bottom{vertical-align:bottom}.align-text-top{vertical-align:text-top}.align-text-bottom{vertical-align:text-bottom}.align-sub{vertical-align:sub}.align-super{vertical-align:super}.text-2xl{font-size:1.5rem;line-height:2rem}.text-3xl{font-size:1.875rem;line-height:2.25rem}.text-4xl{font-size:2.25rem;line-height:2.5rem}.text-base{font-size:1rem;line-height:1.5rem}.text-lg{font-size:1.125rem;line-height:1.75rem}.text-sm{font-size:.875rem;line-height:1.25rem}.text-xl{font-size:1.25rem;line-height:1.75rem}.text-xs{font-size:.75rem;line-height:1rem}.font-bold{font-weight:700}.font-light{font-weight:300}.font-normal{font-weight:400}.font-semibold{font-weight:600}.uppercase{text-transform:uppercase}.lowercase{text-transform:lowercase}.capitalize{text-transform:capitalize}.normal-case{text-transform:none}.italic{font-style:italic}.not-italic{font-style:normal}.normal-nums{font-variant-numeric:normal}.ordinal{--tw-ordinal:ordinal}.ordinal,.slashed-zero{font-variant-numeric:var(--tw-ordinal) var(--tw-slashed-zero) var(--tw-numeric-figure) var(--tw-numeric-spacing) var(--tw-numeric-fraction)}.slashed-zero{--tw-slashed-zero:slashed-zero}.lining-nums{--tw-numeric-figure:lining-nums}.lining-nums,.oldstyle-nums{font-variant-numeric:var(--tw-ordinal) var(--tw-slashed-zero) var(--tw-numeric-figure) var(--tw-numeric-spacing) var(--tw-numeric-fraction)}.oldstyle-nums{--tw-numeric-figure:oldstyle-nums}.proportional-nums{--tw-numeric-spacing:proportional-nums}.proportional-nums,.tabular-nums{font-variant-numeric:var(--tw-ordinal) var(--tw-slashed-zero) var(--tw-numeric-figure) var(--tw-numeric-spacing) var(--tw-numeric-fraction)}.tabular-nums{--tw-numeric-spacing:tabular-nums}.diagonal-fractions{--tw-numeric-fraction:diagonal-fractions}.diagonal-fractions,.stacked-fractions{font-variant-numeric:var(--tw-ordinal) var(--tw-slashed-zero) var(--tw-numeric-figure) var(--tw-numeric-spacing) var(--tw-numeric-fraction)}.stacked-fractions{--tw-numeric-fraction:stacked-fractions}.leading-none{line-height:1}.leading-normal{line-height:1.5}.leading-relaxed{line-height:1.625}.leading-snug{line-height:1.375}.leading-tight{line-height:1.25}.tracking-wide{letter-spacing:.025em}.text-\[\#336699\]\/\[\.35\]{color:rgba(51,102,153,.35)}.text-black{--tw-text-opacity:1;color:rgb(0 0 0/var(--tw-text-opacity))}.text-blue-500{--tw-text-opacity:1;color:rgb(59 130 246/var(--tw-text-opacity))}.text-blueGray-100{--tw-text-opacity:1;color:rgb(241 245 249/var(--tw-text-opacity))}.text-blueGray-200{--tw-text-opacity:1;color:rgb(226 232 240/var(--tw-text-opacity))}.text-blueGray-300{--tw-text-opacity:1;color:rgb(203 213 225/var(--tw-text-opacity))}.text-blueGray-400{--tw-text-opacity:1;color:rgb(148 163 184/var(--tw-text-opacity))}.text-blueGray-500{--tw-text-opacity:1;color:rgb(100 116 139/var(--tw-text-opacity))}.text-blueGray-600{--tw-text-opacity:1;color:rgb(71 85 105/var(--tw-text-opacity))}.text-blueGray-700{--tw-text-opacity:1;color:rgb(51 65 85/var(--tw-text-opacity))}.text-emerald-300{--tw-text-opacity:1;color:rgb(110 231 183/var(--tw-text-opacity))}.text-emerald-500{--tw-text-opacity:1;color:rgb(16 185 129/var(--tw-text-opacity))}.text-emerald-600{--tw-text-opacity:1;color:rgb(5 150 105/var(--tw-text-opacity))}.text-gray-500{--tw-text-opacity:1;color:rgb(107 114 128/var(--tw-text-opacity))}.text-gray-600{--tw-text-opacity:1;color:rgb(75 85 99/var(--tw-text-opacity))}.text-gray-700{--tw-text-opacity:1;color:rgb(55 65 81/var(--tw-text-opacity))}.text-gray-800{--tw-text-opacity:1;color:rgb(31 41 55/var(--tw-text-opacity))}.text-gray-900{--tw-text-opacity:1;color:rgb(17 24 
39/var(--tw-text-opacity))}.text-indigo-600{--tw-text-opacity:1;color:rgb(79 70 229/var(--tw-text-opacity))}.text-lightBlue-600{--tw-text-opacity:1;color:rgb(2 132 199/var(--tw-text-opacity))}.text-orange-500{--tw-text-opacity:1;color:rgb(249 115 22/var(--tw-text-opacity))}.text-orange-600{--tw-text-opacity:1;color:rgb(234 88 12/var(--tw-text-opacity))}.text-red-400{--tw-text-opacity:1;color:rgb(248 113 113/var(--tw-text-opacity))}.text-red-500{--tw-text-opacity:1;color:rgb(239 68 68/var(--tw-text-opacity))}.text-sky-500{--tw-text-opacity:1;color:rgb(14 165 233/var(--tw-text-opacity))}.text-slate-300{--tw-text-opacity:1;color:rgb(203 213 225/var(--tw-text-opacity))}.text-teal-500{--tw-text-opacity:1;color:rgb(20 184 166/var(--tw-text-opacity))}.text-white{--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.underline{text-decoration-line:underline}.overline{text-decoration-line:overline}.line-through{text-decoration-line:line-through}.no-underline{text-decoration-line:none}.decoration-solid{text-decoration-style:solid}.decoration-double{text-decoration-style:double}.decoration-dotted{text-decoration-style:dotted}.decoration-dashed{text-decoration-style:dashed}.decoration-wavy{text-decoration-style:wavy}.antialiased{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.subpixel-antialiased{-webkit-font-smoothing:auto;-moz-osx-font-smoothing:auto}.placeholder-blueGray-300::-webkit-input-placeholder{--tw-placeholder-opacity:1;color:rgb(203 213 225/var(--tw-placeholder-opacity))}.placeholder-blueGray-300::-moz-placeholder{--tw-placeholder-opacity:1;color:rgb(203 213 225/var(--tw-placeholder-opacity))}.placeholder-blueGray-300:-ms-input-placeholder{--tw-placeholder-opacity:1;color:rgb(203 213 225/var(--tw-placeholder-opacity))}.placeholder-blueGray-300::-ms-input-placeholder{--tw-placeholder-opacity:1;color:rgb(203 213 225/var(--tw-placeholder-opacity))}.placeholder-blueGray-300::placeholder{--tw-placeholder-opacity:1;color:rgb(203 213 
225/var(--tw-placeholder-opacity))}.opacity-50{opacity:.5}.opacity-75{opacity:.75}.bg-blend-normal{background-blend-mode:normal}.bg-blend-multiply{background-blend-mode:multiply}.bg-blend-screen{background-blend-mode:screen}.bg-blend-overlay{background-blend-mode:overlay}.bg-blend-darken{background-blend-mode:darken}.bg-blend-lighten{background-blend-mode:lighten}.bg-blend-color-dodge{background-blend-mode:color-dodge}.bg-blend-color-burn{background-blend-mode:color-burn}.bg-blend-hard-light{background-blend-mode:hard-light}.bg-blend-soft-light{background-blend-mode:soft-light}.bg-blend-difference{background-blend-mode:difference}.bg-blend-exclusion{background-blend-mode:exclusion}.bg-blend-hue{background-blend-mode:hue}.bg-blend-saturation{background-blend-mode:saturation}.bg-blend-color{background-blend-mode:color}.bg-blend-luminosity{background-blend-mode:luminosity}.mix-blend-normal{mix-blend-mode:normal}.mix-blend-multiply{mix-blend-mode:multiply}.mix-blend-screen{mix-blend-mode:screen}.mix-blend-overlay{mix-blend-mode:overlay}.mix-blend-darken{mix-blend-mode:darken}.mix-blend-lighten{mix-blend-mode:lighten}.mix-blend-color-dodge{mix-blend-mode:color-dodge}.mix-blend-color-burn{mix-blend-mode:color-burn}.mix-blend-hard-light{mix-blend-mode:hard-light}.mix-blend-soft-light{mix-blend-mode:soft-light}.mix-blend-difference{mix-blend-mode:difference}.mix-blend-exclusion{mix-blend-mode:exclusion}.mix-blend-hue{mix-blend-mode:hue}.mix-blend-saturation{mix-blend-mode:saturation}.mix-blend-color{mix-blend-mode:color}.mix-blend-luminosity{mix-blend-mode:luminosity}.mix-blend-plus-lighter{mix-blend-mode:plus-lighter}.\!shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,.1),0 1px 2px -1px rgba(0,0,0,.1)!important;--tw-shadow-colored:0 1px 3px 0 var(--tw-shadow-color),0 1px 2px -1px var(--tw-shadow-color)!important;-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)!important;box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)!important}.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,.1),0 1px 2px -1px rgba(0,0,0,.1);--tw-shadow-colored:0 1px 3px 0 var(--tw-shadow-color),0 1px 2px -1px var(--tw-shadow-color)}.shadow,.shadow-lg{-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.shadow-lg{--tw-shadow:0 10px 15px -3px rgba(0,0,0,.1),0 4px 6px -4px rgba(0,0,0,.1);--tw-shadow-colored:0 10px 15px -3px var(--tw-shadow-color),0 4px 6px -4px var(--tw-shadow-color)}.shadow-none{--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000}.shadow-none,.shadow-sm{-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.shadow-sm{--tw-shadow:0 1px 2px 0 rgba(0,0,0,.05);--tw-shadow-colored:0 1px 2px 0 var(--tw-shadow-color)}.shadow-xl{--tw-shadow:0 20px 25px -5px rgba(0,0,0,.1),0 8px 10px -6px rgba(0,0,0,.1);--tw-shadow-colored:0 20px 25px -5px var(--tw-shadow-color),0 8px 10px -6px var(--tw-shadow-color);-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.outline-none{outline:2px solid 
transparent;outline-offset:2px}.outline{outline-style:solid}.outline-dashed{outline-style:dashed}.outline-dotted{outline-style:dotted}.outline-double{outline-style:double}.ring{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.ring-inset{--tw-ring-inset:inset}.blur{--tw-blur:blur(8px)}.blur,.drop-shadow{-webkit-filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.drop-shadow{--tw-drop-shadow:drop-shadow(0 1px 2px rgba(0,0,0,.1)) drop-shadow(0 1px 1px rgba(0,0,0,.06))}.grayscale{--tw-grayscale:grayscale(100%)}.grayscale,.invert{-webkit-filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.invert{--tw-invert:invert(100%)}.sepia{--tw-sepia:sepia(100%);-webkit-filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.\!filter{-webkit-filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)!important;filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)!important}.filter{-webkit-filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);filter:var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow)}.filter-none{-webkit-filter:none;filter:none}.backdrop-blur{--tw-backdrop-blur:blur(8px)}.backdrop-blur,.backdrop-grayscale{-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) 
var(--tw-backdrop-sepia)}.backdrop-grayscale{--tw-backdrop-grayscale:grayscale(100%)}.backdrop-invert{--tw-backdrop-invert:invert(100%)}.backdrop-invert,.backdrop-sepia{-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}.backdrop-sepia{--tw-backdrop-sepia:sepia(100%)}.backdrop-filter{-webkit-backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia);backdrop-filter:var(--tw-backdrop-blur) var(--tw-backdrop-brightness) var(--tw-backdrop-contrast) var(--tw-backdrop-grayscale) var(--tw-backdrop-hue-rotate) var(--tw-backdrop-invert) var(--tw-backdrop-opacity) var(--tw-backdrop-saturate) var(--tw-backdrop-sepia)}.backdrop-filter-none{-webkit-backdrop-filter:none;backdrop-filter:none}.\!transition{-webkit-transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,-webkit-box-shadow,-webkit-transform,-webkit-filter,-webkit-backdrop-filter!important;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,-webkit-box-shadow,-webkit-transform,-webkit-filter,-webkit-backdrop-filter!important;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter!important;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-box-shadow,-webkit-transform,-webkit-filter,-webkit-backdrop-filter!important;-webkit-transition-timing-function:cubic-bezier(.4,0,.2,1)!important;transition-timing-function:cubic-bezier(.4,0,.2,1)!important;-webkit-transition-duration:.15s!important;transition-duration:.15s!important}.transition{-webkit-transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,-webkit-box-shadow,-webkit-transform,-webkit-filter,-webkit-backdrop-filter;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,-webkit-box-shadow,-webkit-transform,-webkit-filter,-webkit-backdrop-filter;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter;transition-property:color,background-color,border-color,text-decoration-color,fill,stroke,opacity,box-shadow,transform,filter,backdrop-filter,-webkit-box-shadow,-webkit-transform,-webkit-filter,-webkit-backdrop-filter;-webkit-transition-timing-function:cubic-bezier(.4,0,.2,1);transition-timing-function:cubic-bezier(.4,0,.2,1);-webkit-transition-duration:.15s;transition-duration:.15s}.transition-all{-webkit-transition-property:all;transition-property:all;-webkit-transition-timing-function:cubic-bezier(.4,0,.2,1);transition-timing-function:cubic-bezier(.4,0,.2,1)}.duration-150,.transition-all{-webkit-transition-duration:.15s;transition-duration:.15s}.ease-in{-webkit-transition-timing-function:cubic-bezier(.4,0,1,1);transition-timing-function:cubic-bezier(.4,0,1,1)}.ease-in-out{-webkit-transition-timing-function:cubic-bezier(.4,0,.2,1);transition-timing-function:cubic-bezier(.4,0,.2,1)}.ease-linear{-webkit-transition-timing-function:linear;transition-timing-function:linear}.ease-out{-webkit-transition-timing-function:cubic-bezier(0,0,.2,1);transition-timing-function:cubic-bezier(0,0,.2,1)}.content-\[\'this-is-also-valid\]-weirdly-enough\'\]{--tw-content:"this-is-also-valid]-weirdly-enough";content:var(--tw-content)}.\[-\:\=\]{-:=}.\[-\:\^\/\+\#\]{-:^/+#}.\[-\:\|\\s\]{-:|\s}.\[-\:\|\]{-:|}.\[-a-zA-Z0-9_\:\.\]{-a-z-a--z0-9_:.}.\[a-zA-Z-\:\#\]{a-z-a--z-:#}.\[contenthash\:8\]{contenthash:8}.\[hash\:8\]{hash:8}.\[hash\:base64\]{hash:base64}.\[key\:string\]{key:string}.\[size\:\%2d\]{size:%2d}@media (min-width:640px){@media (min-width:1536px){.sm\:container{max-width:1536px}}.sm\:container{width:100%}@media (min-width:640px){.sm\:container{max-width:640px}}@media (min-width:768px){.sm\:container{max-width:768px}}@media (min-width:1024px){.sm\:container{max-width:1024px}}@media (min-width:1280px){.sm\:container{max-width:1280px}}@media (min-width:1536px){.sm\:container{max-width:1280px}}}.first\:ml-0:first-child{margin-left:0}.last\:mr-0:last-child{margin-right:0}.hover\:font-bold:hover{font-weight:700}.hover\:text-blueGray-300:hover{--tw-text-opacity:1;color:rgb(203 213 225/var(--tw-text-opacity))}.hover\:text-blueGray-500:hover{--tw-text-opacity:1;color:rgb(100 116 139/var(--tw-text-opacity))}.hover\:text-blueGray-700:hover{--tw-text-opacity:1;color:rgb(51 65 85/var(--tw-text-opacity))}.hover\:text-blueGray-800:hover{--tw-text-opacity:1;color:rgb(30 41 59/var(--tw-text-opacity))}.hover\:text-emerald-600:hover{--tw-text-opacity:1;color:rgb(5 150 105/var(--tw-text-opacity))}.hover\:shadow-lg:hover{--tw-shadow:0 10px 15px -3px rgba(0,0,0,.1),0 4px 6px -4px rgba(0,0,0,.1);--tw-shadow-colored:0 10px 15px -3px var(--tw-shadow-color),0 4px 6px -4px var(--tw-shadow-color)}.hover\:shadow-lg:hover,.hover\:shadow-md:hover{-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.hover\:shadow-md:hover{--tw-shadow:0 4px 6px -1px rgba(0,0,0,.1),0 2px 4px -2px rgba(0,0,0,.1);--tw-shadow-colored:0 4px 6px -1px var(--tw-shadow-color),0 2px 4px -2px var(--tw-shadow-color)}.before\:hover\:text-center:hover:before,.hover\:before\:text-center:hover:before{content:var(--tw-content);text-align:center}.focus\:border-black:focus{--tw-border-opacity:1;border-color:rgb(0 0 0/var(--tw-border-opacity))}.focus\:border-gray-300:focus{--tw-border-opacity:1;border-color:rgb(209 213 219/var(--tw-border-opacity))}.focus\:border-gray-500:focus{--tw-border-opacity:1;border-color:rgb(107 114 128/var(--tw-border-opacity))}.focus\:border-indigo-300:focus{--tw-border-opacity:1;border-color:rgb(165 180 252/var(--tw-border-opacity))}.focus\:border-transparent:focus{border-color:transparent}.focus\:bg-gray-200:focus{--tw-bg-opacity:1;background-color:rgb(229 231 235/var(--tw-bg-opacity))}.focus\:bg-white:focus{--tw-bg-opacity:1;background-color:rgb(255 255 255/var(--tw-bg-opacity))}.focus\:outline-none:focus{outline:2px solid transparent;outline-offset:2px}.focus\:ring:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) 
var(--tw-ring-color)}.focus\:ring-0:focus,.focus\:ring:focus{-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.focus\:ring-0:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(var(--tw-ring-offset-width)) var(--tw-ring-color)}.focus\:ring-1:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(1px + var(--tw-ring-offset-width)) var(--tw-ring-color);-webkit-box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow,0 0 #0000)}.focus\:ring-black:focus{--tw-ring-opacity:1;--tw-ring-color:rgb(0 0 0/var(--tw-ring-opacity))}.focus\:ring-gray-500:focus{--tw-ring-opacity:1;--tw-ring-color:rgb(107 114 128/var(--tw-ring-opacity))}.focus\:ring-indigo-200:focus{--tw-ring-opacity:1;--tw-ring-color:rgb(199 210 254/var(--tw-ring-opacity))}.focus\:ring-opacity-50:focus{--tw-ring-opacity:0.5}.focus\:ring-offset-0:focus{--tw-ring-offset-width:0px}.focus\:ring-offset-2:focus{--tw-ring-offset-width:2px}.focus\:hover\:text-center:hover:focus,.hover\:focus\:text-center:focus:hover{text-align:center}.active\:bg-blueGray-50:active{--tw-bg-opacity:1;background-color:rgb(248 250 252/var(--tw-bg-opacity))}.active\:bg-blueGray-600:active{--tw-bg-opacity:1;background-color:rgb(71 85 105/var(--tw-bg-opacity))}.active\:bg-emerald-600:active{--tw-bg-opacity:1;background-color:rgb(5 150 105/var(--tw-bg-opacity))}.active\:bg-indigo-600:active{--tw-bg-opacity:1;background-color:rgb(79 70 229/var(--tw-bg-opacity))}.active\:bg-red-600:active{--tw-bg-opacity:1;background-color:rgb(220 38 38/var(--tw-bg-opacity))}@media (min-width:640px){.sm\:mr-2{margin-right:.5rem}.sm\:mt-0{margin-top:0}.sm\:w-6\/12{width:50%}.sm\:pt-0{padding-top:0}.sm\:underline{text-decoration-line:underline}}@media 
(min-width:768px){.md\:fixed{position:fixed}.md\:relative{position:relative}.md\:bottom-0{bottom:0}.md\:left-0{left:0}.md\:top-0{top:0}.md\:mb-4{margin-bottom:1rem}.md\:mt-4{margin-top:1rem}.md\:mt-40{margin-top:10rem}.md\:block{display:block}.md\:flex{display:-webkit-box;display:-ms-flexbox;display:flex}.md\:hidden{display:none}.md\:min-h-full{min-height:100%}.md\:w-4\/12{width:33.333333%}.md\:w-6\/12{width:50%}.md\:w-64{width:16rem}.md\:w-8\/12{width:66.666667%}.md\:min-w-full{min-width:100%}.md\:max-w-4xl{max-width:56rem}.md\:grid-cols-2{grid-template-columns:repeat(2,minmax(0,1fr))}.md\:flex-row{-webkit-box-orient:horizontal;-ms-flex-direction:row;flex-direction:row}.md\:flex-col,.md\:flex-row{-webkit-box-direction:normal}.md\:flex-col{-webkit-box-orient:vertical;-ms-flex-direction:column;flex-direction:column}.md\:flex-nowrap{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.md\:items-stretch{-webkit-box-align:stretch;-ms-flex-align:stretch;align-items:stretch}.md\:justify-start{-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.md\:justify-end{-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end}.md\:justify-between{-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between}.md\:overflow-hidden{overflow:hidden}.md\:overflow-y-auto{overflow-y:auto}.md\:px-10{padding-left:2.5rem;padding-right:2.5rem}.md\:px-4{padding-left:1rem;padding-right:1rem}.md\:pb-2{padding-bottom:.5rem}.md\:pr-12{padding-right:3rem}.md\:text-left{text-align:left}.md\:opacity-100{opacity:1}.md\:shadow-none{--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}}@media (min-width:1024px){.lg\:static{position:static}.lg\:order-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.lg\:order-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.lg\:-ml-16{margin-left:-4rem}.lg\:ml-auto{margin-left:auto}.lg\:mr-4{margin-right:1rem}.lg\:block{display:block}.lg\:inline-block{display:inline-block}.lg\:flex{display:-webkit-box;display:-ms-flexbox;display:flex}.lg\:hidden{display:none}.lg\:w-3\/12{width:25%}.lg\:w-4\/12{width:33.333333%}.lg\:w-6\/12{width:50%}.lg\:w-9\/12{width:75%}.lg\:w-auto{width:auto}.lg\:flex-row{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.lg\:justify-start{-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.lg\:self-center{-ms-flex-item-align:center;align-self:center}.lg\:px-10{padding-left:2.5rem;padding-right:2.5rem}.lg\:py-2{padding-top:.5rem;padding-bottom:.5rem}.lg\:pt-4{padding-top:1rem}.lg\:text-left{text-align:left}.lg\:text-right{text-align:right}.lg\:text-white{--tw-text-opacity:1;color:rgb(255 255 255/var(--tw-text-opacity))}.lg\:shadow-none{--tw-shadow:0 0 #0000;--tw-shadow-colored:0 0 #0000;-webkit-box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow);box-shadow:var(--tw-ring-offset-shadow,0 0 #0000),var(--tw-ring-shadow,0 0 #0000),var(--tw-shadow)}.lg\:hover\:text-blueGray-200:hover{--tw-text-opacity:1;color:rgb(226 232 240/var(--tw-text-opacity))}}@media (prefers-color-scheme:dark){@media (min-width:1024px){.dark\:lg\:hover\:\[paint-order\:markers\]:hover{paint-order:markers}}}@media 
(min-width:1280px){.xl\:mb-0{margin-bottom:0}.xl\:w-3\/12{width:25%}.xl\:w-4\/12{width:33.333333%}.xl\:w-6\/12{width:50%}.xl\:w-8\/12{width:66.666667%}}.mdcode code{white-space:pre-wrap}[data-v-md-anchor]{cursor:pointer}.vuepress-markdown-body code[class*=v-md-prism-],.vuepress-markdown-body pre[class*=v-md-prism-]{color:#ccc;font-size:1em;font-family:Consolas,Monaco,Andale Mono,Ubuntu Mono,monospace;line-height:1.5;white-space:pre;text-align:left;word-wrap:normal;word-break:normal;word-spacing:normal;-webkit-hyphens:none;-ms-hyphens:none;hyphens:none;background:none}.vuepress-markdown-body>:first-child,.vuepress-markdown-body>div[data-v-md-line]:first-child>:first-child{margin-top:0!important}.vuepress-markdown-body>:last-child,.vuepress-markdown-body>div[data-v-md-line]:last-child>:last-child{margin-bottom:0!important}.vuepress-markdown-body pre[class*=v-md-prism-]{margin:.5em 0;padding:1em;overflow:auto}.vuepress-markdown-body :not(pre)>code[class*=v-md-prism-],.vuepress-markdown-body pre[class*=v-md-prism-]{background:#2d2d2d}.vuepress-markdown-body :not(pre)>code[class*=v-md-prism-]{padding:.1em;white-space:normal;border-radius:.3em}.vuepress-markdown-body .token.block-comment,.vuepress-markdown-body .token.cdata,.vuepress-markdown-body .token.comment,.vuepress-markdown-body .token.doctype,.vuepress-markdown-body .token.prolog{color:#999}.vuepress-markdown-body .token.punctuation{color:#ccc}.vuepress-markdown-body .token.attr-name,.vuepress-markdown-body .token.deleted,.vuepress-markdown-body .token.namespace,.vuepress-markdown-body .token.tag{color:#e2777a}.vuepress-markdown-body .token.function-name{color:#6196cc}.vuepress-markdown-body .token.boolean,.vuepress-markdown-body .token.function,.vuepress-markdown-body .token.number{color:#f08d49}.vuepress-markdown-body .token.class-name,.vuepress-markdown-body .token.constant,.vuepress-markdown-body .token.property,.vuepress-markdown-body .token.symbol{color:#f8c555}.vuepress-markdown-body .token.atrule,.vuepress-markdown-body .token.builtin,.vuepress-markdown-body .token.important,.vuepress-markdown-body .token.keyword,.vuepress-markdown-body .token.selector{color:#cc99cd}.vuepress-markdown-body .token.attr-value,.vuepress-markdown-body .token.char,.vuepress-markdown-body .token.regex,.vuepress-markdown-body .token.string,.vuepress-markdown-body .token.variable{color:#7ec699}.vuepress-markdown-body .token.entity,.vuepress-markdown-body .token.operator,.vuepress-markdown-body .token.url{color:#67cdcc}.vuepress-markdown-body .token.bold,.vuepress-markdown-body .token.important{font-weight:700}.vuepress-markdown-body .token.italic{font-style:italic}.vuepress-markdown-body .token.entity{cursor:help}.vuepress-markdown-body .token.inserted{color:green}.vuepress-markdown-body code{margin:0;padding:.25rem .5rem;color:#476582;font-size:.85em;background-color:rgba(27,31,35,.05);border-radius:3px}.vuepress-markdown-body code .token.deleted{color:#ec5975}.vuepress-markdown-body code .token.inserted{color:#3eaf7c}.vuepress-markdown-body pre,.vuepress-markdown-body pre[class*=v-md-prism-]{margin:.85rem 0;padding:1.25rem 1.5rem;overflow:auto;line-height:1.4;background-color:#282c34;border-radius:6px}.vuepress-markdown-body pre code,.vuepress-markdown-body pre[class*=v-md-prism-] code{padding:0;color:#fff;background-color:initial;border-radius:0}.vuepress-markdown-body div[class*=v-md-pre-wrapper-]{position:relative;background-color:#282c34;border-radius:6px}.vuepress-markdown-body div[class*=v-md-pre-wrapper-] pre,.vuepress-markdown-body 
div[class*=v-md-pre-wrapper-] pre[class*=v-md-prism-]{position:relative;z-index:1;background:transparent}.vuepress-markdown-body div[class*=v-md-pre-wrapper-]:before{position:absolute;top:.8em;right:1em;z-index:3;color:hsla(0,0%,100%,.4);font-size:.75rem}.vuepress-markdown-body div[class*=v-md-pre-wrapper-]:not(.line-numbers-mode) .line-numbers-wrapper{display:none}.vuepress-markdown-body div[class*=v-md-pre-wrapper-].line-numbers-mode pre{padding-left:4.5rem;vertical-align:middle}.vuepress-markdown-body div[class*=v-md-pre-wrapper-].line-numbers-mode .line-numbers-wrapper{position:absolute;top:0;width:3.5rem;padding:1.25rem 0;color:hsla(0,0%,100%,.3);line-height:1.4;text-align:center}.vuepress-markdown-body div[class*=v-md-pre-wrapper-].line-numbers-mode .line-numbers-wrapper br{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.vuepress-markdown-body div[class*=v-md-pre-wrapper-].line-numbers-mode .line-numbers-wrapper .line-number{position:relative;z-index:4;font-size:.85em;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}.vuepress-markdown-body div[class*=v-md-pre-wrapper-].line-numbers-mode:after{position:absolute;top:0;left:0;z-index:2;width:3.5rem;height:100%;background-color:#282c34;border-right:1px solid rgba(0,0,0,.66);border-radius:6px 0 0 6px;content:""}.vuepress-markdown-body div[class~=v-md-pre-wrapper-js]:before{content:"js"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-ts]:before{content:"ts"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-html]:before{content:"html"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-md]:before{content:"md"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-vue]:before{content:"vue"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-css]:before{content:"css"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-sass]:before{content:"sass"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-scss]:before{content:"scss"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-less]:before{content:"less"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-stylus]:before{content:"stylus"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-go]:before{content:"go"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-java]:before{content:"java"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-c]:before{content:"c"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-sh]:before{content:"sh"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-yaml]:before{content:"yaml"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-py]:before{content:"py"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-docker]:before{content:"docker"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-dockerfile]:before{content:"dockerfile"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-makefile]:before{content:"makefile"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-javascript]:before{content:"js"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-typescript]:before{content:"ts"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-markup]:before{content:"html"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-markdown]:before{content:"md"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-json]:before{content:"json"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-ruby]:before{content:"rb"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-python]:before{content:"py"}.vuepress-markdown-body div[class~=v-md-pre-wrapper-bash]:before{content:"sh"}.vuepress-markdown-body 
div[class~=v-md-pre-wrapper-php]:before{content:"php"}.vuepress-markdown-body .arrow{display:inline-block;width:0;height:0}.vuepress-markdown-body .arrow.up{border-bottom:6px solid #ccc}.vuepress-markdown-body .arrow.down,.vuepress-markdown-body .arrow.up{border-right:4px solid transparent;border-left:4px solid transparent}.vuepress-markdown-body .arrow.down{border-top:6px solid #ccc}.vuepress-markdown-body .arrow.right{border-left:6px solid #ccc}.vuepress-markdown-body .arrow.left,.vuepress-markdown-body .arrow.right{border-top:4px solid transparent;border-bottom:4px solid transparent}.vuepress-markdown-body .arrow.left{border-right:6px solid #ccc}.vuepress-markdown-body:not(.custom){padding:2rem 2.5rem}@media (max-width:959px){.vuepress-markdown-body:not(.custom){padding:2rem}}@media (max-width:419px){.vuepress-markdown-body:not(.custom){padding:1.5rem}}.vuepress-markdown-body .table-of-contents .badge{vertical-align:middle}.vuepress-markdown-body{color:#2c3e50;font-size:18px;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;background-color:#fff}.vuepress-markdown-body:not(.custom) a:hover{text-decoration:underline}.vuepress-markdown-body:not(.custom) p.demo{padding:1rem 1.5rem;border:1px solid #ddd;border-radius:4px}.vuepress-markdown-body:not(.custom) img{max-width:100%}.vuepress-markdown-body.custom{margin:0;padding:0}.vuepress-markdown-body.custom img{max-width:100%}.vuepress-markdown-body a{font-weight:500;text-decoration:none}.vuepress-markdown-body a,.vuepress-markdown-body p a code{color:#3eaf7c}.vuepress-markdown-body p a code{font-weight:400}.vuepress-markdown-body kbd{padding:0 .15em;background:#eee;border:.15rem solid #ddd;border-bottom:.25rem solid #ddd;border-radius:.15rem}.vuepress-markdown-body blockquote{margin:1rem 0;padding:.25rem 0 .25rem 1rem;color:#999;font-size:1rem;border-left:.2rem solid #dfe2e5}.vuepress-markdown-body blockquote>p{margin:0}.vuepress-markdown-body ol,.vuepress-markdown-body ul{margin:1em 0;padding-left:1.2em}.vuepress-markdown-body strong{font-weight:600}.vuepress-markdown-body h1,.vuepress-markdown-body h2,.vuepress-markdown-body h3,.vuepress-markdown-body h4,.vuepress-markdown-body h5,.vuepress-markdown-body h6{font-weight:600;line-height:1.25}.vuepress-markdown-body h1{margin:.67em 0;font-size:2.2rem}.vuepress-markdown-body h2{margin:.83em 0;padding-bottom:.3rem;font-size:1.65rem;border-bottom:1px solid #eaecef}.vuepress-markdown-body h3{margin:1em 0;font-size:1.35rem}.vuepress-markdown-body h4{margin:1.33em 0}.vuepress-markdown-body h5{margin:1.67em 0}.vuepress-markdown-body h6{margin:2.33em 0}.vuepress-markdown-body em,.vuepress-markdown-body i{font-style:italic}.vuepress-markdown-body ul{list-style-type:disc}.vuepress-markdown-body ol ul,.vuepress-markdown-body ul ul{list-style-type:circle}.vuepress-markdown-body ol ol ul,.vuepress-markdown-body ol ul ul,.vuepress-markdown-body ul ol ul,.vuepress-markdown-body ul ul ul{list-style-type:square}.vuepress-markdown-body ol{list-style-type:decimal}.vuepress-markdown-body .line-number,.vuepress-markdown-body code,.vuepress-markdown-body kbd{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.vuepress-markdown-body ol,.vuepress-markdown-body p,.vuepress-markdown-body ul{line-height:1.7}.vuepress-markdown-body hr{border:0;border-top:1px solid #eaecef}.vuepress-markdown-body table{display:block;margin:1rem 
0;overflow-x:auto;border-collapse:collapse}.vuepress-markdown-body tr{border-top:1px solid #dfe2e5}.vuepress-markdown-body tr:nth-child(2n){background-color:#f6f8fa}.vuepress-markdown-body td,.vuepress-markdown-body th{padding:.6em 1em;border:1px solid #dfe2e5}.vuepress-markdown-body .v-md-svg-outbound{position:relative;top:-1px;display:inline-block;color:#aaa;vertical-align:middle}@media (max-width:419px){.vuepress-markdown-body h1{font-size:1.9rem}.vuepress-markdown-body div[class*=v-md-pre-wrapper-]{margin:.85rem -1.5rem;border-radius:0}}.v-md-plugin-tip p{margin-top:1em;margin-bottom:1em}.v-md-plugin-tip .v-md-plugin-tip-title{margin-bottom:-.4rem;font-weight:600}.v-md-plugin-tip.danger,.v-md-plugin-tip.tip,.v-md-plugin-tip.warning{margin:1rem 0;padding:.1rem 1.5rem;border-left-width:.5rem;border-left-style:solid}.v-md-plugin-tip.tip{background-color:#f3f5f7;border-color:#42b983}.v-md-plugin-tip.warning{color:#6b5900;background-color:rgba(255,229,100,.3);border-color:#e7c000}.v-md-plugin-tip.warning .v-md-plugin-tip-title{color:#b29400}.v-md-plugin-tip.warning a{color:#2c3e50}.v-md-plugin-tip.danger{color:#4d0000;background-color:#ffe6e6;border-color:#c00}.v-md-plugin-tip.danger .v-md-plugin-tip-title{color:#900}.v-md-plugin-tip.danger a{color:#2c3e50}.v-md-plugin-tip.details{position:relative;display:block;margin:1.6em 0;padding:1.6em;background-color:#eee;border-radius:2px}.v-md-plugin-tip.details h4{margin-top:0}.v-md-plugin-tip.details figure:last-child,.v-md-plugin-tip.details p:last-child{margin-bottom:0;padding-bottom:0}.v-md-plugin-tip.details summary{outline:none;cursor:pointer}
 
spaces/Alexxggs/ggvpnewen/constants.py DELETED
@@ -1,7 +0,0 @@
- import numpy as np
-
- MUBERT_TAGS_STRING = 'рэп,russian,tribal,action,kids,neo-classic,run 130,pumped,jazz / funk,ethnic,dubtechno,reggae,acid jazz,liquidfunk,funk,witch house,tech house,underground,artists,mystical,disco,sensorium,r&b,agender,psychedelic trance / psytrance,peaceful,run 140,piano,run 160,setting,meditation,christmas,ambient,horror,cinematic,electro house,idm,bass,minimal,underscore,drums,glitchy,beautiful,technology,tribal house,country pop,jazz & funk,documentary,space,classical,valentines,chillstep,experimental,trap,new jack swing,drama,post-rock,tense,corporate,neutral,happy,analog,funky,spiritual,sberzvuk special,chill hop,dramatic,catchy,holidays,fitness 90,optimistic,orchestra,acid techno,energizing,romantic,minimal house,breaks,hyper pop,warm up,dreamy,dark,urban,microfunk,dub,nu disco,vogue,keys,hardcore,aggressive,indie,electro funk,beauty,relaxing,trance,pop,hiphop,soft,acoustic,chillrave / ethno-house,deep techno,angry,dance,fun,dubstep,tropical,latin pop,heroic,world music,inspirational,uplifting,atmosphere,art,epic,advertising,chillout,scary,spooky,slow ballad,saxophone,summer,erotic,jazzy,energy 100,kara mar,xmas,atmospheric,indie pop,hip-hop,yoga,reggaeton,lounge,travel,running,folk,chillrave & ethno-house,detective,darkambient,chill,fantasy,minimal techno,special,night,tropical house,downtempo,lullaby,meditative,upbeat,glitch hop,fitness,neurofunk,sexual,indie rock,future pop,jazz,cyberpunk,melancholic,happy hardcore,family / kids,synths,electric guitar,comedy,psychedelic trance & psytrance,edm,psychedelic rock,calm,zen,bells,podcast,melodic house,ethnic percussion,nature,heavy,bassline,indie dance,techno,drumnbass,synth pop,vaporwave,sad,8-bit,chillgressive,deep,orchestral,futuristic,hardtechno,nostalgic,big room,sci-fi,tutorial,joyful,pads,minimal 170,drill,ethnic 108,amusing,sleepy ambient,psychill,italo disco,lofi,house,acoustic guitar,bassline house,rock,k-pop,synthwave,deep house,electronica,gabber,nightlife,sport & fitness,road trip,celebration,electro,disco house,electronic'
- MUBERT_TAGS = np.array(MUBERT_TAGS_STRING.split(','))
- MUBERT_LICENSE = "ttmmubertlicense#f0acYBenRcfeFpNT4wpYGaTQIyDI4mJGv5MfIhBFz97NXDwDNFHmMRsBSzmGsJwbTpP1A6i07AXcIeAHo5"
- MUBERT_MODE = "loop"
- MUBERT_TOKEN = "4951f6428e83172a4f39de05d5b3ab10d58560b8"
 
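For context, the deleted constants above feed a Mubert music-generation client: the tag string is split into a numpy array so free-text prompts can be matched against known genre tags. A minimal sketch of how such a tag table could be queried; the `find_matching_tags` helper and the substring-matching strategy are illustrative assumptions, not code from the deleted file:

# Illustrative sketch only: filters a flat tag array like MUBERT_TAGS
# against a free-text prompt. The helper name and matching strategy
# are assumptions, not part of the deleted repository.
import numpy as np

MUBERT_TAGS = np.array("ambient,cinematic,chill hop,deep house,lofi".split(","))

def find_matching_tags(prompt: str, tags: np.ndarray) -> list:
    """Return every known tag that appears verbatim in the prompt."""
    prompt = prompt.lower()
    return [tag for tag in tags.tolist() if tag in prompt]

print(find_matching_tags("a chill hop beat with lofi textures", MUBERT_TAGS))
# ['chill hop', 'lofi']
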
spaces/Arnx/MusicGenXvAKN/audiocraft/modules/transformer.py DELETED
@@ -1,747 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- """
- Transformer model, with streaming support, xformer attention support
- and easy causal attention with a potentially finite receptive field.
-
- See `StreamingTransformer` for more information.
-
- Unlike regular PyTorch Transformer, we make the hard choice that batches are first.
- """
-
- import typing as tp
-
- from einops import rearrange
- import torch
- import torch.nn as nn
- from torch.nn import functional as F
- from torch.utils.checkpoint import checkpoint as torch_checkpoint
- from xformers import ops
-
- from .rope import RotaryEmbedding
- from .streaming import StreamingModule
-
- _efficient_attention_backend: str = 'torch'
-
-
- def set_efficient_attention_backend(backend: str = 'torch'):
-     # Using torch by default, it seems a bit faster on older P100 GPUs (~20% faster).
-     global _efficient_attention_backend
-     assert backend in ['xformers', 'torch']
-     _efficient_attention_backend = backend
-
-
- def _get_attention_time_dimension() -> int:
-     if _efficient_attention_backend == 'torch':
-         return 2
-     else:
-         return 1
-
-
- def _is_profiled() -> bool:
-     # Return true if we are currently running with a xformers profiler activated.
-     try:
-         from xformers.profiler import profiler
-     except ImportError:
-         return False
-     return profiler._Profiler._CURRENT_PROFILER is not None
-
-
- def create_norm_fn(norm_type: str, dim: int, **kwargs) -> nn.Module:
-     """Create normalization module for transformer encoder layer.
-
-     Args:
-         norm_type (str): Normalization method.
-         dim (int): Dimension of the normalized layer.
-         **kwargs (dict): Additional parameters for normalization layer.
-     Returns:
-         nn.Module: Normalization module.
-     """
-     if norm_type == 'layer_norm':
-         return nn.LayerNorm(dim, eps=1e-5, **kwargs)
-     else:
-         raise ValueError(f"Unknown norm type: {norm_type}")
-
-
- def create_sin_embedding(positions: torch.Tensor, dim: int, max_period: float = 10000,
-                          dtype: torch.dtype = torch.float32) -> torch.Tensor:
-     """Create sinusoidal positional embedding, with shape `[B, T, C]`.
-
-     Args:
-         positions (torch.Tensor): LongTensor of positions.
-         dim (int): Dimension of the embedding.
-         max_period (float): Maximum period of the cosine/sine functions.
-         dtype (torch.dtype or str): dtype to use to generate the embedding.
-     Returns:
-         torch.Tensor: Sinusoidal positional embedding.
-     """
-     # We aim for BTC format
-     assert dim % 2 == 0
-     half_dim = dim // 2
-     positions = positions.to(dtype)
-     adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)
-     max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype)  # avoid sync point
-     phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))
-     return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)
-
-
- def expand_repeated_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
-     """torch.repeat_interleave(x, dim=2, repeats=n_rep) from xlformers"""
-     if n_rep == 1:
-         return x
-     if _efficient_attention_backend == 'torch':
-         bs, n_kv_heads, slen, head_dim = x.shape
-         return (
-             x[:, :, None, :, :]
-             .expand(bs, n_kv_heads, n_rep, slen, head_dim)
-             .reshape(bs, n_kv_heads * n_rep, slen, head_dim)
-         )
-     else:
-         bs, slen, n_kv_heads, head_dim = x.shape
-         return (
-             x[:, :, :, None, :]
-             .expand(bs, slen, n_kv_heads, n_rep, head_dim)
-             .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
-         )
-
-
- class LayerScale(nn.Module):
-     """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf).
-     This rescales diagonally the residual outputs close to 0, with a learnt scale.
-
-     Args:
-         channels (int): Number of channels.
-         init (float): Initial scale.
-         channel_last (bool): If True, expect `[*, C]` shaped tensors, otherwise, `[*, C, T]`.
-         device (torch.device or None): Device on which to initialize the module.
-         dtype (torch.dtype or None): dtype to use to initialize the module.
-     """
-     def __init__(self, channels: int, init: float = 1e-4, channel_last: bool = True,
-                  device=None, dtype=None):
-         super().__init__()
-         self.channel_last = channel_last
-         self.scale = nn.Parameter(
-             torch.full((channels,), init,
-                        requires_grad=True, device=device, dtype=dtype))
-
-     def forward(self, x: torch.Tensor):
-         if self.channel_last:
-             return self.scale * x
-         else:
-             return self.scale[:, None] * x
-
-
- class StreamingMultiheadAttention(StreamingModule):
-     """Similar to `nn.MultiheadAttention` but with support for streaming, causal evaluation.
-
-     Args:
-         embed_dim (int): Dimension to project to.
-         num_heads (int): Number of heads.
-         dropout (float): Dropout level.
-         bias (bool): Use bias in projections.
-         causal (bool): Causal mask applied automatically.
-         past_context (int or None): Receptive field for the causal mask, infinite if None.
-         custom (bool): Use custom MHA implementation, for testing / benchmarking.
-         memory_efficient (bool): Use xformers based memory efficient attention.
-         attention_as_float32 (bool): Perform the attention as float32
-             (especially important with memory_efficient as autocast won't do this automatically).
-         rope (`RotaryEmbedding` or None): Rope embedding to use.
-         cross_attention: Should be true when used as a cross attention.
-             All keys and values must be available at once, streaming is only for the queries.
-             Cannot be used with `causal` or `rope` (as it wouldn't make sense to
-             interpret the time steps in the keys relative to those in the queries).
-         safe_streaming (bool): Bug fix, will go away with xformers update.
-         qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product.
-         kv_repeat (int): If > 1, will repeat keys and values multiple times (need to divide num_heads).
-             This will lead to faster decoding time on A100 or other GPUs with tensorcore.
-         device (torch.device or None): Device on which to initialize.
-         dtype (torch.dtype or None): dtype to use.
-     """
-     def __init__(self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True,
-                  causal: bool = False, past_context: tp.Optional[int] = None, custom: bool = False,
-                  memory_efficient: bool = False, attention_as_float32: bool = False,
-                  rope: tp.Optional[RotaryEmbedding] = None, cross_attention: bool = False,
-                  safe_streaming: bool = True, qk_layer_norm: bool = False, kv_repeat: int = 1,
-                  device=None, dtype=None):
-         super().__init__()
-         factory_kwargs = {'device': device, 'dtype': dtype}
-         if past_context is not None:
-             assert causal
-
-         self.embed_dim = embed_dim
-         self.causal = causal
-         self.past_context = past_context
-         self.memory_efficient = memory_efficient
-         self.attention_as_float32 = attention_as_float32
-         self.rope = rope
-         self.cross_attention = cross_attention
-         self.safe_streaming = safe_streaming
-         self.num_heads = num_heads
-         self.dropout = dropout
-         self.kv_repeat = kv_repeat
-         if cross_attention:
-             assert not causal, "Causal cannot work with cross attention."
-             assert rope is None, "Rope cannot work with cross attention."
-
-         if memory_efficient:
-             _verify_xformers_memory_efficient_compat()
-
-         self.custom = _is_custom(custom, memory_efficient)
-         if self.custom:
-             out_dim = embed_dim
-             assert num_heads % kv_repeat == 0
-             assert not cross_attention or kv_repeat == 1
-             num_kv = num_heads // kv_repeat
-             kv_dim = (embed_dim // num_heads) * num_kv
-             out_dim += 2 * kv_dim
-             in_proj = nn.Linear(embed_dim, out_dim, bias=bias, **factory_kwargs)
-             # We try to follow the default PyTorch MHA convention, to easily compare results.
-             self.in_proj_weight = in_proj.weight
-             self.in_proj_bias = in_proj.bias
-             if bias:
-                 self.in_proj_bias.data.zero_()  # Following Pytorch convention
-             self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)
-             if bias:
-                 self.out_proj.bias.data.zero_()
-         else:
-             assert not qk_layer_norm
-             assert kv_repeat == 1
-             self.mha = nn.MultiheadAttention(
-                 embed_dim, num_heads, dropout=dropout, bias=bias, batch_first=True,
-                 **factory_kwargs)
-         self.qk_layer_norm = qk_layer_norm
-         if qk_layer_norm:
-             assert self.custom
-             assert kv_repeat == 1
-             ln_dim = embed_dim
-             self.q_layer_norm = nn.LayerNorm(ln_dim)
-             self.k_layer_norm = nn.LayerNorm(ln_dim)
-
-     def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
-         if not self.custom:
-             # Support compat with regular MHA
-             keys = [n for n, _ in self.mha.named_parameters()]
-             for key in keys:
-                 if prefix + key in state_dict:
-                     state_dict[prefix + "mha." + key] = state_dict.pop(prefix + key)
-         super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
-
-     def _get_mask(self, current_steps: int, device: torch.device, dtype: torch.dtype):
-         # Return a causal mask, accounting for potentially stored past keys/values
-         # We actually return a bias for the attention score, as this has the same
-         # convention both in the builtin MHA in Pytorch, and Xformers functions.
-         time_dim = _get_attention_time_dimension()
-         if self.memory_efficient:
-             from xformers.ops import LowerTriangularMask
-             if current_steps == 1:
-                 # If we only have one step, then we do not need a mask.
-                 return None
-             elif 'past_keys' in self._streaming_state:
-                 raise RuntimeError('Not supported at the moment')
-             else:
-                 # Then we can safely use a lower triangular mask
-                 return LowerTriangularMask()
-         if self._streaming_state:
-             past_keys = self._streaming_state['past_keys']
-             past_steps = past_keys.shape[time_dim]
-         else:
-             past_steps = 0
-
-         queries_pos = torch.arange(
-             past_steps, current_steps + past_steps, device=device).view(-1, 1)
-         keys_pos = torch.arange(past_steps + current_steps, device=device).view(1, -1)
-         delta = queries_pos - keys_pos
-         valid = delta >= 0
-         if self.past_context is not None:
-             valid &= (delta <= self.past_context)
-         return torch.where(
-             valid,
-             torch.zeros([], device=device, dtype=dtype),
-             torch.full([], float('-inf'), device=device, dtype=dtype))
-
-     def _complete_kv(self, k, v):
-         time_dim = _get_attention_time_dimension()
-         if self.cross_attention:
-             # With cross attention we assume all keys and values
-             # are already available, and streaming is with respect
-             # to the queries only.
-             return k, v
-         # Complete the key/value pair using the streaming state.
-         if self._streaming_state:
-             pk = self._streaming_state['past_keys']
-             nk = torch.cat([pk, k], dim=time_dim)
-             if v is k:
-                 nv = nk
-             else:
-                 pv = self._streaming_state['past_values']
-                 nv = torch.cat([pv, v], dim=time_dim)
-         else:
-             nk = k
-             nv = v
-
-         assert nk.shape[time_dim] == nv.shape[time_dim]
-         offset = 0
-         if self.past_context is not None:
-             offset = max(0, nk.shape[time_dim] - self.past_context)
-         if self._is_streaming:
-             self._streaming_state['past_keys'] = nk[:, offset:]
-             if v is not k:
-                 self._streaming_state['past_values'] = nv[:, offset:]
-             if 'offset' in self._streaming_state:
-                 self._streaming_state['offset'] += offset
-             else:
-                 self._streaming_state['offset'] = torch.tensor(0)
-         return nk, nv
-
-     def _apply_rope(self, query: torch.Tensor, key: torch.Tensor):
-         # TODO: fix and verify layout.
-         assert _efficient_attention_backend == 'xformers', 'Rope not supported with torch attn.'
-         # Apply rope embeddings to query and key tensors.
-         assert self.rope is not None
-         if 'past_keys' in self._streaming_state:
-             past_keys_offset = self._streaming_state['past_keys'].shape[1]
-         else:
-             past_keys_offset = 0
-         if 'offset' in self._streaming_state:
-             past_context_offset = int(self._streaming_state['offset'].item())
-         else:
-             past_context_offset = 0
-         streaming_offset = past_context_offset + past_keys_offset
-         return self.rope.rotate_qk(query, key, start=streaming_offset)
-
-     def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor,
-                 key_padding_mask=None, need_weights=False, attn_mask=None,
-                 average_attn_weights=True, is_causal=False):
-         assert attn_mask is None
-         assert not is_causal, ("new param added in torch 2.0.1 not supported, "
-                                "use the causal args in the constructor.")
-
-         time_dim = _get_attention_time_dimension()
-         if time_dim == 2:
-             layout = "b h t d"
-         else:
-             layout = "b t h d"
-         dtype = query.dtype
-         if self._is_streaming:
-             assert self.causal or self.cross_attention, \
-                 "Streaming only available for causal or cross attention"
-
-         if self.causal:
-             # At the moment we specialize only for the self-attention case.
-             assert query.shape[1] == key.shape[1], "Causal only for same length query / key / value"
-             assert value.shape[1] == key.shape[1], "Causal only for same length query / key / value"
-             attn_mask = self._get_mask(query.shape[1], query.device, query.dtype)
-
-         if self.custom:
-             # custom implementation
-             assert need_weights is False
-             assert key_padding_mask is None
-             if self.cross_attention:
-                 # Different queries, keys, values, we have to split the weights manually
-                 # before applying the linear.
-                 dim = self.in_proj_weight.shape[0] // 3
-                 if self.in_proj_bias is None:
-                     bias_q, bias_k, bias_v = None, None, None
-                 else:
-                     bias_q = self.in_proj_bias[:dim]
-                     bias_k = self.in_proj_bias[dim: 2 * dim]
-                     bias_v = self.in_proj_bias[2 * dim:]
-                 q = nn.functional.linear(query, self.in_proj_weight[:dim], bias_q)
-                 # todo: when streaming, we could actually save k, v and check the shapes actually match.
-                 k = nn.functional.linear(key, self.in_proj_weight[dim: 2 * dim], bias_k)
-                 v = nn.functional.linear(value, self.in_proj_weight[2 * dim:], bias_v)
-                 if self.qk_layer_norm is True:
-                     q = self.q_layer_norm(q)
-                     k = self.k_layer_norm(k)
-                 q, k, v = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k, v]]
-             else:
-                 if not _is_profiled():
-                     # profiling breaks that property somehow.
-                     assert query is key, "specialized implementation"
-                     assert value is key, "specialized implementation"
-                 projected = nn.functional.linear(query, self.in_proj_weight, self.in_proj_bias)
-                 if self.kv_repeat == 1:
-                     if time_dim == 2:
-                         bound_layout = "b h p t d"
-                     else:
-                         bound_layout = "b t p h d"
-                     packed = rearrange(projected, f"b t (p h d) -> {bound_layout}", p=3, h=self.num_heads)
-                     q, k, v = ops.unbind(packed, dim=2)
-                 else:
-                     embed_dim = self.embed_dim
-                     per_head_dim = (embed_dim // self.num_heads)
-                     kv_heads = self.num_heads // self.kv_repeat
-                     q = projected[:, :, :embed_dim]
-                     start = embed_dim
-                     end = start + per_head_dim * kv_heads
-                     k = projected[:, :, start: end]
-                     v = projected[:, :, end:]
-                     q = rearrange(q, f"b t (h d) -> {layout}", h=self.num_heads)
-                     k = rearrange(k, f"b t (h d) -> {layout}", h=kv_heads)
-                     v = rearrange(v, f"b t (h d) -> {layout}", h=kv_heads)
-
-                 if self.qk_layer_norm is True:
-                     assert self.kv_repeat == 1
-                     q, k = [rearrange(x, f"{layout} -> b t (h d)") for x in [q, k]]
-                     q = self.q_layer_norm(q)
-                     k = self.k_layer_norm(k)
-                     q, k = [rearrange(x, f"b t (h d) -> {layout}", h=self.num_heads) for x in [q, k]]
-                 if self.rope:
-                     q, k = self._apply_rope(q, k)
-                 k, v = self._complete_kv(k, v)
-                 if self.kv_repeat > 1:
-                     k = expand_repeated_kv(k, self.kv_repeat)
-                     v = expand_repeated_kv(v, self.kv_repeat)
-             if self.attention_as_float32:
-                 q, k, v = [x.float() for x in [q, k, v]]
-             if self.memory_efficient:
-                 p = self.dropout if self.training else 0
-                 if _efficient_attention_backend == 'torch':
-                     x = torch.nn.functional.scaled_dot_product_attention(
-                         q, k, v, is_causal=attn_mask is not None, dropout_p=p)
-                 else:
-                     x = ops.memory_efficient_attention(q, k, v, attn_mask, p=p)
-             else:
-                 # We include the dot product as float32, for consistency
-                 # with the other implementations that include that step
-                 # as part of the attention. Note that when using `autocast`,
-                 # the einsums would be done as bfloat16, but the softmax
-                 # would be done as float32, so `attention_as_float32` will
-                 # extend a bit the range of operations done in float32,
-                 # although this should make no difference.
-                 q = q / q.shape[-1] ** 0.5
-                 key_layout = layout.replace('t', 'k')
-                 query_layout = layout
-                 if self._is_streaming and self.safe_streaming and q.device.type == 'cuda':
-                     with torch.autocast(device_type=q.device.type, dtype=torch.float32):
-                         pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
-                 else:
-                     pre_w = torch.einsum(f"{query_layout},{key_layout}-> b h t k", q, k)
-                 if attn_mask is not None:
-                     pre_w = pre_w + attn_mask
-                 w = torch.softmax(pre_w, dim=-1)
-                 w = F.dropout(w, self.dropout, training=self.training).to(v)
-                 # Key and value have the same format.
-                 x = torch.einsum(f"b h t k, {key_layout} -> {layout}", w, v)
-             x = x.to(dtype)
-             x = rearrange(x, f"{layout} -> b t (h d)", h=self.num_heads)
-             x = self.out_proj(x)
-         else:
-             key, value = self._complete_kv(key, value)
-             if self.attention_as_float32:
-                 query, key, value = [x.float() for x in [query, key, value]]
-             x, _ = self.mha(
-                 query, key, value, key_padding_mask,
-                 need_weights, attn_mask, average_attn_weights)
-             x = x.to(dtype)
-
-         return x, None
-
-
- class StreamingTransformerLayer(nn.TransformerEncoderLayer):
-     """TransformerLayer with Streaming / Causal support.
-     This also integrates cross_attention, when passing `cross_attention=True`,
-     rather than having two separate classes like in PyTorch.
-
-     Args:
-         d_model (int): Dimension of the data.
-         num_heads (int): Number of heads.
-         dim_feedforward (int): Intermediate dimension of FF module.
-         dropout (float): Dropout both for MHA and FF.
-         bias_ff (bool): Use bias for FF.
-         bias_attn (bool): Use bias for MHA.
-         causal (bool): Causal mask applied automatically.
-         past_context (int or None): Receptive field for the causal mask, infinite if None.
-         custom (bool): Use custom MHA implementation, for testing / benchmarking.
-         memory_efficient (bool): Use xformers based memory efficient attention.
-         attention_as_float32 (bool): Perform the attention as float32
-             (especially important with memory_efficient as autocast won't do this automatically).
-         qk_layer_norm (bool): Layer normalization applied to queries and keys before dot product in attention.
-         qk_layer_norm_cross (bool): Same for the cross attention.
-         cross_attention (bool): If True, expect to get secondary input for cross-attention.
-             Cross attention will use the default MHA, as it typically won't require
-             special treatment.
-         layer_scale (float or None): If not None, LayerScale will be used with
-             the given value as initial scale.
-         rope (`RotaryEmbedding` or None): Rope embedding to use.
-         attention_dropout (float or None): If not None, separate the value of the dropout
-             in FFN and of the attention dropout.
-         kv_repeat (int): If > 1, will repeat keys and values multiple times (need to divide num_heads).
-             This will lead to faster decoding time on A100 or other GPUs with tensorcore.
-         device (torch.device or None): Device on which to initialize.
-         dtype (torch.dtype or None): dtype to use.
-         **kwargs: See `nn.TransformerEncoderLayer`.
-     """
-     def __init__(self, d_model: int, num_heads: int, dim_feedforward: int = 2048, dropout: float = 0.1,
-                  bias_ff: bool = True, bias_attn: bool = True, causal: bool = False,
-                  past_context: tp.Optional[int] = None, custom: bool = False,
-                  memory_efficient: bool = False, attention_as_float32: bool = False,
-                  qk_layer_norm: bool = False, qk_layer_norm_cross: bool = False,
-                  cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
-                  rope: tp.Optional[RotaryEmbedding] = None, attention_dropout: tp.Optional[float] = None,
-                  kv_repeat: int = 1, norm: str = 'layer_norm', device=None, dtype=None, **kwargs):
-         super().__init__(d_model, num_heads, dim_feedforward, dropout,
-                          device=device, dtype=dtype, batch_first=True, **kwargs)
-         factory_kwargs = {'device': device, 'dtype': dtype}
-         # Redefine self_attn to our streaming multi-head attention
-         attn_kwargs: tp.Dict[str, tp.Any] = {
-             'embed_dim': d_model,
-             'num_heads': num_heads,
-             'dropout': dropout if attention_dropout is None else attention_dropout,
-             'bias': bias_attn,
-             'custom': custom,
-             'memory_efficient': memory_efficient,
-             'attention_as_float32': attention_as_float32,
-         }
-         self.self_attn: StreamingMultiheadAttention = StreamingMultiheadAttention(
-             causal=causal, past_context=past_context, rope=rope, qk_layer_norm=qk_layer_norm,
-             kv_repeat=kv_repeat, **attn_kwargs, **factory_kwargs)  # type: ignore
-         # Redefine feedforward layers to expose bias parameter
-         self.linear1 = nn.Linear(d_model, dim_feedforward, bias=bias_ff, **factory_kwargs)
-         self.linear2 = nn.Linear(dim_feedforward, d_model, bias=bias_ff, **factory_kwargs)
-
-         self.layer_scale_1: nn.Module
-         self.layer_scale_2: nn.Module
-         if layer_scale is None:
-             self.layer_scale_1 = nn.Identity()
-             self.layer_scale_2 = nn.Identity()
-         else:
-             self.layer_scale_1 = LayerScale(d_model, layer_scale, **factory_kwargs)
-             self.layer_scale_2 = LayerScale(d_model, layer_scale, **factory_kwargs)
-
-         self.cross_attention: tp.Optional[nn.Module] = None
-         if cross_attention:
-             self.cross_attention = StreamingMultiheadAttention(
-                 cross_attention=True, qk_layer_norm=qk_layer_norm_cross,
-                 **attn_kwargs, **factory_kwargs)
-             # Norm and dropout
-             self.dropout_cross = nn.Dropout(dropout)
-             # eps value matching that used in PyTorch reference implementation.
-             self.norm_cross = nn.LayerNorm(d_model, eps=1e-5, **factory_kwargs)
-             self.layer_scale_cross: nn.Module
-             if layer_scale is None:
-                 self.layer_scale_cross = nn.Identity()
-             else:
-                 self.layer_scale_cross = LayerScale(d_model, layer_scale, **factory_kwargs)
-         self.norm1 = create_norm_fn(norm, d_model, **factory_kwargs)  # type: ignore
-         self.norm2 = create_norm_fn(norm, d_model, **factory_kwargs)  # type: ignore
-
-     def _cross_attention_block(self, src: torch.Tensor,
-                                cross_attention_src: torch.Tensor) -> torch.Tensor:
-         assert self.cross_attention is not None
-         # queries are from src, keys and values from cross_attention_src.
-         x = self.cross_attention(
-             src, cross_attention_src, cross_attention_src, need_weights=False)[0]
-         return self.dropout_cross(x)  # type: ignore
-
-     def forward(self, src: torch.Tensor, src_mask: tp.Optional[torch.Tensor] = None,  # type: ignore
-                 src_key_padding_mask: tp.Optional[torch.Tensor] = None,
-                 cross_attention_src: tp.Optional[torch.Tensor] = None):
-         if self.cross_attention is None:
-             assert cross_attention_src is None
-         else:
-             assert cross_attention_src is not None
-         x = src
-         if self.norm_first:
-             x = x + self.layer_scale_1(
-                 self._sa_block(self.norm1(x), src_mask, src_key_padding_mask))
-             if cross_attention_src is not None:
-                 x = x + self.layer_scale_cross(
-                     self._cross_attention_block(
-                         self.norm_cross(x), cross_attention_src))
-             x = x + self.layer_scale_2(self._ff_block(self.norm2(x)))
-         else:
-             x = self.norm1(x + self.layer_scale_1(
-                 self._sa_block(x, src_mask, src_key_padding_mask)))
-             if cross_attention_src is not None:
-                 x = self.norm_cross(
-                     x + self.layer_scale_cross(
-                         self._cross_attention_block(src, cross_attention_src)))
-             x = self.norm2(x + self.layer_scale_2(self._ff_block(x)))
-         return x
-
-
- class StreamingTransformer(StreamingModule):
-     """Transformer with Streaming / Causal support.
-
-     Args:
-         d_model (int): Dimension of the data.
-         num_heads (int): Number of heads.
-         dim_feedforward (int): Intermediate dimension of FF module.
-         dropout (float): Dropout both for MHA and FF.
-         bias_ff (bool): Use bias for FF.
-         bias_attn (bool): Use bias for MHA.
-         causal (bool): Causal mask applied automatically.
-         past_context (int or None): Receptive field for the causal mask, infinite if None.
-         custom (bool): Use custom MHA implementation, for testing / benchmarking.
-         memory_efficient (bool): Use xformers based memory efficient attention.
-         attention_as_float32 (bool): Perform the attention as float32
-             (especially important with memory_efficient as autocast won't do this automatically).
-         cross_attention (bool): If True, expect to get secondary input for cross-attention.
-         layer_scale (float or None): If not None, LayerScale will be used
-             with the given value as initial scale.
-         positional_embedding (str): Positional embedding strategy (sin, rope, or sin_rope).
-         max_period (float): Maximum period of the time embedding.
-         positional_scale (float): Scale of positional embedding, set to 0 to deactivate.
-         xpos (bool): Apply xpos exponential decay to positional embedding (rope only).
-         lr (float or None): learning rate override through the `make_optim_group` API.
-         weight_decay (float or None): Weight_decay override through the `make_optim_group` API.
-         layer_class (subclass of `StreamingTransformerLayer`): class to use
-             to initialize the layers, allowing further customization outside of Audiocraft.
-         checkpointing (str): Checkpointing strategy to reduce memory usage.
-             No checkpointing if set to 'none'. Per layer checkpointing using PyTorch
-             if set to 'torch' (entire layer checkpointed, i.e. linears are evaluated twice,
-             minimal memory usage, but maximal runtime). Finally, `xformers_default` provides
-             a policy for opting out some operations of the checkpointing, like
-             linear layers and attention, providing a middle ground between speed and memory.
-         device (torch.device or None): Device on which to initialize.
-         dtype (torch.dtype or None): dtype to use.
-         **kwargs: See `nn.TransformerEncoderLayer`.
-     """
-     def __init__(self, d_model: int, num_heads: int, num_layers: int, dim_feedforward: int = 2048,
-                  dropout: float = 0.1, bias_ff: bool = True, bias_attn: bool = True,
-                  causal: bool = False, past_context: tp.Optional[int] = None,
-                  custom: bool = False, memory_efficient: bool = False, attention_as_float32: bool = False,
-                  cross_attention: bool = False, layer_scale: tp.Optional[float] = None,
-                  positional_embedding: str = 'sin', max_period: float = 10_000, positional_scale: float = 1.,
-                  xpos: bool = False, lr: tp.Optional[float] = None, weight_decay: tp.Optional[float] = None,
-                  layer_class: tp.Type[StreamingTransformerLayer] = StreamingTransformerLayer,
-                  checkpointing: str = 'none', device=None, dtype=None, **kwargs):
-         super().__init__()
-         assert d_model % num_heads == 0
-
-         self.positional_embedding = positional_embedding
-         self.max_period = max_period
-         self.positional_scale = positional_scale
-         self.weight_decay = weight_decay
-         self.lr = lr
-
-         assert positional_embedding in ['sin', 'rope', 'sin_rope']
-         self.rope: tp.Optional[RotaryEmbedding] = None
-         if self.positional_embedding in ['rope', 'sin_rope']:
-             assert _is_custom(custom, memory_efficient)
-             self.rope = RotaryEmbedding(d_model // num_heads, max_period=max_period,
-                                         xpos=xpos, scale=positional_scale, device=device)
-
-         self.checkpointing = checkpointing
-
-         assert checkpointing in ['none', 'torch', 'xformers_default', 'xformers_mm']
-         if self.checkpointing.startswith('xformers'):
-             _verify_xformers_internal_compat()
-
-         self.layers = nn.ModuleList()
-         for idx in range(num_layers):
-             self.layers.append(
-                 layer_class(
-                     d_model=d_model, num_heads=num_heads, dim_feedforward=dim_feedforward,
-                     dropout=dropout, bias_ff=bias_ff, bias_attn=bias_attn,
-                     causal=causal, past_context=past_context, custom=custom,
-                     memory_efficient=memory_efficient, attention_as_float32=attention_as_float32,
-                     cross_attention=cross_attention, layer_scale=layer_scale, rope=self.rope,
-                     device=device, dtype=dtype, **kwargs))
-
-         if self.checkpointing != 'none':
-             for layer in self.layers:
-                 # see audiocraft/optim/fsdp.py, magic signal to indicate this requires fixing the
-                 # backward hook inside of FSDP...
-                 layer._magma_checkpointed = True  # type: ignore
-                 assert layer.layer_drop == 0., "Need further checking"  # type: ignore
-
-     def _apply_layer(self, layer, *args, **kwargs):
-         method = self.checkpointing
-         if method == 'none':
-             return layer(*args, **kwargs)
-         elif method == 'torch':
-             return torch_checkpoint(layer, *args, use_reentrant=False, **kwargs)
-         elif method.startswith('xformers'):
-             from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy
-             if method == 'xformers_default':
-                 # those operations will be saved, and not recomputed.
-                 # According to Francisco we can get smarter policies but this is a good start.
-                 allow_list = [
-                     "xformers.efficient_attention_forward_cutlass.default",
-                     "xformers_flash.flash_fwd.default",
-                     "aten.addmm.default",
-                     "aten.mm.default",
-                 ]
-             elif method == 'xformers_mm':
-                 # those operations will be saved, and not recomputed.
-                 # According to Francisco we can get smarter policies but this is a good start.
-                 allow_list = [
-                     "aten.addmm.default",
-                     "aten.mm.default",
-                 ]
-             else:
-                 raise ValueError(f"xformers checkpointing policy {method} is not known.")
-             policy_fn = _get_default_policy(allow_list)
-             return checkpoint(layer, *args, policy_fn=policy_fn, **kwargs)
-         else:
-             raise ValueError(f"Checkpointing method {method} is unknown.")
-
-     def forward(self, x: torch.Tensor, *args, **kwargs):
-         B, T, C = x.shape
-
-         if 'offsets' in self._streaming_state:
-             offsets = self._streaming_state['offsets']
-         else:
-             offsets = torch.zeros(B, dtype=torch.long, device=x.device)
-
-         if self.positional_embedding in ['sin', 'sin_rope']:
-             positions = torch.arange(T, device=x.device).view(1, -1, 1)
-             positions = positions + offsets.view(-1, 1, 1)
-             pos_emb = create_sin_embedding(positions, C, max_period=self.max_period, dtype=x.dtype)
-             x = x + self.positional_scale * pos_emb
-
-         for layer in self.layers:
-             x = self._apply_layer(layer, x, *args, **kwargs)
-
-         if self._is_streaming:
-             self._streaming_state['offsets'] = offsets + T
-
-         return x
-
-     def make_optim_group(self):
-         group = {"params": list(self.parameters())}
-         if self.lr is not None:
-             group["lr"] = self.lr
-         if self.weight_decay is not None:
-             group["weight_decay"] = self.weight_decay
-         return group
-
-
- # special attention related functions
-
- def _verify_xformers_memory_efficient_compat():
-     try:
-         from xformers.ops import memory_efficient_attention, LowerTriangularMask  # noqa
-     except ImportError:
-         raise ImportError(
-             "xformers is not installed. Please install it and try again.\n"
-             "To install on AWS and Azure, run \n"
-             "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
-             "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n"
-             "To install on FAIR Cluster, run \n"
-             "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
-             "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n")
-
-
- def _verify_xformers_internal_compat():
-     try:
-         from xformers.checkpoint_fairinternal import checkpoint, _get_default_policy  # noqa
-     except ImportError:
-         raise ImportError(
-             "Francisco's fairinternal xformers is not installed. Please install it and try again.\n"
-             "To install on AWS and Azure, run \n"
-             "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='8.0'\\\n"
-             "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n"
-             "To install on FAIR Cluster, run \n"
-             "FORCE_CUDA=1 TORCH_CUDA_ARCH_LIST='6.0;7.0'\\\n"
-             "pip install -U git+https://[email protected]/fairinternal/xformers.git#egg=xformers\n")
-
-
- def _is_custom(custom: bool, memory_efficient: bool):
-     return custom or memory_efficient
 
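The deleted module's `create_sin_embedding` is the one piece that can be exercised without the audiocraft streaming machinery. Below is a standalone sketch of the same computation, reproduced from the deleted function above, with the batch/time shapes spelled out:

# Standalone reproduction of create_sin_embedding from the deleted module,
# usable without the rest of audiocraft. Positions have shape [B, T, 1] and
# the result has shape [B, T, dim]: cos terms first, then sin terms.
import torch

def create_sin_embedding(positions: torch.Tensor, dim: int,
                         max_period: float = 10000,
                         dtype: torch.dtype = torch.float32) -> torch.Tensor:
    assert dim % 2 == 0
    half_dim = dim // 2
    positions = positions.to(dtype)
    adim = torch.arange(half_dim, device=positions.device, dtype=dtype).view(1, 1, -1)
    max_period_tensor = torch.full([], max_period, device=positions.device, dtype=dtype)
    phase = positions / (max_period_tensor ** (adim / (half_dim - 1)))
    return torch.cat([torch.cos(phase), torch.sin(phase)], dim=-1)

# One batch of 8 timesteps embedded into 16 channels.
positions = torch.arange(8).view(1, -1, 1)
emb = create_sin_embedding(positions, dim=16)
print(emb.shape)  # torch.Size([1, 8, 16])
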
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/colorama/initialise.py DELETED
@@ -1,121 +0,0 @@
1
- # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
2
- import atexit
3
- import contextlib
4
- import sys
5
-
6
- from .ansitowin32 import AnsiToWin32
7
-
8
-
9
- def _wipe_internal_state_for_tests():
10
- global orig_stdout, orig_stderr
11
- orig_stdout = None
12
- orig_stderr = None
13
-
14
- global wrapped_stdout, wrapped_stderr
15
- wrapped_stdout = None
16
- wrapped_stderr = None
17
-
18
- global atexit_done
19
- atexit_done = False
20
-
21
- global fixed_windows_console
22
- fixed_windows_console = False
23
-
24
- try:
25
- # no-op if it wasn't registered
26
- atexit.unregister(reset_all)
27
- except AttributeError:
28
- # python 2: no atexit.unregister. Oh well, we did our best.
29
- pass
30
-
31
-
32
- def reset_all():
33
- if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
34
- AnsiToWin32(orig_stdout).reset_all()
35
-
36
-
37
- def init(autoreset=False, convert=None, strip=None, wrap=True):
38
-
39
- if not wrap and any([autoreset, convert, strip]):
40
- raise ValueError('wrap=False conflicts with any other arg=True')
41
-
42
- global wrapped_stdout, wrapped_stderr
43
- global orig_stdout, orig_stderr
44
-
45
- orig_stdout = sys.stdout
46
- orig_stderr = sys.stderr
47
-
48
- if sys.stdout is None:
49
- wrapped_stdout = None
50
- else:
51
- sys.stdout = wrapped_stdout = \
52
- wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
53
- if sys.stderr is None:
54
- wrapped_stderr = None
55
- else:
56
- sys.stderr = wrapped_stderr = \
57
- wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
58
-
59
- global atexit_done
60
- if not atexit_done:
61
- atexit.register(reset_all)
62
- atexit_done = True
63
-
64
-
65
- def deinit():
66
- if orig_stdout is not None:
67
- sys.stdout = orig_stdout
68
- if orig_stderr is not None:
69
- sys.stderr = orig_stderr
70
-
71
-
72
- def just_fix_windows_console():
73
- global fixed_windows_console
74
-
75
- if sys.platform != "win32":
76
- return
77
- if fixed_windows_console:
78
- return
79
- if wrapped_stdout is not None or wrapped_stderr is not None:
80
- # Someone already ran init() and it did stuff, so we won't second-guess them
81
- return
82
-
83
- # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the
84
- # native ANSI support in the console as a side-effect. We only need to actually
85
- # replace sys.stdout/stderr if we're in the old-style conversion mode.
86
- new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False)
87
- if new_stdout.convert:
88
- sys.stdout = new_stdout
89
- new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False)
90
- if new_stderr.convert:
91
- sys.stderr = new_stderr
92
-
93
- fixed_windows_console = True
94
-
95
- @contextlib.contextmanager
96
- def colorama_text(*args, **kwargs):
97
- init(*args, **kwargs)
98
- try:
99
- yield
100
- finally:
101
- deinit()
102
-
103
-
104
- def reinit():
105
- if wrapped_stdout is not None:
106
- sys.stdout = wrapped_stdout
107
- if wrapped_stderr is not None:
108
- sys.stderr = wrapped_stderr
109
-
110
-
111
- def wrap_stream(stream, convert, strip, autoreset, wrap):
112
- if wrap:
113
- wrapper = AnsiToWin32(stream,
114
- convert=convert, strip=strip, autoreset=autoreset)
115
- if wrapper.should_wrap():
116
- stream = wrapper.stream
117
- return stream
118
-
119
-
120
- # Use this for initial setup as well, to reduce code duplication
121
- _wipe_internal_state_for_tests()
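
For context, a minimal usage sketch of the API this module exposes (standard colorama usage: init() wraps sys.stdout/sys.stderr, deinit() restores them, and colorama_text() scopes both as a context manager):

from colorama import Fore, init, deinit, colorama_text

init(autoreset=True)       # wrap the standard streams with AnsiToWin32
print(Fore.RED + "error")  # autoreset clears the color after each print
deinit()                   # restore the original streams

with colorama_text(autoreset=True):  # init()/deinit() as a context manager
    print(Fore.GREEN + "ok")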
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/config.py DELETED
@@ -1,265 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
-
4
- import functools
5
- import inspect
6
- import logging
7
- from fvcore.common.config import CfgNode as _CfgNode
8
-
9
- from detectron2.utils.file_io import PathManager
10
-
11
-
12
- class CfgNode(_CfgNode):
13
- """
14
- The same as `fvcore.common.config.CfgNode`, but different in:
15
-
16
- 1. Use unsafe yaml loading by default.
17
- Note that this may lead to arbitrary code execution: you must not
18
- load a config file from untrusted sources before manually inspecting
19
- the content of the file.
20
- 2. Support config versioning.
21
- When attempting to merge an old config, it will convert the old config automatically.
22
-
23
- .. automethod:: clone
24
- .. automethod:: freeze
25
- .. automethod:: defrost
26
- .. automethod:: is_frozen
27
- .. automethod:: load_yaml_with_base
28
- .. automethod:: merge_from_list
29
- .. automethod:: merge_from_other_cfg
30
- """
31
-
32
- @classmethod
33
- def _open_cfg(cls, filename):
34
- return PathManager.open(filename, "r")
35
-
36
- # Note that the default value of allow_unsafe is changed to True
37
- def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
38
- """
39
- Load content from the given config file and merge it into self.
40
-
41
- Args:
42
- cfg_filename: config filename
43
- allow_unsafe: allow unsafe yaml syntax
44
- """
45
- assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
46
- loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
47
- loaded_cfg = type(self)(loaded_cfg)
48
-
49
- # defaults.py needs to import CfgNode
50
- from .defaults import _C
51
-
52
- latest_ver = _C.VERSION
53
- assert (
54
- latest_ver == self.VERSION
55
- ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
56
-
57
- logger = logging.getLogger(__name__)
58
-
59
- loaded_ver = loaded_cfg.get("VERSION", None)
60
- if loaded_ver is None:
61
- from .compat import guess_version
62
-
63
- loaded_ver = guess_version(loaded_cfg, cfg_filename)
64
- assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
65
- loaded_ver, self.VERSION
66
- )
67
-
68
- if loaded_ver == self.VERSION:
69
- self.merge_from_other_cfg(loaded_cfg)
70
- else:
71
- # compat.py needs to import CfgNode
72
- from .compat import upgrade_config, downgrade_config
73
-
74
- logger.warning(
75
- "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
76
- "See docs/CHANGELOG.md for instructions to update your files.".format(
77
- loaded_ver, cfg_filename, self.VERSION
78
- )
79
- )
80
- # To convert, first obtain a full config at an old version
81
- old_self = downgrade_config(self, to_version=loaded_ver)
82
- old_self.merge_from_other_cfg(loaded_cfg)
83
- new_config = upgrade_config(old_self)
84
- self.clear()
85
- self.update(new_config)
86
-
87
- def dump(self, *args, **kwargs):
88
- """
89
- Returns:
90
- str: a yaml string representation of the config
91
- """
92
- # to make it show up in docs
93
- return super().dump(*args, **kwargs)
94
-
95
-
96
- global_cfg = CfgNode()
97
-
98
-
99
- def get_cfg() -> CfgNode:
100
- """
101
- Get a copy of the default config.
102
-
103
- Returns:
104
- a detectron2 CfgNode instance.
105
- """
106
- from .defaults import _C
107
-
108
- return _C.clone()
109
-
110
-
111
- def set_global_cfg(cfg: CfgNode) -> None:
112
- """
113
- Let the global config point to the given cfg.
114
-
115
- Assume that the given "cfg" has the key "KEY", after calling
116
- `set_global_cfg(cfg)`, the key can be accessed by:
117
- ::
118
- from detectron2.config import global_cfg
119
- print(global_cfg.KEY)
120
-
121
- By using a hacky global config, you can access these configs anywhere,
122
- without having to pass the config object or the values deep into the code.
123
- This is a hacky feature introduced for quick prototyping / research exploration.
124
- """
125
- global global_cfg
126
- global_cfg.clear()
127
- global_cfg.update(cfg)
128
-
129
-
130
- def configurable(init_func=None, *, from_config=None):
131
- """
132
- Decorate a function or a class's __init__ method so that it can be called
133
- with a :class:`CfgNode` object using a :func:`from_config` function that translates
134
- :class:`CfgNode` to arguments.
135
-
136
- Examples:
137
- ::
138
- # Usage 1: Decorator on __init__:
139
- class A:
140
- @configurable
141
- def __init__(self, a, b=2, c=3):
142
- pass
143
-
144
- @classmethod
145
- def from_config(cls, cfg): # 'cfg' must be the first argument
146
- # Returns kwargs to be passed to __init__
147
- return {"a": cfg.A, "b": cfg.B}
148
-
149
- a1 = A(a=1, b=2) # regular construction
150
- a2 = A(cfg) # construct with a cfg
151
- a3 = A(cfg, b=3, c=4) # construct with extra overwrite
152
-
153
- # Usage 2: Decorator on any function. Needs an extra from_config argument:
154
- @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
155
- def a_func(a, b=2, c=3):
156
- pass
157
-
158
- a1 = a_func(a=1, b=2) # regular call
159
- a2 = a_func(cfg) # call with a cfg
160
- a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
161
-
162
- Args:
163
- init_func (callable): a class's ``__init__`` method in usage 1. The
164
- class must have a ``from_config`` classmethod which takes `cfg` as
165
- the first argument.
166
- from_config (callable): the from_config function in usage 2. It must take `cfg`
167
- as its first argument.
168
- """
169
-
170
- if init_func is not None:
171
- assert (
172
- inspect.isfunction(init_func)
173
- and from_config is None
174
- and init_func.__name__ == "__init__"
175
- ), "Incorrect use of @configurable. Check API documentation for examples."
176
-
177
- @functools.wraps(init_func)
178
- def wrapped(self, *args, **kwargs):
179
- try:
180
- from_config_func = type(self).from_config
181
- except AttributeError as e:
182
- raise AttributeError(
183
- "Class with @configurable must have a 'from_config' classmethod."
184
- ) from e
185
- if not inspect.ismethod(from_config_func):
186
- raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
187
-
188
- if _called_with_cfg(*args, **kwargs):
189
- explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
190
- init_func(self, **explicit_args)
191
- else:
192
- init_func(self, *args, **kwargs)
193
-
194
- return wrapped
195
-
196
- else:
197
- if from_config is None:
198
- return configurable # @configurable() is made equivalent to @configurable
199
- assert inspect.isfunction(
200
- from_config
201
- ), "from_config argument of configurable must be a function!"
202
-
203
- def wrapper(orig_func):
204
- @functools.wraps(orig_func)
205
- def wrapped(*args, **kwargs):
206
- if _called_with_cfg(*args, **kwargs):
207
- explicit_args = _get_args_from_config(from_config, *args, **kwargs)
208
- return orig_func(**explicit_args)
209
- else:
210
- return orig_func(*args, **kwargs)
211
-
212
- wrapped.from_config = from_config
213
- return wrapped
214
-
215
- return wrapper
216
-
217
-
218
- def _get_args_from_config(from_config_func, *args, **kwargs):
219
- """
220
- Use `from_config` to obtain explicit arguments.
221
-
222
- Returns:
223
- dict: arguments to be used for cls.__init__
224
- """
225
- signature = inspect.signature(from_config_func)
226
- if list(signature.parameters.keys())[0] != "cfg":
227
- if inspect.isfunction(from_config_func):
228
- name = from_config_func.__name__
229
- else:
230
- name = f"{from_config_func.__self__}.from_config"
231
- raise TypeError(f"{name} must take 'cfg' as the first argument!")
232
- support_var_arg = any(
233
- param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
234
- for param in signature.parameters.values()
235
- )
236
- if support_var_arg: # forward all arguments to from_config, if from_config accepts them
237
- ret = from_config_func(*args, **kwargs)
238
- else:
239
- # forward supported arguments to from_config
240
- supported_arg_names = set(signature.parameters.keys())
241
- extra_kwargs = {}
242
- for name in list(kwargs.keys()):
243
- if name not in supported_arg_names:
244
- extra_kwargs[name] = kwargs.pop(name)
245
- ret = from_config_func(*args, **kwargs)
246
- # forward the other arguments to __init__
247
- ret.update(extra_kwargs)
248
- return ret
249
-
250
-
251
- def _called_with_cfg(*args, **kwargs):
252
- """
253
- Returns:
254
- bool: whether the arguments contain CfgNode and should be considered
255
- forwarded to from_config.
256
- """
257
- from omegaconf import DictConfig
258
-
259
- if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
260
- return True
261
- if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
262
- return True
263
- # `from_config`'s first argument is forced to be "cfg".
264
- # So the above check covers all cases.
265
- return False
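
A short sketch of how this config module is typically used; the YAML path is a placeholder, and MODEL.DEVICE / MODEL.ROI_HEADS.NUM_CLASSES are keys from detectron2's default config:

from detectron2.config import get_cfg, configurable

cfg = get_cfg()                               # clone of the default CfgNode
cfg.merge_from_file("my_config.yaml")         # placeholder path; old config versions are auto-upgraded
cfg.merge_from_list(["MODEL.DEVICE", "cpu"])  # override individual keys
cfg.freeze()

class Head:
    @configurable
    def __init__(self, num_classes, dropout=0.1):
        self.num_classes = num_classes
        self.dropout = dropout

    @classmethod
    def from_config(cls, cfg):
        # translate a CfgNode into __init__ kwargs
        return {"num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES}

head_from_cfg = Head(cfg)           # built through from_config
head_direct = Head(num_classes=80)  # regular construction still works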
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/matcher.py DELETED
@@ -1,127 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- from typing import List
3
- import torch
4
-
5
- from detectron2.layers import nonzero_tuple
6
-
7
-
8
- # TODO: the name is too general
9
- class Matcher(object):
10
- """
11
- This class assigns to each predicted "element" (e.g., a box) a ground-truth
12
- element. Each predicted element will have exactly zero or one matches; each
13
- ground-truth element may be matched to zero or more predicted elements.
14
-
15
- The matching is determined by the MxN match_quality_matrix, that characterizes
16
- how well each (ground-truth, prediction)-pair match each other. For example,
17
- if the elements are boxes, this matrix may contain box intersection-over-union
18
- overlap values.
19
-
20
- The matcher returns (a) a vector of length N containing the index of the
21
- ground-truth element m in [0, M) that matches to prediction n in [0, N).
22
- (b) a vector of length N containing the labels for each prediction.
23
- """
24
-
25
- def __init__(
26
- self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False
27
- ):
28
- """
29
- Args:
30
- thresholds (list): a list of thresholds used to stratify predictions
31
- into levels.
32
- labels (list): a list of values to label predictions belonging at
33
- each level. A label can be one of {-1, 0, 1} signifying
34
- {ignore, negative class, positive class}, respectively.
35
- allow_low_quality_matches (bool): if True, produce additional matches
36
- for predictions with maximum match quality lower than high_threshold.
37
- See set_low_quality_matches_ for more details.
38
-
39
- For example,
40
- thresholds = [0.3, 0.5]
41
- labels = [0, -1, 1]
42
- All predictions with iou < 0.3 will be marked with 0 and
43
- thus will be considered as false positives while training.
44
- All predictions with 0.3 <= iou < 0.5 will be marked with -1 and
45
- thus will be ignored.
46
- All predictions with 0.5 <= iou will be marked with 1 and
47
- thus will be considered as true positives.
48
- """
49
- # Add -inf and +inf to first and last position in thresholds
50
- thresholds = thresholds[:]
51
- assert thresholds[0] > 0
52
- thresholds.insert(0, -float("inf"))
53
- thresholds.append(float("inf"))
54
- # Currently torchscript does not support all + generator
55
- assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])
56
- assert all([l in [-1, 0, 1] for l in labels])
57
- assert len(labels) == len(thresholds) - 1
58
- self.thresholds = thresholds
59
- self.labels = labels
60
- self.allow_low_quality_matches = allow_low_quality_matches
61
-
62
- def __call__(self, match_quality_matrix):
63
- """
64
- Args:
65
- match_quality_matrix (Tensor[float]): an MxN tensor, containing the
66
- pairwise quality between M ground-truth elements and N predicted
67
- elements. All elements must be >= 0 (due to the use of `torch.nonzero`
68
- for selecting indices in :meth:`set_low_quality_matches_`).
69
-
70
- Returns:
71
- matches (Tensor[int64]): a vector of length N, where matches[i] is a matched
72
- ground-truth index in [0, M)
73
- match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates
74
- whether a prediction is a true or false positive or ignored
75
- """
76
- assert match_quality_matrix.dim() == 2
77
- if match_quality_matrix.numel() == 0:
78
- default_matches = match_quality_matrix.new_full(
79
- (match_quality_matrix.size(1),), 0, dtype=torch.int64
80
- )
81
- # When no gt boxes exist, we define IOU = 0 and therefore set labels
82
- # to `self.labels[0]`, which usually defaults to background class 0
83
- # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds
84
- default_match_labels = match_quality_matrix.new_full(
85
- (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8
86
- )
87
- return default_matches, default_match_labels
88
-
89
- assert torch.all(match_quality_matrix >= 0)
90
-
91
- # match_quality_matrix is M (gt) x N (predicted)
92
- # Max over gt elements (dim 0) to find best gt candidate for each prediction
93
- matched_vals, matches = match_quality_matrix.max(dim=0)
94
-
95
- match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)
96
-
97
- for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):
98
- low_high = (matched_vals >= low) & (matched_vals < high)
99
- match_labels[low_high] = l
100
-
101
- if self.allow_low_quality_matches:
102
- self.set_low_quality_matches_(match_labels, match_quality_matrix)
103
-
104
- return matches, match_labels
105
-
106
- def set_low_quality_matches_(self, match_labels, match_quality_matrix):
107
- """
108
- Produce additional matches for predictions that have only low-quality matches.
109
- Specifically, for each ground-truth G find the set of predictions that have
110
- maximum overlap with it (including ties); for each prediction in that set, if
111
- it is unmatched, then match it to the ground-truth G.
112
-
113
- This function implements the RPN assignment case (i) in Sec. 3.1.2 of
114
- :paper:`Faster R-CNN`.
115
- """
116
- # For each gt, find the prediction with which it has highest quality
117
- highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
118
- # Find the highest quality match available, even if it is low, including ties.
119
- # Note that the matches qualities must be positive due to the use of
120
- # `torch.nonzero`.
121
- _, pred_inds_with_highest_quality = nonzero_tuple(
122
- match_quality_matrix == highest_quality_foreach_gt[:, None]
123
- )
124
- # If an anchor was labeled positive only due to a low-quality match
125
- # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B.
126
- # This follows the implementation in Detectron, and is found to have no significant impact.
127
- match_labels[pred_inds_with_highest_quality] = 1
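
A minimal sketch of the Matcher in use, mirroring the thresholds/labels example from the docstring; the random tensor stands in for real pairwise IoU values:

import torch
from detectron2.modeling.matcher import Matcher

matcher = Matcher(
    thresholds=[0.3, 0.5],
    labels=[0, -1, 1],
    allow_low_quality_matches=True,
)
iou = torch.rand(4, 10)  # 4 ground-truth boxes x 10 predictions
matches, match_labels = matcher(iou)
# matches[i]      -> index of the ground truth matched to prediction i
# match_labels[i] -> 1 (positive), 0 (negative), or -1 (ignored)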
 
spaces/B10915003/B10915003-autotrain-jimmy-test-face-identification-53251125423/app.py DELETED
@@ -1,3 +0,0 @@
1
- import gradio as gr
2
-
3
- gr.Interface.load("models/B10915003/autotrain-jimmy-test-face-identification-53251125423").launch()
 
spaces/Bart92/RVC_HF/infer/modules/vc/pipeline.py DELETED
@@ -1,655 +0,0 @@
1
- import os
2
- import sys
3
- import traceback
4
- import logging
5
-
6
- logger = logging.getLogger(__name__)
7
-
8
- from functools import lru_cache
9
- from time import time as ttime
10
- from torch import Tensor
11
- import faiss
12
- import librosa
13
- import numpy as np
14
- import parselmouth
15
- import pyworld
16
- import torch
17
- import torch.nn.functional as F
18
- import torchcrepe
19
- from scipy import signal
20
- from tqdm import tqdm
21
-
22
- import random
23
- now_dir = os.getcwd()
24
- sys.path.append(now_dir)
25
- import re
26
- from functools import partial
27
- bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000)
28
-
29
- input_audio_path2wav = {}
30
- from LazyImport import lazyload
31
- torchcrepe = lazyload("torchcrepe") # Fork Feature. Crepe algo for training and preprocess
32
- torch = lazyload("torch")
33
- from infer.lib.rmvpe import RMVPE
34
-
35
- @lru_cache
36
- def cache_harvest_f0(input_audio_path, fs, f0max, f0min, frame_period):
37
- audio = input_audio_path2wav[input_audio_path]
38
- f0, t = pyworld.harvest(
39
- audio,
40
- fs=fs,
41
- f0_ceil=f0max,
42
- f0_floor=f0min,
43
- frame_period=frame_period,
44
- )
45
- f0 = pyworld.stonemask(audio, f0, t, fs)
46
- return f0
47
-
48
-
49
- def change_rms(data1, sr1, data2, sr2, rate): # data1/sr1 is the input audio, data2/sr2 the output audio; rate is data2's share of the mix
50
- # print(data1.max(),data2.max())
51
- rms1 = librosa.feature.rms(
52
- y=data1, frame_length=sr1 // 2 * 2, hop_length=sr1 // 2
53
- ) # one RMS point every half second
54
- rms2 = librosa.feature.rms(y=data2, frame_length=sr2 // 2 * 2, hop_length=sr2 // 2)
55
- rms1 = torch.from_numpy(rms1)
56
- rms1 = F.interpolate(
57
- rms1.unsqueeze(0), size=data2.shape[0], mode="linear"
58
- ).squeeze()
59
- rms2 = torch.from_numpy(rms2)
60
- rms2 = F.interpolate(
61
- rms2.unsqueeze(0), size=data2.shape[0], mode="linear"
62
- ).squeeze()
63
- rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-6)
64
- data2 *= (
65
- torch.pow(rms1, torch.tensor(1 - rate))
66
- * torch.pow(rms2, torch.tensor(rate - 1))
67
- ).numpy()
68
- return data2
69
-
70
-
71
- class Pipeline(object):
72
- def __init__(self, tgt_sr, config):
73
- self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (
74
- config.x_pad,
75
- config.x_query,
76
- config.x_center,
77
- config.x_max,
78
- config.is_half,
79
- )
80
- self.sr = 16000 # hubert input sample rate
81
- self.window = 160 # samples per frame
82
- self.t_pad = self.sr * self.x_pad # padding (in samples) before and after each segment
83
- self.t_pad_tgt = tgt_sr * self.x_pad
84
- self.t_pad2 = self.t_pad * 2
85
- self.t_query = self.sr * self.x_query # search window (in samples) around each cut point
86
- self.t_center = self.sr * self.x_center # spacing (in samples) between candidate cut points
87
- self.t_max = self.sr * self.x_max # duration threshold below which no cut-point search is done
88
- self.device = config.device
89
- self.model_rmvpe = RMVPE("%s/rmvpe.pt" % os.environ["rmvpe_root"], is_half=self.is_half, device=self.device)
90
- self.f0_method_dict = {
91
- "pm": self.get_pm,
92
- "harvest": self.get_harvest,
93
- "dio": self.get_dio,
94
- "rmvpe": self.get_rmvpe,
95
- "rmvpe+": self.get_pitch_dependant_rmvpe,
96
- "crepe": self.get_f0_official_crepe_computation,
97
- "crepe-tiny": partial(self.get_f0_official_crepe_computation, model='tiny'),
98
- "mangio-crepe": self.get_f0_crepe_computation,
99
- "mangio-crepe-tiny": partial(self.get_f0_crepe_computation, model='tiny'),
100
-
101
- }
102
- self.note_dict = [
103
- 65.41, 69.30, 73.42, 77.78, 82.41, 87.31,
104
- 92.50, 98.00, 103.83, 110.00, 116.54, 123.47,
105
- 130.81, 138.59, 146.83, 155.56, 164.81, 174.61,
106
- 185.00, 196.00, 207.65, 220.00, 233.08, 246.94,
107
- 261.63, 277.18, 293.66, 311.13, 329.63, 349.23,
108
- 369.99, 392.00, 415.30, 440.00, 466.16, 493.88,
109
- 523.25, 554.37, 587.33, 622.25, 659.25, 698.46,
110
- 739.99, 783.99, 830.61, 880.00, 932.33, 987.77,
111
- 1046.50, 1108.73, 1174.66, 1244.51, 1318.51, 1396.91,
112
- 1479.98, 1567.98, 1661.22, 1760.00, 1864.66, 1975.53,
113
- 2093.00, 2217.46, 2349.32, 2489.02, 2637.02, 2793.83,
114
- 2959.96, 3135.96, 3322.44, 3520.00, 3729.31, 3951.07
115
- ]
116
-
117
- # Fork Feature: Get the best torch device to use for f0 algorithms that require a torch device. Will return the type (torch.device)
118
- def get_optimal_torch_device(self, index: int = 0) -> torch.device:
119
- if torch.cuda.is_available():
120
- return torch.device(
121
- f"cuda:{index % torch.cuda.device_count()}"
122
- ) # Very fast
123
- elif torch.backends.mps.is_available():
124
- return torch.device("mps")
125
- return torch.device("cpu")
126
-
127
- # Fork Feature: Compute f0 with the crepe method
128
- def get_f0_crepe_computation(
129
- self,
130
- x,
131
- f0_min,
132
- f0_max,
133
- p_len,
134
- *args, # 512 before. Hop length changes the speed that the voice jumps to a different dramatic pitch. Lower hop lengths means more pitch accuracy but longer inference time.
135
- **kwargs, # Either use crepe-tiny "tiny" or crepe "full". Default is full
136
- ):
137
- x = x.astype(
138
- np.float32
139
- ) # fixes the F.conv2D exception. We needed to convert double to float.
140
- x /= np.quantile(np.abs(x), 0.999)
141
- torch_device = self.get_optimal_torch_device()
142
- audio = torch.from_numpy(x).to(torch_device, copy=True)
143
- audio = torch.unsqueeze(audio, dim=0)
144
- if audio.ndim == 2 and audio.shape[0] > 1:
145
- audio = torch.mean(audio, dim=0, keepdim=True).detach()
146
- audio = audio.detach()
147
- hop_length = kwargs.get('crepe_hop_length', 160)
148
- model = kwargs.get('model', 'full')
149
- print("Initiating prediction with a crepe_hop_length of: " + str(hop_length))
150
- pitch: Tensor = torchcrepe.predict(
151
- audio,
152
- self.sr,
153
- hop_length,
154
- f0_min,
155
- f0_max,
156
- model,
157
- batch_size=hop_length * 2,
158
- device=torch_device,
159
- pad=True,
160
- )
161
- p_len = p_len or x.shape[0] // hop_length
162
- # Resize the pitch for final f0
163
- source = np.array(pitch.squeeze(0).cpu().float().numpy())
164
- source[source < 0.001] = np.nan
165
- target = np.interp(
166
- np.arange(0, len(source) * p_len, len(source)) / p_len,
167
- np.arange(0, len(source)),
168
- source,
169
- )
170
- f0 = np.nan_to_num(target)
171
- return f0 # Resized f0
172
-
173
- def get_f0_official_crepe_computation(
174
- self,
175
- x,
176
- f0_min,
177
- f0_max,
178
- *args,
179
- **kwargs
180
- ):
181
- # Pick a batch size that doesn't cause memory errors on your gpu
182
- batch_size = 512
183
- # Compute pitch using first gpu
184
- audio = torch.tensor(np.copy(x))[None].float()
185
- model = kwargs.get('model', 'full')
186
- f0, pd = torchcrepe.predict(
187
- audio,
188
- self.sr,
189
- self.window,
190
- f0_min,
191
- f0_max,
192
- model,
193
- batch_size=batch_size,
194
- device=self.device,
195
- return_periodicity=True,
196
- )
197
- pd = torchcrepe.filter.median(pd, 3)
198
- f0 = torchcrepe.filter.mean(f0, 3)
199
- f0[pd < 0.1] = 0
200
- f0 = f0[0].cpu().numpy()
201
- return f0
202
-
203
- # Fork Feature: Compute pYIN f0 method
204
- def get_f0_pyin_computation(self, x, f0_min, f0_max):
205
- y, sr = librosa.load("saudio/Sidney.wav", sr=self.sr, mono=True)
206
- f0, _, _ = librosa.pyin(y, sr=self.sr, fmin=f0_min, fmax=f0_max)
207
- f0 = f0[1:] # Get rid of extra first frame
208
- return f0
209
-
210
- def get_pm(self, x, p_len, *args, **kwargs):
211
- f0 = parselmouth.Sound(x, self.sr).to_pitch_ac(
212
- time_step=160 / 16000,
213
- voicing_threshold=0.6,
214
- pitch_floor=kwargs.get('f0_min'),
215
- pitch_ceiling=kwargs.get('f0_max'),
216
- ).selected_array["frequency"]
217
-
218
- return np.pad(
219
- f0,
220
- [[max(0, (p_len - len(f0) + 1) // 2), max(0, p_len - len(f0) - (p_len - len(f0) + 1) // 2)]],
221
- mode="constant"
222
- )
223
-
224
- def get_harvest(self, x, *args, **kwargs):
225
- f0_spectral = pyworld.harvest(
226
- x.astype(np.double),
227
- fs=self.sr,
228
- f0_ceil=kwargs.get('f0_max'),
229
- f0_floor=kwargs.get('f0_min'),
230
- frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
231
- )
232
- return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
233
-
234
- def get_dio(self, x, *args, **kwargs):
235
- f0_spectral = pyworld.dio(
236
- x.astype(np.double),
237
- fs=self.sr,
238
- f0_ceil=kwargs.get('f0_max'),
239
- f0_floor=kwargs.get('f0_min'),
240
- frame_period=1000 * kwargs.get('hop_length', 160) / self.sr,
241
- )
242
- return pyworld.stonemask(x.astype(np.double), *f0_spectral, self.sr)
243
-
244
-
245
- def get_rmvpe(self, x, *args, **kwargs):
246
- if not hasattr(self, "model_rmvpe"):
247
- from infer.lib.rmvpe import RMVPE
248
-
249
- logger.info(
250
- "Loading rmvpe model,%s" % "%s/rmvpe.pt" % os.environ["rmvpe_root"]
251
- )
252
- self.model_rmvpe = RMVPE(
253
- "%s/rmvpe.pt" % os.environ["rmvpe_root"],
254
- is_half=self.is_half,
255
- device=self.device,
256
- )
257
- f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)
258
-
259
- return f0
260
-
261
-
262
- def get_pitch_dependant_rmvpe(self, x, f0_min=1, f0_max=40000, *args, **kwargs):
263
- return self.model_rmvpe.infer_from_audio_with_pitch(x, thred=0.03, f0_min=f0_min, f0_max=f0_max)
264
-
265
- def autotune_f0(self, f0):
266
- autotuned_f0 = []
267
- for freq in f0:
268
- closest_notes = [x for x in self.note_dict if abs(x - freq) == min(abs(n - freq) for n in self.note_dict)]
269
- autotuned_f0.append(random.choice(closest_notes))
270
- return np.array(autotuned_f0, np.float64)
271
-
272
- # Fork Feature: Acquire median hybrid f0 estimation calculation
273
- def get_f0_hybrid_computation(
274
- self,
275
- methods_str,
276
- input_audio_path,
277
- x,
278
- f0_min,
279
- f0_max,
280
- p_len,
281
- filter_radius,
282
- crepe_hop_length,
283
- time_step
284
- ):
285
- # Get various f0 methods from input to use in the computation stack
286
- params = {'x': x, 'p_len': p_len, 'f0_min': f0_min,
287
- 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
288
- 'crepe_hop_length': crepe_hop_length, 'model': "full"
289
- }
290
- methods_str = re.search(r'hybrid\[(.+)\]', methods_str)
291
- if methods_str: # Ensure a match was found
292
- methods = [method.strip() for method in methods_str.group(1).split('+')]
293
- f0_computation_stack = []
294
-
295
- print(f"Calculating f0 pitch estimations for methods: {str(methods)}")
296
- x = x.astype(np.float32)
297
- x /= np.quantile(np.abs(x), 0.999)
298
- # Get f0 calculations for all methods specified
299
-
300
- for method in methods:
301
- if method not in self.f0_method_dict:
302
- print(f"Method {method} not found.")
303
- continue
304
- f0 = self.f0_method_dict[method](**params)
305
- if method == 'harvest' and filter_radius > 2:
306
- f0 = signal.medfilt(f0, 3)
307
- f0 = f0[1:] # Get rid of first frame.
308
- f0_computation_stack.append(f0)
309
-
310
- for fc in f0_computation_stack:
311
- print(len(fc))
312
-
313
- print(f"Calculating hybrid median f0 from the stack of: {str(methods)}")
314
- f0_median_hybrid = np.nanmedian(f0_computation_stack, axis=0)
315
- return f0_median_hybrid
316
-
317
- def get_f0(
318
- self,
319
- input_audio_path,
320
- x,
321
- p_len,
322
- f0_up_key,
323
- f0_method,
324
- filter_radius,
325
- crepe_hop_length,
326
- f0_autotune,
327
- inp_f0=None,
328
- f0_min=50,
329
- f0_max=1100,
330
- ):
331
- global input_audio_path2wav
332
- time_step = self.window / self.sr * 1000
333
- f0_min = 50
334
- f0_max = 1100
335
- f0_mel_min = 1127 * np.log(1 + f0_min / 700)
336
- f0_mel_max = 1127 * np.log(1 + f0_max / 700)
337
- params = {'x': x, 'p_len': p_len, 'f0_up_key': f0_up_key, 'f0_min': f0_min,
338
- 'f0_max': f0_max, 'time_step': time_step, 'filter_radius': filter_radius,
339
- 'crepe_hop_length': crepe_hop_length, 'model': "full"
340
- }
341
-
342
- if "hybrid" in f0_method:
343
- # Perform hybrid median pitch estimation
344
- input_audio_path2wav[input_audio_path] = x.astype(np.double)
345
- f0 = self.get_f0_hybrid_computation(
346
- f0_method,
347
- input_audio_path,
348
- x,
349
- f0_min,
350
- f0_max,
351
- p_len,
352
- filter_radius,
353
- crepe_hop_length,
354
- time_step,
355
- )
356
- else:
357
- f0 = self.f0_method_dict[f0_method](**params)
358
-
359
- if "privateuseone" in str(self.device): # clean onnxruntime memory
360
- del self.model_rmvpe.model
361
- del self.model_rmvpe
362
- logger.info("Cleaning onnxruntime memory")
363
-
364
- if f0_autotune:
365
- f0 = self.autotune_f0(f0)
366
-
367
- f0 *= pow(2, f0_up_key / 12)
368
- # with open("test.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
369
- tf0 = self.sr // self.window # number of f0 points per second
370
- if inp_f0 is not None:
371
- delta_t = np.round(
372
- (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1
373
- ).astype("int16")
374
- replace_f0 = np.interp(
375
- list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]
376
- )
377
- shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]
378
- f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[
379
- :shape
380
- ]
381
- # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()]))
382
- f0bak = f0.copy()
383
- f0_mel = 1127 * np.log(1 + f0 / 700)
384
- f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
385
- f0_mel_max - f0_mel_min
386
- ) + 1
387
- f0_mel[f0_mel <= 1] = 1
388
- f0_mel[f0_mel > 255] = 255
389
- f0_coarse = np.rint(f0_mel).astype(np.int32)
390
- return f0_coarse, f0bak # 1-0
391
-
392
- def vc(
393
- self,
394
- model,
395
- net_g,
396
- sid,
397
- audio0,
398
- pitch,
399
- pitchf,
400
- times,
401
- index,
402
- big_npy,
403
- index_rate,
404
- version,
405
- protect,
406
- ): # ,file_index,file_big_npy
407
- feats = torch.from_numpy(audio0)
408
- if self.is_half:
409
- feats = feats.half()
410
- else:
411
- feats = feats.float()
412
- if feats.dim() == 2: # double channels
413
- feats = feats.mean(-1)
414
- assert feats.dim() == 1, feats.dim()
415
- feats = feats.view(1, -1)
416
- padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)
417
-
418
- inputs = {
419
- "source": feats.to(self.device),
420
- "padding_mask": padding_mask,
421
- "output_layer": 9 if version == "v1" else 12,
422
- }
423
- t0 = ttime()
424
- with torch.no_grad():
425
- logits = model.extract_features(**inputs)
426
- feats = model.final_proj(logits[0]) if version == "v1" else logits[0]
427
- if protect < 0.5 and pitch is not None and pitchf is not None:
428
- feats0 = feats.clone()
429
- if (
430
- not isinstance(index, type(None))
431
- and not isinstance(big_npy, type(None))
432
- and index_rate != 0
433
- ):
434
- npy = feats[0].cpu().numpy()
435
- if self.is_half:
436
- npy = npy.astype("float32")
437
-
438
- # _, I = index.search(npy, 1)
439
- # npy = big_npy[I.squeeze()]
440
-
441
- score, ix = index.search(npy, k=8)
442
- weight = np.square(1 / score)
443
- weight /= weight.sum(axis=1, keepdims=True)
444
- npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)
445
-
446
- if self.is_half:
447
- npy = npy.astype("float16")
448
- feats = (
449
- torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate
450
- + (1 - index_rate) * feats
451
- )
452
-
453
- feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
454
- if protect < 0.5 and pitch is not None and pitchf is not None:
455
- feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(
456
- 0, 2, 1
457
- )
458
- t1 = ttime()
459
- p_len = audio0.shape[0] // self.window
460
- if feats.shape[1] < p_len:
461
- p_len = feats.shape[1]
462
- if pitch is not None and pitchf is not None:
463
- pitch = pitch[:, :p_len]
464
- pitchf = pitchf[:, :p_len]
465
-
466
- if protect < 0.5 and pitch is not None and pitchf is not None:
467
- pitchff = pitchf.clone()
468
- pitchff[pitchf > 0] = 1
469
- pitchff[pitchf < 1] = protect
470
- pitchff = pitchff.unsqueeze(-1)
471
- feats = feats * pitchff + feats0 * (1 - pitchff)
472
- feats = feats.to(feats0.dtype)
473
- p_len = torch.tensor([p_len], device=self.device).long()
474
- with torch.no_grad():
475
- hasp = pitch is not None and pitchf is not None
476
- arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)
477
- audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()
478
- del hasp, arg
479
- del feats, p_len, padding_mask
480
- if torch.cuda.is_available():
481
- torch.cuda.empty_cache()
482
- t2 = ttime()
483
- times[0] += t1 - t0
484
- times[2] += t2 - t1
485
- return audio1
486
- def process_t(self, t, s, window, audio_pad, pitch, pitchf, times, index, big_npy, index_rate, version, protect, t_pad_tgt, if_f0, sid, model, net_g):
487
- t = t // window * window
488
- if if_f0 == 1:
489
- return self.vc(
490
- model,
491
- net_g,
492
- sid,
493
- audio_pad[s : t + t_pad_tgt + window],
494
- pitch[:, s // window : (t + t_pad_tgt) // window],
495
- pitchf[:, s // window : (t + t_pad_tgt) // window],
496
- times,
497
- index,
498
- big_npy,
499
- index_rate,
500
- version,
501
- protect,
502
- )[t_pad_tgt : -t_pad_tgt]
503
- else:
504
- return self.vc(
505
- model,
506
- net_g,
507
- sid,
508
- audio_pad[s : t + t_pad_tgt + window],
509
- None,
510
- None,
511
- times,
512
- index,
513
- big_npy,
514
- index_rate,
515
- version,
516
- protect,
517
- )[t_pad_tgt : -t_pad_tgt]
518
-
519
-
520
- def pipeline(
521
- self,
522
- model,
523
- net_g,
524
- sid,
525
- audio,
526
- input_audio_path,
527
- times,
528
- f0_up_key,
529
- f0_method,
530
- file_index,
531
- index_rate,
532
- if_f0,
533
- filter_radius,
534
- tgt_sr,
535
- resample_sr,
536
- rms_mix_rate,
537
- version,
538
- protect,
539
- crepe_hop_length,
540
- f0_autotune,
541
- f0_file=None,
542
- f0_min=50,
543
- f0_max=1100
544
- ):
545
- if (
546
- file_index != ""
547
- # and file_big_npy != ""
548
- # and os.path.exists(file_big_npy) == True
549
- and os.path.exists(file_index)
550
- and index_rate != 0
551
- ):
552
- try:
553
- index = faiss.read_index(file_index)
554
- # big_npy = np.load(file_big_npy)
555
- big_npy = index.reconstruct_n(0, index.ntotal)
556
- except:
557
- traceback.print_exc()
558
- index = big_npy = None
559
- else:
560
- index = big_npy = None
561
- audio = signal.filtfilt(bh, ah, audio)
562
- audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect")
563
- opt_ts = []
564
- if audio_pad.shape[0] > self.t_max:
565
- audio_sum = np.zeros_like(audio)
566
- for i in range(self.window):
567
- audio_sum += audio_pad[i : i - self.window]
568
- for t in range(self.t_center, audio.shape[0], self.t_center):
569
- opt_ts.append(
570
- t
571
- - self.t_query
572
- + np.where(
573
- np.abs(audio_sum[t - self.t_query : t + self.t_query])
574
- == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min()
575
- )[0][0]
576
- )
577
- s = 0
578
- audio_opt = []
579
- t = None
580
- t1 = ttime()
581
- audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect")
582
- p_len = audio_pad.shape[0] // self.window
583
- inp_f0 = None
584
- if hasattr(f0_file, "name"):
585
- try:
586
- with open(f0_file.name, "r") as f:
587
- lines = f.read().strip("\n").split("\n")
588
- inp_f0 = []
589
- for line in lines:
590
- inp_f0.append([float(i) for i in line.split(",")])
591
- inp_f0 = np.array(inp_f0, dtype="float32")
592
- except:
593
- traceback.print_exc()
594
- sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()
595
- pitch, pitchf = None, None
596
- if if_f0:
597
- pitch, pitchf = self.get_f0(
598
- input_audio_path,
599
- audio_pad,
600
- p_len,
601
- f0_up_key,
602
- f0_method,
603
- filter_radius,
604
- crepe_hop_length,
605
- f0_autotune,
606
- inp_f0,
607
- f0_min,
608
- f0_max
609
- )
610
- pitch = pitch[:p_len]
611
- pitchf = pitchf[:p_len]
612
- if self.device == "mps" or "xpu" in self.device:
613
- pitchf = pitchf.astype(np.float32)
614
- pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()
615
- pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()
616
- t2 = ttime()
617
- times[1] += t2 - t1
618
-
619
- with tqdm(total=len(opt_ts), desc="Processing", unit="window") as pbar:
620
- for i, t in enumerate(opt_ts):
621
- t = t // self.window * self.window
622
- start = s
623
- end = t + self.t_pad2 + self.window
624
- audio_slice = audio_pad[start:end]
625
- pitch_slice = pitch[:, start // self.window:end // self.window] if if_f0 else None
626
- pitchf_slice = pitchf[:, start // self.window:end // self.window] if if_f0 else None
627
- audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
628
- s = t
629
- pbar.update(1)
630
- pbar.refresh()
631
-
632
- audio_slice = audio_pad[t:]
633
- pitch_slice = pitch[:, t // self.window:] if if_f0 and t is not None else pitch
634
- pitchf_slice = pitchf[:, t // self.window:] if if_f0 and t is not None else pitchf
635
- audio_opt.append(self.vc(model, net_g, sid, audio_slice, pitch_slice, pitchf_slice, times, index, big_npy, index_rate, version, protect)[self.t_pad_tgt : -self.t_pad_tgt])
636
-
637
- audio_opt = np.concatenate(audio_opt)
638
- if rms_mix_rate != 1:
639
- audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)
640
- if tgt_sr != resample_sr >= 16000: # i.e. tgt_sr != resample_sr and resample_sr >= 16000
641
- audio_opt = librosa.resample(
642
- audio_opt, orig_sr=tgt_sr, target_sr=resample_sr
643
- )
644
- audio_max = np.abs(audio_opt).max() / 0.99
645
- max_int16 = 32768
646
- if audio_max > 1:
647
- max_int16 /= audio_max
648
- audio_opt = (audio_opt * max_int16).astype(np.int16)
649
- del pitch, pitchf, sid
650
- if torch.cuda.is_available():
651
- torch.cuda.empty_cache()
652
-
653
- print("Returning completed audio...")
654
- print("-------------------")
655
- return audio_opt
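
One step worth seeing in isolation is the coarse-pitch quantization at the end of get_f0: frequencies are mapped onto the mel scale and binned into 255 integer levels between f0_min and f0_max. A standalone sketch with the constants copied from the method (NumPy only):

import numpy as np

def f0_to_coarse(f0, f0_min=50.0, f0_max=1100.0):
    # mel scale: m = 1127 * ln(1 + f / 700)
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)
    f0_mel = 1127 * np.log(1 + f0 / 700)
    # map voiced frames into [1, 255]; unvoiced frames (f0 == 0) stay at 1
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
        f0_mel_max - f0_mel_min
    ) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > 255] = 255
    return np.rint(f0_mel).astype(np.int32)

print(f0_to_coarse(np.array([0.0, 110.0, 440.0, 880.0])))  # approximately [1, 23, 122, 217]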
 
spaces/Benson/text-generation/Examples/Descargar Entre Nosotros 3.29.md DELETED
@@ -1,122 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar entre nosotros 3.29 en su dispositivo</h1>
3
- <p>Among Us es uno de los juegos multijugador más populares y adictivos de los últimos años. Es un juego de trabajo en equipo y traición, donde tienes que trabajar junto a otros jugadores para preparar tu nave espacial para la salida, evitando ser asesinado por uno o más impostores que se esconden entre vosotros. Si usted está buscando una manera divertida y emocionante para pasar su tiempo con sus amigos o extraños en línea, entonces usted debe descargar definitivamente entre nosotros 3.29, la última versión del juego, en su dispositivo. </p>
4
- <p>En este artículo, le diremos todo lo que necesita saber sobre Among Us 3.29, incluyendo qué es, por qué debe descargarlo, cómo descargarlo en diferentes plataformas y cómo reproducirlo después de descargarlo. Al final de este artículo, estarás listo para unirte a los millones de jugadores que están disfrutando de este juego todos los días. </p>
5
- <h2>descargar entre nosotros 3.29</h2><br /><p><b><b>Download File</b> &#9733; <a href="https://bltlly.com/2v6IHW">https://bltlly.com/2v6IHW</a></b></p><br /><br />
6
- <h2>¿Qué hay entre nosotros 3.29? </h2>
7
- <h3>Una breve introducción al juego y sus características</h3>
8
- <p>Among Us es un juego que fue lanzado en 2018 por Innersloth, un pequeño estudio de juegos indie. Es un juego que se puede jugar en línea o a través de WiFi local con 4-15 jugadores. El juego tiene cuatro mapas diferentes para elegir: The Skeld, MIRA HQ, Polus y Airship. Cada mapa tiene su propio diseño, tareas, respiraderos y sabotajes. </p>
9
- <p>El juego tiene dos papeles principales: compañeros de equipo e impostores. Los compañeros de equipo son los jugadores inocentes que tienen que completar tareas alrededor del mapa para llenar una barra de progreso y ganar el juego. Los impostores son los enemigos ocultos que tienen que matar compañeros de tripulación, sabotear el mapa y evitar ser atrapados. El juego puede tener hasta tres impostores por partido, dependiendo de la configuración del juego. </p>
10
-
11
- <h3>La última actualización y lo que incluye</h3>
12
- <p>Among Us 3.29 es la última actualización para el juego que fue lanzado el 31 de marzo de 2021. Es una actualización gratuita que añade algunas nuevas características y mejoras al juego. Estos son algunos de los aspectos más destacados de esta actualización:</p>
13
- <ul>
14
- <li>Lista de amigos: Ahora puede agregar amigos usando un código de amigo único, enviar y recibir solicitudes de amistad, invitar a amigos a su lobby, ver con quién jugó recientemente, bloquear jugadores, mostrar u ocultar su código de amigo, y activar o desactivar las notificaciones de invitación del lobby. </li>
15
- <li>Cosméticos Ghostface: Ahora puedes obtener una máscara y túnicas Scream como parte de una colaboración con Scream hasta el 30 de abril. </li>
16
- <li> Opción de agitación de pantalla: Ahora puede activar o desactivar la agitación de pantalla en la configuración del juego. </li>
17
- <li>Orden de cosméticos: Los cosméticos ahora están dispuestos en un orden diferente. </li>
18
- <li>Corrección de errores: La actualización también corrige algunos errores y problemas con el juego. </li>
19
- </ul>
20
- <h2>Por qué debería descargar entre nosotros 3.29</ <h2>Por qué debería descargar entre nosotros 3.29</h2>
21
- <p>Si todavía te estás preguntando por qué deberías descargar Among Us 3.29, estas son algunas de las razones por las que no deberías perderte esta oportunidad:</p>
22
- <h3>Los beneficios de jugar con amigos y extraños en línea</h3>
23
- <p>Una de las mejores cosas de Among Us es que puedes jugar con cualquiera, en cualquier lugar y en cualquier momento. Puedes jugar con tus amigos y familiares, o puedes unirte a un lobby público y conocer gente nueva de todo el mundo. También puede utilizar el chat de voz o de texto para comunicarse con otros jugadores y tener conversaciones divertidas. Jugar con otros en línea puede ayudarte a mejorar tus habilidades sociales, hacer nuevos amigos y pasar un buen rato. </p>
24
- <h3>La diversión y el desafío de ser un compañero de equipo o un impostor</h3>
25
-
26
- <h3>Las opciones de personalización y juego disponibles</h3>
27
- <p>Una tercera razón por la que deberías descargar Among Us 3.29 es que te da muchas opciones de personalización y juegos para elegir. Puedes personalizar la apariencia, nombre, color, sombrero, visera, piel y mascota de tu personaje. También puedes cambiar la configuración del juego para adaptarla a tus preferencias, como el número de impostores, tareas, roles, velocidad, visión, tiempo de reutilización, tiempo de votación y más. También puedes probar diferentes modos, como Classic o Hide n Seek. Puedes hacer el juego tan fácil o tan difícil como quieras, y tener una experiencia diferente cada vez. </p>
28
- <h2>Cómo descargar entre nosotros 3.29 en diferentes plataformas</h2>
29
- <p>Ahora que sabes lo que es Among Us 3.29 y por qué deberías descargarlo, veamos cómo puedes descargarlo en diferentes plataformas. El juego está disponible en dispositivos Android, iOS, PC y Nintendo Switch. Estos son los pasos para descargarlo en cada plataforma:</p>
30
- <p></p> <h3>Cómo descargar en dispositivos Android</h3>
31
- <p>Si tienes un dispositivo Android, puedes descargar Among Us 3.29 desde Google Play Store. Estos son los pasos para hacerlo:</p>
32
- <ol>
33
- <li>Abra la aplicación Google Play Store en su dispositivo. </li>
34
- <li>Buscar entre nosotros en la barra de búsqueda. </li>
35
- <li>Toca el icono del juego y luego toca Instalar.</li>
36
- <li>Espera a que el juego se descargue e instale en tu dispositivo. </li>
37
- <li>Abre el juego y disfruta jugando. </li>
38
- </ol>
39
- <p>Nota: Necesitas tener Android 6.0 o superior y al menos 70 MB de espacio libre en tu dispositivo para descargar y jugar entre nosotros 3.29. </p>
40
- <h3>Cómo descargar en dispositivos iOS</h3>
41
- <p>Si tienes un dispositivo iOS, puedes descargar Among Us 3.29 desde la App Store. Estos son los pasos para hacerlo:</p>
42
- <ol>
43
- <li>Abra la aplicación App Store en su dispositivo. </li>
44
- <li>Buscar entre nosotros en la barra de búsqueda. </li>
45
- <li>Toque en el icono del juego y luego toque en Get.</li>
46
- <li>Espera a que el juego se descargue e instale en tu dispositivo. </li>
47
- <li>Abre el juego y disfruta jugando. </li>
48
- </ol>
49
-
50
- <h3>Cómo descargar en PC</h3>
51
- <p>Si tienes un PC, puedes descargar Among Us 3.29 de Steam o Epic Games Store. Estos son los pasos para hacerlo:</p>
52
- <ol>
53
- <li>Abra Steam o Epic Games Store en su PC.</li>
54
- <li>Buscar entre nosotros en la barra de búsqueda. </li>
55
- <li>Haga clic en el icono del juego y luego haga clic en Añadir al carrito o Comprar ahora.</li>
56
- <li>Complete el proceso de pago y espere a que el juego se descargue e instale en su PC.</li>
57
- <li>Abre el juego y disfruta jugando. </li>
58
- </ol>
59
- <p>Nota: Necesita tener Windows 7 SP1 o superior, 1 GB de RAM y 250 MB de espacio libre en su PC para descargar y jugar Entre nosotros 3.29. El juego cuesta $4.99 USD en Steam y Epic Games Store.</p>
60
- <h3>Cómo descargar en Nintendo Switch</h3>
61
- <p>Si tienes un Nintendo Switch, puedes descargar Among Us 3.29 desde la Nintendo eShop. Estos son los pasos para hacerlo:</p>
62
- <ol>
63
- <li>Abre la Nintendo eShop en tu Switch.</li>
64
- <li>Buscar entre nosotros en la barra de búsqueda. </li>
65
- <li>Seleccione el icono del juego y luego seleccione Proceder a la compra o descargar Demo.</li>
66
- <li>Complete el proceso de pago o confirme la descarga de la versión demo. </li>
67
- <li>Espera a que el juego se descargue e instale en tu Switch.</li>
68
- <li>Abre el juego y disfruta jugando. </li>
69
- </ol>
70
- <p>Nota: Necesitas tener una suscripción a Nintendo Switch Online, una cuenta de Nintendo y al menos 421 MB de espacio libre en tu Switch para descargar y jugar Among Us 3.29. El juego cuesta $5 USD en Nintendo eShop, pero también puedes probar la versión demo gratis. </p>
71
- <h2>Cómo jugar entre nosotros 3.29 Después de descargar</h2>
72
- <p>Después de haber descargado Entre nosotros 3.29 en su dispositivo, puede comenzar a jugar de inmediato. Aquí hay algunos consejos sobre cómo jugar después de descargar:</p>
73
- <h3>Cómo crear o unirse a un lobby</h3>
74
-
75
- <ol>
76
- <li>Seleccione Online desde el menú principal del juego. </li>
77
- <li>Seleccione Host si desea crear un lobby, o Buscar juego si desea unirse a un lobby. </li>
78
- <li>Si selecciona Host, elija un mapa, el número de impostores, el idioma de chat y el número máximo de jugadores. Luego toca Confirmar y espera a que otros jugadores se unan a tu lobby. También puedes invitar a tus amigos usando tu código de amigo o código de lobby. </li>
79
- <li>Si selecciona Buscar juego, elija un mapa, el número de impostores, el idioma de chat y el número máximo de jugadores. A continuación, toque en Confirmar y navegue por los lobbies disponibles. Toque en uno que se adapte a sus preferencias y únase a él. </li>
80
- </ol>
81
- <p>Para crear o unirse a un lobby a través de WiFi local, siga estos pasos:</p>
82
- <ol>
83
- <li>Seleccione Local en el menú principal del juego. </li>
84
- <li>Seleccione Host si desea crear un lobby, o Unirse al juego si desea unirse a un lobby. </li>
85
- <li>Si selecciona Host, elija un mapa, el número de impostores, el idioma de chat y el número máximo de jugadores. Luego toca Confirmar y espera a que otros jugadores se unan a tu lobby. Tú y los otros jugadores deben estar conectados a la misma red WiFi. </li>
86
- <li>Si selecciona Unirse al juego, navegue por los lobbies disponibles que están en la misma red WiFi que usted. Toca en uno que se adapte a tus preferencias y únete a él. </li>
87
- </ol>
88
- <h3>Cómo completar tareas o sabotear como compañero de equipo o como impostor</h3>
89
- <p>Una vez que estés en un lobby, el juego te asignará al azar un rol: compañero de equipo o impostor. Dependiendo de tu rol, tienes diferentes objetivos y habilidades. Como compañero de equipo, su objetivo es completar tareas alrededor del mapa o encontrar y expulsar a los impostores. Como impostor, tu objetivo es matar compañeros de equipo, sabotear el mapa y evitar ser expulsado. Aquí hay algunos consejos sobre cómo completar tareas o sabotear como compañero de equipo o como impostor:</p>
90
- <p>Como un compañero de equipo:</p>
91
- <ul>
92
-
93
- <li>Para hacer una tarea, vaya a la ubicación donde se encuentra la tarea e interactúe con ella. Algunas tareas son simples y requieren una sola interacción, como deslizar una tarjeta o escanear su identificación. Algunas tareas son complejas y requieren múltiples interacciones, como cableado o motores de carga. Algunas tareas son visuales y pueden ser vistas por otros jugadores, como el escaneo del medbay o el disparo de armas. Algunas tareas son comunes y compartidas por todos los compañeros de equipo, como las llaves o la tarjeta de embarque. </li>
94
- <li>Para ganar el juego como compañero de equipo, es necesario llenar la barra de tareas completando todas sus tareas, o expulsar a todos los impostores antes de que te maten. </li>
95
- </ul>
96
- <p>Como un impostor:</p>
97
- <ul>
98
- <li>Para matar compañeros de equipo, acércate a ellos y toca el botón de matar en la esquina inferior derecha de la pantalla. Solo puedes matar a un compañero de equipo a la vez, y tienes que esperar un tiempo de reutilización antes de poder matar de nuevo. El tiempo de reutilización depende de la configuración del juego. </li>
99
- <li>Para sabotear el mapa, toque el botón de sabotaje en la esquina inferior derecha de la pantalla. Verás un mapa con algunos iconos que indican lo que puedes sabotear. Puede sabotear puertas, luces, comunicaciones, oxígeno, reactores, estabilizadores sísmicos o laboratorios. Cada sabotaje tiene un efecto diferente y requiere respuestas diferentes de los compañeros de equipo. </li>
100
- <li>To win the game as the impostor, you need to kill enough crewmates so that their number is equal to or lower than yours, or stop them from fixing a critical sabotage before time runs out.</li>
101
- </ul>
102
- <h3>How to communicate and vote during meetings</h3>
103
- <p>During the game, meetings can be called either by reporting a dead body or by pressing the emergency button. Meetings are where players can communicate and vote for whoever they believe is the impostor. Here are some tips on how to communicate and vote during meetings:</p>
104
- <ul>
105
-
106
- <li>To vote for someone, tap their name and then tap the checkmark. You can also skip voting by tapping the skip button. You have a limited time to vote, depending on the game settings.</li>
107
- <li>To convince others of your innocence, or to accuse someone of being the impostor, you need to use logic, evidence, alibis, and deception. You can also use emotes to express your emotions.</li>
108
- </ul>
109
- <h2>Conclusion and FAQs</h2>
110
- <p>In conclusion, Among Us 3.29 is a fun and exciting multiplayer game that you should download to your device. It is a game of teamwork and betrayal, in which you work together with other players to prepare your spaceship for departure while avoiding being killed by one or more impostors hiding among you. It can be played online or over local WiFi with 4-15 players. You can also customize your character and tweak the game settings to make matches more challenging or more fun.</p>
111
- <p>If you have any questions about Among Us 3.29, here are some frequently asked questions that might help:</p>
112
- <table>
113
- <tr><th>Question</th><th>Answer</th></tr>
114
- <tr><td>How do I update Among Us to 3.29?</td><td>If you already have Among Us on your device, you can update it through the Google Play Store, App Store, Steam, Epic Games Store, or Nintendo eShop, depending on your platform. Then find the game and tap Update or Install. If you don't have Among Us on your device, you can follow the steps in this article to download it.</td></tr>
115
- <tr><td>How do I add friends in Among Us 3.29?</td><td>To add friends in Among Us 3.29, use the new friends list feature. To access it, tap the friends icon in the top right corner of the main menu. Then tap Add Friend and enter your friend's code, or share your own. You can also send and receive friend requests, invite friends to your lobby, see who you have played with recently, block players, show or hide your friend code, and turn lobby invite notifications on or off.</td></tr>
116
-
117
- <tr><td>How do I play Hide n Seek mode in Among Us 3.29?</td><td>To play Hide n Seek mode in Among Us 3.29, create or join a lobby with custom game settings. The recommended settings are: one impostor, low crewmate vision, high impostor vision, low kill cooldown, no confirmed ejects, no visual tasks, no anonymous votes, and no emergency meetings. The impostor announces themselves at the start of the game and then tries to find and kill all the crewmates, who hide and do tasks. The crewmates have to avoid the impostor and complete all their tasks to win.</td></tr>
118
- <tr><td>How do I report a bug or issue with Among Us 3.29?</td><td>To report a bug or issue with Among Us 3.29, you can contact Innersloth, the game's developers, through their website, email, Twitter, or Discord. You can also check their FAQ page for common problems and solutions.</td></tr>
119
- </table>
120
- <p>I hope this article was helpful and informative. If you enjoyed it, please share it with friends and family who might be interested in playing Among Us 3.29. Thanks for reading, and have fun playing!</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BernardoOlisan/vqganclip/taming-transformers/taming/modules/losses/segmentation.py DELETED
@@ -1,22 +0,0 @@
1
- import torch.nn as nn
2
- import torch.nn.functional as F
3
-
4
-
5
- class BCELoss(nn.Module):
6
- def forward(self, prediction, target):
7
- loss = F.binary_cross_entropy_with_logits(prediction,target)
8
- return loss, {}
9
-
10
-
11
- class BCELossWithQuant(nn.Module):
12
- def __init__(self, codebook_weight=1.):
13
- super().__init__()
14
- self.codebook_weight = codebook_weight
15
-
16
- def forward(self, qloss, target, prediction, split):
17
- bce_loss = F.binary_cross_entropy_with_logits(prediction,target)
18
- loss = bce_loss + self.codebook_weight*qloss
19
- return loss, {"{}/total_loss".format(split): loss.clone().detach().mean(),
20
- "{}/bce_loss".format(split): bce_loss.detach().mean(),
21
- "{}/quant_loss".format(split): qloss.detach().mean()
22
- }
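For context on how the deleted loss module was meant to be driven, here is a minimal sketch. The tensor shapes, the `"train"` split label, and the import path (inferred from the file's location under `taming-transformers/`) are assumptions, not values taken from this repository.

```python
# Minimal sketch, assuming typical VQ-segmentation shapes; not taken from the repo itself.
import torch
from taming.modules.losses.segmentation import BCELossWithQuant  # path inferred from file location

criterion = BCELossWithQuant(codebook_weight=1.0)

logits = torch.randn(2, 182, 64, 64)                    # per-class segmentation logits (assumed shape)
target = torch.randint(0, 2, (2, 182, 64, 64)).float()  # binary mask targets
qloss = torch.tensor(0.25)                              # quantization loss handed over by the VQ model

loss, log = criterion(qloss, target, logits, split="train")
print(loss.item(), log["train/bce_loss"].item())
```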
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat/src/routes/conversation/[id]/share/+server.ts DELETED
@@ -1,54 +0,0 @@
1
- import { base } from "$app/paths";
2
- import { PUBLIC_ORIGIN } from "$env/static/public";
3
- import { collections } from "$lib/server/database.js";
4
- import type { SharedConversation } from "$lib/types/SharedConversation.js";
5
- import { sha256 } from "$lib/utils/sha256.js";
6
- import { error } from "@sveltejs/kit";
7
- import { ObjectId } from "mongodb";
8
- import { nanoid } from "nanoid";
9
-
10
- export async function POST({ params, url, locals }) {
11
- const conversation = await collections.conversations.findOne({
12
- _id: new ObjectId(params.id),
13
- sessionId: locals.sessionId,
14
- });
15
-
16
- if (!conversation) {
17
- throw error(404, "Conversation not found");
18
- }
19
-
20
- const hash = await sha256(JSON.stringify(conversation.messages));
21
-
22
- const existingShare = await collections.sharedConversations.findOne({ hash });
23
-
24
- if (existingShare) {
25
- return new Response(
26
- JSON.stringify({
27
- url: getShareUrl(url, existingShare._id),
28
- }),
29
- { headers: { "Content-Type": "application/json" } }
30
- );
31
- }
32
-
33
- const shared: SharedConversation = {
34
- _id: nanoid(7),
35
- createdAt: new Date(),
36
- messages: conversation.messages,
37
- hash,
38
- updatedAt: new Date(),
39
- title: conversation.title,
40
- };
41
-
42
- await collections.sharedConversations.insertOne(shared);
43
-
44
- return new Response(
45
- JSON.stringify({
46
- url: getShareUrl(url, shared._id),
47
- }),
48
- { headers: { "Content-Type": "application/json" } }
49
- );
50
- }
51
-
52
- function getShareUrl(url: URL, shareId: string): string {
53
- return `${PUBLIC_ORIGIN || url.origin}${base}/r/${shareId}`;
54
- }
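To make the contract of the deleted share route concrete, a hypothetical client call follows. The host, the conversation id, and the `sessionId` cookie name (which depends on how `locals.sessionId` is populated in the app's hooks, not shown here) are all placeholders.

```python
# Hypothetical client; host, id, and cookie name are placeholders.
import requests

base = "http://localhost:5173"              # assumed SvelteKit dev server
conversation_id = "64b0c0ffee0123456789abcd"  # a MongoDB ObjectId string

resp = requests.post(
    f"{base}/conversation/{conversation_id}/share",
    cookies={"sessionId": "my-session"},  # must match the session that owns the conversation
    timeout=10,
)
resp.raise_for_status()
print(resp.json()["url"])  # e.g. <PUBLIC_ORIGIN>/r/<7-character nanoid>
```

Note the design choice visible in the route: sharing is idempotent per content, since the messages are hashed with SHA-256 and an existing share with the same hash is returned instead of inserting a duplicate.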
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BetterAPI/BetterChat_new/src/routes/r/[id]/+page.server.ts DELETED
@@ -1,18 +0,0 @@
1
- import type { PageServerLoad } from "./$types";
2
- import { collections } from "$lib/server/database";
3
- import { error } from "@sveltejs/kit";
4
-
5
- export const load: PageServerLoad = async ({ params }) => {
6
- const conversation = await collections.sharedConversations.findOne({
7
- _id: params.id,
8
- });
9
-
10
- if (!conversation) {
11
- throw error(404, "Conversation not found");
12
- }
13
-
14
- return {
15
- messages: conversation.messages,
16
- title: conversation.title,
17
- };
18
- };
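This page loader is the read side of the share flow above. A rough Python equivalent of its database contract, assuming a local MongoDB; the connection string and database name are guesses, and the collection name is taken only from the `collections.sharedConversations` identifier in the code.

```python
# Rough equivalent of the load() contract; connection string and db name are assumptions.
from pymongo import MongoClient

shared = MongoClient("mongodb://localhost:27017")["chat-ui"]["sharedConversations"]

doc = shared.find_one({"_id": "abc1234"})  # the 7-char nanoid is stored as a plain string, not an ObjectId
if doc is None:
    raise LookupError("Conversation not found")  # the route answers this case with a 404
print(doc["title"], len(doc["messages"]))
```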
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/sjisprober.py DELETED
@@ -1,105 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is mozilla.org code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- from typing import Union
29
-
30
- from .chardistribution import SJISDistributionAnalysis
31
- from .codingstatemachine import CodingStateMachine
32
- from .enums import MachineState, ProbingState
33
- from .jpcntx import SJISContextAnalysis
34
- from .mbcharsetprober import MultiByteCharSetProber
35
- from .mbcssm import SJIS_SM_MODEL
36
-
37
-
38
- class SJISProber(MultiByteCharSetProber):
39
- def __init__(self) -> None:
40
- super().__init__()
41
- self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
42
- self.distribution_analyzer = SJISDistributionAnalysis()
43
- self.context_analyzer = SJISContextAnalysis()
44
- self.reset()
45
-
46
- def reset(self) -> None:
47
- super().reset()
48
- self.context_analyzer.reset()
49
-
50
- @property
51
- def charset_name(self) -> str:
52
- return self.context_analyzer.charset_name
53
-
54
- @property
55
- def language(self) -> str:
56
- return "Japanese"
57
-
58
- def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
59
- assert self.coding_sm is not None
60
- assert self.distribution_analyzer is not None
61
-
62
- for i, byte in enumerate(byte_str):
63
- coding_state = self.coding_sm.next_state(byte)
64
- if coding_state == MachineState.ERROR:
65
- self.logger.debug(
66
- "%s %s prober hit error at byte %s",
67
- self.charset_name,
68
- self.language,
69
- i,
70
- )
71
- self._state = ProbingState.NOT_ME
72
- break
73
- if coding_state == MachineState.ITS_ME:
74
- self._state = ProbingState.FOUND_IT
75
- break
76
- if coding_state == MachineState.START:
77
- char_len = self.coding_sm.get_current_charlen()
78
- if i == 0:
79
- self._last_char[1] = byte
80
- self.context_analyzer.feed(
81
- self._last_char[2 - char_len :], char_len
82
- )
83
- self.distribution_analyzer.feed(self._last_char, char_len)
84
- else:
85
- self.context_analyzer.feed(
86
- byte_str[i + 1 - char_len : i + 3 - char_len], char_len
87
- )
88
- self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
89
-
90
- self._last_char[0] = byte_str[-1]
91
-
92
- if self.state == ProbingState.DETECTING:
93
- if self.context_analyzer.got_enough_data() and (
94
- self.get_confidence() > self.SHORTCUT_THRESHOLD
95
- ):
96
- self._state = ProbingState.FOUND_IT
97
-
98
- return self.state
99
-
100
- def get_confidence(self) -> float:
101
- assert self.distribution_analyzer is not None
102
-
103
- context_conf = self.context_analyzer.get_confidence()
104
- distrib_conf = self.distribution_analyzer.get_confidence()
105
- return max(context_conf, distrib_conf)
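As a quick sanity check of the prober's public surface, a short driver is sketched below. It uses pip's vendored copy to match the file path; outside of pip you would import from the standalone `chardet` package instead, and note that confidence on inputs this short can be unstable.

```python
# Feeding the vendored Shift_JIS prober directly; a longer sample gives a steadier confidence.
from pip._vendor.chardet.sjisprober import SJISProber

data = "こんにちは、世界。日本語のテキストです。".encode("shift_jis")

prober = SJISProber()
prober.feed(data)
print(prober.charset_name, prober.language, round(prober.get_confidence(), 3))
```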
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pyparsing/common.py DELETED
@@ -1,424 +0,0 @@
1
- # common.py
2
- from .core import *
3
- from .helpers import delimited_list, any_open_tag, any_close_tag
4
- from datetime import datetime
5
-
6
-
7
- # some other useful expressions - using lower-case class name since we are really using this as a namespace
8
- class pyparsing_common:
9
- """Here are some common low-level expressions that may be useful in
10
- jump-starting parser development:
11
-
12
- - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
13
- :class:`scientific notation<sci_real>`)
14
- - common :class:`programming identifiers<identifier>`
15
- - network addresses (:class:`MAC<mac_address>`,
16
- :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
17
- - ISO8601 :class:`dates<iso8601_date>` and
18
- :class:`datetime<iso8601_datetime>`
19
- - :class:`UUID<uuid>`
20
- - :class:`comma-separated list<comma_separated_list>`
21
- - :class:`url`
22
-
23
- Parse actions:
24
-
25
- - :class:`convertToInteger`
26
- - :class:`convertToFloat`
27
- - :class:`convertToDate`
28
- - :class:`convertToDatetime`
29
- - :class:`stripHTMLTags`
30
- - :class:`upcaseTokens`
31
- - :class:`downcaseTokens`
32
-
33
- Example::
34
-
35
- pyparsing_common.number.runTests('''
36
- # any int or real number, returned as the appropriate type
37
- 100
38
- -100
39
- +100
40
- 3.14159
41
- 6.02e23
42
- 1e-12
43
- ''')
44
-
45
- pyparsing_common.fnumber.runTests('''
46
- # any int or real number, returned as float
47
- 100
48
- -100
49
- +100
50
- 3.14159
51
- 6.02e23
52
- 1e-12
53
- ''')
54
-
55
- pyparsing_common.hex_integer.runTests('''
56
- # hex numbers
57
- 100
58
- FF
59
- ''')
60
-
61
- pyparsing_common.fraction.runTests('''
62
- # fractions
63
- 1/2
64
- -3/4
65
- ''')
66
-
67
- pyparsing_common.mixed_integer.runTests('''
68
- # mixed fractions
69
- 1
70
- 1/2
71
- -3/4
72
- 1-3/4
73
- ''')
74
-
75
- import uuid
76
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
77
- pyparsing_common.uuid.runTests('''
78
- # uuid
79
- 12345678-1234-5678-1234-567812345678
80
- ''')
81
-
82
- prints::
83
-
84
- # any int or real number, returned as the appropriate type
85
- 100
86
- [100]
87
-
88
- -100
89
- [-100]
90
-
91
- +100
92
- [100]
93
-
94
- 3.14159
95
- [3.14159]
96
-
97
- 6.02e23
98
- [6.02e+23]
99
-
100
- 1e-12
101
- [1e-12]
102
-
103
- # any int or real number, returned as float
104
- 100
105
- [100.0]
106
-
107
- -100
108
- [-100.0]
109
-
110
- +100
111
- [100.0]
112
-
113
- 3.14159
114
- [3.14159]
115
-
116
- 6.02e23
117
- [6.02e+23]
118
-
119
- 1e-12
120
- [1e-12]
121
-
122
- # hex numbers
123
- 100
124
- [256]
125
-
126
- FF
127
- [255]
128
-
129
- # fractions
130
- 1/2
131
- [0.5]
132
-
133
- -3/4
134
- [-0.75]
135
-
136
- # mixed fractions
137
- 1
138
- [1]
139
-
140
- 1/2
141
- [0.5]
142
-
143
- -3/4
144
- [-0.75]
145
-
146
- 1-3/4
147
- [1.75]
148
-
149
- # uuid
150
- 12345678-1234-5678-1234-567812345678
151
- [UUID('12345678-1234-5678-1234-567812345678')]
152
- """
153
-
154
- convert_to_integer = token_map(int)
155
- """
156
- Parse action for converting parsed integers to Python int
157
- """
158
-
159
- convert_to_float = token_map(float)
160
- """
161
- Parse action for converting parsed numbers to Python float
162
- """
163
-
164
- integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
165
- """expression that parses an unsigned integer, returns an int"""
166
-
167
- hex_integer = (
168
- Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
169
- )
170
- """expression that parses a hexadecimal integer, returns an int"""
171
-
172
- signed_integer = (
173
- Regex(r"[+-]?\d+")
174
- .set_name("signed integer")
175
- .set_parse_action(convert_to_integer)
176
- )
177
- """expression that parses an integer with optional leading sign, returns an int"""
178
-
179
- fraction = (
180
- signed_integer().set_parse_action(convert_to_float)
181
- + "/"
182
- + signed_integer().set_parse_action(convert_to_float)
183
- ).set_name("fraction")
184
- """fractional expression of an integer divided by an integer, returns a float"""
185
- fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
186
-
187
- mixed_integer = (
188
- fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
189
- ).set_name("fraction or mixed integer-fraction")
190
- """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
191
- mixed_integer.add_parse_action(sum)
192
-
193
- real = (
194
- Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
195
- .set_name("real number")
196
- .set_parse_action(convert_to_float)
197
- )
198
- """expression that parses a floating point number and returns a float"""
199
-
200
- sci_real = (
201
- Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
202
- .set_name("real number with scientific notation")
203
- .set_parse_action(convert_to_float)
204
- )
205
- """expression that parses a floating point number with optional
206
- scientific notation and returns a float"""
207
-
208
- # streamlining this expression makes the docs nicer-looking
209
- number = (sci_real | real | signed_integer).setName("number").streamline()
210
- """any numeric expression, returns the corresponding Python type"""
211
-
212
- fnumber = (
213
- Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
214
- .set_name("fnumber")
215
- .set_parse_action(convert_to_float)
216
- )
217
- """any int or real number, returned as float"""
218
-
219
- identifier = Word(identchars, identbodychars).set_name("identifier")
220
- """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
221
-
222
- ipv4_address = Regex(
223
- r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
224
- ).set_name("IPv4 address")
225
- "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
226
-
227
- _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
228
- _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
229
- "full IPv6 address"
230
- )
231
- _short_ipv6_address = (
232
- Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
233
- + "::"
234
- + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
235
- ).set_name("short IPv6 address")
236
- _short_ipv6_address.add_condition(
237
- lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
238
- )
239
- _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
240
- ipv6_address = Combine(
241
- (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
242
- "IPv6 address"
243
- )
244
- ).set_name("IPv6 address")
245
- "IPv6 address (long, short, or mixed form)"
246
-
247
- mac_address = Regex(
248
- r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
249
- ).set_name("MAC address")
250
- "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
251
-
252
- @staticmethod
253
- def convert_to_date(fmt: str = "%Y-%m-%d"):
254
- """
255
- Helper to create a parse action for converting parsed date string to Python datetime.date
256
-
257
- Params -
258
- - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
259
-
260
- Example::
261
-
262
- date_expr = pyparsing_common.iso8601_date.copy()
263
- date_expr.setParseAction(pyparsing_common.convertToDate())
264
- print(date_expr.parseString("1999-12-31"))
265
-
266
- prints::
267
-
268
- [datetime.date(1999, 12, 31)]
269
- """
270
-
271
- def cvt_fn(ss, ll, tt):
272
- try:
273
- return datetime.strptime(tt[0], fmt).date()
274
- except ValueError as ve:
275
- raise ParseException(ss, ll, str(ve))
276
-
277
- return cvt_fn
278
-
279
- @staticmethod
280
- def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
281
- """Helper to create a parse action for converting parsed
282
- datetime string to Python datetime.datetime
283
-
284
- Params -
285
- - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
286
-
287
- Example::
288
-
289
- dt_expr = pyparsing_common.iso8601_datetime.copy()
290
- dt_expr.setParseAction(pyparsing_common.convertToDatetime())
291
- print(dt_expr.parseString("1999-12-31T23:59:59.999"))
292
-
293
- prints::
294
-
295
- [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
296
- """
297
-
298
- def cvt_fn(s, l, t):
299
- try:
300
- return datetime.strptime(t[0], fmt)
301
- except ValueError as ve:
302
- raise ParseException(s, l, str(ve))
303
-
304
- return cvt_fn
305
-
306
- iso8601_date = Regex(
307
- r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
308
- ).set_name("ISO8601 date")
309
- "ISO8601 date (``yyyy-mm-dd``)"
310
-
311
- iso8601_datetime = Regex(
312
- r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
313
- ).set_name("ISO8601 datetime")
314
- "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
315
-
316
- uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
317
- "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
318
-
319
- _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
320
-
321
- @staticmethod
322
- def strip_html_tags(s: str, l: int, tokens: ParseResults):
323
- """Parse action to remove HTML tags from web page HTML source
324
-
325
- Example::
326
-
327
- # strip HTML links from normal text
328
- text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
329
- td, td_end = makeHTMLTags("TD")
330
- table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
331
- print(table_text.parseString(text).body)
332
-
333
- Prints::
334
-
335
- More info at the pyparsing wiki page
336
- """
337
- return pyparsing_common._html_stripper.transform_string(tokens[0])
338
-
339
- _commasepitem = (
340
- Combine(
341
- OneOrMore(
342
- ~Literal(",")
343
- + ~LineEnd()
344
- + Word(printables, exclude_chars=",")
345
- + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
346
- )
347
- )
348
- .streamline()
349
- .set_name("commaItem")
350
- )
351
- comma_separated_list = delimited_list(
352
- Opt(quoted_string.copy() | _commasepitem, default="")
353
- ).set_name("comma separated list")
354
- """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
355
-
356
- upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
357
- """Parse action to convert tokens to upper case."""
358
-
359
- downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
360
- """Parse action to convert tokens to lower case."""
361
-
362
- # fmt: off
363
- url = Regex(
364
- # https://mathiasbynens.be/demo/url-regex
365
- # https://gist.github.com/dperini/729294
366
- r"^" +
367
- # protocol identifier (optional)
368
- # short syntax // still required
369
- r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
370
- # user:pass BasicAuth (optional)
371
- r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
372
- r"(?P<host>" +
373
- # IP address exclusion
374
- # private & local networks
375
- r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
376
- r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
377
- r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
378
- # IP address dotted notation octets
379
- # excludes loopback network 0.0.0.0
380
- # excludes reserved space >= 224.0.0.0
381
- # excludes network & broadcast addresses
382
- # (first & last IP address of each class)
383
- r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
384
- r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
385
- r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
386
- r"|" +
387
- # host & domain names, may end with dot
388
- # can be replaced by a shortest alternative
389
- # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
390
- r"(?:" +
391
- r"(?:" +
392
- r"[a-z0-9\u00a1-\uffff]" +
393
- r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
394
- r")?" +
395
- r"[a-z0-9\u00a1-\uffff]\." +
396
- r")+" +
397
- # TLD identifier name, may end with dot
398
- r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
399
- r")" +
400
- # port number (optional)
401
- r"(:(?P<port>\d{2,5}))?" +
402
- # resource path (optional)
403
- r"(?P<path>\/[^?# ]*)?" +
404
- # query string (optional)
405
- r"(\?(?P<query>[^#]*))?" +
406
- # fragment (optional)
407
- r"(#(?P<fragment>\S*))?" +
408
- r"$"
409
- ).set_name("url")
410
- # fmt: on
411
-
412
- # pre-PEP8 compatibility names
413
- convertToInteger = convert_to_integer
414
- convertToFloat = convert_to_float
415
- convertToDate = convert_to_date
416
- convertToDatetime = convert_to_datetime
417
- stripHTMLTags = strip_html_tags
418
- upcaseTokens = upcase_tokens
419
- downcaseTokens = downcase_tokens
420
-
421
-
422
- _builtin_exprs = [
423
- v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
424
- ]
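A few of the ready-made expressions above in action. This uses pip's vendored copy to match the file path, but a plain `import pyparsing` works the same way when the package is installed directly.

```python
from pip._vendor.pyparsing import pyparsing_common as ppc

print(ppc.number.parse_string("6.02e23")[0])            # -> 6.02e+23, as a Python float
print(ppc.hex_integer.parse_string("FF")[0])            # -> 255
print(ppc.ipv4_address.parse_string("192.168.0.1")[0])  # -> '192.168.0.1'
print(ppc.url.parse_string("https://example.com/a?b=c")["host"])  # named group from the url regex
```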
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/_securetransport/__init__.py DELETED
File without changes
spaces/Boadiwaa/Recipes/openai/validators.py DELETED
@@ -1,860 +0,0 @@
1
- import os
2
- import sys
3
- from typing import Any, Callable, NamedTuple, Optional
4
-
5
- import pandas as pd
6
-
7
-
8
- class Remediation(NamedTuple):
9
- name: str
10
- immediate_msg: Optional[str] = None
11
- necessary_msg: Optional[str] = None
12
- necessary_fn: Optional[Callable[[Any], Any]] = None
13
- optional_msg: Optional[str] = None
14
- optional_fn: Optional[Callable[[Any], Any]] = None
15
- error_msg: Optional[str] = None
16
-
17
-
18
- def num_examples_validator(df):
19
- """
20
- This validator will only print out the number of examples and recommend to the user to increase the number of examples if less than 100.
21
- """
22
- MIN_EXAMPLES = 100
23
- optional_suggestion = (
24
- ""
25
- if len(df) >= MIN_EXAMPLES
26
- else ". In general, we recommend having at least a few hundred examples. We've found that performance tends to linearly increase for every doubling of the number of examples"
27
- )
28
- immediate_msg = (
29
- f"\n- Your file contains {len(df)} prompt-completion pairs{optional_suggestion}"
30
- )
31
- return Remediation(name="num_examples", immediate_msg=immediate_msg)
32
-
33
-
34
- def necessary_column_validator(df, necessary_column):
35
- """
36
- This validator will ensure that the necessary column is present in the dataframe.
37
- """
38
-
39
- def lower_case_column(df, column):
40
- cols = [c for c in df.columns if c.lower() == column]
41
- df.rename(columns={cols[0]: column.lower()}, inplace=True)
42
- return df
43
-
44
- immediate_msg = None
45
- necessary_fn = None
46
- necessary_msg = None
47
- error_msg = None
48
-
49
- if necessary_column not in df.columns:
50
- if necessary_column in [c.lower() for c in df.columns]:
51
-
52
- def lower_case_column_creator(df):
53
- return lower_case_column(df, necessary_column)
54
-
55
- necessary_fn = lower_case_column_creator
56
- immediate_msg = (
57
- f"\n- The `{necessary_column}` column/key should be lowercase"
58
- )
59
- necessary_msg = f"Lower case column name to `{necessary_column}`"
60
- else:
61
- error_msg = f"`{necessary_column}` column/key is missing. Please make sure you name your columns/keys appropriately, then retry"
62
-
63
- return Remediation(
64
- name="necessary_column",
65
- immediate_msg=immediate_msg,
66
- necessary_msg=necessary_msg,
67
- necessary_fn=necessary_fn,
68
- error_msg=error_msg,
69
- )
70
-
71
-
72
- def additional_column_validator(df, fields=["prompt", "completion"]):
73
- """
74
- This validator will remove additional columns from the dataframe.
75
- """
76
- additional_columns = []
77
- necessary_msg = None
78
- immediate_msg = None
79
- necessary_fn = None
80
- if len(df.columns) > 2:
81
- additional_columns = [c for c in df.columns if c not in fields]
82
- warn_message = ""
83
- for ac in additional_columns:
84
- dups = [c for c in additional_columns if ac in c]
85
- if len(dups) > 0:
86
- warn_message += f"\n WARNING: Some of the additional columns/keys contain `{ac}` in their name. These will be ignored, and the column/key `{ac}` will be used instead. This could also result from a duplicate column/key in the provided file."
87
- immediate_msg = f"\n- The input file should contain exactly two columns/keys per row. Additional columns/keys present are: {additional_columns}{warn_message}"
88
- necessary_msg = f"Remove additional columns/keys: {additional_columns}"
89
-
90
- def necessary_fn(x):
91
- return x[fields]
92
-
93
- return Remediation(
94
- name="additional_column",
95
- immediate_msg=immediate_msg,
96
- necessary_msg=necessary_msg,
97
- necessary_fn=necessary_fn,
98
- )
99
-
100
-
101
- def non_empty_field_validator(df, field="completion"):
102
- """
103
- This validator will ensure that no completion is empty.
104
- """
105
- necessary_msg = None
106
- necessary_fn = None
107
- immediate_msg = None
108
-
109
- if df[field].apply(lambda x: x == "").any() or df[field].isnull().any():
110
- empty_rows = (df[field] == "") | (df[field].isnull())
111
- empty_indexes = df.reset_index().index[empty_rows].tolist()
112
- immediate_msg = f"\n- `{field}` column/key should not contain empty strings. These are rows: {empty_indexes}"
113
-
114
- def necessary_fn(x):
115
- return x[x[field] != ""].dropna(subset=[field])
116
-
117
- necessary_msg = f"Remove {len(empty_indexes)} rows with empty {field}s"
118
- return Remediation(
119
- name=f"empty_{field}",
120
- immediate_msg=immediate_msg,
121
- necessary_msg=necessary_msg,
122
- necessary_fn=necessary_fn,
123
- )
124
-
125
-
126
- def duplicated_rows_validator(df, fields=["prompt", "completion"]):
127
- """
128
- This validator will suggest to the user to remove duplicate rows if they exist.
129
- """
130
- duplicated_rows = df.duplicated(subset=fields)
131
- duplicated_indexes = df.reset_index().index[duplicated_rows].tolist()
132
- immediate_msg = None
133
- optional_msg = None
134
- optional_fn = None
135
-
136
- if len(duplicated_indexes) > 0:
137
- immediate_msg = f"\n- There are {len(duplicated_indexes)} duplicated {'-'.join(fields)} sets. These are rows: {duplicated_indexes}"
138
- optional_msg = f"Remove {len(duplicated_indexes)} duplicate rows"
139
-
140
- def optional_fn(x):
141
- return x.drop_duplicates(subset=fields)
142
-
143
- return Remediation(
144
- name="duplicated_rows",
145
- immediate_msg=immediate_msg,
146
- optional_msg=optional_msg,
147
- optional_fn=optional_fn,
148
- )
149
-
150
-
151
- def long_examples_validator(df):
152
- """
153
- This validator will suggest to the user to remove examples that are too long.
154
- """
155
- immediate_msg = None
156
- optional_msg = None
157
- optional_fn = None
158
-
159
- ft_type = infer_task_type(df)
160
- if ft_type != "open-ended generation":
161
- long_examples = df.apply(
162
- lambda x: len(x.prompt) + len(x.completion) > 10000, axis=1
163
- )
164
- long_indexes = df.reset_index().index[long_examples].tolist()
165
-
166
- if len(long_indexes) > 0:
167
- immediate_msg = f"\n- There are {len(long_indexes)} examples that are very long. These are rows: {long_indexes}\nFor conditional generation, and for classification the examples shouldn't be longer than 2048 tokens."
168
- optional_msg = f"Remove {len(long_indexes)} long examples"
169
-
170
- def optional_fn(x):
171
- return x.drop(long_indexes)
172
-
173
- return Remediation(
174
- name="long_examples",
175
- immediate_msg=immediate_msg,
176
- optional_msg=optional_msg,
177
- optional_fn=optional_fn,
178
- )
179
-
180
-
181
- def common_prompt_suffix_validator(df):
182
- """
183
- This validator will suggest to add a common suffix to the prompt if one doesn't already exist in case of classification or conditional generation.
184
- """
185
- error_msg = None
186
- immediate_msg = None
187
- optional_msg = None
188
- optional_fn = None
189
-
190
- # Find a suffix which is not contained within the prompt otherwise
191
- suggested_suffix = "\n\n### =>\n\n"
192
- suffix_options = [
193
- " ->",
194
- "\n\n###\n\n",
195
- "\n\n===\n\n",
196
- "\n\n---\n\n",
197
- "\n\n===>\n\n",
198
- "\n\n--->\n\n",
199
- ]
200
- for suffix_option in suffix_options:
201
- if suffix_option == " ->":
202
- if df.prompt.str.contains("\n").any():
203
- continue
204
- if df.prompt.str.contains(suffix_option, regex=False).any():
205
- continue
206
- suggested_suffix = suffix_option
207
- break
208
- display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
209
-
210
- ft_type = infer_task_type(df)
211
- if ft_type == "open-ended generation":
212
- return Remediation(name="common_suffix")
213
-
214
- def add_suffix(x, suffix):
215
- x["prompt"] += suffix
216
- return x
217
-
218
- common_suffix = get_common_xfix(df.prompt, xfix="suffix")
219
- if (df.prompt == common_suffix).all():
220
- error_msg = f"All prompts are identical: `{common_suffix}`\nConsider leaving the prompts blank if you want to do open-ended generation, otherwise ensure prompts are different"
221
- return Remediation(name="common_suffix", error_msg=error_msg)
222
-
223
- if common_suffix != "":
224
- common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
225
- immediate_msg = (
226
- f"\n- All prompts end with suffix `{common_suffix_new_line_handled}`"
227
- )
228
- if len(common_suffix) > 10:
229
- immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
230
- if (
231
- df.prompt.str[: -len(common_suffix)]
232
- .str.contains(common_suffix, regex=False)
233
- .any()
234
- ):
235
- immediate_msg += f"\n WARNING: Some of your prompts contain the suffix `{common_suffix}` more than once. We strongly suggest that you review your prompts and add a unique suffix"
236
-
237
- else:
238
- immediate_msg = "\n- Your data does not contain a common separator at the end of your prompts. Having a separator string appended to the end of the prompt makes it clearer to the fine-tuned model where the completion should begin. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples. If you intend to do open-ended generation, then you should leave the prompts empty"
239
-
240
- if common_suffix == "":
241
- optional_msg = (
242
- f"Add a suffix separator `{display_suggested_suffix}` to all prompts"
243
- )
244
-
245
- def optional_fn(x):
246
- return add_suffix(x, suggested_suffix)
247
-
248
- return Remediation(
249
- name="common_completion_suffix",
250
- immediate_msg=immediate_msg,
251
- optional_msg=optional_msg,
252
- optional_fn=optional_fn,
253
- error_msg=error_msg,
254
- )
255
-
256
-
257
- def common_prompt_prefix_validator(df):
258
- """
259
- This validator will suggest to remove a common prefix from the prompt if a long one exist.
260
- """
261
- MAX_PREFIX_LEN = 12
262
-
263
- immediate_msg = None
264
- optional_msg = None
265
- optional_fn = None
266
-
267
- common_prefix = get_common_xfix(df.prompt, xfix="prefix")
268
- if common_prefix == "":
269
- return Remediation(name="common_prefix")
270
-
271
- def remove_common_prefix(x, prefix):
272
- x["prompt"] = x["prompt"].str[len(prefix) :]
273
- return x
274
-
275
- if (df.prompt == common_prefix).all():
276
- # already handled by common_suffix_validator
277
- return Remediation(name="common_prefix")
278
-
279
- if common_prefix != "":
280
- immediate_msg = f"\n- All prompts start with prefix `{common_prefix}`"
281
- if MAX_PREFIX_LEN < len(common_prefix):
282
- immediate_msg += ". Fine-tuning doesn't require the instruction specifying the task, or a few-shot example scenario. Most of the time you should only add the input data into the prompt, and the desired output into the completion"
283
- optional_msg = f"Remove prefix `{common_prefix}` from all prompts"
284
-
285
- def optional_fn(x):
286
- return remove_common_prefix(x, common_prefix)
287
-
288
- return Remediation(
289
- name="common_prompt_prefix",
290
- immediate_msg=immediate_msg,
291
- optional_msg=optional_msg,
292
- optional_fn=optional_fn,
293
- )
294
-
295
-
296
- def common_completion_prefix_validator(df):
297
- """
298
- This validator will suggest to remove a common prefix from the completion if a long one exist.
299
- """
300
- MAX_PREFIX_LEN = 5
301
-
302
- common_prefix = get_common_xfix(df.completion, xfix="prefix")
303
- ws_prefix = len(common_prefix) > 0 and common_prefix[0] == " "
304
- if len(common_prefix) < MAX_PREFIX_LEN:
305
- return Remediation(name="common_prefix")
306
-
307
- def remove_common_prefix(x, prefix, ws_prefix):
308
- x["completion"] = x["completion"].str[len(prefix) :]
309
- if ws_prefix:
310
- # keep the single whitespace as prefix
311
- x["completion"] = " " + x["completion"]
312
- return x
313
-
314
- if (df.completion == common_prefix).all():
315
- # already handled by common_suffix_validator
316
- return Remediation(name="common_prefix")
317
-
318
- immediate_msg = f"\n- All completions start with prefix `{common_prefix}`. Most of the time you should only add the output data into the completion, without any prefix"
319
- optional_msg = f"Remove prefix `{common_prefix}` from all completions"
320
-
321
- def optional_fn(x):
322
- return remove_common_prefix(x, common_prefix, ws_prefix)
323
-
324
- return Remediation(
325
- name="common_completion_prefix",
326
- immediate_msg=immediate_msg,
327
- optional_msg=optional_msg,
328
- optional_fn=optional_fn,
329
- )
330
-
331
-
332
- def common_completion_suffix_validator(df):
333
- """
334
- This validator will suggest to add a common suffix to the completion if one doesn't already exist in case of classification or conditional generation.
335
- """
336
- error_msg = None
337
- immediate_msg = None
338
- optional_msg = None
339
- optional_fn = None
340
-
341
- ft_type = infer_task_type(df)
342
- if ft_type == "open-ended generation" or ft_type == "classification":
343
- return Remediation(name="common_suffix")
344
-
345
- common_suffix = get_common_xfix(df.completion, xfix="suffix")
346
- if (df.completion == common_suffix).all():
347
- error_msg = f"All completions are identical: `{common_suffix}`\nEnsure completions are different, otherwise the model will just repeat `{common_suffix}`"
348
- return Remediation(name="common_suffix", error_msg=error_msg)
349
-
350
- # Find a suffix which is not contained within the completion otherwise
351
- suggested_suffix = " [END]"
352
- suffix_options = [
353
- "\n",
354
- ".",
355
- " END",
356
- "***",
357
- "+++",
358
- "&&&",
359
- "$$$",
360
- "@@@",
361
- "%%%",
362
- ]
363
- for suffix_option in suffix_options:
364
- if df.completion.str.contains(suffix_option, regex=False).any():
365
- continue
366
- suggested_suffix = suffix_option
367
- break
368
- display_suggested_suffix = suggested_suffix.replace("\n", "\\n")
369
-
370
- def add_suffix(x, suffix):
371
- x["completion"] += suffix
372
- return x
373
-
374
- if common_suffix != "":
375
- common_suffix_new_line_handled = common_suffix.replace("\n", "\\n")
376
- immediate_msg = (
377
- f"\n- All completions end with suffix `{common_suffix_new_line_handled}`"
378
- )
379
- if len(common_suffix) > 10:
380
- immediate_msg += f". This suffix seems very long. Consider replacing with a shorter suffix, such as `{display_suggested_suffix}`"
381
- if (
382
- df.completion.str[: -len(common_suffix)]
383
- .str.contains(common_suffix, regex=False)
384
- .any()
385
- ):
386
- immediate_msg += f"\n WARNING: Some of your completions contain the suffix `{common_suffix}` more than once. We suggest that you review your completions and add a unique ending"
387
-
388
- else:
389
- immediate_msg = "\n- Your data does not contain a common ending at the end of your completions. Having a common ending string appended to the end of the completion makes it clearer to the fine-tuned model where the completion should end. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more detail and examples."
390
-
391
- if common_suffix == "":
392
- optional_msg = (
393
- f"Add a suffix ending `{display_suggested_suffix}` to all completions"
394
- )
395
-
396
- def optional_fn(x):
397
- return add_suffix(x, suggested_suffix)
398
-
399
- return Remediation(
400
- name="common_completion_suffix",
401
- immediate_msg=immediate_msg,
402
- optional_msg=optional_msg,
403
- optional_fn=optional_fn,
404
- error_msg=error_msg,
405
- )
406
-
407
-
408
- def completions_space_start_validator(df):
409
- """
410
- This validator will suggest to add a space at the start of the completion if it doesn't already exist. This helps with tokenization.
411
- """
412
-
413
- def add_space_start(x):
414
- x["completion"] = x["completion"].apply(
415
- lambda x: ("" if x[0] == " " else " ") + x
416
- )
417
- return x
418
-
419
- optional_msg = None
420
- optional_fn = None
421
- immediate_msg = None
422
-
423
- if df.completion.str[:1].nunique() != 1 or df.completion.values[0][0] != " ":
424
- immediate_msg = "\n- The completion should start with a whitespace character (` `). This tends to produce better results due to the tokenization we use. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details"
425
- optional_msg = "Add a whitespace character to the beginning of the completion"
426
- optional_fn = add_space_start
427
- return Remediation(
428
- name="completion_space_start",
429
- immediate_msg=immediate_msg,
430
- optional_msg=optional_msg,
431
- optional_fn=optional_fn,
432
- )
433
-
434
-
435
- def lower_case_validator(df, column):
436
- """
437
- This validator will suggest to lowercase the column values, if more than a third of letters are uppercase.
438
- """
439
-
440
- def lower_case(x):
441
- x[column] = x[column].str.lower()
442
- return x
443
-
444
- count_upper = (
445
- df[column]
446
- .apply(lambda x: sum(1 for c in x if c.isalpha() and c.isupper()))
447
- .sum()
448
- )
449
- count_lower = (
450
- df[column]
451
- .apply(lambda x: sum(1 for c in x if c.isalpha() and c.islower()))
452
- .sum()
453
- )
454
-
455
- if count_upper * 2 > count_lower:
456
- return Remediation(
457
- name="lower_case",
458
- immediate_msg=f"\n- More than a third of your `{column}` column/key is uppercase. Uppercase {column}s tends to perform worse than a mixture of case encountered in normal language. We recommend to lower case the data if that makes sense in your domain. See https://beta.openai.com/docs/guides/fine-tuning/preparing-your-dataset for more details",
459
- optional_msg=f"Lowercase all your data in column/key `{column}`",
460
- optional_fn=lower_case,
461
- )
462
-
463
-
464
- def read_any_format(fname, fields=["prompt", "completion"]):
465
- """
466
- This function will read a file saved in .csv, .json, .txt, .xlsx or .tsv format using pandas.
467
- - for .xlsx it will read the first sheet
468
- - for .txt it will assume completions and split on newline
469
- """
470
- remediation = None
471
- necessary_msg = None
472
- immediate_msg = None
473
- error_msg = None
474
- df = None
475
-
476
- if os.path.isfile(fname):
477
- for ending, separator in [(".csv", ","), (".tsv", "\t")]:
478
- if fname.lower().endswith(ending):
479
- immediate_msg = f"\n- Based on your file extension, your file is formatted as a {ending[1:].upper()} file"
480
- necessary_msg = (
481
- f"Your format `{ending[1:].upper()}` will be converted to `JSONL`"
482
- )
483
- df = pd.read_csv(fname, sep=separator, dtype=str)
484
- if fname.lower().endswith(".xlsx"):
485
- immediate_msg = "\n- Based on your file extension, your file is formatted as an Excel file"
486
- necessary_msg = "Your format `XLSX` will be converted to `JSONL`"
487
- xls = pd.ExcelFile(fname)
488
- sheets = xls.sheet_names
489
- if len(sheets) > 1:
490
- immediate_msg += "\n- Your Excel file contains more than one sheet. Please either save as csv or ensure all data is present in the first sheet. WARNING: Reading only the first sheet..."
491
- df = pd.read_excel(fname, dtype=str)
492
- if fname.lower().endswith(".txt"):
493
- immediate_msg = "\n- Based on your file extension, you provided a text file"
494
- necessary_msg = "Your format `TXT` will be converted to `JSONL`"
495
- with open(fname, "r") as f:
496
- content = f.read()
497
- df = pd.DataFrame(
498
- [["", line] for line in content.split("\n")],
499
- columns=fields,
500
- dtype=str,
501
- )
502
- if fname.lower().endswith("jsonl") or fname.lower().endswith("json"):
503
- try:
504
- df = pd.read_json(fname, lines=True, dtype=str)
505
- except (ValueError, TypeError):
506
- df = pd.read_json(fname, dtype=str)
507
- immediate_msg = "\n- Your file appears to be in a .JSON format. Your file will be converted to JSONL format"
508
- necessary_msg = "Your format `JSON` will be converted to `JSONL`"
509
-
510
- if df is None:
511
- error_msg = (
512
- "Your file is not saved as a .CSV, .TSV, .XLSX, .TXT or .JSONL file."
513
- )
514
- if "." in fname:
515
- error_msg += (
516
- f" Your file `{fname}` appears to end with `.{fname.split('.')[1]}`"
517
- )
518
- else:
519
- error_msg += f" Your file `{fname}` does not appear to have a file ending. Please ensure your filename ends with one of the supported file endings."
520
- else:
521
- df.fillna("", inplace=True)
522
- else:
523
- error_msg = f"File {fname} does not exist."
524
-
525
- remediation = Remediation(
526
- name="read_any_format",
527
- necessary_msg=necessary_msg,
528
- immediate_msg=immediate_msg,
529
- error_msg=error_msg,
530
- )
531
- return df, remediation
532
-
533
-
534
- def format_inferrer_validator(df):
535
- """
536
- This validator will infer the likely fine-tuning format of the data, and display it to the user if it is classification.
537
- It will also suggest to use ada and explain train/validation split benefits.
538
- """
539
- ft_type = infer_task_type(df)
540
- immediate_msg = None
541
- if ft_type == "classification":
542
- immediate_msg = f"\n- Based on your data it seems like you're trying to fine-tune a model for {ft_type}\n- For classification, we recommend you try one of the faster and cheaper models, such as `ada`\n- For classification, you can estimate the expected model performance by keeping a held out dataset, which is not used for training"
543
- return Remediation(name="num_examples", immediate_msg=immediate_msg)
544
-
545
-
546
- def apply_necessary_remediation(df, remediation):
547
- """
548
- This function will apply a necessary remediation to a dataframe, or print an error message if one exists.
549
- """
550
- if remediation.error_msg is not None:
551
- sys.stderr.write(
552
- f"\n\nERROR in {remediation.name} validator: {remediation.error_msg}\n\nAborting..."
553
- )
554
- sys.exit(1)
555
- if remediation.immediate_msg is not None:
556
- sys.stdout.write(remediation.immediate_msg)
557
- if remediation.necessary_fn is not None:
558
- df = remediation.necessary_fn(df)
559
- return df
560
-
561
-
562
- def accept_suggestion(input_text, auto_accept):
563
- sys.stdout.write(input_text)
564
- if auto_accept:
565
- sys.stdout.write("Y\n")
566
- return True
567
- return input().lower() != "n"
568
-
569
-
570
- def apply_optional_remediation(df, remediation, auto_accept):
571
- """
572
- This function will apply an optional remediation to a dataframe, based on the user input.
573
- """
574
- optional_applied = False
575
- input_text = f"- [Recommended] {remediation.optional_msg} [Y/n]: "
576
- if remediation.optional_msg is not None:
577
- if accept_suggestion(input_text, auto_accept):
578
- df = remediation.optional_fn(df)
579
- optional_applied = True
580
- if remediation.necessary_msg is not None:
581
- sys.stdout.write(f"- [Necessary] {remediation.necessary_msg}\n")
582
- return df, optional_applied
583
-
584
-
585
- def estimate_fine_tuning_time(df):
586
- """
587
- Estimate the time it'll take to fine-tune the dataset
588
- """
589
- ft_format = infer_task_type(df)
590
- expected_time = 1.0
591
- if ft_format == "classification":
592
- num_examples = len(df)
593
- expected_time = num_examples * 1.44
594
- else:
595
- size = df.memory_usage(index=True).sum()
596
- expected_time = size * 0.0515
597
-
598
- def format_time(time):
599
- if time < 60:
600
- return f"{round(time, 2)} seconds"
601
- elif time < 3600:
602
- return f"{round(time / 60, 2)} minutes"
603
- elif time < 86400:
604
- return f"{round(time / 3600, 2)} hours"
605
- else:
606
- return f"{round(time / 86400, 2)} days"
607
-
608
- time_string = format_time(expected_time + 140)
609
- sys.stdout.write(
610
- f"Once your model starts training, it'll approximately take {time_string} to train a `curie` model, and less for `ada` and `babbage`. Queue will approximately take half an hour per job ahead of you.\n"
611
- )
612
-
613
-
614
- def get_outfnames(fname, split):
615
- suffixes = ["_train", "_valid"] if split else [""]
616
- i = 0
617
- while True:
618
- index_suffix = f" ({i})" if i > 0 else ""
619
- candidate_fnames = [
620
- os.path.splitext(fname)[0] + "_prepared" + suffix + index_suffix + ".jsonl"
621
- for suffix in suffixes
622
- ]
623
- if not any(os.path.isfile(f) for f in candidate_fnames):
624
- return candidate_fnames
625
- i += 1
626
-
627
-
628
- def get_classification_hyperparams(df):
629
- n_classes = df.completion.nunique()
630
- pos_class = None
631
- if n_classes == 2:
632
- pos_class = df.completion.value_counts().index[0]
633
- return n_classes, pos_class
634
-
635
-
636
- def write_out_file(df, fname, any_remediations, auto_accept):
637
- """
638
- This function will write out a dataframe to a file, if the user would like to proceed, and also offer a fine-tuning command with the newly created file.
639
- For classification it will optionally ask the user if they would like to split the data into train/valid files, and modify the suggested command to include the valid set.
640
- """
641
- ft_format = infer_task_type(df)
642
- common_prompt_suffix = get_common_xfix(df.prompt, xfix="suffix")
643
- common_completion_suffix = get_common_xfix(df.completion, xfix="suffix")
644
-
645
- split = False
646
- input_text = "- [Recommended] Would you like to split into training and validation set? [Y/n]: "
647
- if ft_format == "classification":
648
- if accept_suggestion(input_text, auto_accept):
649
- split = True
650
-
651
- additional_params = ""
652
- common_prompt_suffix_new_line_handled = common_prompt_suffix.replace("\n", "\\n")
653
- common_completion_suffix_new_line_handled = common_completion_suffix.replace(
654
- "\n", "\\n"
655
- )
656
- optional_ending_string = (
657
- f' Make sure to include `stop=["{common_completion_suffix_new_line_handled}"]` so that the generated texts ends at the expected place.'
658
- if len(common_completion_suffix_new_line_handled) > 0
659
- else ""
660
- )
661
-
662
- input_text = "\n\nYour data will be written to a new JSONL file. Proceed [Y/n]: "
663
-
664
- if not any_remediations and not split:
665
- sys.stdout.write(
666
- f'\nYou can use your file for fine-tuning:\n> openai api fine_tunes.create -t "{fname}"{additional_params}\n\nAfter you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt.{optional_ending_string}\n'
667
- )
668
- estimate_fine_tuning_time(df)
669
-
670
- elif accept_suggestion(input_text, auto_accept):
671
- fnames = get_outfnames(fname, split)
672
- if split:
673
- assert len(fnames) == 2 and "train" in fnames[0] and "valid" in fnames[1]
674
- MAX_VALID_EXAMPLES = 1000
675
- n_train = max(len(df) - MAX_VALID_EXAMPLES, int(len(df) * 0.8))
676
- df_train = df.sample(n=n_train, random_state=42)
677
- df_valid = df.drop(df_train.index)
678
- df_train[["prompt", "completion"]].to_json(
679
- fnames[0], lines=True, orient="records", force_ascii=False
680
- )
681
- df_valid[["prompt", "completion"]].to_json(
682
- fnames[1], lines=True, orient="records", force_ascii=False
683
- )
684
-
685
- n_classes, pos_class = get_classification_hyperparams(df)
686
- additional_params += " --compute_classification_metrics"
687
- if n_classes == 2:
688
- additional_params += f' --classification_positive_class "{pos_class}"'
689
- else:
690
- additional_params += f" --classification_n_classes {n_classes}"
691
- else:
692
- assert len(fnames) == 1
693
- df[["prompt", "completion"]].to_json(
694
- fnames[0], lines=True, orient="records", force_ascii=False
695
- )
696
-
697
- # Add -v VALID_FILE if we split the file into train / valid
698
- files_string = ("s" if split else "") + " to `" + ("` and `".join(fnames))
699
- valid_string = f' -v "{fnames[1]}"' if split else ""
700
- separator_reminder = (
701
- ""
702
- if len(common_prompt_suffix_new_line_handled) == 0
703
- else f"After you’ve fine-tuned a model, remember that your prompt has to end with the indicator string `{common_prompt_suffix_new_line_handled}` for the model to start generating completions, rather than continuing with the prompt."
704
- )
705
- sys.stdout.write(
706
- f'\nWrote modified file{files_string}`\nFeel free to take a look!\n\nNow use that file when fine-tuning:\n> openai api fine_tunes.create -t "{fnames[0]}"{valid_string}{additional_params}\n\n{separator_reminder}{optional_ending_string}\n'
707
- )
708
- estimate_fine_tuning_time(df)
709
- else:
710
- sys.stdout.write("Aborting... did not write the file\n")
711
-
712
-
713
- def write_out_search_file(df, fname, any_remediations, auto_accept, fields, purpose):
714
- """
715
- This function will write out a dataframe to a file, if the user would like to proceed.
716
- """
717
- input_text = "\n\nYour data will be written to a new JSONL file. Proceed [Y/n]: "
718
-
719
- if not any_remediations:
720
- sys.stdout.write(
721
- f'\nYou can upload your file:\n> openai api files.create -f "{fname}" -p {purpose}'
722
- )
723
-
724
- elif accept_suggestion(input_text, auto_accept):
725
- fnames = get_outfnames(fname, split=False)
726
-
727
- assert len(fnames) == 1
728
- df[fields].to_json(fnames[0], lines=True, orient="records", force_ascii=False)
729
-
730
- sys.stdout.write(
731
- f'\nWrote modified file to {fnames[0]}`\nFeel free to take a look!\n\nNow upload that file:\n> openai api files.create -f "{fnames[0]}" -p {purpose}'
732
- )
733
- else:
734
- sys.stdout.write("Aborting... did not write the file\n")
735
-
736
-
737
- def infer_task_type(df):
738
- """
739
- Infer the likely fine-tuning task type from the data
740
- """
741
- CLASSIFICATION_THRESHOLD = 3 # min_average instances of each class
742
- if sum(df.prompt.str.len()) == 0:
743
- return "open-ended generation"
744
-
745
- if len(df.completion.unique()) < len(df) / CLASSIFICATION_THRESHOLD:
746
- return "classification"
747
-
748
- return "conditional generation"
749
-
750
-
751
- def get_common_xfix(series, xfix="suffix"):
752
- """
753
- Finds the longest common suffix or prefix of all the values in a series
754
- """
755
- common_xfix = ""
756
- while True:
757
- common_xfixes = (
758
- series.str[-(len(common_xfix) + 1) :]
759
- if xfix == "suffix"
760
- else series.str[: len(common_xfix) + 1]
761
- ) # first few or last few characters
762
- if (
763
- common_xfixes.nunique() != 1
764
- ): # we found the character at which we don't have a unique xfix anymore
765
- break
766
- elif (
767
- common_xfix == common_xfixes.values[0]
768
- ): # the entire first row is a prefix of every other row
769
- break
770
- else: # the first or last few characters are still common across all rows - let's try to add one more
771
- common_xfix = common_xfixes.values[0]
772
- return common_xfix
773
-
774
-
775
- def get_validators():
776
- return [
777
- num_examples_validator,
778
- lambda x: necessary_column_validator(x, "prompt"),
779
- lambda x: necessary_column_validator(x, "completion"),
780
- additional_column_validator,
781
- non_empty_field_validator,
782
- format_inferrer_validator,
783
- duplicated_rows_validator,
784
- long_examples_validator,
785
- lambda x: lower_case_validator(x, "prompt"),
786
- lambda x: lower_case_validator(x, "completion"),
787
- common_prompt_suffix_validator,
788
- common_prompt_prefix_validator,
789
- common_completion_prefix_validator,
790
- common_completion_suffix_validator,
791
- completions_space_start_validator,
792
- ]
793
-
794
-
795
- def get_search_validators(required_fields, optional_fields):
796
- validators = [
797
- lambda x: necessary_column_validator(x, field) for field in required_fields
798
- ]
799
- validators += [
800
- lambda x: non_empty_field_validator(x, field) for field in required_fields
801
- ]
802
- validators += [lambda x: duplicated_rows_validator(x, required_fields)]
803
- validators += [
804
- lambda x: additional_column_validator(
805
- x, fields=required_fields + optional_fields
806
- ),
807
- ]
808
-
809
- return validators
810
-
811
-
812
- def apply_validators(
813
- df,
814
- fname,
815
- remediation,
816
- validators,
817
- auto_accept,
818
- write_out_file_func,
819
- ):
820
- optional_remediations = []
821
- if remediation is not None:
822
- optional_remediations.append(remediation)
823
- for validator in validators:
824
- remediation = validator(df)
825
- if remediation is not None:
826
- optional_remediations.append(remediation)
827
- df = apply_necessary_remediation(df, remediation)
828
-
829
- any_optional_or_necessary_remediations = any(
830
- [
831
- remediation
832
- for remediation in optional_remediations
833
- if remediation.optional_msg is not None
834
- or remediation.necessary_msg is not None
835
- ]
836
- )
837
- any_necessary_applied = any(
838
- [
839
- remediation
840
- for remediation in optional_remediations
841
- if remediation.necessary_msg is not None
842
- ]
843
- )
844
- any_optional_applied = False
845
-
846
- if any_optional_or_necessary_remediations:
847
- sys.stdout.write(
848
- "\n\nBased on the analysis we will perform the following actions:\n"
849
- )
850
- for remediation in optional_remediations:
851
- df, optional_applied = apply_optional_remediation(
852
- df, remediation, auto_accept
853
- )
854
- any_optional_applied = any_optional_applied or optional_applied
855
- else:
856
- sys.stdout.write("\n\nNo remediations found.\n")
857
-
858
- any_optional_or_necessary_applied = any_optional_applied or any_necessary_applied
859
-
860
- write_out_file_func(df, fname, any_optional_or_necessary_applied, auto_accept)
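For orientation, here is a sketch of how these pieces were chained together by OpenAI's old `fine_tunes.prepare_data` CLI; the file name is a placeholder and the real tool's orchestration (e.g. applying the read remediation before validation) may differ slightly.

```python
# Sketch of the validator pipeline; "training_data.csv" is a placeholder path.
from openai.validators import (
    apply_validators,
    get_validators,
    read_any_format,
    write_out_file,
)

fname = "training_data.csv"
df, read_remediation = read_any_format(fname)  # returns (DataFrame or None, Remediation)

apply_validators(
    df,
    fname,
    read_remediation,
    get_validators(),
    auto_accept=False,          # ask interactively before each optional remediation
    write_out_file_func=write_out_file,
)
```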
 
spaces/BramVanroy/spacey_conll/app.py DELETED
@@ -1,129 +0,0 @@
-from io import StringIO
-import logging
-
-import streamlit as st
-
-from utils import MODEL_MAP, create_download_link, load_nlp
-
-
-def _init():
-    st.set_page_config(page_title="Parse text into CoNLL-U format", page_icon="📝")
-    st.markdown("""<style>
-    /* Special button alignment */
-    [data-testid="column"] {
-        align-self: center;
-        vertical-align: middle;
-    }
-    </style>""", unsafe_allow_html=True)
-
-    if "selected_model" not in st.session_state:
-        st.session_state["selected_model"] = "en_core_web_sm"
-
-    if "nlp" not in st.session_state:
-        st.session_state["nlp"] = None
-
-    if "text" not in st.session_state:
-        st.session_state["text"] = None
-
-    st.title("📝 Parse text into CoNLL-U format")
-
-
-def _model_selection():
-    st.markdown("## Model ✨")
-    lang_col, model_text_col, custom_col = st.columns((4, 1, 4))
-
-    selected_lang = lang_col.selectbox("Language", tuple(MODEL_MAP.keys()),
-                                       index=list(MODEL_MAP.keys()).index("English"))
-    st.session_state["selected_model"] = MODEL_MAP[selected_lang]
-
-    model_text_col.markdown("**-- or --**", unsafe_allow_html=True)
-
-    st.session_state["selected_model"] = custom_col.text_input(label="Model to load",
-                                                               help="You can find spaCy models here: https://spacy.io/models/."
-                                                                    " Only official spaCy models are supported.",
-                                                               value=st.session_state["selected_model"]).split("/")[-1]
-
-    # Set extra options for the parser: disable tokenization/sentence segmentation
-    tok_col, sbd_col, status_col = st.columns(3)
-    is_tokenized = tok_col.checkbox("Is pre-tokenized?", help="When this option is enabled, tokens will be split by"
-                                                              " white-space and sentence splitting will also be disabled.")
-    disable_sbd = sbd_col.checkbox("Disable sentence splitting?")
-
-    ####################################
-    # (Down)loading model with options #
-    ####################################
-    load_info = status_col.info(f"Loading {st.session_state['selected_model']}...")
-
-    # Check if the model exists, if not download it. Return None when there was an error downloading the model
-    try:
-        st.session_state["nlp"] = load_nlp(st.session_state["selected_model"],
-                                           is_tokenized=is_tokenized,
-                                           disable_sbd=disable_sbd)
-    except Exception:
-        logging.exception("Could not load model.")
-        load_info.error(f"Error when trying to load {st.session_state['selected_model']}!")
-    else:
-        load_info.success(f"{st.session_state['selected_model']} loaded!")
-
-
-def _data_input():
-    inp_data_heading, input_col = st.columns((3, 1))
-    inp_data_heading.markdown("## Input data 📄")
-    fupload_check = input_col.checkbox("File upload?")
-
-    if fupload_check:
-        uploaded_file = st.file_uploader("Choose a plain text file to parse as CoNLL",
-                                         label_visibility="hidden")
-        if uploaded_file is not None:
-            stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
-            st.session_state["text"] = stringio.read()
-        else:
-            st.session_state["text"] = None
-    else:
-        st.session_state["text"] = st.text_area(label="Text to parse as CoNLL",
-                                                value="Grandma is baking cookies! I love her cookies.",
-                                                label_visibility="hidden")
-
-
-def _parse():
-    if ("text" in st.session_state and st.session_state["text"]
-            and "nlp" in st.session_state and st.session_state["nlp"]):
-        parse_process = st.info("Parsing...")
-        download_ct = st.empty()
-
-        doc = st.session_state["nlp"](st.session_state["text"])
-
-        df = doc._.conll_pd
-        st.dataframe(df)
-
-        excel_link = create_download_link(df,
-                                          "conll.xlsx",
-                                          "Excel file")
-        txt_link = create_download_link(df.to_csv(index=False, encoding="utf-8", sep="\t"),
-                                        "conll.txt",
-                                        "tab-separated file")
-
-        parse_process.success("Done parsing!")
-        download_ct.markdown(f"You can download the table as an {excel_link}, or as a {txt_link}.",
-                             unsafe_allow_html=True)
-
-
-def _footer():
-    st.markdown("## Github repository and contact ✒️")
-    st.markdown("This demo shows limited options of what the `spacy-conll` package can do. For instance, it also works"
-                " with stanza and UDPipe and you can parse CoNLL files directly into spaCy objects."
-                " You can check it out on [Github](https://github.com/BramVanroy/spacy_conll).")
-    st.markdown("Would you like additional functionality in the library? Or just want to get in touch?"
-                " Give me a shout on [Twitter](https://twitter.com/BramVanroy)!")
-
-
-def main():
-    _init()
-    _model_selection()
-    _data_input()
-    _parse()
-    _footer()
-
-
-if __name__ == '__main__':
-    main()
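Outside Streamlit, the same parsing flow boils down to a few lines of `spacy_conll`. `init_parser` is that package's documented entry point, though the exact signature should be checked against the installed version; the model name here is just an example:

```python
from spacy_conll import init_parser

# Build a spaCy pipeline with the CoNLL formatter component attached.
nlp = init_parser("en_core_web_sm", "spacy")

doc = nlp("Grandma is baking cookies! I love her cookies.")

# The same extension the app reads: a pandas DataFrame in CoNLL-U layout.
df = doc._.conll_pd
df.to_csv("conll.txt", index=False, sep="\t")
```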
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/distance.h DELETED
@@ -1,43 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/iterator/iterator_traits.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace detail
-{
-namespace generic
-{
-
-template<typename InputIterator>
-inline __host__ __device__
-  typename thrust::iterator_traits<InputIterator>::difference_type
-    distance(InputIterator first, InputIterator last);
-
-} // end namespace generic
-} // end namespace detail
-} // end namespace system
-} // end namespace thrust
-
-#include <thrust/system/detail/generic/distance.inl>
-
 
spaces/CVPR/Text2Human/Text2Human/utils/options.py DELETED
@@ -1,129 +0,0 @@
-import os
-import os.path as osp
-from collections import OrderedDict
-
-import yaml
-
-
-def ordered_yaml():
-    """Support OrderedDict for yaml.
-
-    Returns:
-        yaml Loader and Dumper.
-    """
-    try:
-        from yaml import CDumper as Dumper
-        from yaml import CLoader as Loader
-    except ImportError:
-        from yaml import Dumper, Loader
-
-    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
-
-    def dict_representer(dumper, data):
-        return dumper.represent_dict(data.items())
-
-    def dict_constructor(loader, node):
-        return OrderedDict(loader.construct_pairs(node))
-
-    Dumper.add_representer(OrderedDict, dict_representer)
-    Loader.add_constructor(_mapping_tag, dict_constructor)
-    return Loader, Dumper
-
-
-def parse(opt_path, is_train=True):
-    """Parse option file.
-
-    Args:
-        opt_path (str): Option file path.
-        is_train (bool): Indicate whether in training or not. Default: True.
-
-    Returns:
-        (dict): Options.
-    """
-    with open(opt_path, mode='r') as f:
-        Loader, _ = ordered_yaml()
-        opt = yaml.load(f, Loader=Loader)
-
-    gpu_list = ','.join(str(x) for x in opt['gpu_ids'])
-    if opt.get('set_CUDA_VISIBLE_DEVICES', None):
-        os.environ['CUDA_VISIBLE_DEVICES'] = gpu_list
-        print('export CUDA_VISIBLE_DEVICES=' + gpu_list, flush=True)
-    else:
-        print('gpu_list: ', gpu_list, flush=True)
-
-    opt['is_train'] = is_train
-
-    # paths
-    opt['path'] = {}
-    opt['path']['root'] = osp.abspath(
-        osp.join(__file__, osp.pardir, osp.pardir))
-    if is_train:
-        experiments_root = osp.join(opt['path']['root'], 'experiments',
-                                    opt['name'])
-        opt['path']['experiments_root'] = experiments_root
-        opt['path']['models'] = osp.join(experiments_root, 'models')
-        opt['path']['log'] = experiments_root
-        opt['path']['visualization'] = osp.join(experiments_root,
-                                                'visualization')
-
-        # change some options for debug mode
-        if 'debug' in opt['name']:
-            opt['debug'] = True
-            opt['val_freq'] = 1
-            opt['print_freq'] = 1
-            opt['save_checkpoint_freq'] = 1
-    else:  # test
-        results_root = osp.join(opt['path']['root'], 'results', opt['name'])
-        opt['path']['results_root'] = results_root
-        opt['path']['log'] = results_root
-        opt['path']['visualization'] = osp.join(results_root, 'visualization')
-
-    return opt
-
-
-def dict2str(opt, indent_level=1):
-    """dict to string for printing options.
-
-    Args:
-        opt (dict): Option dict.
-        indent_level (int): Indent level. Default: 1.
-
-    Return:
-        (str): Option string for printing.
-    """
-    msg = ''
-    for k, v in opt.items():
-        if isinstance(v, dict):
-            msg += ' ' * (indent_level * 2) + k + ':[\n'
-            msg += dict2str(v, indent_level + 1)
-            msg += ' ' * (indent_level * 2) + ']\n'
-        else:
-            msg += ' ' * (indent_level * 2) + k + ': ' + str(v) + '\n'
-    return msg
-
-
-class NoneDict(dict):
-    """None dict. It will return None if a key is not in the dict."""
-
-    def __missing__(self, key):
-        return None
-
-
-def dict_to_nonedict(opt):
-    """Convert to NoneDict, which returns None for missing keys.
-
-    Args:
-        opt (dict): Option dict.
-
-    Returns:
-        (dict): NoneDict for options.
-    """
-    if isinstance(opt, dict):
-        new_opt = dict()
-        for key, sub_opt in opt.items():
-            new_opt[key] = dict_to_nonedict(sub_opt)
-        return NoneDict(**new_opt)
-    elif isinstance(opt, list):
-        return [dict_to_nonedict(sub_opt) for sub_opt in opt]
-    else:
-        return opt
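A quick sketch of how this option loader is typically driven; the YAML content and file name below are made up for illustration, while the function calls are exactly the ones defined above:

```python
# A made-up minimal config; real Text2Human configs carry many more keys.
with open("example_options.yml", "w") as f:
    f.write("name: debug_demo\ngpu_ids: [0]\n")

opt = parse("example_options.yml", is_train=True)  # 'debug' in name triggers debug freqs
opt = dict_to_nonedict(opt)

print(dict2str(opt))
print(opt["nonexistent_key"])  # NoneDict returns None instead of raising KeyError
```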
 
spaces/CVPR/regionclip-demo/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp DELETED
@@ -1,39 +0,0 @@
-// Copyright (c) Facebook, Inc. and its affiliates.
-#include "box_iou_rotated.h"
-#include "box_iou_rotated_utils.h"
-
-namespace detectron2 {
-
-template <typename T>
-void box_iou_rotated_cpu_kernel(
-    const at::Tensor& boxes1,
-    const at::Tensor& boxes2,
-    at::Tensor& ious) {
-  auto num_boxes1 = boxes1.size(0);
-  auto num_boxes2 = boxes2.size(0);
-
-  for (int i = 0; i < num_boxes1; i++) {
-    for (int j = 0; j < num_boxes2; j++) {
-      ious[i * num_boxes2 + j] = single_box_iou_rotated<T>(
-          boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>());
-    }
-  }
-}
-
-at::Tensor box_iou_rotated_cpu(
-    // input must be contiguous:
-    const at::Tensor& boxes1,
-    const at::Tensor& boxes2) {
-  auto num_boxes1 = boxes1.size(0);
-  auto num_boxes2 = boxes2.size(0);
-  at::Tensor ious =
-      at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
-
-  box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious);
-
-  // reshape from 1d array to 2d array
-  auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
-  return ious.reshape(shape);
-}
-
-} // namespace detectron2
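On the Python side this CPU kernel is reached through detectron2's rotated-box helpers. A hedged sketch follows; the import path is my best recollection of detectron2's layout and should be verified against the installed version:

```python
import torch
from detectron2.layers.rotated_boxes import pairwise_iou_rotated  # assumed import path

# Boxes in (cx, cy, w, h, angle-in-degrees) layout, matching the kernel above.
boxes1 = torch.tensor([[10.0, 10.0, 8.0, 4.0, 0.0]])
boxes2 = torch.tensor([[10.0, 10.0, 8.0, 4.0, 45.0],
                       [30.0, 30.0, 8.0, 4.0, 0.0]])

# Shape (len(boxes1), len(boxes2)); dispatches to box_iou_rotated_cpu on CPU tensors.
ious = pairwise_iou_rotated(boxes1, boxes2)
print(ious)
```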
 
spaces/Caoyunkang/Segment-Any-Anomaly/GroundingDINO/groundingdino/util/visualizer.py DELETED
@@ -1,318 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-@File    :   visualizer.py
-@Time    :   2022/04/05 11:39:33
-@Author  :   Shilong Liu
-@Contact :   [email protected]
-"""
-
-import datetime
-import os
-
-import cv2
-import matplotlib.pyplot as plt
-import numpy as np
-import torch
-from matplotlib import transforms
-from matplotlib.collections import PatchCollection
-from matplotlib.patches import Polygon
-from pycocotools import mask as maskUtils
-
-
-def renorm(
-    img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
-) -> torch.FloatTensor:
-    # img: tensor(3,H,W) or tensor(B,3,H,W)
-    # return: same as img
-    assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
-    if img.dim() == 3:
-        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
-            img.size(0),
-            str(img.size()),
-        )
-        img_perm = img.permute(1, 2, 0)
-        mean = torch.Tensor(mean)
-        std = torch.Tensor(std)
-        img_res = img_perm * std + mean
-        return img_res.permute(2, 0, 1)
-    else:  # img.dim() == 4
-        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
-            img.size(1),
-            str(img.size()),
-        )
-        img_perm = img.permute(0, 2, 3, 1)
-        mean = torch.Tensor(mean)
-        std = torch.Tensor(std)
-        img_res = img_perm * std + mean
-        return img_res.permute(0, 3, 1, 2)
-
-
-class ColorMap:
-    def __init__(self, basergb=[255, 255, 0]):
-        self.basergb = np.array(basergb)
-
-    def __call__(self, attnmap):
-        # attnmap: h, w. np.uint8.
-        # return: h, w, 4. np.uint8.
-        assert attnmap.dtype == np.uint8
-        h, w = attnmap.shape
-        res = self.basergb.copy()
-        res = res[None][None].repeat(h, 0).repeat(w, 1)  # h, w, 3
-        attn1 = attnmap.copy()[..., None]  # h, w, 1
-        res = np.concatenate((res, attn1), axis=-1).astype(np.uint8)
-        return res
-
-
-def rainbow_text(x, y, ls, lc, **kw):
-    """
-    Take a list of strings ``ls`` and colors ``lc`` and place them next to each
-    other, with text ls[i] being shown in color lc[i].
-
-    This example shows how to do both vertical and horizontal text, and will
-    pass all keyword arguments to plt.text, so you can set the font size,
-    family, etc.
-    """
-    t = plt.gca().transData
-    fig = plt.gcf()
-    plt.show()
-
-    # horizontal version
-    for s, c in zip(ls, lc):
-        text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw)
-        text.draw(fig.canvas.get_renderer())
-        ex = text.get_window_extent()
-        t = transforms.offset_copy(text._transform, x=ex.width, units="dots")
-
-    # # vertical version
-    # for s, c in zip(ls, lc):
-    #     text = plt.text(x, y, " " + s + " ", color=c, transform=t,
-    #                     rotation=90, va='bottom', ha='center', **kw)
-    #     text.draw(fig.canvas.get_renderer())
-    #     ex = text.get_window_extent()
-    #     t = transforms.offset_copy(text._transform, y=ex.height, units='dots')
-
-
-class COCOVisualizer:
-    def __init__(self, coco=None, tokenlizer=None) -> None:
-        self.coco = coco
-
-    def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"):
-        """
-        img: tensor(3, H, W)
-        tgt: make sure they are all on cpu.
-            must have items: 'image_id', 'boxes', 'size'
-        """
-        plt.figure(dpi=dpi)
-        plt.rcParams["font.size"] = "5"
-        ax = plt.gca()
-        img = renorm(img).permute(1, 2, 0)
-        # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
-        #     import ipdb; ipdb.set_trace()
-        ax.imshow(img)
-
-        self.addtgt(tgt)
-
-        if tgt is None:
-            image_id = 0
-        elif "image_id" not in tgt:
-            image_id = 0
-        else:
-            image_id = tgt["image_id"]
-
-        if caption is None:
-            savename = "{}/{}-{}.png".format(
-                savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
-            )
-        else:
-            savename = "{}/{}-{}-{}.png".format(
-                savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
-            )
-        print("savename: {}".format(savename))
-        os.makedirs(os.path.dirname(savename), exist_ok=True)
-        plt.savefig(savename)
-        plt.close()
-
-    def addtgt(self, tgt):
-        """ """
-        if tgt is None or "boxes" not in tgt:
-            ax = plt.gca()
-
-            # guard: tgt may be None on this branch
-            if tgt is not None and "caption" in tgt:
-                ax.set_title(tgt["caption"], wrap=True)
-
-            ax.set_axis_off()
-            return
-
-        ax = plt.gca()
-        H, W = tgt["size"]
-        numbox = tgt["boxes"].shape[0]
-
-        color = []
-        polygons = []
-        boxes = []
-        for box in tgt["boxes"].cpu():
-            unnormbbox = box * torch.Tensor([W, H, W, H])
-            unnormbbox[:2] -= unnormbbox[2:] / 2
-            [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()
-            boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])
-            poly = [
-                [bbox_x, bbox_y],
-                [bbox_x, bbox_y + bbox_h],
-                [bbox_x + bbox_w, bbox_y + bbox_h],
-                [bbox_x + bbox_w, bbox_y],
-            ]
-            np_poly = np.array(poly).reshape((4, 2))
-            polygons.append(Polygon(np_poly))
-            c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
-            color.append(c)
-
-        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)
-        ax.add_collection(p)
-        p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
-        ax.add_collection(p)
-
-        if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0:
-            assert (
-                len(tgt["strings_positive"]) == numbox
-            ), f"{len(tgt['strings_positive'])} = {numbox}, "
-            for idx, strlist in enumerate(tgt["strings_positive"]):
-                cate_id = int(tgt["labels"][idx])
-                _string = str(cate_id) + ":" + " ".join(strlist)
-                bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
-                # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})
-                ax.text(
-                    bbox_x,
-                    bbox_y,
-                    _string,
-                    color="black",
-                    bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
-                )
-
-        if "box_label" in tgt:
-            assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, "
-            for idx, bl in enumerate(tgt["box_label"]):
-                _string = str(bl)
-                bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
-                # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})
-                ax.text(
-                    bbox_x,
-                    bbox_y,
-                    _string,
-                    color="black",
-                    bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
-                )
-
-        if "caption" in tgt:
-            ax.set_title(tgt["caption"], wrap=True)
-            # plt.figure()
-            # rainbow_text(0.0, 0.0, "all unicorns poop rainbows ! ! !".split(),
-            #              ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])
-
-        if "attn" in tgt:
-            # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
-            #     import ipdb; ipdb.set_trace()
-            if isinstance(tgt["attn"], tuple):
-                tgt["attn"] = [tgt["attn"]]
-            for item in tgt["attn"]:
-                attn_map, basergb = item
-                attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)
-                attn_map = (attn_map * 255).astype(np.uint8)
-                cm = ColorMap(basergb)
-                heatmap = cm(attn_map)
-                ax.imshow(heatmap)
-        ax.set_axis_off()
-
-    def showAnns(self, anns, draw_bbox=False):
-        """
-        Display the specified annotations.
-        :param anns (array of object): annotations to display
-        :return: None
-        """
-        if len(anns) == 0:
-            return 0
-        if "segmentation" in anns[0] or "keypoints" in anns[0]:
-            datasetType = "instances"
-        elif "caption" in anns[0]:
-            datasetType = "captions"
-        else:
-            raise Exception("datasetType not supported")
-        if datasetType == "instances":
-            ax = plt.gca()
-            ax.set_autoscale_on(False)
-            polygons = []
-            color = []
-            for ann in anns:
-                c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
-                if "segmentation" in ann:
-                    if type(ann["segmentation"]) == list:
-                        # polygon
-                        for seg in ann["segmentation"]:
-                            poly = np.array(seg).reshape((int(len(seg) / 2), 2))
-                            polygons.append(Polygon(poly))
-                            color.append(c)
-                    else:
-                        # mask
-                        t = self.imgs[ann["image_id"]]
-                        if type(ann["segmentation"]["counts"]) == list:
-                            rle = maskUtils.frPyObjects(
-                                [ann["segmentation"]], t["height"], t["width"]
-                            )
-                        else:
-                            rle = [ann["segmentation"]]
-                        m = maskUtils.decode(rle)
-                        img = np.ones((m.shape[0], m.shape[1], 3))
-                        if ann["iscrowd"] == 1:
-                            color_mask = np.array([2.0, 166.0, 101.0]) / 255
-                        if ann["iscrowd"] == 0:
-                            color_mask = np.random.random((1, 3)).tolist()[0]
-                        for i in range(3):
-                            img[:, :, i] = color_mask[i]
-                        ax.imshow(np.dstack((img, m * 0.5)))
-                if "keypoints" in ann and type(ann["keypoints"]) == list:
-                    # turn skeleton into zero-based index
-                    sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1
-                    kp = np.array(ann["keypoints"])
-                    x = kp[0::3]
-                    y = kp[1::3]
-                    v = kp[2::3]
-                    for sk in sks:
-                        if np.all(v[sk] > 0):
-                            plt.plot(x[sk], y[sk], linewidth=3, color=c)
-                    plt.plot(
-                        x[v > 0],
-                        y[v > 0],
-                        "o",
-                        markersize=8,
-                        markerfacecolor=c,
-                        markeredgecolor="k",
-                        markeredgewidth=2,
-                    )
-                    plt.plot(
-                        x[v > 1],
-                        y[v > 1],
-                        "o",
-                        markersize=8,
-                        markerfacecolor=c,
-                        markeredgecolor=c,
-                        markeredgewidth=2,
-                    )
-
-                if draw_bbox:
-                    [bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"]
-                    poly = [
-                        [bbox_x, bbox_y],
-                        [bbox_x, bbox_y + bbox_h],
-                        [bbox_x + bbox_w, bbox_y + bbox_h],
-                        [bbox_x + bbox_w, bbox_y],
-                    ]
-                    np_poly = np.array(poly).reshape((4, 2))
-                    polygons.append(Polygon(np_poly))
-                    color.append(c)
-
-            # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
-            # ax.add_collection(p)
-            p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
-            ax.add_collection(p)
-        elif datasetType == "captions":
-            for ann in anns:
-                print(ann["caption"])
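A minimal driver for the `COCOVisualizer.visualize` path above, grounded in the `tgt` keys the method actually reads ('image_id', 'size', 'boxes', optionally 'box_label' and 'caption'); the random tensor stands in for a real normalized image:

```python
import torch

vslzr = COCOVisualizer()

# Stand-in for an ImageNet-normalized tensor, as `renorm` expects (3, H, W).
img = torch.rand(3, 480, 640)

tgt = {
    "image_id": 0,
    "size": torch.tensor([480, 640]),               # (H, W)
    "boxes": torch.tensor([[0.5, 0.5, 0.4, 0.3]]),  # normalized cx, cy, w, h
    "box_label": ["dog"],
    "caption": "a dog in the park",
}

vslzr.visualize(img, tgt, savedir="vis")  # writes a timestamped PNG under ./vis
```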
 
spaces/CofAI/CalculatorUI/style.css DELETED
@@ -1,28 +0,0 @@
-body {
-  padding: 2rem;
-  font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
-  font-size: 16px;
-  margin-top: 0;
-}
-
-p {
-  color: rgb(107, 114, 128);
-  font-size: 15px;
-  margin-bottom: 10px;
-  margin-top: 5px;
-}
-
-.card {
-  max-width: 620px;
-  margin: 0 auto;
-  padding: 16px;
-  border: 1px solid lightgray;
-  border-radius: 16px;
-}
-
-.card p:last-child {
-  margin-bottom: 0;
-}
 
spaces/CofAI/chat.b4/g4f/Provider/Providers/DeepAi.py DELETED
@@ -1,46 +0,0 @@
-import os
-import json
-import random
-import hashlib
-import requests
-
-from ...typing import sha256, Dict, get_type_hints
-
-url = 'https://deepai.org'
-model = ['gpt-3.5-turbo']
-supports_stream = True
-needs_auth = False
-
-def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-    def md5(text: str) -> str:
-        return hashlib.md5(text.encode()).hexdigest()[::-1]
-
-
-    def get_api_key(user_agent: str) -> str:
-        part1 = str(random.randint(0, 10**11))
-        part2 = md5(user_agent + md5(user_agent + md5(user_agent + part1 + "x")))
-
-        return f"tryit-{part1}-{part2}"
-
-    user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
-
-    headers = {
-        "api-key": get_api_key(user_agent),
-        "user-agent": user_agent
-    }
-
-    files = {
-        "chat_style": (None, "chat"),
-        "chatHistory": (None, json.dumps(messages))
-    }
-
-    r = requests.post("https://api.deepai.org/chat_response", headers=headers, files=files, stream=True)
-
-    for chunk in r.iter_content(chunk_size=None):
-        r.raise_for_status()
-        yield chunk.decode()
-
-
-params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-    '(%s)' % ', '.join(
-        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
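Since the provider is a plain generator, driving it is a one-liner loop. A minimal sketch; whether api.deepai.org still honors the throwaway `tryit-...` key scheme that `get_api_key` fabricates is outside this listing's control:

```python
# Stream a reply token-by-token from the provider above.
messages = [{"role": "user", "content": "Hello!"}]
for chunk in _create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    print(chunk, end="", flush=True)
```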
 
spaces/Cpp4App/Cpp4App/CDM/input_examples/README.md DELETED
@@ -1,80 +0,0 @@
-# UIED - UI element detection, detecting UI elements from UI screenshots or drawings
-
-This project is still ongoing and this repo may be updated irregularly. I developed a web app for UIED at http://uied.online
-
-## Related Publications:
-[1. UIED: a hybrid tool for GUI element detection](https://dl.acm.org/doi/10.1145/3368089.3417940)
-
-[2. Object Detection for Graphical User Interface: Old Fashioned or Deep Learning or a Combination?](https://arxiv.org/abs/2008.05132)
-
->The repo has been **upgraded with Google OCR** for GUI text detection. To use the original version from our paper (with [EAST](https://github.com/argman/EAST) as the text detector), check the release [v2.3](https://github.com/MulongXie/UIED/releases/tag/v2.3) and download the pre-trained model from [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing).
-
-## What is it?
-
-UI Element Detection (UIED) is an old-fashioned computer vision (CV) based element detection approach for graphical user interfaces.
-
-The input to UIED can be any kind of UI image, such as a mobile app or web page screenshot, a UI design drawn in Photoshop or Sketch, or even a hand-drawn UI design. The approach then detects and classifies text and graphic UI elements, and exports the detection result as a JSON file for downstream applications.
-
-UIED comprises two parts that detect UI text and graphic elements such as buttons, images and input bars.
-* For text, it leverages [Google OCR](https://cloud.google.com/vision/docs/ocr) to perform detection.
-
-* For graphical elements, it uses old-fashioned CV approaches to locate the elements and a CNN classifier to achieve classification.
-
-> UIED is highly customizable: you can replace either part with your own choice (e.g. other text detection approaches). Unlike a black-box end-to-end deep learning approach, you can revise the algorithms in the non-text detection and merging (partially or entirely) easily to fit your task.
-
-![UIED Approach](https://github.com/MulongXie/UIED/blob/master/data/demo/approach.png)
-
-## How to use?
-
-### Dependency
-* **Python 3.5**
-* **Opencv 3.4.2**
-* **Pandas**
-<!-- * **Tensorflow 1.10.0**
-* **Keras 2.2.4**
-* **Sklearn 0.22.2** -->
-
-### Installation
-<!-- Install the mentioned dependencies, and download two pre-trained models from [this link](https://drive.google.com/drive/folders/1MK0Om7Lx0wRXGDfNcyj21B0FL1T461v5?usp=sharing) for EAST text detection and GUI element classification. -->
-
-<!-- Change ``CNN_PATH`` and ``EAST_PATH`` in *config/CONFIG.py* to your locations. -->
-
-The new version of UIED, equipped with Google OCR, is easy to deploy and no pre-trained model is needed. Simply download the repo along with the dependencies.
-
-> Please replace the Google OCR key at `detect_text/ocr.py line 28` with your own (apply on the [Google website](https://cloud.google.com/vision)).
-
-### Usage
-To test your own image(s):
-* To test a single image, change *input_path_img* in ``run_single.py`` to your input image; the results will be output to *output_root*.
-* To test multiple images, change *input_img_root* in ``run_batch.py`` to your input directory; the results will be output to *output_root*.
-* To adjust the parameters interactively, use ``run_testing.py``
-
-> Note: The best set of parameters varies for different types of GUI image (Mobile App, Web, PC). I highly recommend first playing with ``run_testing.py`` to pick a good set of parameters for your data.
-
-## Folder structure
-``cnn/``
-* Used to train the classifier for graphic UI elements
-* Set path of the CNN classification model
-
-``config/``
-* Set data paths
-* Set parameters for graphic element detection
-
-``data/``
-* Input UI images and output detection results
-
-``detect_compo/``
-* Non-text GUI component detection
-
-``detect_text/``
-* GUI text detection using Google OCR
-
-``detect_merge/``
-* Merge the detection results of non-text and text GUI elements
-
-The major detection algorithms are in ``detect_compo/``, ``detect_text/`` and ``detect_merge/``
-
-## Demo
-GUI element detection result for a web screenshot
-
-![UI Components detection result](https://github.com/MulongXie/UIED/blob/master/data/demo/demo.png)
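The README's text-detection half rests on Google Cloud Vision's text-detection endpoint. A standalone sketch of that REST call is below; whether `detect_text/ocr.py` structures its request exactly this way is an assumption, but this is the documented Vision API shape, and the key and image path are placeholders:

```python
import base64
import requests

API_KEY = "YOUR_GOOGLE_VISION_KEY"  # apply at https://cloud.google.com/vision
ENDPOINT = f"https://vision.googleapis.com/v1/images:annotate?key={API_KEY}"

with open("data/input/screenshot.png", "rb") as f:
    content = base64.b64encode(f.read()).decode()

payload = {
    "requests": [
        {"image": {"content": content}, "features": [{"type": "TEXT_DETECTION"}]}
    ]
}
resp = requests.post(ENDPOINT, json=payload, timeout=30)
resp.raise_for_status()

annotations = resp.json()["responses"][0].get("textAnnotations", [])
for ann in annotations[1:]:  # index 0 is the full-page text block
    print(ann["description"], ann["boundingPoly"]["vertices"])
```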
 
spaces/Cvandi/remake/tests/test_utils.py DELETED
@@ -1,87 +0,0 @@
-import numpy as np
-from basicsr.archs.rrdbnet_arch import RRDBNet
-
-from realesrgan.utils import RealESRGANer
-
-
-def test_realesrganer():
-    # initialize with default model
-    restorer = RealESRGANer(
-        scale=4,
-        model_path='experiments/pretrained_models/RealESRGAN_x4plus.pth',
-        model=None,
-        tile=10,
-        tile_pad=10,
-        pre_pad=2,
-        half=False)
-    assert isinstance(restorer.model, RRDBNet)
-    assert restorer.half is False
-    # initialize with user-defined model
-    model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
-    restorer = RealESRGANer(
-        scale=4,
-        model_path='experiments/pretrained_models/RealESRGAN_x4plus_anime_6B.pth',
-        model=model,
-        tile=10,
-        tile_pad=10,
-        pre_pad=2,
-        half=True)
-    # test attribute
-    assert isinstance(restorer.model, RRDBNet)
-    assert restorer.half is True
-
-    # ------------------ test pre_process ---------------- #
-    img = np.random.random((12, 12, 3)).astype(np.float32)
-    restorer.pre_process(img)
-    assert restorer.img.shape == (1, 3, 14, 14)
-    # with modcrop
-    restorer.scale = 1
-    restorer.pre_process(img)
-    assert restorer.img.shape == (1, 3, 16, 16)
-
-    # ------------------ test process ---------------- #
-    restorer.process()
-    assert restorer.output.shape == (1, 3, 64, 64)
-
-    # ------------------ test post_process ---------------- #
-    restorer.mod_scale = 4
-    output = restorer.post_process()
-    assert output.shape == (1, 3, 60, 60)
-
-    # ------------------ test tile_process ---------------- #
-    restorer.scale = 4
-    img = np.random.random((12, 12, 3)).astype(np.float32)
-    restorer.pre_process(img)
-    restorer.tile_process()
-    assert restorer.output.shape == (1, 3, 64, 64)
-
-    # ------------------ test enhance ---------------- #
-    img = np.random.random((12, 12, 3)).astype(np.float32)
-    result = restorer.enhance(img, outscale=2)
-    assert result[0].shape == (24, 24, 3)
-    assert result[1] == 'RGB'
-
-    # ------------------ test enhance with 16-bit image ---------------- #
-    img = np.random.random((4, 4, 3)).astype(np.uint16) + 512
-    result = restorer.enhance(img, outscale=2)
-    assert result[0].shape == (8, 8, 3)
-    assert result[1] == 'RGB'
-
-    # ------------------ test enhance with gray image ---------------- #
-    img = np.random.random((4, 4)).astype(np.float32)
-    result = restorer.enhance(img, outscale=2)
-    assert result[0].shape == (8, 8)
-    assert result[1] == 'L'
-
-    # ------------------ test enhance with RGBA ---------------- #
-    img = np.random.random((4, 4, 4)).astype(np.float32)
-    result = restorer.enhance(img, outscale=2)
-    assert result[0].shape == (8, 8, 4)
-    assert result[1] == 'RGBA'
-
-    # ------------------ test enhance with RGBA, alpha_upsampler ---------------- #
-    restorer.tile_size = 0
-    img = np.random.random((4, 4, 4)).astype(np.float32)
-    result = restorer.enhance(img, outscale=2, alpha_upsampler=None)
-    assert result[0].shape == (8, 8, 4)
-    assert result[1] == 'RGBA'
 
spaces/DEEMOSTECH/ChatAvatar/static/js/main.d852ae94.js DELETED
The diff for this file is too large to render.
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/implementations/zip.py DELETED
@@ -1,127 +0,0 @@
-from __future__ import absolute_import, division, print_function
-
-import zipfile
-
-import fsspec
-from fsspec.archive import AbstractArchiveFileSystem
-
-
-class ZipFileSystem(AbstractArchiveFileSystem):
-    """Read/Write contents of ZIP archive as a file-system
-
-    Keeps file object open while instance lives.
-
-    This class is pickleable, but not necessarily thread-safe
-    """
-
-    root_marker = ""
-    protocol = "zip"
-    cachable = False
-
-    def __init__(
-        self,
-        fo="",
-        mode="r",
-        target_protocol=None,
-        target_options=None,
-        compression=zipfile.ZIP_STORED,
-        allowZip64=True,
-        compresslevel=None,
-        **kwargs,
-    ):
-        """
-        Parameters
-        ----------
-        fo: str or file-like
-            Contains ZIP, and must exist. If a str, will fetch file using
-            :meth:`~fsspec.open_files`, which must return one file exactly.
-        mode: str
-            Accept: "r", "w", "a"
-        target_protocol: str (optional)
-            If ``fo`` is a string, this value can be used to override the
-            FS protocol inferred from a URL
-        target_options: dict (optional)
-            Kwargs passed when instantiating the target FS, if ``fo`` is
-            a string.
-        compression, allowZip64, compresslevel: passed to ZipFile
-            Only relevant when creating a ZIP
-        """
-        super().__init__(self, **kwargs)
-        if mode not in set("rwa"):
-            raise ValueError(f"mode '{mode}' not understood")
-        self.mode = mode
-        if isinstance(fo, str):
-            fo = fsspec.open(
-                fo, mode=mode + "b", protocol=target_protocol, **(target_options or {})
-            )
-        self.of = fo
-        self.fo = fo.__enter__()  # the whole instance is a context
-        self.zip = zipfile.ZipFile(
-            self.fo,
-            mode=mode,
-            compression=compression,
-            allowZip64=allowZip64,
-            compresslevel=compresslevel,
-        )
-        self.dir_cache = None
-
-    @classmethod
-    def _strip_protocol(cls, path):
-        # zip file paths are always relative to the archive root
-        return super()._strip_protocol(path).lstrip("/")
-
-    def __del__(self):
-        if hasattr(self, "zip"):
-            self.close()
-            del self.zip
-
-    def close(self):
-        """Commits any write changes to the file. Done on ``del`` too."""
-        self.zip.close()
-
-    def _get_dirs(self):
-        if self.dir_cache is None or self.mode in set("wa"):
-            # when writing, dir_cache is always in the ZipFile's attributes,
-            # not read from the file.
-            files = self.zip.infolist()
-            self.dir_cache = {
-                dirname + "/": {"name": dirname + "/", "size": 0, "type": "directory"}
-                for dirname in self._all_dirnames(self.zip.namelist())
-            }
-            for z in files:
-                f = {s: getattr(z, s, None) for s in zipfile.ZipInfo.__slots__}
-                f.update(
-                    {
-                        "name": z.filename,
-                        "size": z.file_size,
-                        "type": ("directory" if z.is_dir() else "file"),
-                    }
-                )
-                self.dir_cache[f["name"]] = f
-
-    def pipe_file(self, path, value, **kwargs):
-        # override upstream, because we know the exact file size in this case
-        self.zip.writestr(path, value, **kwargs)
-
-    def _open(
-        self,
-        path,
-        mode="rb",
-        block_size=None,
-        autocommit=True,
-        cache_options=None,
-        **kwargs,
-    ):
-        path = self._strip_protocol(path)
-        if "r" in mode and self.mode in set("wa"):
-            if self.exists(path):
-                raise IOError("ZipFS can only be open for reading or writing, not both")
-            raise FileNotFoundError(path)
-        if "r" in self.mode and "w" in mode:
-            raise IOError("ZipFS can only be open for reading or writing, not both")
-        out = self.zip.open(path, mode.strip("b"))
-        if "r" in mode:
-            info = self.info(path)
-            out.size = info["size"]
-            out.name = info["name"]
-        return out
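The class registers the `zip` protocol, so it is normally reached through fsspec's public entry points rather than instantiated directly. A small self-contained sketch, building the archive with the stdlib first:

```python
import zipfile
import fsspec

# Build a small archive with the stdlib...
with zipfile.ZipFile("demo.zip", "w") as z:
    z.writestr("hello.txt", "hello from inside the archive")

# ...then browse it through the file-system implementation above.
fs = fsspec.filesystem("zip", fo="demo.zip")
print(fs.ls("/"))            # lists 'hello.txt'
print(fs.cat("hello.txt"))   # b'hello from inside the archive'

# URL chaining also works: read a member of a local archive directly.
with fsspec.open("zip://hello.txt::file://demo.zip", "rb") as f:
    print(f.read())
```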
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/utils.py DELETED
@@ -1,1020 +0,0 @@
1
- """ Handy utility functions. """
2
-
3
- from __future__ import annotations
4
-
5
- import asyncio
6
- import copy
7
- import functools
8
- import inspect
9
- import json
10
- import json.decoder
11
- import os
12
- import pkgutil
13
- import random
14
- import re
15
- import sys
16
- import time
17
- import typing
18
- import warnings
19
- from contextlib import contextmanager
20
- from enum import Enum
21
- from io import BytesIO
22
- from numbers import Number
23
- from pathlib import Path
24
- from types import GeneratorType
25
- from typing import (
26
- TYPE_CHECKING,
27
- Any,
28
- Callable,
29
- Generator,
30
- TypeVar,
31
- Union,
32
- )
33
-
34
- import anyio
35
- import httpx
36
- import matplotlib
37
- import requests
38
- from gradio_client.serializing import Serializable
39
- from markdown_it import MarkdownIt
40
- from mdit_py_plugins.dollarmath.index import dollarmath_plugin
41
- from mdit_py_plugins.footnote.index import footnote_plugin
42
- from pydantic import BaseModel, parse_obj_as
43
-
44
- import gradio
45
- from gradio.context import Context
46
- from gradio.strings import en
47
-
48
- if TYPE_CHECKING: # Only import for type checking (is False at runtime).
49
- from gradio.blocks import Block, BlockContext, Blocks
50
- from gradio.components import Component
51
-
52
- JSON_PATH = os.path.join(os.path.dirname(gradio.__file__), "launches.json")
53
- GRADIO_VERSION = (
54
- (pkgutil.get_data(__name__, "version.txt") or b"").decode("ascii").strip()
55
- )
56
-
57
- T = TypeVar("T")
58
-
59
-
60
- def colab_check() -> bool:
61
- """
62
- Check if interface is launching from Google Colab
63
- :return is_colab (bool): True or False
64
- """
65
- is_colab = False
66
- try: # Check if running interactively using ipython.
67
- from IPython import get_ipython
68
-
69
- from_ipynb = get_ipython()
70
- if "google.colab" in str(from_ipynb):
71
- is_colab = True
72
- except (ImportError, NameError):
73
- pass
74
- return is_colab
75
-
76
-
77
- def kaggle_check() -> bool:
78
- return bool(
79
- os.environ.get("KAGGLE_KERNEL_RUN_TYPE") or os.environ.get("GFOOTBALL_DATA_DIR")
80
- )
81
-
82
-
83
- def sagemaker_check() -> bool:
84
- try:
85
- import boto3 # type: ignore
86
-
87
- client = boto3.client("sts")
88
- response = client.get_caller_identity()
89
- return "sagemaker" in response["Arn"].lower()
90
- except Exception:
91
- return False
92
-
93
-
94
- def ipython_check() -> bool:
95
- """
96
- Check if interface is launching from iPython (not colab)
97
- :return is_ipython (bool): True or False
98
- """
99
- is_ipython = False
100
- try: # Check if running interactively using ipython.
101
- from IPython import get_ipython
102
-
103
- if get_ipython() is not None:
104
- is_ipython = True
105
- except (ImportError, NameError):
106
- pass
107
- return is_ipython
108
-
109
-
110
- def get_space() -> str | None:
111
- if os.getenv("SYSTEM") == "spaces":
112
- return os.getenv("SPACE_ID")
113
- return None
114
-
115
-
116
- def is_zero_gpu_space() -> bool:
117
- return os.getenv("SPACES_ZERO_GPU") == "true"
118
-
119
-
120
- def readme_to_html(article: str) -> str:
121
- try:
122
- response = requests.get(article, timeout=3)
123
- if response.status_code == requests.codes.ok: # pylint: disable=no-member
124
- article = response.text
125
- except requests.exceptions.RequestException:
126
- pass
127
- return article
128
-
129
-
130
- def show_tip(interface: gradio.Blocks) -> None:
131
- if interface.show_tips and random.random() < 1.5:
132
- tip: str = random.choice(en["TIPS"])
133
- print(f"Tip: {tip}")
134
-
135
-
136
- def launch_counter() -> None:
137
- try:
138
- if not os.path.exists(JSON_PATH):
139
- launches = {"launches": 1}
140
- with open(JSON_PATH, "w+") as j:
141
- json.dump(launches, j)
142
- else:
143
- with open(JSON_PATH) as j:
144
- launches = json.load(j)
145
- launches["launches"] += 1
146
- if launches["launches"] in [25, 50, 150, 500, 1000]:
147
- print(en["BETA_INVITE"])
148
- with open(JSON_PATH, "w") as j:
149
- j.write(json.dumps(launches))
150
- except Exception:
151
- pass
152
-
153
-
154
- def get_default_args(func: Callable) -> list[Any]:
155
- signature = inspect.signature(func)
156
- return [
157
- v.default if v.default is not inspect.Parameter.empty else None
158
- for v in signature.parameters.values()
159
- ]
160
-
161
-
162
- def assert_configs_are_equivalent_besides_ids(
163
- config1: dict, config2: dict, root_keys: tuple = ("mode",)
164
- ):
165
- """Allows you to test if two different Blocks configs produce the same demo.
166
-
167
- Parameters:
168
- config1 (dict): nested dict with config from the first Blocks instance
169
- config2 (dict): nested dict with config from the second Blocks instance
170
- root_keys (Tuple): an interable consisting of which keys to test for equivalence at
171
- the root level of the config. By default, only "mode" is tested,
172
- so keys like "version" are ignored.
173
- """
174
- config1 = copy.deepcopy(config1)
175
- config2 = copy.deepcopy(config2)
176
-
177
- for key in root_keys:
178
- assert config1[key] == config2[key], f"Configs have different: {key}"
179
-
180
- assert len(config1["components"]) == len(
181
- config2["components"]
182
- ), "# of components are different"
183
-
184
- def assert_same_components(config1_id, config2_id):
185
- c1 = list(filter(lambda c: c["id"] == config1_id, config1["components"]))[0]
186
- c2 = list(filter(lambda c: c["id"] == config2_id, config2["components"]))[0]
187
- c1 = copy.deepcopy(c1)
188
- c1.pop("id")
189
- c2 = copy.deepcopy(c2)
190
- c2.pop("id")
191
- assert c1 == c2, f"{c1} does not match {c2}"
192
-
193
- def same_children_recursive(children1, chidren2):
194
- for child1, child2 in zip(children1, chidren2):
195
- assert_same_components(child1["id"], child2["id"])
196
- if "children" in child1 or "children" in child2:
197
- same_children_recursive(child1["children"], child2["children"])
198
-
199
- children1 = config1["layout"]["children"]
200
- children2 = config2["layout"]["children"]
201
- same_children_recursive(children1, children2)
202
-
203
- for d1, d2 in zip(config1["dependencies"], config2["dependencies"]):
204
- for t1, t2 in zip(d1.pop("targets"), d2.pop("targets")):
205
- assert_same_components(t1, t2)
206
- for i1, i2 in zip(d1.pop("inputs"), d2.pop("inputs")):
207
- assert_same_components(i1, i2)
208
- for o1, o2 in zip(d1.pop("outputs"), d2.pop("outputs")):
209
- assert_same_components(o1, o2)
210
-
211
- assert d1 == d2, f"{d1} does not match {d2}"
212
-
213
- return True
214
-
215
-
216
- def format_ner_list(input_string: str, ner_groups: list[dict[str, str | int]]):
217
- if len(ner_groups) == 0:
218
- return [(input_string, None)]
219
-
220
- output = []
221
- end = 0
222
- prev_end = 0
223
-
224
- for group in ner_groups:
225
- entity, start, end = group["entity_group"], group["start"], group["end"]
226
- output.append((input_string[prev_end:start], None))
227
- output.append((input_string[start:end], entity))
228
- prev_end = end
229
-
230
- output.append((input_string[end:], None))
231
- return output
232
-
233
-
234
- def delete_none(_dict: dict, skip_value: bool = False) -> dict:
235
- """
236
- Delete keys whose values are None from a dictionary
237
- """
238
- for key, value in list(_dict.items()):
239
- if skip_value and key == "value":
240
- continue
241
- elif value is None:
242
- del _dict[key]
243
- return _dict
244
-
245
-
246
- def resolve_singleton(_list: list[Any] | Any) -> Any:
247
- if len(_list) == 1:
248
- return _list[0]
249
- else:
250
- return _list
251
-
252
-
253
- def component_or_layout_class(cls_name: str) -> type[Component] | type[BlockContext]:
254
- """
255
- Returns the component, template, or layout class with the given class name, or
256
- raises a ValueError if not found.
257
-
258
- Parameters:
259
- cls_name (str): lower-case string class name of a component
260
- Returns:
261
- cls: the component class
262
- """
263
- import gradio.blocks
264
- import gradio.components
265
- import gradio.layouts
266
- import gradio.templates
267
-
268
- components = [
269
- (name, cls)
270
- for name, cls in gradio.components.__dict__.items()
271
- if isinstance(cls, type)
272
- ]
273
- templates = [
274
- (name, cls)
275
- for name, cls in gradio.templates.__dict__.items()
276
- if isinstance(cls, type)
277
- ]
278
- layouts = [
279
- (name, cls)
280
- for name, cls in gradio.layouts.__dict__.items()
281
- if isinstance(cls, type)
282
- ]
283
- for name, cls in components + templates + layouts:
284
- if name.lower() == cls_name.replace("_", "") and (
285
- issubclass(cls, gradio.components.Component)
286
- or issubclass(cls, gradio.blocks.BlockContext)
287
- ):
288
- return cls
289
- raise ValueError(f"No such component or layout: {cls_name}")
290
-
291
-
292
- def run_coro_in_background(func: Callable, *args, **kwargs):
293
- """
294
- Runs coroutines in background.
295
-
296
- Warning, be careful to not use this function in other than FastAPI scope, because the event_loop has not started yet.
297
- You can use it in any scope reached by FastAPI app.
298
-
299
- correct scope examples: endpoints in routes, Blocks.process_api
300
- incorrect scope examples: Blocks.launch
301
-
302
- Use startup_events in routes.py if you need to run a coro in background in Blocks.launch().
303
-
304
-
305
- Example:
306
- utils.run_coro_in_background(fn, *args, **kwargs)
307
-
308
- Args:
309
- func:
310
- *args:
311
- **kwargs:
312
-
313
- Returns:
314
-
315
- """
316
- event_loop = asyncio.get_event_loop()
317
- return event_loop.create_task(func(*args, **kwargs))
318
-
319
-
320
- def run_sync_iterator_async(iterator):
321
- """Helper for yielding StopAsyncIteration from sync iterators."""
322
- try:
323
- return next(iterator)
324
- except StopIteration:
325
- # raise a ValueError here because co-routines can't raise StopIteration themselves
326
- raise StopAsyncIteration() from None
327
-
328
-
329
- class SyncToAsyncIterator:
330
- """Treat a synchronous iterator as async one."""
331
-
332
- def __init__(self, iterator, limiter) -> None:
333
- self.iterator = iterator
334
- self.limiter = limiter
335
-
336
- def __aiter__(self):
337
- return self
338
-
339
- async def __anext__(self):
340
- return await anyio.to_thread.run_sync(
341
- run_sync_iterator_async, self.iterator, limiter=self.limiter
342
- )
343
-
344
-
345
- async def async_iteration(iterator):
346
- # anext not introduced until 3.10 :(
347
- return await iterator.__anext__()
348
-
349
-
350
- class AsyncRequest:
351
- """
352
- The AsyncRequest class is a low-level API that allow you to create asynchronous HTTP requests without a context manager.
353
- Compared to making calls by using httpx directly, AsyncRequest offers several advantages:
354
- (1) Includes response validation functionality both using validation models and functions.
355
- (2) Exceptions are handled silently during the request call, which provides the ability to inspect each one
356
- request call individually in the case where there are multiple asynchronous request calls and some of them fail.
357
- (3) Provides HTTP request types with AsyncRequest.Method Enum class for ease of usage
358
-
359
- AsyncRequest also offers some util functions such as has_exception, is_valid and status to inspect get detailed
360
- information about executed request call.
361
-
362
- The basic usage of AsyncRequest is as follows: create a AsyncRequest object with inputs(method, url etc.). Then use it
363
- with the "await" statement, and then you can use util functions to do some post request checks depending on your use-case.
364
- Finally, call the get_validated_data function to get the response data.
365
-
366
- You can see example usages in test_utils.py.
367
- """
368
-
369
- client = httpx.AsyncClient()
370
-
371
- class Method(str, Enum):
372
- """
373
- Method is an enumeration class that contains possible types of HTTP request methods.
374
- """
375
-
376
- ANY = "*"
377
- CONNECT = "CONNECT"
378
- HEAD = "HEAD"
379
- GET = "GET"
380
- DELETE = "DELETE"
381
- OPTIONS = "OPTIONS"
382
- PATCH = "PATCH"
383
- POST = "POST"
384
- PUT = "PUT"
385
- TRACE = "TRACE"
386
-
387
- def __init__(
388
- self,
389
- method: Method,
390
- url: str,
391
- *,
392
- validation_model: type[BaseModel] | None = None,
393
- validation_function: Union[Callable, None] = None,
394
- exception_type: type[Exception] = Exception,
395
- raise_for_status: bool = False,
396
- client: httpx.AsyncClient | None = None,
397
- **kwargs,
398
- ):
399
- """
400
- Initialize the Request instance.
401
- Args:
402
- method(Request.Method) : method of the request
403
- url(str): url of the request
404
- *
405
- validation_model(Type[BaseModel]): a pydantic validation class type to use in validation of the response
406
- validation_function(Callable): a callable instance to use in validation of the response
407
- exception_class(Type[Exception]): a exception type to throw with its type
408
- raise_for_status(bool): a flag that determines to raise httpx.Request.raise_for_status() exceptions.
409
- """
410
- self._exception: Union[Exception, None] = None
411
- self._status = None
412
- self._raise_for_status = raise_for_status
413
- self._validation_model = validation_model
414
- self._validation_function = validation_function
415
- self._exception_type = exception_type
416
- self._validated_data = None
417
- # Create request
418
- self._request = self._create_request(method, url, **kwargs)
419
- self.client_ = client or self.client
420
-
421
- def __await__(self) -> Generator[None, Any, AsyncRequest]:
422
- """
423
- Wrap Request's __await__ magic function to create request calls which are executed in one line.
424
- """
425
- return self.__run().__await__()
426
-
427
- async def __run(self) -> AsyncRequest:
428
- """
429
- Manage the request call lifecycle.
430
- Execute the request by sending it through the client, then check its status.
431
- Then parse the request into Json format. And then validate it using the provided validation methods.
432
- If a problem occurs in this sequential process,
433
- an exception will be raised within the corresponding method, and allowed to be examined.
434
- Manage the request call lifecycle.
435
-
436
- Returns:
437
- Request
438
- """
439
- try:
440
- # Send the request and get the response.
441
- self._response: httpx.Response = await self.client_.send(self._request)
442
- # Raise for _status
443
- self._status = self._response.status_code
444
- if self._raise_for_status:
445
- self._response.raise_for_status()
446
- # Parse client response data to JSON
447
- self._json_response_data = self._response.json()
448
- # Validate response data
449
- self._validated_data = self._validate_response_data(
450
- self._json_response_data
451
- )
452
- except Exception as exception:
453
- # If there is an exception, store it to do further inspections.
454
- self._exception = self._exception_type(exception)
455
- return self
456
-
-     @staticmethod
-     def _create_request(method: Method, url: str, **kwargs) -> httpx.Request:
-         """
-         Create a request. This is an httpx request wrapper function.
-         Args:
-             method(Request.Method): request method type
-             url(str): target url of the request
-             **kwargs
-         Returns:
-             Request
-         """
-         request = httpx.Request(method, url, **kwargs)
-         return request
-
-     def _validate_response_data(self, response):
-         """
-         Validate the response using the given validation methods. If a validation method is
-         provided and the response is not valid, the validation functions will raise an exception.
-         Args:
-             response(ResponseJson): response object
-         Returns:
-             ResponseJson: validated JSON object.
-         """
-
-         # We use the raw response as a default value if there is no validation method or the response is not valid.
-         validated_response = response
-
-         try:
-             # If a validation model is provided, validate the response using the validation model.
-             if self._validation_model:
-                 validated_response = self._validate_response_by_model(response)
-             # Then, if a validation function is provided, validate the response using the validation function.
-             if self._validation_function:
-                 validated_response = self._validate_response_by_validation_function(
-                     response
-                 )
-         except Exception as exception:
-             # If one of the validation methods fails, the raised exception is handled silently.
-             # We assign it to the class instance so it can be inspected later via is_valid.
-             self._exception = exception
-
-         return validated_response
-
-     def _validate_response_by_model(self, response) -> BaseModel:
-         """
-         Validate the response JSON using the validation model.
-         Args:
-             response(ResponseJson): response object
-         Returns:
-             ResponseJson: validated JSON object.
-         """
-         validated_data = BaseModel()
-         if self._validation_model:
-             validated_data = parse_obj_as(self._validation_model, response)
-         return validated_data
-
-     def _validate_response_by_validation_function(self, response):
-         """
-         Validate the response JSON using the validation function.
-         Args:
-             response(ResponseJson): response object
-         Returns:
-             ResponseJson: validated JSON object.
-         """
-         validated_data = None
-
-         if self._validation_function:
-             validated_data = self._validation_function(response)
-
-         return validated_data
-
-     def is_valid(self, raise_exceptions: bool = False) -> bool:
-         """
-         Check the response object's validity. Raise exceptions if the raise_exceptions flag is True.
-         Args:
-             raise_exceptions(bool): a flag to raise exceptions in this check
-         Returns:
-             bool: validity of the data
-         """
-         if self.has_exception and self._exception:
-             if raise_exceptions:
-                 raise self._exception
-             return False
-         else:
-             # If there is no exception, there is no validation error.
-             return True
-
-     def get_validated_data(self):
-         return self._validated_data
-
-     @property
-     def json(self):
-         return self._json_response_data
-
-     @property
-     def exception(self):
-         return self._exception
-
-     @property
-     def has_exception(self):
-         return self.exception is not None
-
-     @property
-     def raise_exceptions(self):
-         if self.has_exception and self._exception:
-             raise self._exception
-
-     @property
-     def status(self):
-         return self._status
-
-
- @contextmanager
- def set_directory(path: Path | str):
-     """Context manager that sets the working directory to the given path."""
-     origin = Path().absolute()
-     try:
-         os.chdir(path)
-         yield
-     finally:
-         os.chdir(origin)
-
-
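As a quick illustration of `set_directory` (my own sketch, not part of the diff; it assumes the function above is in scope): the working directory changes only inside the `with` block and is restored even if the body raises.

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with set_directory(tmp):
        print(os.getcwd())  # inside the temporary directory
    print(os.getcwd())      # restored to the original directory
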
- def sanitize_value_for_csv(value: str | Number) -> str | Number:
-     """
-     Sanitizes a value that is being written to a CSV file to prevent CSV injection attacks.
-     Reference: https://owasp.org/www-community/attacks/CSV_Injection
-     """
-     if isinstance(value, Number):
-         return value
-     unsafe_prefixes = ["=", "+", "-", "@", "\t", "\n"]
-     unsafe_sequences = [",=", ",+", ",-", ",@", ",\t", ",\n"]
-     if any(value.startswith(prefix) for prefix in unsafe_prefixes) or any(
-         sequence in value for sequence in unsafe_sequences
-     ):
-         value = f"'{value}"
-     return value
-
-
- def sanitize_list_for_csv(values: list[Any]) -> list[Any]:
-     """
-     Sanitizes a list of values (or a list of lists of values) that is being written to a
-     CSV file to prevent CSV injection attacks.
-     """
-     sanitized_values = []
-     for value in values:
-         if isinstance(value, list):
-             sanitized_value = [sanitize_value_for_csv(v) for v in value]
-             sanitized_values.append(sanitized_value)
-         else:
-             sanitized_value = sanitize_value_for_csv(value)
-             sanitized_values.append(sanitized_value)
-     return sanitized_values
-
-
- def append_unique_suffix(name: str, list_of_names: list[str]):
613
- """Appends a numerical suffix to `name` so that it does not appear in `list_of_names`."""
614
- set_of_names: set[str] = set(list_of_names) # for O(1) lookup
615
- if name not in set_of_names:
616
- return name
617
- else:
618
- suffix_counter = 1
619
- new_name = f"{name}_{suffix_counter}"
620
- while new_name in set_of_names:
621
- suffix_counter += 1
622
- new_name = f"{name}_{suffix_counter}"
623
- return new_name
624
-
625
-
626
- def validate_url(possible_url: str) -> bool:
627
- headers = {"User-Agent": "gradio (https://gradio.app/; [email protected])"}
628
- try:
629
- head_request = requests.head(possible_url, headers=headers)
630
- # some URLs, such as AWS S3 presigned URLs, return a 405 or a 403 for HEAD requests
631
- if head_request.status_code == 405 or head_request.status_code == 403:
632
- return requests.get(possible_url, headers=headers).ok
633
- return head_request.ok
634
- except Exception:
635
- return False
636
-
637
-
638
- def is_update(val):
639
- return isinstance(val, dict) and "update" in val.get("__type__", "")
640
-
641
-
642
- def get_continuous_fn(fn: Callable, every: float) -> Callable:
643
- def continuous_fn(*args):
644
- while True:
645
- output = fn(*args)
646
- if isinstance(output, GeneratorType):
647
- yield from output
648
- else:
649
- yield output
650
- time.sleep(every)
651
-
652
- return continuous_fn
653
-
654
-
655
- def function_wrapper(
656
- f, before_fn=None, before_args=None, after_fn=None, after_args=None
657
- ):
658
- before_args = [] if before_args is None else before_args
659
- after_args = [] if after_args is None else after_args
660
- if inspect.isasyncgenfunction(f):
661
-
662
- @functools.wraps(f)
663
- async def asyncgen_wrapper(*args, **kwargs):
664
- if before_fn:
665
- before_fn(*before_args)
666
- async for response in f(*args, **kwargs):
667
- yield response
668
- if after_fn:
669
- after_fn(*after_args)
670
-
671
- return asyncgen_wrapper
672
-
673
- elif asyncio.iscoroutinefunction(f):
674
-
675
- @functools.wraps(f)
676
- async def async_wrapper(*args, **kwargs):
677
- if before_fn:
678
- before_fn(*before_args)
679
- response = await f(*args, **kwargs)
680
- if after_fn:
681
- after_fn(*after_args)
682
- return response
683
-
684
- return async_wrapper
685
-
686
- elif inspect.isgeneratorfunction(f):
687
-
688
- @functools.wraps(f)
689
- def gen_wrapper(*args, **kwargs):
690
- if before_fn:
691
- before_fn(*before_args)
692
- yield from f(*args, **kwargs)
693
- if after_fn:
694
- after_fn(*after_args)
695
-
696
- return gen_wrapper
697
-
698
- else:
699
-
700
- @functools.wraps(f)
701
- def wrapper(*args, **kwargs):
702
- if before_fn:
703
- before_fn(*before_args)
704
- response = f(*args, **kwargs)
705
- if after_fn:
706
- after_fn(*after_args)
707
- return response
708
-
709
- return wrapper
710
-
711
-
712
- def get_function_with_locals(fn: Callable, blocks: Blocks, event_id: str | None):
713
- def before_fn(blocks, event_id):
714
- from gradio.context import thread_data
715
-
716
- thread_data.blocks = blocks
717
- thread_data.event_id = event_id
718
-
719
- return function_wrapper(fn, before_fn=before_fn, before_args=(blocks, event_id))
720
-
721
-
722
- async def cancel_tasks(task_ids: set[str]):
723
- if sys.version_info < (3, 8):
724
- return None
725
-
726
- matching_tasks = [
727
- task for task in asyncio.all_tasks() if task.get_name() in task_ids
728
- ]
729
- for task in matching_tasks:
730
- task.cancel()
731
- await asyncio.gather(*matching_tasks, return_exceptions=True)
732
-
733
-
734
- def set_task_name(task, session_hash: str, fn_index: int, batch: bool):
735
- if sys.version_info >= (3, 8) and not (
736
- batch
737
- ): # You shouldn't be able to cancel a task if it's part of a batch
738
- task.set_name(f"{session_hash}_{fn_index}")
739
-
740
-
741
- def get_cancel_function(
742
- dependencies: list[dict[str, Any]]
743
- ) -> tuple[Callable, list[int]]:
744
- fn_to_comp = {}
745
- for dep in dependencies:
746
- if Context.root_block:
747
- fn_index = next(
748
- i for i, d in enumerate(Context.root_block.dependencies) if d == dep
749
- )
750
- fn_to_comp[fn_index] = [
751
- Context.root_block.blocks[o] for o in dep["outputs"]
752
- ]
753
-
754
- async def cancel(session_hash: str) -> None:
755
- task_ids = {f"{session_hash}_{fn}" for fn in fn_to_comp}
756
- await cancel_tasks(task_ids)
757
-
758
- return (
759
- cancel,
760
- list(fn_to_comp.keys()),
761
- )
762
-
763
-
764
- def get_type_hints(fn):
765
- # Importing gradio with the canonical abbreviation. Used in typing._eval_type.
766
- import gradio as gr # noqa: F401
767
- from gradio import Request # noqa: F401
768
-
769
- if inspect.isfunction(fn) or inspect.ismethod(fn):
770
- pass
771
- elif callable(fn):
772
- fn = fn.__call__
773
- else:
774
- return {}
775
-
776
- try:
777
- return typing.get_type_hints(fn)
778
- except TypeError:
779
- # On Python 3.9 or earlier, get_type_hints throws a TypeError if the function
780
- # has a type annotation that include "|". We resort to parsing the signature
781
- # manually using inspect.signature.
782
- type_hints = {}
783
- sig = inspect.signature(fn)
784
- for name, param in sig.parameters.items():
785
- if param.annotation is inspect.Parameter.empty:
786
- continue
787
- if "|" in str(param.annotation):
788
- continue
789
- # To convert the string annotation to a class, we use the
790
- # internal typing._eval_type function. This is not ideal, but
791
- # it's the only way to do it without eval-ing the string.
792
- # Since the API is internal, it may change in the future.
793
- try:
794
- type_hints[name] = typing._eval_type( # type: ignore
795
- typing.ForwardRef(param.annotation), globals(), locals()
796
- )
797
- except (NameError, TypeError):
798
- pass
799
- return type_hints
800
-
801
-
802
- def is_special_typed_parameter(name, parameter_types):
803
- from gradio.helpers import EventData
804
- from gradio.routes import Request
805
-
806
- """Checks if parameter has a type hint designating it as a gr.Request or gr.EventData"""
807
- hint = parameter_types.get(name)
808
- if not hint:
809
- return False
810
- is_request = hint == Request
811
- is_event_data = inspect.isclass(hint) and issubclass(hint, EventData)
812
- return is_request or is_event_data
813
-
814
-
815
- def check_function_inputs_match(fn: Callable, inputs: list, inputs_as_dict: bool):
816
- """
817
- Checks if the input component set matches the function
818
- Returns: None if valid, a string error message if mismatch
819
- """
820
-
821
- signature = inspect.signature(fn)
822
- parameter_types = get_type_hints(fn)
823
- min_args = 0
824
- max_args = 0
825
- infinity = -1
826
- for name, param in signature.parameters.items():
827
- has_default = param.default != param.empty
828
- if param.kind in [param.POSITIONAL_ONLY, param.POSITIONAL_OR_KEYWORD]:
829
- if not is_special_typed_parameter(name, parameter_types):
830
- if not has_default:
831
- min_args += 1
832
- max_args += 1
833
- elif param.kind == param.VAR_POSITIONAL:
834
- max_args = infinity
835
- elif param.kind == param.KEYWORD_ONLY and not has_default:
836
- return f"Keyword-only args must have default values for function {fn}"
837
- arg_count = 1 if inputs_as_dict else len(inputs)
838
- if min_args == max_args and max_args != arg_count:
839
- warnings.warn(
840
- f"Expected {max_args} arguments for function {fn}, received {arg_count}."
841
- )
842
- if arg_count < min_args:
843
- warnings.warn(
844
- f"Expected at least {min_args} arguments for function {fn}, received {arg_count}."
845
- )
846
- if max_args != infinity and arg_count > max_args:
847
- warnings.warn(
848
- f"Expected maximum {max_args} arguments for function {fn}, received {arg_count}."
849
- )
850
-
851
-
852
- class TupleNoPrint(tuple):
853
- # To remove printing function return in notebook
854
- def __repr__(self):
855
- return ""
856
-
857
- def __str__(self):
858
- return ""
859
-
860
-
861
- class MatplotlibBackendMananger:
862
- def __enter__(self):
863
- self._original_backend = matplotlib.get_backend()
864
- matplotlib.use("agg")
865
-
866
- def __exit__(self, exc_type, exc_val, exc_tb):
867
- matplotlib.use(self._original_backend)
868
-
869
-
870
- def tex2svg(formula, *args):
871
- with MatplotlibBackendMananger():
872
- import matplotlib.pyplot as plt
873
-
874
- fontsize = 20
875
- dpi = 300
876
- plt.rc("mathtext", fontset="cm")
877
- fig = plt.figure(figsize=(0.01, 0.01))
878
- fig.text(0, 0, rf"${formula}$", fontsize=fontsize)
879
- output = BytesIO()
880
- fig.savefig(
881
- output,
882
- dpi=dpi,
883
- transparent=True,
884
- format="svg",
885
- bbox_inches="tight",
886
- pad_inches=0.0,
887
- )
888
- plt.close(fig)
889
- output.seek(0)
890
- xml_code = output.read().decode("utf-8")
891
- svg_start = xml_code.index("<svg ")
892
- svg_code = xml_code[svg_start:]
893
- svg_code = re.sub(r"<metadata>.*<\/metadata>", "", svg_code, flags=re.DOTALL)
894
- svg_code = re.sub(r' width="[^"]+"', "", svg_code)
895
- height_match = re.search(r'height="([\d.]+)pt"', svg_code)
896
- if height_match:
897
- height = float(height_match.group(1))
898
- new_height = height / fontsize # conversion from pt to em
899
- svg_code = re.sub(
900
- r'height="[\d.]+pt"', f'height="{new_height}em"', svg_code
901
- )
902
- copy_code = f"<span style='font-size: 0px'>{formula}</span>"
903
- return f"{copy_code}{svg_code}"
904
-
905
-
906
- def abspath(path: str | Path) -> Path:
907
- """Returns absolute path of a str or Path path, but does not resolve symlinks."""
908
- path = Path(path)
909
-
910
- if path.is_absolute():
911
- return path
912
-
913
- # recursively check if there is a symlink within the path
914
- is_symlink = path.is_symlink() or any(
915
- parent.is_symlink() for parent in path.parents
916
- )
917
-
918
- if is_symlink or path == path.resolve(): # in case path couldn't be resolved
919
- return Path.cwd() / path
920
- else:
921
- return path.resolve()
922
-
923
-
924
- def is_in_or_equal(path_1: str | Path, path_2: str | Path):
925
- """
926
- True if path_1 is a descendant (i.e. located within) path_2 or if the paths are the
927
- same, returns False otherwise.
928
- Parameters:
929
- path_1: str or Path (should be a file)
930
- path_2: str or Path (can be a file or directory)
931
- """
932
- path_1, path_2 = abspath(path_1), abspath(path_2)
933
- try:
934
- if str(path_1.relative_to(path_2)).startswith(".."): # prevent path traversal
935
- return False
936
- except ValueError:
937
- return False
938
- return True
939
-
940
-
941
- def get_serializer_name(block: Block) -> str | None:
942
- if not hasattr(block, "serialize"):
943
- return None
944
-
945
- def get_class_that_defined_method(meth: Callable):
946
- # Adapted from: https://stackoverflow.com/a/25959545/5209347
947
- if isinstance(meth, functools.partial):
948
- return get_class_that_defined_method(meth.func)
949
- if inspect.ismethod(meth) or (
950
- inspect.isbuiltin(meth)
951
- and getattr(meth, "__self__", None) is not None
952
- and getattr(meth.__self__, "__class__", None)
953
- ):
954
- for cls in inspect.getmro(meth.__self__.__class__):
955
- # Find the first serializer defined in gradio_client that
956
- if issubclass(cls, Serializable) and "gradio_client" in cls.__module__:
957
- return cls
958
- if meth.__name__ in cls.__dict__:
959
- return cls
960
- meth = getattr(meth, "__func__", meth) # fallback to __qualname__ parsing
961
- if inspect.isfunction(meth):
962
- cls = getattr(
963
- inspect.getmodule(meth),
964
- meth.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0],
965
- None,
966
- )
967
- if isinstance(cls, type):
968
- return cls
969
- return getattr(meth, "__objclass__", None)
970
-
971
- cls = get_class_that_defined_method(block.serialize) # type: ignore
972
- if cls:
973
- return cls.__name__
974
-
975
-
976
- def get_markdown_parser() -> MarkdownIt:
977
- md = (
978
- MarkdownIt(
979
- "js-default",
980
- {
981
- "linkify": True,
982
- "typographer": True,
983
- "html": True,
984
- },
985
- )
986
- .use(dollarmath_plugin, renderer=tex2svg, allow_digits=False)
987
- .use(footnote_plugin)
988
- .enable("table")
989
- )
990
-
991
- # Add target="_blank" to all links. Taken from MarkdownIt docs: https://github.com/executablebooks/markdown-it-py/blob/master/docs/architecture.md
992
- def render_blank_link(self, tokens, idx, options, env):
993
- tokens[idx].attrSet("target", "_blank")
994
- return self.renderToken(tokens, idx, options, env)
995
-
996
- md.add_render_rule("link_open", render_blank_link)
997
-
998
- return md
999
-
1000
-
1001
- HTML_TAG_RE = re.compile("<.*?>")
1002
-
1003
-
1004
- def remove_html_tags(raw_html: str | None) -> str:
1005
- return re.sub(HTML_TAG_RE, "", raw_html or "")
1006
-
1007
-
1008
- def find_user_stack_level() -> int:
1009
- """
1010
- Find the first stack frame not inside Gradio.
1011
- """
1012
- frame = inspect.currentframe()
1013
- n = 0
1014
- while frame:
1015
- fname = inspect.getfile(frame)
1016
- if "/gradio/" not in fname.replace(os.sep, "/"):
1017
- break
1018
- frame = frame.f_back
1019
- n += 1
1020
- return n
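
`is_in_or_equal` is the path-traversal guard: anything that escapes the base directory via ".." is rejected. On a POSIX system (illustrative paths of my own):

print(is_in_or_equal("/srv/app/data/file.csv", "/srv/app"))  # True: descendant
print(is_in_or_equal("/srv/app", "/srv/app"))                # True: same path
print(is_in_or_equal("/srv/app/../etc/passwd", "/srv/app"))  # False: traversal rejected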
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_hf_folder.py DELETED
@@ -1,102 +0,0 @@
- # coding=utf-8
- # Copyright 2022-present, the HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Contains a helper class to retrieve/store the token from/to the local cache."""
- import os
- import warnings
- from pathlib import Path
- from typing import Optional
-
- from .. import constants
-
-
- class HfFolder:
-     path_token = Path(constants.HF_TOKEN_PATH)
-     # Private attribute. Will be removed in v0.15
-     _old_path_token = Path(constants._OLD_HF_TOKEN_PATH)
-
-     @classmethod
-     def save_token(cls, token: str) -> None:
-         """
-         Save the token, creating the folder as needed.
-
-         The token is saved in the Hugging Face home folder. You can configure it by
-         setting the `HF_HOME` environment variable.
-
-         Args:
-             token (`str`):
-                 The token to save to the [`HfFolder`]
-         """
-         cls.path_token.parent.mkdir(parents=True, exist_ok=True)
-         cls.path_token.write_text(token)
-
-     @classmethod
-     def get_token(cls) -> Optional[str]:
-         """
-         Get the token, or None if it does not exist.
-
-         Note that a token can also be provided using the `HUGGING_FACE_HUB_TOKEN` environment variable.
-
-         The token is saved in the Hugging Face home folder. You can configure it by
-         setting the `HF_HOME` environment variable. The previous location was
-         `~/.huggingface/token`. If a token is found in the old location but not in the
-         new one, it is copied there first.
-         For more details, see https://github.com/huggingface/huggingface_hub/issues/1232.
-
-         Returns:
-             `str` or `None`: The token, `None` if it doesn't exist.
-         """
-         # 0. Check if the token exists in the old path but not in the new location
-         try:
-             cls._copy_to_new_path_and_warn()
-         except Exception:  # if not possible (e.g. PermissionError), do not raise
-             pass
-
-         # 1. Is it set by an environment variable?
-         token: Optional[str] = os.environ.get("HUGGING_FACE_HUB_TOKEN")
-         if token is not None:
-             return token
-
-         # 2. Is it set in the token path?
-         try:
-             return cls.path_token.read_text()
-         except FileNotFoundError:
-             return None
-
-     @classmethod
-     def delete_token(cls) -> None:
-         """
-         Deletes the token from storage. Does not fail if the token does not exist.
-         """
-         try:
-             cls.path_token.unlink()
-         except FileNotFoundError:
-             pass
-
-         try:
-             cls._old_path_token.unlink()
-         except FileNotFoundError:
-             pass
-
-     @classmethod
-     def _copy_to_new_path_and_warn(cls):
-         if cls._old_path_token.exists() and not cls.path_token.exists():
-             cls.save_token(cls._old_path_token.read_text())
-             warnings.warn(
-                 f"A token has been found in `{cls._old_path_token}`. This is the old"
-                 " path where tokens were stored. The new location is"
-                 f" `{cls.path_token}`, which is configurable using the `HF_HOME`"
-                 " environment variable. Your token has been copied to this new"
-                 " location. You can now safely delete the old token file manually"
-                 " or use `huggingface-cli logout`."
-             )
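
For context, `HfFolder` is a plain file-backed token store. A round-trip sketch (mine; it assumes `huggingface_hub` is installed and that `HUGGING_FACE_HUB_TOKEN` is unset, since the environment variable takes precedence in `get_token`):

from huggingface_hub import HfFolder

HfFolder.save_token("hf_dummy_token")            # "hf_dummy_token" is a placeholder
assert HfFolder.get_token() == "hf_dummy_token"
HfFolder.delete_token()                          # idempotent: missing files are ignored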
spaces/ECCV2022/PSG/OpenPSG/configs/_base_/schedules/schedule_3x.py DELETED
@@ -1,10 +0,0 @@
- # optimizer
- optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
- optimizer_config = dict(grad_clip=None)
- # learning policy
- lr_config = dict(policy='step',
-                  warmup='linear',
-                  warmup_iters=1000,
-                  warmup_ratio=0.001,
-                  step=[27, 33])
- runner = dict(type='EpochBasedRunner', max_epochs=36)
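
This is MMDetection's standard 3x step schedule: 36 epochs with the LR decayed at epochs 27 and 33, after a 1000-iteration linear warmup that starts at warmup_ratio * lr. A rough sketch of the resulting curve (my own; it assumes MMCV's default decay factor gamma=0.1):

def lr_at_epoch(epoch: int, base_lr: float = 0.02, steps=(27, 33), gamma: float = 0.1) -> float:
    # Multiply the base LR by gamma once per milestone already passed.
    return base_lr * gamma ** sum(epoch >= s for s in steps)

# epochs 0-26 -> 0.02, epochs 27-32 -> 0.002, epochs 33-35 -> 0.0002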
spaces/ECCV2022/bytetrack/exps/example/mot/yolox_l_mix_det.py DELETED
@@ -1,138 +0,0 @@
- # encoding: utf-8
- import os
- import random
- import torch
- import torch.nn as nn
- import torch.distributed as dist
-
- from yolox.exp import Exp as MyExp
- from yolox.data import get_yolox_datadir
-
- class Exp(MyExp):
-     def __init__(self):
-         super(Exp, self).__init__()
-         self.num_classes = 1
-         self.depth = 1.0
-         self.width = 1.0
-         self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
-         self.train_ann = "train.json"
-         self.val_ann = "train.json"
-         self.input_size = (800, 1440)
-         self.test_size = (800, 1440)
-         self.random_size = (18, 32)
-         self.max_epoch = 80
-         self.print_interval = 20
-         self.eval_interval = 5
-         self.test_conf = 0.001
-         self.nmsthre = 0.7
-         self.no_aug_epochs = 10
-         self.basic_lr_per_img = 0.001 / 64.0
-         self.warmup_epochs = 1
-
-     def get_data_loader(self, batch_size, is_distributed, no_aug=False):
-         from yolox.data import (
-             MOTDataset,
-             TrainTransform,
-             YoloBatchSampler,
-             DataLoader,
-             InfiniteSampler,
-             MosaicDetection,
-         )
-
-         dataset = MOTDataset(
-             data_dir=os.path.join(get_yolox_datadir(), "mix_det"),
-             json_file=self.train_ann,
-             name='',
-             img_size=self.input_size,
-             preproc=TrainTransform(
-                 rgb_means=(0.485, 0.456, 0.406),
-                 std=(0.229, 0.224, 0.225),
-                 max_labels=500,
-             ),
-         )
-
-         dataset = MosaicDetection(
-             dataset,
-             mosaic=not no_aug,
-             img_size=self.input_size,
-             preproc=TrainTransform(
-                 rgb_means=(0.485, 0.456, 0.406),
-                 std=(0.229, 0.224, 0.225),
-                 max_labels=1000,
-             ),
-             degrees=self.degrees,
-             translate=self.translate,
-             scale=self.scale,
-             shear=self.shear,
-             perspective=self.perspective,
-             enable_mixup=self.enable_mixup,
-         )
-
-         self.dataset = dataset
-
-         if is_distributed:
-             batch_size = batch_size // dist.get_world_size()
-
-         sampler = InfiniteSampler(
-             len(self.dataset), seed=self.seed if self.seed else 0
-         )
-
-         batch_sampler = YoloBatchSampler(
-             sampler=sampler,
-             batch_size=batch_size,
-             drop_last=False,
-             input_dimension=self.input_size,
-             mosaic=not no_aug,
-         )
-
-         dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
-         dataloader_kwargs["batch_sampler"] = batch_sampler
-         train_loader = DataLoader(self.dataset, **dataloader_kwargs)
-
-         return train_loader
-
-     def get_eval_loader(self, batch_size, is_distributed, testdev=False):
-         from yolox.data import MOTDataset, ValTransform
-
-         valdataset = MOTDataset(
-             data_dir=os.path.join(get_yolox_datadir(), "mot"),
-             json_file=self.val_ann,
-             img_size=self.test_size,
-             name='train',
-             preproc=ValTransform(
-                 rgb_means=(0.485, 0.456, 0.406),
-                 std=(0.229, 0.224, 0.225),
-             ),
-         )
-
-         if is_distributed:
-             batch_size = batch_size // dist.get_world_size()
-             sampler = torch.utils.data.distributed.DistributedSampler(
-                 valdataset, shuffle=False
-             )
-         else:
-             sampler = torch.utils.data.SequentialSampler(valdataset)
-
-         dataloader_kwargs = {
-             "num_workers": self.data_num_workers,
-             "pin_memory": True,
-             "sampler": sampler,
-         }
-         dataloader_kwargs["batch_size"] = batch_size
-         val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)
-
-         return val_loader
-
-     def get_evaluator(self, batch_size, is_distributed, testdev=False):
-         from yolox.evaluators import COCOEvaluator
-
-         val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
-         evaluator = COCOEvaluator(
-             dataloader=val_loader,
-             img_size=self.test_size,
-             confthre=self.test_conf,
-             nmsthre=self.nmsthre,
-             num_classes=self.num_classes,
-             testdev=testdev,
-         )
-         return evaluator
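
Note that `basic_lr_per_img` encodes the learning rate per image, so the effective optimizer LR scales with the total batch size; YOLOX experiments conventionally derive it roughly as below (my paraphrase of the convention, not code from this file):

batch_size = 48                     # illustrative total batch across all GPUs
lr = (0.001 / 64.0) * batch_size    # -> 7.5e-4; a batch of 64 recovers lr = 0.001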
spaces/EPFL-VILAB/MultiMAE/utils/semseg_metrics.py DELETED
@@ -1,231 +0,0 @@
- # --------------------------------------------------------
- # Code from the MMSegmentation code base
- # https://github.com/open-mmlab/mmsegmentation
- # --------------------------------------------------------
-
- import numpy as np
-
-
- def intersect_and_union(pred_label,
-                         label,
-                         num_classes,
-                         ignore_index,
-                         label_map=dict(),
-                         reduce_zero_label=False):
-     """Calculate intersection and union.
-
-     Args:
-         pred_label (ndarray): Prediction segmentation map.
-         label (ndarray): Ground truth segmentation map.
-         num_classes (int): Number of categories.
-         ignore_index (int): Index that will be ignored in evaluation.
-         label_map (dict): Mapping old labels to new labels. The parameter will
-             work only when label is str. Default: dict().
-         reduce_zero_label (bool): Whether to ignore the zero label. The parameter
-             will work only when label is str. Default: False.
-
-     Returns:
-         ndarray: The intersection of prediction and ground truth histogram
-             on all classes.
-         ndarray: The union of prediction and ground truth histogram on all
-             classes.
-         ndarray: The prediction histogram on all classes.
-         ndarray: The ground truth histogram on all classes.
-     """
-
-     if isinstance(pred_label, str):
-         pred_label = np.load(pred_label)
-
-     # modify if custom classes
-     if label_map is not None:
-         for old_id, new_id in label_map.items():
-             label[label == old_id] = new_id
-     if reduce_zero_label:
-         # avoid using underflow conversion
-         label[label == 0] = 255
-         label = label - 1
-         label[label == 254] = 255
-
-     mask = (label != ignore_index)
-     pred_label = pred_label[mask]
-     label = label[mask]
-
-     intersect = pred_label[pred_label == label]
-     area_intersect, _ = np.histogram(
-         intersect, bins=np.arange(num_classes + 1))
-     area_pred_label, _ = np.histogram(
-         pred_label, bins=np.arange(num_classes + 1))
-     area_label, _ = np.histogram(label, bins=np.arange(num_classes + 1))
-     area_union = area_pred_label + area_label - area_intersect
-
-     return area_intersect, area_union, area_pred_label, area_label
-
-
- def total_intersect_and_union(results,
-                               gt_seg_maps,
-                               num_classes,
-                               ignore_index,
-                               label_map=dict(),
-                               reduce_zero_label=False):
-     """Calculate total intersection and union.
-
-     Args:
-         results (list[ndarray]): List of prediction segmentation maps.
-         gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
-         num_classes (int): Number of categories.
-         ignore_index (int): Index that will be ignored in evaluation.
-         label_map (dict): Mapping old labels to new labels. Default: dict().
-         reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
-
-     Returns:
-         ndarray: The intersection of prediction and ground truth histogram
-             on all classes.
-         ndarray: The union of prediction and ground truth histogram on all
-             classes.
-         ndarray: The prediction histogram on all classes.
-         ndarray: The ground truth histogram on all classes.
-     """
-
-     num_imgs = len(results)
-     assert len(gt_seg_maps) == num_imgs
-     total_area_intersect = np.zeros((num_classes, ), dtype=np.float64)
-     total_area_union = np.zeros((num_classes, ), dtype=np.float64)
-     total_area_pred_label = np.zeros((num_classes, ), dtype=np.float64)
-     total_area_label = np.zeros((num_classes, ), dtype=np.float64)
-     for i in range(num_imgs):
-         area_intersect, area_union, area_pred_label, area_label = \
-             intersect_and_union(results[i], gt_seg_maps[i], num_classes,
-                                 ignore_index, label_map, reduce_zero_label)
-         total_area_intersect += area_intersect
-         total_area_union += area_union
-         total_area_pred_label += area_pred_label
-         total_area_label += area_label
-     return total_area_intersect, total_area_union, \
-         total_area_pred_label, total_area_label
-
-
- def mean_iou(results,
-              gt_seg_maps,
-              num_classes,
-              ignore_index,
-              nan_to_num=None,
-              label_map=dict(),
-              reduce_zero_label=False):
-     """Calculate Mean Intersection over Union (mIoU).
-
-     Args:
-         results (list[ndarray]): List of prediction segmentation maps.
-         gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
-         num_classes (int): Number of categories.
-         ignore_index (int): Index that will be ignored in evaluation.
-         nan_to_num (int, optional): If specified, NaN values will be replaced
-             by the numbers defined by the user. Default: None.
-         label_map (dict): Mapping old labels to new labels. Default: dict().
-         reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
-
-     Returns:
-         float: Overall accuracy on all images.
-         ndarray: Per category accuracy, shape (num_classes, ).
-         ndarray: Per category IoU, shape (num_classes, ).
-     """
-
-     all_acc, acc, iou = eval_metrics(
-         results=results,
-         gt_seg_maps=gt_seg_maps,
-         num_classes=num_classes,
-         ignore_index=ignore_index,
-         metrics=['mIoU'],
-         nan_to_num=nan_to_num,
-         label_map=label_map,
-         reduce_zero_label=reduce_zero_label)
-     return all_acc, acc, iou
-
-
- def mean_dice(results,
-               gt_seg_maps,
-               num_classes,
-               ignore_index,
-               nan_to_num=None,
-               label_map=dict(),
-               reduce_zero_label=False):
-     """Calculate Mean Dice (mDice).
-
-     Args:
-         results (list[ndarray]): List of prediction segmentation maps.
-         gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
-         num_classes (int): Number of categories.
-         ignore_index (int): Index that will be ignored in evaluation.
-         nan_to_num (int, optional): If specified, NaN values will be replaced
-             by the numbers defined by the user. Default: None.
-         label_map (dict): Mapping old labels to new labels. Default: dict().
-         reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
-
-     Returns:
-         float: Overall accuracy on all images.
-         ndarray: Per category accuracy, shape (num_classes, ).
-         ndarray: Per category dice, shape (num_classes, ).
-     """
-
-     all_acc, acc, dice = eval_metrics(
-         results=results,
-         gt_seg_maps=gt_seg_maps,
-         num_classes=num_classes,
-         ignore_index=ignore_index,
-         metrics=['mDice'],
-         nan_to_num=nan_to_num,
-         label_map=label_map,
-         reduce_zero_label=reduce_zero_label)
-     return all_acc, acc, dice
-
-
- def eval_metrics(results,
-                  gt_seg_maps,
-                  num_classes,
-                  ignore_index,
-                  metrics=['mIoU'],
-                  nan_to_num=None,
-                  label_map=dict(),
-                  reduce_zero_label=False):
-     """Calculate evaluation metrics.
-     Args:
-         results (list[ndarray]): List of prediction segmentation maps.
-         gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
-         num_classes (int): Number of categories.
-         ignore_index (int): Index that will be ignored in evaluation.
-         metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and 'mDice'.
-         nan_to_num (int, optional): If specified, NaN values will be replaced
-             by the numbers defined by the user. Default: None.
-         label_map (dict): Mapping old labels to new labels. Default: dict().
-         reduce_zero_label (bool): Whether to ignore the zero label. Default: False.
-     Returns:
-         float: Overall accuracy on all images.
-         ndarray: Per category accuracy, shape (num_classes, ).
-         ndarray: Per category evaluation metrics, shape (num_classes, ).
-     """
-
-     if isinstance(metrics, str):
-         metrics = [metrics]
-     allowed_metrics = ['mIoU', 'mDice']
-     if not set(metrics).issubset(set(allowed_metrics)):
-         raise KeyError('metrics {} is not supported'.format(metrics))
-     total_area_intersect, total_area_union, total_area_pred_label, \
-         total_area_label = total_intersect_and_union(results, gt_seg_maps,
-                                                      num_classes, ignore_index,
-                                                      label_map,
-                                                      reduce_zero_label)
-     all_acc = total_area_intersect.sum() / total_area_label.sum()
-     acc = total_area_intersect / total_area_label
-     ret_metrics = [all_acc, acc]
-     for metric in metrics:
-         if metric == 'mIoU':
-             iou = total_area_intersect / total_area_union
-             ret_metrics.append(iou)
-         elif metric == 'mDice':
-             dice = 2 * total_area_intersect / (
-                 total_area_pred_label + total_area_label)
-             ret_metrics.append(dice)
-     if nan_to_num is not None:
-         ret_metrics = [
-             np.nan_to_num(metric, nan=nan_to_num) for metric in ret_metrics
-         ]
-     return ret_metrics
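
Apart from NumPy the module is self-contained, so a tiny sanity check is easy to run (my example; it assumes the file above is saved as semseg_metrics.py):

import numpy as np
from semseg_metrics import mean_iou

pred = [np.array([[0, 0], [1, 1]])]  # one 2x2 prediction map
gt = [np.array([[0, 1], [1, 1]])]    # matching ground truth
all_acc, acc, iou = mean_iou(pred, gt, num_classes=2, ignore_index=255)
print(all_acc)  # 0.75: 3 of 4 pixels correct
print(iou)      # per-class IoU: class 0 -> 0.5, class 1 -> 2/3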
spaces/EasyEasy/EasyProxy/greeting.md DELETED
@@ -1,51 +0,0 @@
- <center><img src="https://files.catbox.moe/jsfdbh.png"></center>
-
- <br>
-
- I'm letting people in on a first-come, first-served basis.
-
- Contact `[email protected]` for access. There are no requirements aside from a polite and formal business-style email. [Please review this short article on how to write a formal business email.](https://www.indeed.com/career-advice/career-development/format-for-formal-email)
-
- Proxy stats -> [https://chub-archive.evulid.cc/#/proxy-stats.html?proxy=easyeasy_easyproxy](https://chub-archive.evulid.cc/#/proxy-stats.html?proxy=easyeasy_easyproxy)
-
- <hr>
-
- **Update Oct 2:** I've handed out 55 user tokens. I'm still accepting applications, so send me an email if you'd like to get on the waiting list.
-
- **Update Oct 3:** I have issued a total of ~~163~~ 173 user tokens. My sources should be able to handle this many!
-
- <hr>
-
- <style>
- .easyrow > * {
-     box-sizing: border-box;
- }
-
- .easyimg {
-     margin: auto;
-     width: 200px;
-     display: block;
- }
-
- .easycolumn {
-     float: left;
-     width: 50%;
-     padding: 5px;
- }
-
- /* Clearfix (clear floats) */
- .easyrow::after {
-     content: "";
-     clear: both;
-     display: table;
- }
- </style>
-
- <div class="easyrow">
-     <div class="easycolumn">
-         <img class="easyimg" src="https://cdn-uploads.huggingface.co/production/uploads/6510fdd699fe56caa83dbdeb/Q-98KrBBRjAgEXi_1brUG.png">
-     </div>
-     <div class="easycolumn">
-         <img class="easyimg" src="https://cdn-uploads.huggingface.co/production/uploads/6510fdd699fe56caa83dbdeb/EFnzILavgm318hSGdLi_5.png">
-     </div>
- </div>
spaces/EcoCy/LoRA-DreamBooth-Training-UI/utils.py DELETED
@@ -1,59 +0,0 @@
- from __future__ import annotations
-
- import pathlib
-
-
- def find_exp_dirs(ignore_repo: bool = False) -> list[str]:
-     repo_dir = pathlib.Path(__file__).parent
-     exp_root_dir = repo_dir / 'experiments'
-     if not exp_root_dir.exists():
-         return []
-     exp_dirs = sorted(exp_root_dir.glob('*'))
-     exp_dirs = [
-         exp_dir for exp_dir in exp_dirs
-         if (exp_dir / 'pytorch_lora_weights.bin').exists()
-     ]
-     if ignore_repo:
-         exp_dirs = [
-             exp_dir for exp_dir in exp_dirs if not (exp_dir / '.git').exists()
-         ]
-     return [path.relative_to(repo_dir).as_posix() for path in exp_dirs]
-
-
- def save_model_card(
-     save_dir: pathlib.Path,
-     base_model: str,
-     instance_prompt: str,
-     test_prompt: str = '',
-     test_image_dir: str = '',
- ) -> None:
-     image_str = ''
-     if test_prompt and test_image_dir:
-         image_paths = sorted((save_dir / test_image_dir).glob('*'))
-         if image_paths:
-             image_str = f'Test prompt: {test_prompt}\n'
-             for image_path in image_paths:
-                 rel_path = image_path.relative_to(save_dir)
-                 image_str += f'![{image_path.stem}]({rel_path})\n'
-
-     model_card = f'''---
- license: creativeml-openrail-m
- base_model: {base_model}
- instance_prompt: {instance_prompt}
- tags:
- - stable-diffusion
- - stable-diffusion-diffusers
- - text-to-image
- - diffusers
- - lora
- inference: true
- ---
- # LoRA DreamBooth - {save_dir.name}
-
- These are LoRA adaption weights for [{base_model}](https://huggingface.co/{base_model}). The weights were trained on the instance prompt "{instance_prompt}" using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following.
-
- {image_str}
- '''
-
-     with open(save_dir / 'README.md', 'w') as f:
-         f.write(model_card)
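
A minimal call sketch (mine; the run directory, base model, and prompts are illustrative, and `save_dir` must already exist):

import pathlib

save_model_card(
    save_dir=pathlib.Path('experiments/my-run'),
    base_model='runwayml/stable-diffusion-v1-5',
    instance_prompt='a photo of sks dog',
    test_prompt='a photo of sks dog in a bucket',
    test_image_dir='test_images',
)
# -> writes experiments/my-run/README.md with YAML front matter and, if test
#    images exist under test_images/, an inline gallery.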