Commit d5eba0a
Parent(s): 760e1a0

Update parquet files (step 76 of 397)

This view is limited to 50 files because it contains too many changes. See raw diff.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/EasyNote Crack __EXCLUSIVE__.md +0 -108
- spaces/1gistliPinn/ChatGPT4/Examples/Barfi 2012 Hindi 720p Dvdrip Charmeleon Silver Rg Subtitles Download Fix.md +0 -62
- spaces/1gistliPinn/ChatGPT4/Examples/Facebook Chat Bubbles On Pc HOT!.md +0 -29
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Block Master for Minecraft PE The Ultimate Launcher for MC PE Mods.md +0 -138
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chess Online APK Challenge Your Friends and Rivals in the Ultimate Strategy Game.md +0 -123
- spaces/1phancelerku/anime-remove-background/Braindom Mod APK Solve Puzzles and Brain Teasers with Free Rewards.md +0 -85
- spaces/1phancelerku/anime-remove-background/Download Clash of Clans MOD APK with Unlimited Gems and Troops (v15.297.217).md +0 -144
- spaces/1phancelerku/anime-remove-background/Endless Run Jungle Escape Mod APK Discover the Secrets of the Jungle.md +0 -127
- spaces/232labs/VToonify/vtoonify/model/stylegan/op/conv2d_gradfix.py +0 -227
- spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/spec_utils.py +0 -667
- spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/README.md +0 -12
- spaces/AI-Hobbyist/Hoyo-RVC/onnx_inference_demo.py +0 -20
- spaces/AIConsultant/MusicGen/audiocraft/grids/compression/__init__.py +0 -6
- spaces/AIConsultant/MusicGen/tests/utils/__init__.py +0 -5
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/syntaspeech.py +0 -277
- spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/pann_model.py +0 -543
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/bert.py +0 -32
- spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/tokenizer.py +0 -180
- spaces/Abhilashvj/planogram-compliance/utils/segment/augmentations.py +0 -128
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/Conversation.ts +0 -17
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/anchor/Factory.js +0 -11
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/HideMethods.js +0 -30
- spaces/Alfasign/HuggingGPT-Lite/README.md +0 -14
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/fused_act.py +0 -40
- spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/coaches/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/configs/tridentnet/README.md +0 -28
- spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_small/run.sh +0 -10
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/shared_heads/__init__.py +0 -3
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py +0 -5
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/tokens.py +0 -36
- spaces/AnnasBlackHat/Image-Similarity/src/similarity/similarity.py +0 -35
- spaces/AnnasBlackHat/Image-Similarity/src/util/image.py +0 -13
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pipelines/__init__.py +0 -16
- spaces/Anonymous-sub/Rerender/ControlNet/ldm/data/util.py +0 -24
- spaces/AquaSuisei/ChatGPTXE/run_Linux.sh +0 -25
- spaces/Awesimo/jojogan/e4e/criteria/lpips/lpips.py +0 -35
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py +0 -93
- spaces/Benson/text-generation/Examples/Apk.apkmonk.com.md +0 -45
- spaces/BernardoOlisan/vqganclip/CLIP/setup.py +0 -21
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/service.py +0 -110
- spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/_parser.py +0 -1613
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/__init__.py +0 -0
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py +0 -141
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/windows.py +0 -195
- spaces/Billyosoro/ESRGAN/Training.md +0 -100
- spaces/BraydenMoore/MARCI-NFL-Betting/README.md +0 -10
- spaces/CVPR/LIVE/main.py +0 -1040
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/adjacent_difference.h +0 -50
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EasyNote Crack __EXCLUSIVE__.md
DELETED
@@ -1,108 +0,0 @@
[deleted content: 108-line SEO article "EasyNote Crack: What Is It and How to Use It?"]
spaces/1gistliPinn/ChatGPT4/Examples/Barfi 2012 Hindi 720p Dvdrip Charmeleon Silver Rg Subtitles Download Fix.md
DELETED
@@ -1,62 +0,0 @@
[deleted content: 62-line SEO article "Barfi 2012 Hindi 720p Dvdrip Charmeleon Silver Rg Subtitles Download: A Guide for Movie Lovers"]
spaces/1gistliPinn/ChatGPT4/Examples/Facebook Chat Bubbles On Pc HOT!.md
DELETED
@@ -1,29 +0,0 @@
[deleted content: 29-line SEO article "How to Enable Facebook Chat Bubbles on Your PC"]
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Block Master for Minecraft PE The Ultimate Launcher for MC PE Mods.md
DELETED
@@ -1,138 +0,0 @@
[deleted content: 138-line SEO article "Block Master for Minecraft PE Mod APK: A Guide for Beginners"]
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Chess Online APK Challenge Your Friends and Rivals in the Ultimate Strategy Game.md
DELETED
@@ -1,123 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Chess Chess Online Apk: A Guide for Beginners</h1>
|
3 |
-
<p>If you are looking for a fun, easy, and convenient way to play chess online, you might want to check out Chess Chess Online Apk. This is a free app that allows you to play chess with millions of players from around the world, or with your friends and family. You can also learn chess rules and strategies, improve your skills, and enjoy various features and options in the app. In this article, we will give you a brief introduction to Chess Chess Online Apk, explain why playing chess online is beneficial for your brain and mental health, show you how to play chess online with the app, and answer some frequently asked questions.</p>
|
4 |
-
<h2>chess chess online apk</h2><br /><p><b><b>Download Zip</b> > <a href="https://urlin.us/2uSTtl">https://urlin.us/2uSTtl</a></b></p><br /><br />
|
5 |
-
<h2>What is Chess Chess Online Apk?</h2>
|
6 |
-
<h3>A brief introduction to the app and its features</h3>
|
7 |
-
<p>Chess Chess Online Apk is an app that lets you play chess online for free on your Android device. You can download it from <a href="(^1^)">Chess.com</a>, which is one of the most popular and trusted chess websites in the world. The app has over 100 million members from different countries and skill levels, so you can always find someone to play with at any time. You can also play against customizable training bots, solve chess puzzles, take chess lessons, follow chess news and events, and more.</p>
|
8 |
-
<h3>How to download and install the app on your device</h3>
|
9 |
-
<p>To download and install Chess Chess Online Apk on your device, you need to follow these simple steps:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Go to <a href="(^1^)">Chess.com</a> on your browser and click on the "Play" button.</li>
|
12 |
-
<li>Select "Play on Android" from the menu.</li>
|
13 |
-
<li>You will be redirected to the Google Play Store page of the app. Click on the "Install" button.</li>
|
14 |
-
<li>Wait for the app to download and install on your device.</li>
|
15 |
-
<li>Open the app and sign up with your email address or Facebook account.</li>
|
16 |
-
<li>Enjoy playing chess online!</li>
|
17 |
-
</ol>
|
18 |
-
<h2>Why Play Chess Online?</h2>
|
19 |
-
<h3>The benefits of playing chess for your brain and mental health</h3>
|
20 |
-
<p>Playing chess is not only a fun and challenging game, but also a great way to exercise your brain and improve your mental health. According to various studies, playing chess can have many benefits for your brain, such as:</p>
|
21 |
-
<p>chess online multiplayer apk<br />
|
22 |
-
chess puzzles apk<br />
|
23 |
-
chessbase online apk<br />
|
24 |
-
chess play and learn apk<br />
|
25 |
-
chess online free apk<br />
|
26 |
-
chess tactics apk<br />
|
27 |
-
chess lessons apk<br />
|
28 |
-
chess variants apk<br />
|
29 |
-
chess 3d apk<br />
|
30 |
-
chess offline apk<br />
|
31 |
-
chess online with friends apk<br />
|
32 |
-
chess analysis apk<br />
|
33 |
-
chess coach apk<br />
|
34 |
-
chess strategy apk<br />
|
35 |
-
chess master apk<br />
|
36 |
-
chess live apk<br />
|
37 |
-
chess clock apk<br />
|
38 |
-
chess notation apk<br />
|
39 |
-
chess board apk<br />
|
40 |
-
chess games apk<br />
|
41 |
-
chess engine apk<br />
|
42 |
-
chess ratings apk<br />
|
43 |
-
chess news apk<br />
|
44 |
-
chess videos apk<br />
|
45 |
-
chess books apk<br />
|
46 |
-
chess openings apk<br />
|
47 |
-
chess endgames apk<br />
|
48 |
-
chess problems apk<br />
|
49 |
-
chess mate apk<br />
|
50 |
-
chess moves apk<br />
|
51 |
-
chess rules apk<br />
|
52 |
-
chess history apk<br />
|
53 |
-
chess trivia apk<br />
|
54 |
-
chess quiz apk<br />
|
55 |
-
chess wallpaper apk<br />
|
56 |
-
chess themes apk<br />
|
57 |
-
chess sounds apk<br />
|
58 |
-
chess music apk<br />
|
59 |
-
chess art apk<br />
|
60 |
-
chess fun apk<br />
|
61 |
-
chess jokes apk<br />
|
62 |
-
chess memes apk<br />
|
63 |
-
chess quotes apk<br />
|
64 |
-
chess facts apk<br />
|
65 |
-
chess tips apk<br />
|
66 |
-
chess tricks apk<br />
|
67 |
-
chess secrets apk<br />
|
68 |
-
chess stories apk<br />
|
69 |
-
chess legends apk</p>
|
70 |
-
<ul>
|
71 |
-
<li>Raising your IQ. Playing chess can stimulate your logical thinking, pattern recognition, decision making, memory, and problem solving skills, which are all related to intelligence. A study of 4,000 Venezuelan students showed significant increases in their IQ scores after four months of chess instruction. </li>
|
72 |
-
<li>Preventing Alzheimer's disease. Playing chess can keep your brain active, sharp, and young, which can reduce the chances of developing dementia later in life. A study found that people over 75 who engaged in brain-targeted activities like chess were less likely to develop dementia than those who didn't. </li>
|
73 |
-
<h3>Exercising both sides of the brain. Playing chess can activate both the left hemisphere (responsible for logic, analysis, and calculation) and the right hemisphere (responsible for creativity, intuition, and imagination) of the brain. A study using MRI scans showed that chess players use both hemispheres more than non-chess players. </h3>
|
74 |
-
<h3>Reducing stress and anxiety. Playing chess can help you relax, focus, and cope with difficult situations. Chess can also provide a sense of achievement, satisfaction, and social interaction, which can boost your mood and self-esteem. A study found that chess players had lower levels of cortisol (a stress hormone) than non-chess players. </h3>
|
75 |
-
<h2>How to Play Chess Online with Chess Chess Online Apk?</h2>
|
76 |
-
<h3>The basics of chess rules and strategies</h3>
|
77 |
-
<p>If you are new to chess, you might want to learn the basics of chess rules and strategies before playing online. Chess is a game played by two players on a board with 64 squares of alternating colors. Each player has 16 pieces: one king, one queen, two rooks, two bishops, two knights, and eight pawns. The goal of the game is to checkmate the opponent's king, which means to trap it so that it cannot escape or be protected by another piece.</p>
|
78 |
-
<p>The pieces move in different ways according to their type. The king can move one square in any direction. The queen can move any number of squares in any direction. The rook can move any number of squares horizontally or vertically. The bishop can move any number of squares diagonally. The knight can move in an L-shape: two squares horizontally or vertically and then one square perpendicular to that direction. The pawn can move one square forward, or two squares forward on its first move. It can also capture an enemy piece by moving one square diagonally forward.</p>
|
79 |
-
<p>There are some special rules in chess that you should know. For example, castling is a move that allows you to move your king and one of your rooks at the same time. En passant is a move that allows you to capture an enemy pawn that has just moved two squares forward next to your pawn. Promotion is a move that allows you to change your pawn into another piece (usually a queen) when it reaches the last rank of the board.</p>
|
80 |
-
<p>There are also some basic strategies in chess that you should learn. For example, you should try to control the center of the board, develop your pieces quickly, protect your king, attack your opponent's weaknesses, and coordinate your pieces.</p>
|
81 |
-
<h3>The different modes and options available in the app</h3>
|
82 |
-
<p>Chess Chess Online Apk offers various modes and options for playing chess online. You can choose from:</p>
|
83 |
-
<ul>
|
84 |
-
<li>Play online: You can play online with other players from around the world. You can choose your time control (from bullet to classical), your rating range (from beginner to master), and your color preference (white, black, or random). You can also chat with your opponent during the game.</li>
|
85 |
-
<li>Play offline: You can play offline with a computer bot. You can choose the difficulty level (from easy to hard), the time control (from unlimited to blitz), and the color preference (white, black, or random). You can also undo moves, get hints, and analyze the game.</li>
|
86 |
-
<li>Puzzles: You can solve chess puzzles that test your tactical skills. You can choose the difficulty level (from easy to hard), the theme (from checkmate to endgame), and the number of puzzles (from 10 to 100). You can also get hints, solutions, and explanations.</li>
|
87 |
-
<li>Lessons: You can take chess lessons that teach you various aspects of chess. You can choose the topic (from openings to strategy), the level (from beginner to advanced), and the format (from video to interactive). You can also get feedback, quizzes, and exercises.</li>
|
88 |
-
<li>News: You can follow chess news and events from around the world. You can read articles, watch videos, listen to podcasts, and view live games.</li>
|
89 |
-
<li>More: You can access more features and options in the app. You can view your profile, statistics, achievements, friends list, messages, settings, and more.</li>
|
90 |
-
</ul>
|
91 |
-
<h3>The tips and tricks to improve your skills and win more games</h3>
|
92 |
-
<p>If you want to improve your skills and win more games with Chess Chess Online Apk, you might want to follow these tips and tricks:</p>
|
93 |
-
<ul>
|
94 |
-
|
95 |
-
<li>Practice regularly: The best way to improve your chess skills is to practice regularly. You can play online or offline games, solve puzzles, take lessons, or watch live games. The more you play, the more you learn and improve.</li>
|
96 |
-
<li>Study the basics: The basics of chess are the foundation of your chess skills. You should study the basic rules, moves, strategies, tactics, openings, endgames, and checkmates. You can use the app's lessons, puzzles, and analysis tools to help you with this.</li>
|
97 |
-
<li>Analyze your games: Analyzing your games is a great way to learn from your mistakes and successes. You can use the app's analysis tools to review your moves, evaluate your position, find better alternatives, and get feedback. You can also compare your games with other players or masters.</li>
|
98 |
-
<li>Challenge yourself: Challenging yourself is a good way to test your skills and push your limits. You can play against stronger opponents, try different time controls, explore new openings, or set yourself specific goals. You can also join tournaments, clubs, or teams in the app.</li>
|
99 |
-
<li>Have fun: Having fun is the most important thing when playing chess online. You should enjoy the game, be respectful to your opponents, be humble in victory and graceful in defeat, and have a positive attitude. You can also chat with other players, make friends, and share your passion for chess.</li>
|
100 |
-
</ul>
|
101 |
-
<h2>Conclusion</h2>
|
102 |
-
<p>Chess Chess Online Apk is a free app that allows you to play chess online with millions of players from around the world, or with your friends and family. You can also learn chess rules and strategies, improve your skills, and enjoy various features and options in the app. Playing chess online is beneficial for your brain and mental health, as it can raise your IQ, prevent Alzheimer's disease, exercise both sides of the brain, and reduce stress and anxiety. To play chess online with Chess Chess Online Apk, you need to download and install the app on your device, sign up with your email address or Facebook account, and choose your preferred mode and option. To improve your skills and win more games with Chess Chess Online Apk, you need to practice regularly, study the basics, analyze your games, challenge yourself, and have fun.</p>
|
103 |
-
<p>If you are interested in playing chess online with Chess Chess Online Apk, you can download it from <a href="">Chess.com</a> today. You will not regret it!</p>
|
104 |
-
<h2>FAQs</h2>
|
105 |
-
<h3>What are some of the best chess online platforms besides Chess Chess Online Apk?</h3>
|
106 |
-
<p>Some of the best chess online platforms besides Chess Chess Online Apk are:</p>
|
107 |
-
<ul>
|
108 |
-
<li><a href="">Lichess.org</a>: A free and open-source platform that offers unlimited games, puzzles, tournaments, analysis tools, and more.</li>
|
109 |
-
<li><a href="">Chess24.com</a>: A premium platform that offers live broadcasts, video series, interactive courses, grandmaster commentary, and more.</li>
|
110 |
-
<li><a href="">Chessbase.com</a>: A professional platform that offers a database of millions of games, a cloud engine service, a training system, and more.</li>
|
111 |
-
<li><a href="">Chesskid.com</a>: A kid-friendly platform that offers fun games, puzzles, <ul>
|
112 |
-
<li><a href="">Chesskid.com</a>: A kid-friendly platform that offers fun games, puzzles, lessons, videos, and more for children and parents.</li>
|
113 |
-
</ul>
|
114 |
-
<h3>How can I play chess online with my friends and family?</h3>
|
115 |
-
<p>To play chess online with your friends and family, you can use Chess Chess Online Apk's "Play a Friend" option. You can either invite your friends and family by sending them a link or a code, or accept their invitations by entering their link or code. You can also chat with them during the game and send them emojis.</p>
|
116 |
-
<h3>How can I learn more about chess history and origin?</h3>
|
117 |
-
<p>To learn more about chess history and origin, you can use Chess Chess Online Apk's "News" option. You can read articles, watch videos, listen to podcasts, and view live games that cover various topics related to chess history and origin. You can also use the app's "Lessons" option to learn about the history of chess openings, endgames, and famous players.</p>
|
118 |
-
<h3>How can I customize my chess board and pieces in the app?</h3>
|
119 |
-
<p>To customize your chess board and pieces in the app, you can use Chess Chess Online Apk's "Settings" option. You can choose from different themes, colors, styles, sounds, and animations for your chess board and pieces. You can also adjust the board size, orientation, coordinates, and notation.</p>
|
120 |
-
<h3>How can I contact the developers of Chess Chess Online Apk for feedback and support?</h3>
|
121 |
-
<p>To contact the developers of Chess Chess Online Apk for feedback and support, you can use Chess Chess Online Apk's "More" option. You can send them an email, a message, or a review. You can also follow them on social media platforms like Facebook, Twitter, Instagram, and YouTube.</p>
|
122 |
-
<br />
|
123 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Braindom Mod APK Solve Puzzles and Brain Teasers with Free Rewards.md
DELETED
@@ -1,85 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Braindom: Brain Games Test Mod APK - A Fun and Challenging Word Game</h1>
|
3 |
-
<p>Do you love word games that make you think outside the box? Do you enjoy solving puzzles that test your brain power and creativity? If yes, then you should try <strong>Braindom: Brain Games Test</strong>, a fun and challenging word game that will keep you entertained for hours.</p>
|
4 |
-
<h2>braindom brain games test mod apk</h2><br /><p><b><b>Download File</b> ► <a href="https://jinyurl.com/2uNPW9">https://jinyurl.com/2uNPW9</a></b></p><br /><br />
|
5 |
-
<h2>What is Braindom: Brain Games Test?</h2>
|
6 |
-
<p>Braindom: Brain Games Test is a word game that combines logic, memory, and vocabulary skills. You will have to answer various questions that range from easy to hard, from simple to absurd, from common sense to brain teasers. You will need to use your imagination, intuition, and knowledge to find the correct answer.</p>
|
7 |
-
<h3>Features of Braindom: Brain Games Test</h3>
|
8 |
-
<h4>- Hundreds of levels with different themes and difficulties</h4>
|
9 |
-
<p>The game has hundreds of levels that will challenge your brain in different ways. You will encounter questions about animals, celebrities, movies, music, history, geography, and more. Each level has a different theme and difficulty level, so you will never get bored or stuck.</p>
|
10 |
-
<h4>- Creative and humorous puzzles that test your logic, memory, and vocabulary</h4>
|
11 |
-
<p>The game has puzzles that are not only challenging but also funny and witty. You will have to use your logic, memory, and vocabulary skills to solve them. Some puzzles will make you laugh, some will make you think, and some will make you scratch your head. You will have to be smart and clever to find the right answer.</p>
|
12 |
-
<h4>- Earn coins and hints to help you solve tricky questions</h4>
|
13 |
-
<p>The game rewards you with coins and hints for every level you complete. You can use coins to buy more hints or skip levels if you are stuck. You can use hints to reveal letters or words in the answer or eliminate wrong options. You can also watch videos or share the game with your friends to get more coins and hints.</p>
|
14 |
-
<p>braindom brain games test mod apk download<br />
|
15 |
-
braindom brain games test mod apk unlimited money<br />
|
16 |
-
braindom brain games test mod apk latest version<br />
|
17 |
-
braindom brain games test mod apk android<br />
|
18 |
-
braindom brain games test mod apk ios<br />
|
19 |
-
braindom brain games test mod apk free<br />
|
20 |
-
braindom brain games test mod apk hack<br />
|
21 |
-
braindom brain games test mod apk online<br />
|
22 |
-
braindom brain games test mod apk offline<br />
|
23 |
-
braindom brain games test mod apk no ads<br />
|
24 |
-
braindom brain games test mod apk 2.0.4<br />
|
25 |
-
braindom brain games test mod apk 2023<br />
|
26 |
-
braindom brain games test mod apk for pc<br />
|
27 |
-
braindom brain games test mod apk rexdl<br />
|
28 |
-
braindom brain games test mod apk revdl<br />
|
29 |
-
braindom brain games test mod apk apkpure<br />
|
30 |
-
braindom brain games test mod apk apkloli<br />
|
31 |
-
braindom brain games test mod apk happymod<br />
|
32 |
-
braindom brain games test mod apk an1<br />
|
33 |
-
braindom brain games test mod apk android 1<br />
|
34 |
-
download game braindom:brain games test mod apk<br />
|
35 |
-
how to install braindom:brain games test mod apk<br />
|
36 |
-
how to play braindom:brain games test mod apk<br />
|
37 |
-
how to update braindom:brain games test mod apk<br />
|
38 |
-
how to get unlimited money in braindom:brain games test mod apk<br />
|
39 |
-
how to remove ads in braindom:brain games test mod apk<br />
|
40 |
-
how to hack braindom:brain games test mod apk<br />
|
41 |
-
is there a virus in the Braindom:Brain Games Test Mod APK?<br />
|
42 |
-
is Braindom:Brain Games Test Mod APK safe?<br />
|
43 |
-
is Braindom:Brain Games Test Mod APK legal?<br />
|
44 |
-
what is Braindom:Brain Games Test Mod APK?<br />
|
45 |
-
what are the features of Braindom:Brain Games Test Mod APK?<br />
|
46 |
-
what are the benefits of Braindom:Brain Games Test Mod APK?<br />
|
47 |
-
what are the disadvantages of Braindom:Brain Games Test Mod APK?<br />
|
48 |
-
what are the requirements for Braindom:Brain Games Test Mod APK?<br />
|
49 |
-
where can I download Braindom:Brain Games Test Mod APK?<br />
|
50 |
-
where can I find more information about Braindom:Brain Games Test Mod APK?<br />
|
51 |
-
why should I download Braindom:Brain Games Test Mod APK?<br />
|
52 |
-
why is Braindom:Brain Games Test Mod APK popular?<br />
|
53 |
-
why is Braindom:Brain Games Test Mod APK fun?</p>
|
54 |
-
<h4>- Play offline or online with friends and family</h4>
|
55 |
-
<p>The game can be played offline or online with friends and family. You can play offline without an internet connection anytime and anywhere. You can play online with your Facebook friends or other players around the world. You can also chat with them, send them gifts, or challenge them to beat your score.</p>
|
56 |
-
<h3>Why download Braindom: Brain Games Test Mod APK?</h3>
|
57 |
-
<h4>- Unlimited money to buy more hints and coins</h4>
|
58 |
-
<p>If you want to enjoy the game without any limitations, you should download <strong>Braindom: Brain Games Test Mod APK</strong>. This modded version of the game gives you unlimited money to buy more hints and coins. You can use them as much as you want without worrying about running out of them.</p>
|
59 |
-
<h4>- No ads to interrupt your gameplay</h4>
|
60 |
-
<p>Another benefit of downloading <strong>Braindom: Brain Games Test Mod APK</strong> is that it removes all the ads from the game. You will not have to watch any annoying or intrusive ads that interrupt your gameplay. You can play the game smoothly and comfortably without any distractions.</p>
|
61 |
-
<h4>- Easy installation and compatibility with most devices</h4>
|
62 |
-
<p><strong>Braindom: Brain Games Test Mod APK </strong> is easy to install and compatible with most devices. You just need to download the APK file from a trusted source and follow the simple steps to install it on your device. You do not need to root or jailbreak your device to use the mod. You can enjoy the game on your Android or iOS device without any problems.</p>
|
63 |
-
<h2>How to download and install Braindom: Brain Games Test Mod APK?</h2>
|
64 |
-
<p>If you want to download and install <strong>Braindom: Brain Games Test Mod APK</strong>, you can follow these steps:</p>
|
65 |
-
<h3>Step 1: Download the APK file from a trusted source</h3>
|
66 |
-
<p>You can download the APK file from a trusted source such as [APKPure] or [APKMirror]. These are reliable websites that offer safe and secure downloads of modded apps and games. You can search for <strong>Braindom: Brain Games Test Mod APK</strong> on these websites and click on the download button.</p>
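<p>If the download page publishes a checksum for the APK, you can compare it against the file you downloaded before installing. The following Python sketch only illustrates that idea; the file name and the expected hash are placeholders, not real values for this app:</p>
<pre><code># Illustrative only: verify a downloaded APK against a published SHA-256 checksum.
# "braindom-mod.apk" and the expected hash below are placeholders, not real values.
import hashlib

def sha256_of(path, chunk_size=1024 * 1024):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "0" * 64  # replace with the checksum published by the download site
actual = sha256_of("braindom-mod.apk")
print("checksum OK" if actual == expected else "checksum mismatch - do not install")
</code></pre>
<p>A matching checksum does not prove the file is safe, but a mismatch is a clear sign that you should not install it.</p>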
|
67 |
-
<h3>Step 2: Enable unknown sources on your device settings</h3>
|
68 |
-
<p>Before you can install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps and games from sources other than the official app store. To enable unknown sources, you can go to your device settings, then security, then unknown sources, and toggle it on.</p>
|
69 |
-
<h3>Step 3: Install the APK file and launch the game</h3>
|
70 |
-
<p>After you have enabled unknown sources, you can install the APK file by locating it in your downloads folder and tapping on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for the process to finish. Once the installation is done, you can launch the game and enjoy it.</p>
|
71 |
-
<h2>Conclusion</h2>
|
72 |
-
<p><strong>Braindom: Brain Games Test</strong> is a fun and challenging word game that will test your brain power and creativity. You will have to answer various questions that range from easy to hard, from simple to absurd, from common sense to brain teaser. You will have to use your imagination, intuition, and knowledge to find the correct answer.</p>
|
73 |
-
<p>If you want to enjoy the game without any limitations, you should download <strong>Braindom: Brain Games Test Mod APK</strong>. This modded version of the game gives you unlimited money to buy more hints and coins, removes all the ads from the game, and makes it easy to install and compatible with most devices.</p>
|
74 |
-
<p>So what are you waiting for? Download <strong>Braindom: Brain Games Test Mod APK</strong> today and have fun with this amazing word game.</p>
|
75 |
-
<h2>FAQs</h2>
|
76 |
-
<p>Here are some frequently asked questions about <strong>Braindom: Brain Games Test Mod APK</strong>:</p>
|
77 |
-
<table>
|
78 |
-
<tr><td><strong>Q: Is Braindom: Brain Games Test Mod APK safe to use?</strong></td><td><strong>A: Yes, it is safe to use as long as you download it from a trusted source. However, you should always be careful when downloading modded apps and games as they may contain viruses or malware that can harm your device.</strong></td></tr>
|
79 |
-
<tr><td><strong>Q: Do I need an internet connection to play Braindom: Brain Games Test?</strong></td><td><strong>A: No, you do not need an internet connection to play the game. You can play it offline anytime and anywhere. However, if you want to play online with your friends or other players, you will need an internet connection.</strong></td></tr>
|
80 |
-
<tr><td><strong>Q: How can I update Braindom: Brain Games Test Mod APK?</strong></td><td><strong>A: You can update the modded version of the game by downloading the latest version of the APK file from the same source where you downloaded it before. You can also check for updates on the game itself by tapping on the settings icon and then checking for updates.</strong></td></tr>
|
81 |
-
<tr><td><strong>Q: How can I contact the developers of Braindom: Brain Games Test?</strong></td><td><strong>A: You can contact the developers of the game by sending them an email at [[email protected]] or by visiting their website at [https://www.matchingham.gs/]. You can also follow them on Facebook at [https://www.facebook.com/matchinghamgames] or on Instagram at [https://www.instagram.com/matchingham.games/].</strong></td></tr>
|
82 |
-
<tr><td><strong>Q: How can I rate and review Braindom: Brain Games Test?</strong></td><td><strong>A: You can rate and review the game by going to the official app store where you downloaded it from. You can also rate and review the modded version of the game by going to the website where you downloaded it from. You can share your feedback, suggestions, and opinions with the developers and other players. You can also give the game a thumbs up or a thumbs down on Facebook or Instagram.</strong></td></tr>
|
83 |
-
</table>
|
84 |
-
<br />
|
85 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Download Clash of Clans MOD APK with Unlimited Gems and Troops (v15.297.217).md
DELETED
@@ -1,144 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Clash of Clans Mod APK Download (Unlimited Gems Troops) Latest Version</h1>
|
3 |
-
<p>Are you a fan of strategy games? Do you love building your own village, training your own army, and battling with millions of other players online? If yes, then you must have heard of <strong>Clash of Clans</strong>, one of the most popular and addictive games for both iOS and Android devices.</p>
|
4 |
-
<h2>clash of clans mod apk download (unlimited gems troops) latest version</h2><br /><p><b><b>Download</b> ✶ <a href="https://jinyurl.com/2uNOOV">https://jinyurl.com/2uNOOV</a></b></p><br /><br />
|
5 |
-
<p>But what if we tell you that you can enjoy this game even more with a modded version that gives you unlimited resources, gems, troops, and access to all the features that you normally have to pay for? Sounds amazing, right? Well, that's exactly what <strong>Clash of Clans Mod APK</strong> is all about.</p>
|
6 |
-
<p>In this article, we will tell you everything you need to know about Clash of Clans Mod APK, how to download and install it on your Android device, what are its features, how to play it, and some FAQs that you might have. So, without further ado, let's get started.</p>
|
7 |
-
<h2>What is Clash of Clans?</h2>
|
8 |
-
<h3>A brief introduction to the game and its features</h3>
|
9 |
-
<p>Clash of Clans is a strategy game developed by Supercell, a Finnish company that also created other popular games like Hay Day, Boom Beach, and Brawl Stars. The game was released in 2012 for iOS and in 2013 for Android, and since then it has become one of the most downloaded and played games in the world.</p>
|
10 |
-
<p>The game is set in a fantasy world where you have to build your own village, train your own troops, and fight with other players in clan wars and clan games. You can also join or create your own clan, where you can chat with other players, donate and receive troops, and participate in clan events.</p>
|
11 |
-
<p>The game has various types of resources that you need to collect and spend to upgrade your village and troops. These resources include gold, elixir, dark elixir, gems, and magic items. You can get these resources by raiding other players' villages, completing achievements, winning clan wars and clan games, or buying them with real money.</p>
|
12 |
-
<p>The game also has various types of buildings that you can construct and upgrade in your village. These buildings include town hall, barracks, army camps, laboratory, spell factory, gold mines, elixir collectors, dark elixir drills, gold storages, elixir storages, dark elixir storages, walls, cannons, archer towers, mortars, air defenses, wizard towers, hidden teslas, bomb towers, x-bows, inferno towers, eagle artillery, scattershots, air sweepers, air bombs , traps, clan castle, builder's hut, and decorations. You can also unlock and upgrade various types of troops and spells that you can use in battles. These troops and spells include barbarians, archers, giants, goblins, wall breakers, balloons, wizards, healers, dragons, pekkas, minions, hog riders, valkyries, golems, witches, lava hounds, bowlers, miners, baby dragons, electro dragons, yetis, ice golems, headhunters, super troops, lightning spell, healing spell, rage spell, jump spell, freeze spell, clone spell, poison spell, earthquake spell, haste spell, skeleton spell, bat spell and invisibility spell.</p>
|
13 |
-
<h3>The benefits of playing Clash of Clans Mod APK</h3>
|
14 |
-
<p>As you can see, Clash of Clans is a very fun and exciting game that offers a lot of content and features for you to enjoy. However, it can also be very challenging and time-consuming to progress in the game. You need to spend a lot of resources and gems to upgrade your village and troops. You also need to wait for long hours or days for the upgrades to finish. You might also face difficulties in finding suitable opponents or winning battles against stronger players.</p>
|
15 |
-
<p>That's why many players look for ways to hack or mod the game to get unlimited resources and gems. This way, they can skip the waiting time and enjoy the game without any limitations or restrictions. They can also experiment with different strategies and tactics without worrying about losing resources or trophies.</p>
|
16 |
-
<p>Clash of Clans Mod APK is one of the best and most reliable ways to hack or mod the game. It is a modified version of the original game that gives you access to unlimited resources and gems. It also unlocks all the buildings and upgrades that you normally have to pay for. It also gives you unlimited troops and spells that you can use in battles. It also allows you to customize and personalize your village and troops according to your preferences.</p>
|
17 |
-
<p>clash of clans hack apk free download (unlimited everything) 2023<br />
|
18 |
-
clash of clans modded apk with unlimited gold and elixir<br />
|
19 |
-
download clash of clans mod apk latest version (unlimited troops/gems)<br />
|
20 |
-
how to install clash of clans mod apk on android device<br />
|
21 |
-
clash of clans mod apk offline mode (no internet required)<br />
|
22 |
-
clash of clans cheats apk download for unlimited resources<br />
|
23 |
-
clash of clans unlimited gems mod apk 2023 (working)<br />
|
24 |
-
clash of clans mod apk with private server and custom mods<br />
|
25 |
-
clash of clans mod apk unlimited dark elixir and heroes<br />
|
26 |
-
best clash of clans mod apk download site (safe and secure)<br />
|
27 |
-
clash of clans mod apk for pc windows 10/8/7<br />
|
28 |
-
clash of clans mod apk unlimited builder base troops<br />
|
29 |
-
clash of clans mod apk with th14 and new troops<br />
|
30 |
-
clash of clans mod apk download link (direct and fast)<br />
|
31 |
-
clash of clans mod apk latest update 2023 (new features)<br />
|
32 |
-
clash of clans mod apk no root required (easy to use)<br />
|
33 |
-
clash of clans mod apk unlimited super troops and spells<br />
|
34 |
-
clash of clans mod apk with unlimited clan games rewards<br />
|
35 |
-
clash of clans mod apk for ios iphone/ipad/ipod<br />
|
36 |
-
clash of clans mod apk with unlimited war stars and trophies<br />
|
37 |
-
clash of clans hack tool apk download (no survey no password)<br />
|
38 |
-
clash of clans mod apk with unlimited season pass and skins<br />
|
39 |
-
clash of clans mod apk with all maps unlocked and unlimited gems<br />
|
40 |
-
how to update clash of clans mod apk to the latest version<br />
|
41 |
-
clash of clans mod apk with unlimited cwl medals and league shop items<br />
|
42 |
-
clash of clans cracked apk download (unlimited gems/troops) 2023<br />
|
43 |
-
clash of clans modded server apk with unlimited events and challenges<br />
|
44 |
-
download coc mod apk latest version (unlimited gems/troops) 2023<br />
|
45 |
-
how to play clash of clans mod apk online with friends<br />
|
46 |
-
clash of clans premium apk download (unlimited gems/troops) 2023<br />
|
47 |
-
clash of clans pro mod apk with unlimited training potions and books<br />
|
48 |
-
clash of clans hacked version download (unlimited gems/troops) 2023<br />
|
49 |
-
how to backup and restore clash of clans mod apk data<br />
|
50 |
-
coc hack apk download latest version 2023 (unlimited gems/troops)<br />
|
51 |
-
how to fix clash of clans mod apk not working or crashing issues<br />
|
52 |
-
coc cheat codes apk download for unlimited gems and resources<br />
|
53 |
-
coc unlimited money mod apk download 2023 (working)<br />
|
54 |
-
how to transfer clash of clans mod apk account to another device<br />
|
55 |
-
coc god mode mod apk download (unlimited gems/troops) 2023<br />
|
56 |
-
how to uninstall or remove clash of clans mod apk from your device</p>
|
57 |
-
<p>By playing Clash of Clans Mod APK, you can enjoy the game to the fullest without spending any money or wasting any time. You can build your dream village and army in no time. You can also dominate the leaderboards and impress your friends with your achievements. You can also have more fun and excitement in clan wars and clan games with your unlimited resources and troops.</p>
|
58 |
-
<h2>How to Download and Install Clash of Clans Mod APK on Android?</h2>
|
59 |
-
<h3>The steps to download and install the modded version of the game</h3>
|
60 |
-
<p>If you are interested in playing Clash of Clans Mod APK on your Android device, you need to follow these simple steps:</p>
|
61 |
-
<ol>
|
62 |
-
<li>First of all, you need to download the Clash of Clans Mod APK file from a trusted source. You can find many websites that offer the mod apk file for free. However, you need to be careful as some of them might contain viruses or malware that can harm your device. We recommend you to use this link to download the latest version of Clash of Clans Mod APK safely and securely.</li>
|
63 |
-
<li>Next, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps that are not from the Google Play Store.</li>
|
64 |
-
<li>Then, you need to locate the downloaded Clash of Clans Mod APK file on your device. You can use a file manager app or your browser's download history to find it. Once you find it, tap on it to start the installation process.</li>
|
65 |
-
<li>Finally, you need to follow the on-screen instructions and grant the necessary permissions to complete the installation process. It might take a few minutes for the app to install depending on your device's performance.</li>
|
66 |
-
</ol>
|
67 |
-
<p>Congratulations! You have successfully installed Clash of Clans Mod APK on your Android device. Now you can launch the app and enjoy the game with unlimited resources and gems.</p>
|
68 |
-
<h3>The precautions to take before installing the mod apk</h3>
|
69 |
-
<p>Before you install Clash of Clans Mod APK on your Android device, there are some precautions that you need to take:</p>
|
70 |
-
<ul>
|
71 |
-
<li>Make sure that you have enough storage space on your device for the mod apk file and its data. The mod apk file is about 200 MB in size while its data is about 2 GB in size.</li>
|
72 |
-
<li>Make sure that you have a stable internet connection for downloading and installing the mod apk file and its data.</li>
|
73 |
-
<li>Make sure that you have backed up your original Clash of Clans game data before installing the mod apk file. This way, you can restore your original game data if anything goes wrong with the mod apk file or if you want to switch back to the original game.</li>
|
74 |
-
<li>Make sure that you have uninstalled the original Clash of Clans game from your device before installing the mod apk file. This is to avoid any conflicts or errors between the two versions of the game.</li>
|
75 |
-
<li>Make sure that you do not use your original Clash of Clans account or Google Play account to log in to the mod apk file. This is to avoid any risk of getting banned or suspended by Supercell for using a modded version of the game. You can create a new account or use a guest account to play the mod apk file.</li>
|
76 |
-
</ul>
|
77 |
-
<p>By following these precautions, you can ensure a smooth and safe installation and gameplay experience with Clash of Clans Mod APK.</p>
|
78 |
-
<h2>What are the Features of Clash of Clans Mod APK?</h2>
|
79 |
-
<h3>Unlimited resources and gems</h3>
|
80 |
-
<p>One of the main features of Clash of Clans Mod APK is that it gives you unlimited resources and gems. You can use these resources and gems to upgrade your village and troops without any limitations or restrictions. You can also use them to buy anything you want from the shop, such as magic items, decorations, shields, and more.</p>
|
81 |
-
<p>You can also use these resources and gems to instantly finish any upgrade or training process. You don't have to wait for hours or days for the upgrades or training to complete. You can also use them to boost your resource production, troop training, spell brewing, and hero regeneration.</p>
|
82 |
-
<p>With unlimited resources and gems, you can enjoy the game without any worries or hassles. You can build your dream village and army in no time. You can also experiment with different combinations and strategies without losing anything.</p>
|
83 |
-
<h3>Unlimited troops and spells</h3>
|
84 |
-
<p>Another feature of Clash of Clans Mod APK is that it gives you unlimited troops and spells. You can train as many troops as you want in your barracks and army camps. You can also brew as many spells as you want in your spell factory. You don't have to worry about running out of space or elixir.</p>
|
85 |
-
<p>You can also use any type of troop or spell in your battles. You don't have to unlock them or upgrade them first. You can access all the troops and spells that are available in the game, including the super troops and the new invisibility spell.</p>
|
86 |
-
<p>With unlimited troops and spells, you can unleash your full potential in battles. You can create powerful armies and devastating spells that can crush any opponent. You can also have more fun and variety in your attacks and defenses.</p>
|
87 |
-
<h3>Access to all buildings and upgrades</h3>
|
88 |
-
<p>A third feature of Clash of Clans Mod APK is that it gives you access to all buildings and upgrades. You can build and upgrade any building that you want in your village. You don't have to meet any requirements or prerequisites. You can also skip the town hall levels and jump to the highest level possible.</p>
|
89 |
-
<p>You can also access all the buildings and upgrades that are normally exclusive to certain town hall levels or seasons. For example, you can build and upgrade the scattershot, the royal champion, the giga inferno, the giga tesla, the builder base, the otto hut, the battle machine, the super pekka, the mega tesla, and more.</p>
|
90 |
-
<p>With access to all buildings and upgrades, you can enhance your village and troops with ease. You can also explore all the features and content that the game has to offer. You can also challenge yourself with different modes and difficulties.</p>
|
91 |
-
<h3>Customization and personalization options</h3>
|
92 |
-
<p>A fourth feature of Clash of Clans Mod APK is that it gives you customization and personalization options. You can change the appearance and design of your village and troops according to your preferences. You can also modify the settings and parameters of the game according to your needs.</p>
|
93 |
-
<p>You can choose from different themes and skins for your village and troops. You can also change the colors, shapes, sizes, names, icons, sounds, animations, effects, and more. You can also create your own custom themes and skins using various tools and resources.</p>
|
94 |
-
<p>You can also adjust the difficulty level, speed, damage, health, range, capacity, cost, cooldown, duration, frequency, and more of your village and troops. You can also enable or disable certain features and functions of the game. You can also use cheats and hacks to manipulate the game in your favor.</p>
|
95 |
-
<p>With customization and personalization options, you can make the game more fun and interesting. You can also express your creativity and personality through your village and troops. You can also have more control and flexibility over the game.</p>
|
96 |
-
<h2>How to Play Clash of Clans Mod APK?</h2>
|
97 |
-
<h3>The basics of building your village and training your troops</h3>
|
98 |
-
<p>Playing Clash of Clans Mod APK is very similar to playing the original game. You still have to build your village and train your troops. However, with the mod apk, you have unlimited resources and gems, so you don't have to worry about collecting or spending them.</p>
|
99 |
-
<p>To build your village, you have to tap on the shop icon on the bottom right corner of the screen. There, you can find all the buildings that you can construct and upgrade in your village. You can also find the decorations and magic items that you can buy and use in your village.</p>
|
100 |
-
<p>To train your troops, you have to tap on the barracks icon on the bottom left corner of the screen. There, you can find all the troops that you can train in your barracks and army camps. You can also find the spells that you can brew in your spell factory.</p>
|
101 |
-
<p>To build or upgrade a building, or to train a troop or a spell, you just have to tap on it and then tap on the green button that says "Build" or "Train". The building or troop or spell will be instantly built or trained without any waiting time or cost.</p>
|
102 |
-
<p>You can also move, rotate, or remove any building or decoration in your village by tapping and holding on it. You can also edit the layout of your village by tapping on the edit mode icon on the top right corner of the screen.</p>
|
103 |
-
<h3>The strategies to attack and defend in clan wars and clan games</h3>
|
104 |
-
<p>Another aspect of playing Clash of Clans Mod APK is attacking and defending in clan wars and clan games. You still have to join or create a clan, where you can chat with other players, donate and receive troops, and participate in clan events.</p>
|
105 |
-
<p>To join or create a clan, you have to tap on the clan icon on the bottom left corner of the screen. There, you can find all the clans that are available for you to join or create. You can also find the clan chat, clan profile, clan settings, clan war, clan games, and clan perks tabs.</p>
|
106 |
-
<p>To attack in a clan war or a clan game, you have to tap on the clan war or clan game icon on the top left corner of the screen. There, you can find all the details and information about the current clan war or clan game. You can also find the map of the enemy clans' villages that you can attack.</p>
|
107 |
-
<p>To attack an enemy village, you just have to tap on it and then tap on the red button that says "Attack". You will be taken to the battle screen, where you can deploy your troops and spells on the enemy's territory. You will also see your own village's defenses on the bottom of the screen. You can also use the buttons on the bottom right corner of the screen to zoom in or out, to end the battle, or to surrender.</p>
|
108 |
-
<p>To defend your village, you have to make sure that you have a strong and well-designed layout that can withstand enemy attacks. You also have to make sure that you have enough troops in your clan castle that can help you in defending your village. You can also use the shield and guard features that can protect your village from attacks for a certain period of time.</p>
|
109 |
-
<p>To win a battle, you have to destroy more percentage of the enemy's village than they do to yours. You also have to destroy their town hall, which gives you an extra star. The more stars you get, the more loot and trophies you earn. You also help your clan in winning the clan war or clan game.</p>
|
110 |
-
<h3>The tips and tricks to enjoy the game to the fullest</h3>
|
111 |
-
<p>The last aspect of playing Clash of Clans Mod APK is enjoying the game to the fullest. You can do this by following these tips and tricks:</p>
|
112 |
-
<ul>
|
113 |
-
<li>Experiment with different troops and spells combinations and find out what works best for you. You can also watch replays of other players' attacks and learn from their strategies and mistakes.</li>
|
114 |
-
<li>Join an active and friendly clan that can help you with donations, advice, and support. You can also chat with other players and make new friends. You can also participate in clan events and earn rewards and perks for your clan.</li>
|
115 |
-
<li>Complete achievements and quests that can give you extra resources, gems, and magic items. You can also use these items to boost your progress and performance in the game.</li>
|
116 |
-
<li>Have fun and don't take the game too seriously. Remember that it is just a game and not a real war. Don't get frustrated or angry if you lose a battle or if someone attacks your village. Just learn from your experience and try again.</li>
|
117 |
-
</ul>
|
118 |
-
<p>By following these tips and tricks, you can have more fun and excitement in playing Clash of Clans Mod APK.</p>
|
119 |
-
<h2>Conclusion</h2>
|
120 |
-
<h3>A summary of the main points and a call to action</h3>
|
121 |
-
<p>In conclusion, Clash of Clans Mod APK is a modded version of the original game that gives you unlimited resources, gems, troops, and access to all features that you normally have to pay for. It also allows you to customize and personalize your village and troops according to your preferences.</p>
|
122 |
-
<p>By playing Clash of Clans Mod APK, you can enjoy the game without any limitations or restrictions. You can build your dream village and army in no time. You can also dominate the leaderboards and impress your friends with your achievements. You can also have more fun and excitement in clan wars and clan games with your unlimited resources and troops.</p>
|
123 |
-
<p>If you are interested in playing Clash of Clans Mod APK, you can download it from this link safely and securely. You just have to follow the steps and precautions that we have mentioned in this article. Then, you can launch the app and enjoy the game.</p>
|
124 |
-
<p>So, what are you waiting for? Download Clash of Clans Mod APK now and experience the ultimate strategy game like never before.</p>
|
125 |
-
<h2>FAQs</h2>
|
126 |
-
<h3>Q1. Is Clash of Clans Mod APK safe to use?</h3>
|
127 |
-
<p>A1. Yes, Clash of Clans Mod APK is safe to use as long as you download it from a trusted source like this link. However, you still need to be careful as some websites might offer fake or malicious mod apk files that can harm your device or steal your data. You also need to follow the precautions that we have mentioned in this article before installing the mod apk file.</p>
|
128 |
-
<h3>Q2. Do I need to root my device to use Clash of Clans Mod APK?</h3>
|
129 |
-
<p>A2. No, you don't need to root your device to use Clash of Clans Mod APK. The mod apk file works on both rooted and non-rooted devices without any problems.</p>
|
130 |
-
<h3>Q3. Can I play Clash of Clans Mod APK with my friends?</h3>
|
131 |
-
<p>A3. Yes, you can play Clash of Clans Mod APK with your friends as long as they also have the same mod apk file installed on their devices. You can join or create a clan with them and chat with them in the game. You can also attack or defend each other's villages in clan wars and clan games.</p>
|
132 |
-
<h3>Q4. Will I get banned for using Clash of Clans Mod APK?</h3>
|
133 |
-
<p>A4. There is a possibility that you might get banned for using Clash of Clans Mod APK as it violates the terms of service of Supercell, the developer of the original game. Supercell has a system that can detect and ban players who use modded or hacked versions of the game. However, you can reduce the risk of getting banned by following these tips:</p>
|
134 |
-
<ul>
|
135 |
-
<li>Do not use your original Clash of Clans account or Google Play account to log in to the mod apk file. Use a new account or a guest account instead.</li>
|
136 |
-
<li>Do not play the mod apk file on public servers or networks. Use a private server or a VPN service instead.</li>
|
137 |
-
<li>Do not brag or boast about using the mod apk file in the game chat or social media. Keep it a secret and avoid drawing attention to yourself.</li>
|
138 |
-
<li>Do not use the mod apk file excessively or abusively. Use it moderately and responsibly.</li>
|
139 |
-
</ul>
|
140 |
-
<p>By following these tips, you can enjoy the mod apk file without worrying too much about getting banned.</p>
|
141 |
-
<h3>Q5. How can I update Clash of Clans Mod APK?</h3>
|
142 |
-
<p>A5. To update Clash of Clans Mod APK, you have to download the latest version of the mod apk file from the same source that you downloaded it from before. You can check this link for the latest updates and news about the mod apk file. You also have to uninstall the previous version of the mod apk file from your device before installing the new version. You don't have to worry about losing your game data as it will be saved on your device's memory.</p>
|
143 |
-
<br />
|
144 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Endless Run Jungle Escape Mod APK Discover the Secrets of the Jungle.md
DELETED
@@ -1,127 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Endless Run Jungle Escape Mod APK: A Thrilling Adventure Game</h1>
|
3 |
-
<p>If you are looking for a fun and exciting game that will keep you on the edge of your seat, then you should try Endless Run Jungle Escape Mod APK. This is a modified version of the original game that offers more features and benefits for the players. In this article, we will tell you everything you need to know about this game, including what it is, how to download and install it, how to play it, tips and tricks, review, and alternatives.</p>
|
4 |
-
<h2>What is Endless Run Jungle Escape Mod APK?</h2>
|
5 |
-
<p>Endless Run Jungle Escape is an addictive endless runner game that puts you in the shoes of a charismatic archaeologist trapped in an immensely endless jungle. You have to run, jump, slide, and dodge obstacles while collecting coins, gems, and power-ups. The game has stunning graphics, smooth controls, and immersive sound effects that will make you feel like you are in a real adventure.</p>
|
6 |
-
<h2>endless run jungle escape mod apk</h2><br /><p><b><b>Download Zip</b> ↔ <a href="https://jinyurl.com/2uNUac">https://jinyurl.com/2uNUac</a></b></p><br /><br />
|
7 |
-
<h3>The original game</h3>
|
8 |
-
<p>The original game was developed by Huskaimm Com and released in 2019. It has over 10 million downloads on Google Play Store and a rating of 4.1 out of 5 stars. The game is free to play but contains ads and in-app purchases. You can download the original game from Google Play Store or from other sources.</p>
|
9 |
-
<h3>The modded version</h3>
|
10 |
-
<p>The modded version is a modified version of the original game that offers more features and benefits for the players. The main difference between the modded version and the original version is that the modded version has unlocked all the characters and props in the game. This means that you can choose any character you want and use any prop you like without spending any money or coins. You can also enjoy unlimited coins, gems, and power-ups in the modded version. The modded version is not available on Google Play Store but you can download it from HappyMod or from other sources.</p>
|
11 |
-
<p>endless run jungle escape mod apk download<br />
|
12 |
-
endless run jungle escape mod apk unlimited money<br />
|
13 |
-
endless run jungle escape mod apk latest version<br />
|
14 |
-
endless run jungle escape mod apk android 1<br />
|
15 |
-
endless run jungle escape mod apk revdl<br />
|
16 |
-
endless run jungle escape mod apk happymod<br />
|
17 |
-
endless run jungle escape mod apk rexdl<br />
|
18 |
-
endless run jungle escape mod apk free shopping<br />
|
19 |
-
endless run jungle escape mod apk offline<br />
|
20 |
-
endless run jungle escape mod apk no ads<br />
|
21 |
-
endless run jungle escape hack mod apk<br />
|
22 |
-
endless run jungle escape cheat mod apk<br />
|
23 |
-
endless run jungle escape premium mod apk<br />
|
24 |
-
endless run jungle escape pro mod apk<br />
|
25 |
-
endless run jungle escape vip mod apk<br />
|
26 |
-
temple spirit endless run mod apk<br />
|
27 |
-
temple spirit endless run hack apk<br />
|
28 |
-
temple spirit endless run cheat apk<br />
|
29 |
-
temple spirit endless run unlimited coins apk<br />
|
30 |
-
temple spirit endless run unlocked apk<br />
|
31 |
-
temple spirit endless run latest apk<br />
|
32 |
-
temple spirit endless run free download apk<br />
|
33 |
-
temple spirit endless run android game apk<br />
|
34 |
-
temple spirit endless run 3d adventure apk<br />
|
35 |
-
temple spirit endless run offline game apk<br />
|
36 |
-
jungle runner adventure fun game mod apk<br />
|
37 |
-
jungle runner adventure fun game hack apk<br />
|
38 |
-
jungle runner adventure fun game cheat apk<br />
|
39 |
-
jungle runner adventure fun game unlimited gems apk<br />
|
40 |
-
jungle runner adventure fun game unlocked all levels apk<br />
|
41 |
-
jungle runner adventure fun game latest version apk<br />
|
42 |
-
jungle runner adventure fun game free download apk<br />
|
43 |
-
jungle runner adventure fun game android app apk<br />
|
44 |
-
jungle runner adventure fun game 3d graphics apk<br />
|
45 |
-
jungle runner adventure fun game offline mode apk<br />
|
46 |
-
lost temple endless run 2 mod apk<br />
|
47 |
-
lost temple endless run 2 hack apk<br />
|
48 |
-
lost temple endless run 2 cheat apk<br />
|
49 |
-
lost temple endless run 2 unlimited diamonds apk<br />
|
50 |
-
lost temple endless run 2 unlocked characters apk<br />
|
51 |
-
lost temple endless run 2 new version apk<br />
|
52 |
-
lost temple endless run 2 free download apk<br />
|
53 |
-
lost temple endless run 2 android game apk<br />
|
54 |
-
lost temple endless run 2 hd quality apk<br />
|
55 |
-
lost temple endless run 2 online play apk</p>
|
56 |
-
<h2>Features of Endless Run Jungle Escape Mod APK</h2>
|
57 |
-
<p>Endless Run Jungle Escape Mod APK has many features that make it more enjoyable and challenging than the original game. Here are some of the features that you can expect from this game:</p>
|
58 |
-
<h3>Unlocked characters and props</h3>
|
59 |
-
<p>The modded version has unlocked all the characters and props in the game. You can choose from 22 main roles, each with their own skills and abilities. You can also use different props, such as shields, magnets, wings, rockets, etc., to help you overcome obstacles and enemies. You can customize your character and prop according to your preference.</p>
|
60 |
-
<h3>Dual handle operation</h3>
|
61 |
-
<h3>Tasks and scores</h3>
|
62 |
-
<p>The modded version has various tasks and scores that you can complete and achieve. You can collect coins, gems, and power-ups to increase your score and unlock more rewards. You can also complete daily tasks, weekly tasks, and achievements to earn more coins, gems, and items. You can compare your score with other players on the leaderboard and challenge yourself to improve your rank.</p>
|
63 |
-
<h2>How to download and install Endless Run Jungle Escape Mod APK?</h2>
|
64 |
-
<p>If you want to download and install Endless Run Jungle Escape Mod APK, you need to follow these simple steps:</p>
|
65 |
-
<h3>Download from a reliable source</h3>
|
66 |
-
<p>The first step is to download the modded version from a reliable source. You can use HappyMod or other sources that offer safe and verified APK files. You need to make sure that the file you download is compatible with your device and has the latest version of the game.</p>
|
67 |
-
<h3>Enable unknown sources</h3>
|
68 |
-
<p>The second step is to enable unknown sources on your device. This is necessary because the modded version is not from Google Play Store and you need to allow your device to install apps from other sources. To do this, you need to go to your device settings, security, and enable unknown sources.</p>
|
69 |
-
<h3>Install the APK file</h3>
|
70 |
-
<p>The third step is to install the APK file on your device. You need to locate the file you downloaded and tap on it to start the installation process. You need to follow the instructions on the screen and wait for the installation to finish. Once it is done, you can launch the game and enjoy it.</p>
|
71 |
-
<h2>How to play Endless Run Jungle Escape Mod APK?</h2>
|
72 |
-
<p>If you want to play Endless Run Jungle Escape Mod APK, you need to follow these simple steps:</p>
|
73 |
-
<h3>Choose your character and prop</h3>
|
74 |
-
<p>The first step is to choose your character and prop from the unlocked ones. You can select any character you want and use any prop you like. You can also customize your character and prop according to your preference.</p>
|
75 |
-
<h3>Swipe to move and jump</h3>
|
76 |
-
<p>The second step is to swipe to move and jump in the game. You need to swipe left or right to switch roads or swipe up or down to turn gravity. You need to avoid obstacles and enemies while running in the jungle.</p>
|
77 |
-
<h3>Collect coins and gems</h3>
|
78 |
-
<p>The third step is to collect coins and gems in the game. You need to collect as many coins and gems as possible while running in the jungle. You can use them to upgrade your skills and items or buy new characters and props.</p>
|
79 |
-
<h2>Tips and tricks for Endless Run Jungle Escape Mod APK</h2>
|
80 |
-
<p>If you want to master Endless Run Jungle Escape Mod APK, you need to follow these tips and tricks:</p>
|
81 |
-
<h3>Use the tunnel level</h3>
|
82 |
-
<p>One of the tips is to use the tunnel level in the game. The tunnel level is a special level that appears randomly in the game. It allows you to run in a tunnel without any obstacles or enemies. You can collect a lot of coins and gems in this level without any risk.</p>
|
83 |
-
<h3>Switch roads and turn gravity</h3>
|
84 |
-
<p>Another tip is to switch roads and turn gravity in the game. This will help you avoid obstacles and enemies that are blocking your way. You can also find hidden paths and shortcuts by switching roads and turning gravity.</p>
|
85 |
-
<h3>Upgrade your skills and items</h3>
|
86 |
-
<p>A final tip is to upgrade your skills and items in the game. This will help you improve your performance and survival in the game. You can upgrade your skills such as speed, magnet, shield, etc., or your items such as wings, rockets, etc., using coins and gems.</p>
|
87 |
-
<h2>Review of Endless Run Jungle Escape Mod APK</h2>
|
88 |
-
<p>Endless Run Jungle Escape Mod APK is a thrilling adventure game that will keep you entertained for hours. Here is a review of the game based on its pros and cons, user ratings, and feedback.</p>
<h3>Pros and cons</h3>
<table>
<tr><th>Pros</th><th>Cons</th></tr>
<tr><td>- Unlocked characters and props</td><td>- Ads may still appear</td></tr>
<tr><td>- Unlimited coins, gems, and power-ups</td><td>- May not work on some devices</td></tr>
<tr><td>- Dual handle operation</td><td>- May cause battery drain</td></tr>
<tr><td></td><td>players</td></tr>
</table>
<h3>User ratings and feedback</h3>
<p>Endless Run Jungle Escape Mod APK has received positive ratings and feedback from most of the users who have tried it. The game has a rating of 4.6 out of 5 stars on HappyMod and 4.1 out of 5 stars on the Google Play Store. Here are some of the user reviews from HappyMod:</p>
<ul>
<li>"This game is awesome. I love the graphics and the gameplay. It is very addictive and fun. I recommend it to everyone who likes endless runner games."</li>
<li>"This is the best mod ever. It has everything unlocked and unlimited. I can play with any character and prop I want. It is very easy to install and use."</li>
<li>"This game is amazing. It has a lot of features and challenges. It is very smooth and fast. It is better than the original game."</li>
</ul>
<h2>Alternatives to Endless Run Jungle Escape Mod APK</h2>
<p>If you are looking for alternatives to Endless Run Jungle Escape Mod APK, you can try these other games that are similar in genre and style:</p>
<h3>Temple Run and Temple Run 2</h3>
<p>Temple Run and Temple Run 2 are classic endless runner games that have inspired many other games in the genre. You run away from a group of monkeys chasing you after you steal a cursed idol from a temple, swiping to turn, jump, and slide, and tilting your device to avoid obstacles and collect coins and power-ups. You can also unlock different characters and abilities as you progress.</p>
<h3>Subway Surfers and Minion Rush</h3>
<p>Subway Surfers and Minion Rush are popular endless runner games with colorful graphics and characters. You run on subway tracks or city streets while dodging trains, buses, cars, and other obstacles, collecting coins, power-ups, and items that help you along the way. You can also customize your character and use different gadgets and vehicles.</p>
<h2>Conclusion</h2>
<p>Endless Run Jungle Escape Mod APK is a modified version of the original game that offers more features and benefits for players: unlocked characters and props, unlimited coins, gems, and power-ups, dual handle operation, tasks and scores, and more. You can download and install it easily from a reliable source, play it on your device, and follow the tips and tricks above to master the game and compare your score with other players. If you are looking for alternatives, try Temple Run, Temple Run 2, Subway Surfers, or Minion Rush.</p>
<h2>FAQs</h2>
<ul>
<li><b>Q: Is Endless Run Jungle Escape Mod APK safe to download and install?</b></li>
<li>A: Yes, it is safe if you use a reliable source that offers verified APK files. You should also scan the file with an antivirus app before installing it on your device.</li>
<li><b>Q: What are the requirements to play Endless Run Jungle Escape Mod APK?</b></li>
<li>A: The game requires Android 4.1 or higher to run smoothly, and at least 100 MB of free storage space on your device.</li>
<li><b>Q: How can I remove ads from Endless Run Jungle Escape Mod APK?</b></li>
<li>A: The game may still show some ads even though it is a modded version. You can remove them by turning off your internet connection or by using an ad blocker app.</li>
<li><b>Q: How can I get more coins and gems in Endless Run Jungle Escape Mod APK?</b></li>
<li>A: The mod gives you unlimited coins, gems, and power-ups, so you don't need to worry about running out of them. If you still want more, collect them while running in the jungle or complete tasks and achievements.</li>
<li><b>Q: How can I update Endless Run Jungle Escape Mod APK?</b></li>
<li>A: The game may not update automatically because it is not installed from the Google Play Store. Check for updates manually from the source where you downloaded it or from other sources that offer the latest version.</li>
</ul>

spaces/232labs/VToonify/vtoonify/model/stylegan/op/conv2d_gradfix.py
DELETED
@@ -1,227 +0,0 @@
import contextlib
import warnings

import torch
from torch import autograd
from torch.nn import functional as F

enabled = True
weight_gradients_disabled = False


@contextlib.contextmanager
def no_weight_gradients():
    global weight_gradients_disabled

    old = weight_gradients_disabled
    weight_gradients_disabled = True
    yield
    weight_gradients_disabled = old


def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    if could_use_op(input):
        return conv2d_gradfix(
            transpose=False,
            weight_shape=weight.shape,
            stride=stride,
            padding=padding,
            output_padding=0,
            dilation=dilation,
            groups=groups,
        ).apply(input, weight, bias)

    return F.conv2d(
        input=input,
        weight=weight,
        bias=bias,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
    )


def conv_transpose2d(
    input,
    weight,
    bias=None,
    stride=1,
    padding=0,
    output_padding=0,
    groups=1,
    dilation=1,
):
    if could_use_op(input):
        return conv2d_gradfix(
            transpose=True,
            weight_shape=weight.shape,
            stride=stride,
            padding=padding,
            output_padding=output_padding,
            groups=groups,
            dilation=dilation,
        ).apply(input, weight, bias)

    return F.conv_transpose2d(
        input=input,
        weight=weight,
        bias=bias,
        stride=stride,
        padding=padding,
        output_padding=output_padding,
        dilation=dilation,
        groups=groups,
    )


def could_use_op(input):
    if (not enabled) or (not torch.backends.cudnn.enabled):
        return False

    if input.device.type != "cuda":
        return False

    if any(torch.__version__.startswith(x) for x in ["1.7.", "1.8."]):
        return True

    #warnings.warn(
    #    f"conv2d_gradfix not supported on PyTorch {torch.__version__}. Falling back to torch.nn.functional.conv2d()."
    #)

    return False


def ensure_tuple(xs, ndim):
    xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim

    return xs


conv2d_gradfix_cache = dict()


def conv2d_gradfix(
    transpose, weight_shape, stride, padding, output_padding, dilation, groups
):
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = ensure_tuple(stride, ndim)
    padding = ensure_tuple(padding, ndim)
    output_padding = ensure_tuple(output_padding, ndim)
    dilation = ensure_tuple(dilation, ndim)

    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in conv2d_gradfix_cache:
        return conv2d_gradfix_cache[key]

    common_kwargs = dict(
        stride=stride, padding=padding, dilation=dilation, groups=groups
    )

    def calc_output_padding(input_shape, output_shape):
        if transpose:
            return [0, 0]

        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]

    class Conv2d(autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            if not transpose:
                out = F.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)

            else:
                out = F.conv_transpose2d(
                    input=input,
                    weight=weight,
                    bias=bias,
                    output_padding=output_padding,
                    **common_kwargs,
                )

            ctx.save_for_backward(input, weight)

            return out

        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            grad_input, grad_weight, grad_bias = None, None, None

            if ctx.needs_input_grad[0]:
                p = calc_output_padding(
                    input_shape=input.shape, output_shape=grad_output.shape
                )
                grad_input = conv2d_gradfix(
                    transpose=(not transpose),
                    weight_shape=weight_shape,
                    output_padding=p,
                    **common_kwargs,
                ).apply(grad_output, weight, None)

            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input)

            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum((0, 2, 3))

            return grad_input, grad_weight, grad_bias

    class Conv2dGradWeight(autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input):
            op = torch._C._jit_get_operation(
                "aten::cudnn_convolution_backward_weight"
                if not transpose
                else "aten::cudnn_convolution_transpose_backward_weight"
            )
            flags = [
                torch.backends.cudnn.benchmark,
                torch.backends.cudnn.deterministic,
                torch.backends.cudnn.allow_tf32,
            ]
            grad_weight = op(
                weight_shape,
                grad_output,
                input,
                padding,
                stride,
                dilation,
                groups,
                *flags,
            )
            ctx.save_for_backward(grad_output, input)

            return grad_weight

        @staticmethod
        def backward(ctx, grad_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad_grad_output, grad_grad_input = None, None

            if ctx.needs_input_grad[0]:
                grad_grad_output = Conv2d.apply(input, grad_grad_weight, None)

            if ctx.needs_input_grad[1]:
                p = calc_output_padding(
                    input_shape=input.shape, output_shape=grad_output.shape
                )
                grad_grad_input = conv2d_gradfix(
                    transpose=(not transpose),
                    weight_shape=weight_shape,
                    output_padding=p,
                    **common_kwargs,
                ).apply(grad_output, grad_grad_weight, None)

            return grad_grad_output, grad_grad_input

    conv2d_gradfix_cache[key] = Conv2d

    return Conv2d

spaces/801artistry/RVC801/lib/uvr5_pack/lib_v5/spec_utils.py
DELETED
@@ -1,667 +0,0 @@
import os, librosa
import numpy as np
import soundfile as sf
from tqdm import tqdm
import json, math, hashlib


def crop_center(h1, h2):
    h1_shape = h1.size()
    h2_shape = h2.size()

    if h1_shape[3] == h2_shape[3]:
        return h1
    elif h1_shape[3] < h2_shape[3]:
        raise ValueError("h1_shape[3] must be greater than h2_shape[3]")

    # s_freq = (h2_shape[2] - h1_shape[2]) // 2
    # e_freq = s_freq + h1_shape[2]
    s_time = (h1_shape[3] - h2_shape[3]) // 2
    e_time = s_time + h2_shape[3]
    h1 = h1[:, :, :, s_time:e_time]

    return h1


def wave_to_spectrogram(
    wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
):
    if reverse:
        wave_left = np.flip(np.asfortranarray(wave[0]))
        wave_right = np.flip(np.asfortranarray(wave[1]))
    elif mid_side:
        wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
        wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
    elif mid_side_b2:
        wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
        wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
    else:
        wave_left = np.asfortranarray(wave[0])
        wave_right = np.asfortranarray(wave[1])

    spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length)
    spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)

    spec = np.asfortranarray([spec_left, spec_right])

    return spec


def wave_to_spectrogram_mt(
    wave, hop_length, n_fft, mid_side=False, mid_side_b2=False, reverse=False
):
    import threading

    if reverse:
        wave_left = np.flip(np.asfortranarray(wave[0]))
        wave_right = np.flip(np.asfortranarray(wave[1]))
    elif mid_side:
        wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2)
        wave_right = np.asfortranarray(np.subtract(wave[0], wave[1]))
    elif mid_side_b2:
        wave_left = np.asfortranarray(np.add(wave[1], wave[0] * 0.5))
        wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * 0.5))
    else:
        wave_left = np.asfortranarray(wave[0])
        wave_right = np.asfortranarray(wave[1])

    def run_thread(**kwargs):
        global spec_left
        spec_left = librosa.stft(**kwargs)

    thread = threading.Thread(
        target=run_thread,
        kwargs={"y": wave_left, "n_fft": n_fft, "hop_length": hop_length},
    )
    thread.start()
    spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length)
    thread.join()

    spec = np.asfortranarray([spec_left, spec_right])

    return spec


def combine_spectrograms(specs, mp):
    l = min([specs[i].shape[2] for i in specs])
    spec_c = np.zeros(shape=(2, mp.param["bins"] + 1, l), dtype=np.complex64)
    offset = 0
    bands_n = len(mp.param["band"])

    for d in range(1, bands_n + 1):
        h = mp.param["band"][d]["crop_stop"] - mp.param["band"][d]["crop_start"]
        spec_c[:, offset : offset + h, :l] = specs[d][
            :, mp.param["band"][d]["crop_start"] : mp.param["band"][d]["crop_stop"], :l
        ]
        offset += h

    if offset > mp.param["bins"]:
        raise ValueError("Too much bins")

    # lowpass fiter
    if (
        mp.param["pre_filter_start"] > 0
    ):  # and mp.param['band'][bands_n]['res_type'] in ['scipy', 'polyphase']:
        if bands_n == 1:
            spec_c = fft_lp_filter(
                spec_c, mp.param["pre_filter_start"], mp.param["pre_filter_stop"]
            )
        else:
            gp = 1
            for b in range(
                mp.param["pre_filter_start"] + 1, mp.param["pre_filter_stop"]
            ):
                g = math.pow(
                    10, -(b - mp.param["pre_filter_start"]) * (3.5 - gp) / 20.0
                )
                gp = g
                spec_c[:, b, :] *= g

    return np.asfortranarray(spec_c)


def spectrogram_to_image(spec, mode="magnitude"):
    if mode == "magnitude":
        if np.iscomplexobj(spec):
            y = np.abs(spec)
        else:
            y = spec
        y = np.log10(y**2 + 1e-8)
    elif mode == "phase":
        if np.iscomplexobj(spec):
            y = np.angle(spec)
        else:
            y = spec

    y -= y.min()
    y *= 255 / y.max()
    img = np.uint8(y)

    if y.ndim == 3:
        img = img.transpose(1, 2, 0)
        img = np.concatenate([np.max(img, axis=2, keepdims=True), img], axis=2)

    return img


def reduce_vocal_aggressively(X, y, softmask):
    v = X - y
    y_mag_tmp = np.abs(y)
    v_mag_tmp = np.abs(v)

    v_mask = v_mag_tmp > y_mag_tmp
    y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf)

    return y_mag * np.exp(1.0j * np.angle(y))


def mask_silence(mag, ref, thres=0.2, min_range=64, fade_size=32):
    if min_range < fade_size * 2:
        raise ValueError("min_range must be >= fade_area * 2")

    mag = mag.copy()

    idx = np.where(ref.mean(axis=(0, 1)) < thres)[0]
    starts = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0])
    ends = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1])
    uninformative = np.where(ends - starts > min_range)[0]
    if len(uninformative) > 0:
        starts = starts[uninformative]
        ends = ends[uninformative]
        old_e = None
        for s, e in zip(starts, ends):
            if old_e is not None and s - old_e < fade_size:
                s = old_e - fade_size * 2

            if s != 0:
                weight = np.linspace(0, 1, fade_size)
                mag[:, :, s : s + fade_size] += weight * ref[:, :, s : s + fade_size]
            else:
                s -= fade_size

            if e != mag.shape[2]:
                weight = np.linspace(1, 0, fade_size)
                mag[:, :, e - fade_size : e] += weight * ref[:, :, e - fade_size : e]
            else:
                e += fade_size

            mag[:, :, s + fade_size : e - fade_size] += ref[
                :, :, s + fade_size : e - fade_size
            ]
            old_e = e

    return mag


def align_wave_head_and_tail(a, b):
    l = min([a[0].size, b[0].size])

    return a[:l, :l], b[:l, :l]


def cache_or_load(mix_path, inst_path, mp):
    mix_basename = os.path.splitext(os.path.basename(mix_path))[0]
    inst_basename = os.path.splitext(os.path.basename(inst_path))[0]

    cache_dir = "mph{}".format(
        hashlib.sha1(json.dumps(mp.param, sort_keys=True).encode("utf-8")).hexdigest()
    )
    mix_cache_dir = os.path.join("cache", cache_dir)
    inst_cache_dir = os.path.join("cache", cache_dir)

    os.makedirs(mix_cache_dir, exist_ok=True)
    os.makedirs(inst_cache_dir, exist_ok=True)

    mix_cache_path = os.path.join(mix_cache_dir, mix_basename + ".npy")
    inst_cache_path = os.path.join(inst_cache_dir, inst_basename + ".npy")

    if os.path.exists(mix_cache_path) and os.path.exists(inst_cache_path):
        X_spec_m = np.load(mix_cache_path)
        y_spec_m = np.load(inst_cache_path)
    else:
        X_wave, y_wave, X_spec_s, y_spec_s = {}, {}, {}, {}

        for d in range(len(mp.param["band"]), 0, -1):
            bp = mp.param["band"][d]

            if d == len(mp.param["band"]):  # high-end band
                X_wave[d], _ = librosa.load(
                    mix_path, bp["sr"], False, dtype=np.float32, res_type=bp["res_type"]
                )
                y_wave[d], _ = librosa.load(
                    inst_path,
                    bp["sr"],
                    False,
                    dtype=np.float32,
                    res_type=bp["res_type"],
                )
            else:  # lower bands
                X_wave[d] = librosa.resample(
                    X_wave[d + 1],
                    mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )
                y_wave[d] = librosa.resample(
                    y_wave[d + 1],
                    mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )

            X_wave[d], y_wave[d] = align_wave_head_and_tail(X_wave[d], y_wave[d])

            X_spec_s[d] = wave_to_spectrogram(
                X_wave[d],
                bp["hl"],
                bp["n_fft"],
                mp.param["mid_side"],
                mp.param["mid_side_b2"],
                mp.param["reverse"],
            )
            y_spec_s[d] = wave_to_spectrogram(
                y_wave[d],
                bp["hl"],
                bp["n_fft"],
                mp.param["mid_side"],
                mp.param["mid_side_b2"],
                mp.param["reverse"],
            )

        del X_wave, y_wave

        X_spec_m = combine_spectrograms(X_spec_s, mp)
        y_spec_m = combine_spectrograms(y_spec_s, mp)

        if X_spec_m.shape != y_spec_m.shape:
            raise ValueError("The combined spectrograms are different: " + mix_path)

        _, ext = os.path.splitext(mix_path)

        np.save(mix_cache_path, X_spec_m)
        np.save(inst_cache_path, y_spec_m)

    return X_spec_m, y_spec_m


def spectrogram_to_wave(spec, hop_length, mid_side, mid_side_b2, reverse):
    spec_left = np.asfortranarray(spec[0])
    spec_right = np.asfortranarray(spec[1])

    wave_left = librosa.istft(spec_left, hop_length=hop_length)
    wave_right = librosa.istft(spec_right, hop_length=hop_length)

    if reverse:
        return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
    elif mid_side:
        return np.asfortranarray(
            [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
        )
    elif mid_side_b2:
        return np.asfortranarray(
            [
                np.add(wave_right / 1.25, 0.4 * wave_left),
                np.subtract(wave_left / 1.25, 0.4 * wave_right),
            ]
        )
    else:
        return np.asfortranarray([wave_left, wave_right])


def spectrogram_to_wave_mt(spec, hop_length, mid_side, reverse, mid_side_b2):
    import threading

    spec_left = np.asfortranarray(spec[0])
    spec_right = np.asfortranarray(spec[1])

    def run_thread(**kwargs):
        global wave_left
        wave_left = librosa.istft(**kwargs)

    thread = threading.Thread(
        target=run_thread, kwargs={"stft_matrix": spec_left, "hop_length": hop_length}
    )
    thread.start()
    wave_right = librosa.istft(spec_right, hop_length=hop_length)
    thread.join()

    if reverse:
        return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)])
    elif mid_side:
        return np.asfortranarray(
            [np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]
        )
    elif mid_side_b2:
        return np.asfortranarray(
            [
                np.add(wave_right / 1.25, 0.4 * wave_left),
                np.subtract(wave_left / 1.25, 0.4 * wave_right),
            ]
        )
    else:
        return np.asfortranarray([wave_left, wave_right])


def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None):
    wave_band = {}
    bands_n = len(mp.param["band"])
    offset = 0

    for d in range(1, bands_n + 1):
        bp = mp.param["band"][d]
        spec_s = np.ndarray(
            shape=(2, bp["n_fft"] // 2 + 1, spec_m.shape[2]), dtype=complex
        )
        h = bp["crop_stop"] - bp["crop_start"]
        spec_s[:, bp["crop_start"] : bp["crop_stop"], :] = spec_m[
            :, offset : offset + h, :
        ]

        offset += h
        if d == bands_n:  # higher
            if extra_bins_h:  # if --high_end_process bypass
                max_bin = bp["n_fft"] // 2
                spec_s[:, max_bin - extra_bins_h : max_bin, :] = extra_bins[
                    :, :extra_bins_h, :
                ]
            if bp["hpf_start"] > 0:
                spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
            if bands_n == 1:
                wave = spectrogram_to_wave(
                    spec_s,
                    bp["hl"],
                    mp.param["mid_side"],
                    mp.param["mid_side_b2"],
                    mp.param["reverse"],
                )
            else:
                wave = np.add(
                    wave,
                    spectrogram_to_wave(
                        spec_s,
                        bp["hl"],
                        mp.param["mid_side"],
                        mp.param["mid_side_b2"],
                        mp.param["reverse"],
                    ),
                )
        else:
            sr = mp.param["band"][d + 1]["sr"]
            if d == 1:  # lower
                spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
                wave = librosa.resample(
                    spectrogram_to_wave(
                        spec_s,
                        bp["hl"],
                        mp.param["mid_side"],
                        mp.param["mid_side_b2"],
                        mp.param["reverse"],
                    ),
                    bp["sr"],
                    sr,
                    res_type="sinc_fastest",
                )
            else:  # mid
                spec_s = fft_hp_filter(spec_s, bp["hpf_start"], bp["hpf_stop"] - 1)
                spec_s = fft_lp_filter(spec_s, bp["lpf_start"], bp["lpf_stop"])
                wave2 = np.add(
                    wave,
                    spectrogram_to_wave(
                        spec_s,
                        bp["hl"],
                        mp.param["mid_side"],
                        mp.param["mid_side_b2"],
                        mp.param["reverse"],
                    ),
                )
                # wave = librosa.core.resample(wave2, bp['sr'], sr, res_type="sinc_fastest")
                wave = librosa.core.resample(wave2, bp["sr"], sr, res_type="scipy")

    return wave.T


def fft_lp_filter(spec, bin_start, bin_stop):
    g = 1.0
    for b in range(bin_start, bin_stop):
        g -= 1 / (bin_stop - bin_start)
        spec[:, b, :] = g * spec[:, b, :]

    spec[:, bin_stop:, :] *= 0

    return spec


def fft_hp_filter(spec, bin_start, bin_stop):
    g = 1.0
    for b in range(bin_start, bin_stop, -1):
        g -= 1 / (bin_start - bin_stop)
        spec[:, b, :] = g * spec[:, b, :]

    spec[:, 0 : bin_stop + 1, :] *= 0

    return spec


def mirroring(a, spec_m, input_high_end, mp):
    if "mirroring" == a:
        mirror = np.flip(
            np.abs(
                spec_m[
                    :,
                    mp.param["pre_filter_start"]
                    - 10
                    - input_high_end.shape[1] : mp.param["pre_filter_start"]
                    - 10,
                    :,
                ]
            ),
            1,
        )
        mirror = mirror * np.exp(1.0j * np.angle(input_high_end))

        return np.where(
            np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror
        )

    if "mirroring2" == a:
        mirror = np.flip(
            np.abs(
                spec_m[
                    :,
                    mp.param["pre_filter_start"]
                    - 10
                    - input_high_end.shape[1] : mp.param["pre_filter_start"]
                    - 10,
                    :,
                ]
            ),
            1,
        )
        mi = np.multiply(mirror, input_high_end * 1.7)

        return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi)


def ensembling(a, specs):
    for i in range(1, len(specs)):
        if i == 1:
            spec = specs[0]

        ln = min([spec.shape[2], specs[i].shape[2]])
        spec = spec[:, :, :ln]
        specs[i] = specs[i][:, :, :ln]

        if "min_mag" == a:
            spec = np.where(np.abs(specs[i]) <= np.abs(spec), specs[i], spec)
        if "max_mag" == a:
            spec = np.where(np.abs(specs[i]) >= np.abs(spec), specs[i], spec)

    return spec


def stft(wave, nfft, hl):
    wave_left = np.asfortranarray(wave[0])
    wave_right = np.asfortranarray(wave[1])
    spec_left = librosa.stft(wave_left, nfft, hop_length=hl)
    spec_right = librosa.stft(wave_right, nfft, hop_length=hl)
    spec = np.asfortranarray([spec_left, spec_right])

    return spec


def istft(spec, hl):
    spec_left = np.asfortranarray(spec[0])
    spec_right = np.asfortranarray(spec[1])

    wave_left = librosa.istft(spec_left, hop_length=hl)
    wave_right = librosa.istft(spec_right, hop_length=hl)
    wave = np.asfortranarray([wave_left, wave_right])


if __name__ == "__main__":
    import cv2
    import sys
    import time
    import argparse
    from model_param_init import ModelParameters

    p = argparse.ArgumentParser()
    p.add_argument(
        "--algorithm",
        "-a",
        type=str,
        choices=["invert", "invert_p", "min_mag", "max_mag", "deep", "align"],
        default="min_mag",
    )
    p.add_argument(
        "--model_params",
        "-m",
        type=str,
        default=os.path.join("modelparams", "1band_sr44100_hl512.json"),
    )
    p.add_argument("--output_name", "-o", type=str, default="output")
    p.add_argument("--vocals_only", "-v", action="store_true")
    p.add_argument("input", nargs="+")
    args = p.parse_args()

    start_time = time.time()

    if args.algorithm.startswith("invert") and len(args.input) != 2:
        raise ValueError("There should be two input files.")

    if not args.algorithm.startswith("invert") and len(args.input) < 2:
        raise ValueError("There must be at least two input files.")

    wave, specs = {}, {}
    mp = ModelParameters(args.model_params)

    for i in range(len(args.input)):
        spec = {}

        for d in range(len(mp.param["band"]), 0, -1):
            bp = mp.param["band"][d]

            if d == len(mp.param["band"]):  # high-end band
                wave[d], _ = librosa.load(
                    args.input[i],
                    bp["sr"],
                    False,
                    dtype=np.float32,
                    res_type=bp["res_type"],
                )

                if len(wave[d].shape) == 1:  # mono to stereo
                    wave[d] = np.array([wave[d], wave[d]])
            else:  # lower bands
                wave[d] = librosa.resample(
                    wave[d + 1],
                    mp.param["band"][d + 1]["sr"],
                    bp["sr"],
                    res_type=bp["res_type"],
                )

            spec[d] = wave_to_spectrogram(
                wave[d],
                bp["hl"],
                bp["n_fft"],
                mp.param["mid_side"],
                mp.param["mid_side_b2"],
                mp.param["reverse"],
            )

        specs[i] = combine_spectrograms(spec, mp)

    del wave

    if args.algorithm == "deep":
        d_spec = np.where(np.abs(specs[0]) <= np.abs(spec[1]), specs[0], spec[1])
        v_spec = d_spec - specs[1]
        sf.write(
            os.path.join("{}.wav".format(args.output_name)),
            cmb_spectrogram_to_wave(v_spec, mp),
            mp.param["sr"],
        )

    if args.algorithm.startswith("invert"):
        ln = min([specs[0].shape[2], specs[1].shape[2]])
        specs[0] = specs[0][:, :, :ln]
        specs[1] = specs[1][:, :, :ln]

        if "invert_p" == args.algorithm:
            X_mag = np.abs(specs[0])
            y_mag = np.abs(specs[1])
            max_mag = np.where(X_mag >= y_mag, X_mag, y_mag)
            v_spec = specs[1] - max_mag * np.exp(1.0j * np.angle(specs[0]))
        else:
            specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2)
            v_spec = specs[0] - specs[1]

            if not args.vocals_only:
                X_mag = np.abs(specs[0])
                y_mag = np.abs(specs[1])
                v_mag = np.abs(v_spec)

                X_image = spectrogram_to_image(X_mag)
                y_image = spectrogram_to_image(y_mag)
                v_image = spectrogram_to_image(v_mag)

                cv2.imwrite("{}_X.png".format(args.output_name), X_image)
                cv2.imwrite("{}_y.png".format(args.output_name), y_image)
                cv2.imwrite("{}_v.png".format(args.output_name), v_image)

                sf.write(
                    "{}_X.wav".format(args.output_name),
                    cmb_spectrogram_to_wave(specs[0], mp),
                    mp.param["sr"],
                )
                sf.write(
                    "{}_y.wav".format(args.output_name),
                    cmb_spectrogram_to_wave(specs[1], mp),
                    mp.param["sr"],
                )

        sf.write(
            "{}_v.wav".format(args.output_name),
            cmb_spectrogram_to_wave(v_spec, mp),
            mp.param["sr"],
        )
    else:
        if not args.algorithm == "deep":
            sf.write(
                os.path.join("ensembled", "{}.wav".format(args.output_name)),
                cmb_spectrogram_to_wave(ensembling(args.algorithm, specs), mp),
                mp.param["sr"],
            )

    if args.algorithm == "align":
        trackalignment = [
            {
                "file1": '"{}"'.format(args.input[0]),
                "file2": '"{}"'.format(args.input[1]),
            }
        ]

        for i, e in tqdm(enumerate(trackalignment), desc="Performing Alignment..."):
            os.system(f"python lib/align_tracks.py {e['file1']} {e['file2']}")

    # print('Total time: {0:.{1}f}s'.format(time.time() - start_time, 1))

spaces/834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Cardiffnlp Twitter Roberta Base Sentiment Latest
emoji: 📉
colorFrom: pink
colorTo: yellow
sdk: gradio
sdk_version: 3.39.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/AI-Hobbyist/Hoyo-RVC/onnx_inference_demo.py
DELETED
@@ -1,20 +0,0 @@
import soundfile
from infer_pack.onnx_inference import OnnxRVC

hop_size = 512
sampling_rate = 40000  # sampling rate
f0_up_key = 0  # pitch shift
sid = 0  # speaker (character) ID
f0_method = "dio"  # F0 extraction algorithm
model_path = "ShirohaRVC.onnx"  # full path to the model
vec_name = "vec-256-layer-9"  # expanded internally to f"pretrained/{vec_name}.onnx"; an ONNX vec model is required
wav_path = "123.wav"  # input path or BytesIO instance
out_path = "out.wav"  # output path or BytesIO instance

model = OnnxRVC(
    model_path, vec_path=vec_name, sr=sampling_rate, hop_size=hop_size, device="cuda"
)

audio = model.inference(wav_path, sid, f0_method=f0_method, f0_up_key=f0_up_key)

soundfile.write(out_path, audio, sampling_rate)

spaces/AIConsultant/MusicGen/audiocraft/grids/compression/__init__.py
DELETED
@@ -1,6 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""EnCodec grids."""

spaces/AIConsultant/MusicGen/tests/utils/__init__.py
DELETED
@@ -1,5 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/syntaspeech.py
DELETED
@@ -1,277 +0,0 @@
import math
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import Linear

from text_to_speech.modules.commons.conv import ConvBlocks, ConditionalConvBlocks
from text_to_speech.modules.commons.layers import Embedding
from text_to_speech.modules.commons.rel_transformer import RelTransformerEncoder
from text_to_speech.modules.commons.transformer import MultiheadAttention, FFTBlocks
from text_to_speech.modules.tts.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word
from text_to_speech.modules.tts.fs import FS_DECODERS, FastSpeech
from text_to_speech.modules.tts.portaspeech.fvae import SyntaFVAE, FVAE
from text_to_speech.utils.commons.meters import Timer
from text_to_speech.utils.nn.seq_utils import group_hidden_by_segs
from text_to_speech.modules.commons.nar_tts_modules import SyntaDurationPredictor


class SinusoidalPosEmb(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x):
        """

        :param x: [B, T]
        :return: [B, T, H]
        """
        device = x.device
        half_dim = self.dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
        emb = x[:, :, None] * emb[None, :]
        emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
        return emb


class SyntaSpeech(FastSpeech):
    def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None):
        super().__init__(ph_dict_size, hparams, out_dims)
        # build linguistic encoder
        if hparams['num_spk'] > 1:
            self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size)
        if hparams['use_word_encoder']:
            self.word_encoder = RelTransformerEncoder(
                word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2,
                hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
        if hparams['dur_level'] == 'word':
            if hparams['word_encoder_type'] == 'rel_fft':
                self.ph2word_encoder = RelTransformerEncoder(
                    0, self.hidden_size, self.hidden_size, self.hidden_size, 2,
                    hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
            if hparams['word_encoder_type'] == 'fft':
                self.ph2word_encoder = FFTBlocks(
                    self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads'])
            self.sin_pos = SinusoidalPosEmb(self.hidden_size)
            self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
            self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
            self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
            self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False)
            self.attn.enable_torch_version = False
            if hparams['text_encoder_postnet']:
                self.text_encoder_postnet = ConvBlocks(
                    self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2)
        else:
            self.sin_pos = SinusoidalPosEmb(self.hidden_size)

        predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
        self.dur_predictor = SyntaDurationPredictor(
            self.hidden_size,
            n_chans=predictor_hidden,
            n_layers=hparams['dur_predictor_layers'],
            dropout_rate=hparams['predictor_dropout'],
            kernel_size=hparams['dur_predictor_kernel'])
        # build VAE decoder
        if hparams['use_fvae']:
            del self.decoder
            del self.mel_out
            if hparams.get("use_gae_in_prior", True):
                self.fvae = SyntaFVAE(
                    c_in_out=self.out_dims,
                    hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
                    kernel_size=hparams['fvae_kernel_size'],
                    enc_n_layers=hparams['fvae_enc_n_layers'],
                    dec_n_layers=hparams['fvae_dec_n_layers'],
                    c_cond=self.hidden_size,
                    use_prior_flow=hparams['use_prior_flow'],
                    flow_hidden=hparams['prior_flow_hidden'],
                    flow_kernel_size=hparams['prior_flow_kernel_size'],
                    flow_n_steps=hparams['prior_flow_n_blocks'],
                    strides=[hparams['fvae_strides']],
                    encoder_type=hparams['fvae_encoder_type'],
                    decoder_type=hparams['fvae_decoder_type'],
                )
            else:
                self.fvae = FVAE(
                    c_in_out=self.out_dims,
                    hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
                    kernel_size=hparams['fvae_kernel_size'],
                    enc_n_layers=hparams['fvae_enc_n_layers'],
                    dec_n_layers=hparams['fvae_dec_n_layers'],
                    c_cond=self.hidden_size,
                    use_prior_flow=hparams['use_prior_flow'],
                    flow_hidden=hparams['prior_flow_hidden'],
                    flow_kernel_size=hparams['prior_flow_kernel_size'],
                    flow_n_steps=hparams['prior_flow_n_blocks'],
                    strides=[hparams['fvae_strides']],
                    encoder_type=hparams['fvae_encoder_type'],
                    decoder_type=hparams['fvae_decoder_type'],
                )
        else:
            self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
            self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
        if hparams['use_pitch_embed']:
            self.pitch_embed = Embedding(300, self.hidden_size, 0)
        if self.hparams['add_word_pos']:
            self.word_pos_proj = Linear(self.hidden_size, self.hidden_size)

    def build_embedding(self, dictionary, embed_dim):
        num_embeddings = len(dictionary)
        emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
        return emb

    def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
                spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
                global_step=None, graph_lst=None, etypes_lst=None, *args, **kwargs):

        if self.hparams['use_spk_embed']:
            spk_embed = spk_embed
        elif self.hparams['use_spk_id']:
            spk_embed = self.spk_embed_proj(spk_id)[:, None, :]
        else:
            spk_embed = 0

        ret = {}
        style_embed = self.forward_style_embed(spk_embed, spk_id)  # speaker embedding, [B, 1, C]
        x, tgt_nonpadding = self.run_text_encoder(
            txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst=graph_lst, etypes_lst=etypes_lst, **kwargs)
        x = x + style_embed  # it maybe necessary to achieve multi-speaker
        x = x * tgt_nonpadding
        ret['nonpadding'] = tgt_nonpadding
        if self.hparams['use_pitch_embed']:
            x = x + self.pitch_embed(pitch)
        ret['decoder_inp'] = x
        if infer and (mel2ph is None or mel2word is None):
            mel2word = ret['mel2word']
        ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step,
                                                                mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
        return ret

    def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst, etypes_lst, **kwargs):
        word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1  # [B, T_mel, T_word]
        src_nonpadding = (txt_tokens > 0).float()[:, :, None]
        use_bert = self.hparams.get("use_bert") is True
        if use_bert:
            ph_encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word,
                                          graph_lst=graph_lst, etypes_lst=etypes_lst,
                                          cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed
        else:
            ph_encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed
        if self.hparams['use_word_encoder']:
            word_encoder_out = self.word_encoder(word_tokens) + style_embed
            ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word)

        dur_input = ph_encoder_out * src_nonpadding
        if self.hparams['dur_level'] == 'word':
            word_encoder_out = 0
            h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0]
            word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word)
            if self.hparams['use_word_encoder']:
                word_encoder_out = word_encoder_out + self.word_encoder(word_tokens)
            mel2word = self.forward_dur(dur_input, mel2word, ret, ph2word=ph2word, word_len=word_len, graph_lst=graph_lst, etypes_lst=etypes_lst)
            mel2word = clip_mel2token_to_multiple(mel2word, self.hparams['frames_multiple'])
            ret['mel2word'] = mel2word
            tgt_nonpadding = (mel2word > 0).float()[:, :, None]
            enc_pos = self.get_pos_embed(word2word, ph2word)  # [B, T_ph, H]
            dec_pos = self.get_pos_embed(word2word, mel2word)  # [B, T_mel, H]
            dec_word_mask = build_word_mask(mel2word, ph2word)  # [B, T_mel, T_ph]
            x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask)
            if self.hparams['add_word_pos']:
                x = x + self.word_pos_proj(dec_pos)
            ret['attn'] = weight
        else:
            mel2ph = self.forward_dur(dur_input, mel2ph, ret)
            mel2ph = clip_mel2token_to_multiple(mel2ph, self.hparams['frames_multiple'])
            mel2word = mel2ph_to_mel2word(mel2ph, ph2word)
            x = expand_states(ph_encoder_out, mel2ph)
            if self.hparams['add_word_pos']:
                dec_pos = self.get_pos_embed(word2word, mel2word)  # [B, T_mel, H]
                x = x + self.word_pos_proj(dec_pos)
            tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
            if self.hparams['use_word_encoder']:
                x = x + expand_states(word_encoder_out, mel2word)
        return x, tgt_nonpadding

    def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask):
        ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1))
        word_enc_out_expend = expand_states(word_encoder_out, mel2word)
        word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1)
        if self.hparams['text_encoder_postnet']:
            word_enc_out_expend = self.dec_res_proj(word_enc_out_expend)
            word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend)
            dec_q = x_res = word_enc_out_expend
        else:
            dec_q = self.dec_query_proj(word_enc_out_expend)
            x_res = self.dec_res_proj(word_enc_out_expend)
        ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1)
        x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9)
        x = x.transpose(0, 1)
        x = x + x_res
        return x, weight

    def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0,
                    mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None):
        if not self.hparams['use_fvae']:
            x = self.decoder(x)
            x = self.mel_out(x)
            ret['kl'] = 0
            return x * tgt_nonpadding
        else:
            # x is the phoneme encoding
            x = x.transpose(1, 2)  # [B, H, T]
            tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2)  # [B, H, T]
            if infer:
                z = self.fvae(cond=x, infer=True, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
            else:
                tgt_mels = tgt_mels.transpose(1, 2)  # [B, 80, T]
                z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae(
                    tgt_mels, tgt_nonpadding_BHT, cond=x, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
                if global_step < self.hparams['posterior_start_steps']:
                    z = torch.randn_like(z)
            x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2)
            ret['pre_mel_out'] = x_recon
            return x_recon

    def forward_dur(self, dur_input, mel2word, ret, **kwargs):
        """

        :param dur_input: [B, T_txt, H]
        :param mel2ph: [B, T_mel]
        :param txt_tokens: [B, T_txt]
        :param ret:
        :return:
        """
        word_len = kwargs['word_len']
        ph2word = kwargs['ph2word']
        graph_lst = kwargs['graph_lst']
        etypes_lst = kwargs['etypes_lst']
        src_padding = dur_input.data.abs().sum(-1) == 0
        dur_input = dur_input.detach() + self.hparams['predictor_grad'] * (dur_input - dur_input.detach())
        dur = self.dur_predictor(dur_input, src_padding, ph2word, graph_lst, etypes_lst)

        B, T_ph = ph2word.shape
        dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur)
        dur = dur[:, 1:]
        ret['dur'] = dur
        if mel2word is None:
            mel2word = self.length_regulator(dur).detach()
        return mel2word

    def get_pos_embed(self, word2word, x2word):
        x_pos = build_word_mask(word2word, x2word).float()  # [B, T_word, T_ph]
        x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1)
        x_pos = self.sin_pos(x_pos.float())  # [B, T_ph, H]
        return x_pos

    def store_inverse_all(self):
        def remove_weight_norm(m):
            try:
                if hasattr(m, 'store_inverse'):
                    m.store_inverse()
                nn.utils.remove_weight_norm(m)
            except ValueError:  # this module didn't have weight norm
                return

        self.apply(remove_weight_norm)

spaces/AIGC-Audio/Make_An_Audio/ldm/modules/encoders/open_clap/pann_model.py
DELETED
@@ -1,543 +0,0 @@
# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition
# Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn
# Some layers are re-designed for CLAP
import os
os.environ['NUMBA_CACHE_DIR'] = '/tmp/'

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation

from .utils import do_mixup, interpolate, pad_framewise_output
from .feature_fusion import iAFF, AFF, DAF


def init_layer(layer):
    """Initialize a Linear or Convolutional layer. """
    nn.init.xavier_uniform_(layer.weight)

    if hasattr(layer, 'bias'):
        if layer.bias is not None:
            layer.bias.data.fill_(0.)


def init_bn(bn):
    """Initialize a Batchnorm layer. """
    bn.bias.data.fill_(0.)
    bn.weight.data.fill_(1.)


class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels):

        super(ConvBlock, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)

        self.conv2 = nn.Conv2d(in_channels=out_channels,
                               out_channels=out_channels,
                               kernel_size=(3, 3), stride=(1, 1),
                               padding=(1, 1), bias=False)

        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

        self.init_weight()

    def init_weight(self):
        init_layer(self.conv1)
        init_layer(self.conv2)
        init_bn(self.bn1)
        init_bn(self.bn2)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):

        x = input
        x = F.relu_(self.bn1(self.conv1(x)))
        x = F.relu_(self.bn2(self.conv2(x)))
        if pool_type == 'max':
            x = F.max_pool2d(x, kernel_size=pool_size)
        elif pool_type == 'avg':
            x = F.avg_pool2d(x, kernel_size=pool_size)
        elif pool_type == 'avg+max':
            x1 = F.avg_pool2d(x, kernel_size=pool_size)
            x2 = F.max_pool2d(x, kernel_size=pool_size)
            x = x1 + x2
        else:
            raise Exception('Incorrect argument!')

        return x


class ConvBlock5x5(nn.Module):
    def __init__(self, in_channels, out_channels):

        super(ConvBlock5x5, self).__init__()

        self.conv1 = nn.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=(5, 5), stride=(1, 1),
                               padding=(2, 2), bias=False)

        self.bn1 = nn.BatchNorm2d(out_channels)

        self.init_weight()

    def init_weight(self):
        init_layer(self.conv1)
        init_bn(self.bn1)

    def forward(self, input, pool_size=(2, 2), pool_type='avg'):

        x = input
        x = F.relu_(self.bn1(self.conv1(x)))
        if pool_type == 'max':
            x = F.max_pool2d(x, kernel_size=pool_size)
        elif pool_type == 'avg':
            x = F.avg_pool2d(x, kernel_size=pool_size)
        elif pool_type == 'avg+max':
            x1 = F.avg_pool2d(x, kernel_size=pool_size)
            x2 = F.max_pool2d(x, kernel_size=pool_size)
            x = x1 + x2
        else:
            raise Exception('Incorrect argument!')

        return x


class AttBlock(nn.Module):
    def __init__(self, n_in, n_out, activation='linear', temperature=1.):
        super(AttBlock, self).__init__()

        self.activation = activation
        self.temperature = temperature
        self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
        self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)

        self.bn_att = nn.BatchNorm1d(n_out)
        self.init_weights()

    def init_weights(self):
        init_layer(self.att)
        init_layer(self.cla)
        init_bn(self.bn_att)

    def forward(self, x):
        # x: (n_samples, n_in, n_time)
        norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
        cla = self.nonlinear_transform(self.cla(x))
        x = torch.sum(norm_att * cla, dim=2)
        return x, norm_att, cla

    def nonlinear_transform(self, x):
        if self.activation == 'linear':
            return x
        elif self.activation == 'sigmoid':
            return torch.sigmoid(x)


class Cnn14(nn.Module):
    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
                 fmax, classes_num, enable_fusion=False, fusion_type='None'):

        super(Cnn14, self).__init__()

        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None

        self.enable_fusion = enable_fusion
        self.fusion_type = fusion_type

        # Spectrogram extractor
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
                                                 win_length=window_size, window=window, center=center, pad_mode=pad_mode,
                                                 freeze_parameters=True)

        # Logmel feature extractor
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
                                                 n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
                                                 freeze_parameters=True)

        # Spec augmenter
        self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
                                               freq_drop_width=8, freq_stripes_num=2)

        self.bn0 = nn.BatchNorm2d(64)

        if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
            self.conv_block1 = ConvBlock(in_channels=4, out_channels=64)
        else:
            self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
        self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
        self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
        self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
        self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
        self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)

        self.fc1 = nn.Linear(2048, 2048, bias=True)
        self.fc_audioset = nn.Linear(2048, classes_num, bias=True)

        if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']):
            self.mel_conv1d = nn.Sequential(
                nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
                nn.BatchNorm1d(64)  # No Relu
            )
            if self.fusion_type == 'daf_1d':
                self.fusion_model = DAF()
            elif self.fusion_type == 'aff_1d':
                self.fusion_model = AFF(channels=64, type='1D')
            elif self.fusion_type == 'iaff_1d':
                self.fusion_model = iAFF(channels=64, type='1D')

        if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
            self.mel_conv2d = nn.Sequential(
                nn.Conv2d(1, 64, kernel_size=(5,5), stride=(6, 2), padding=(2,2)),
                nn.BatchNorm2d(64),
                nn.ReLU(inplace=True)
            )

            if self.fusion_type == 'daf_2d':
                self.fusion_model = DAF()
            elif self.fusion_type == 'aff_2d':
                self.fusion_model = AFF(channels=64, type='2D')
            elif self.fusion_type == 'iaff_2d':
                self.fusion_model = iAFF(channels=64, type='2D')
        self.init_weight()
|
217 |
-
|
218 |
-
def init_weight(self):
|
219 |
-
init_bn(self.bn0)
|
220 |
-
init_layer(self.fc1)
|
221 |
-
init_layer(self.fc_audioset)
|
222 |
-
|
223 |
-
def forward(self, input, mixup_lambda=None, device=None):
|
224 |
-
"""
|
225 |
-
Input: (batch_size, data_length)"""
|
226 |
-
|
227 |
-
if self.enable_fusion and input["longer"].sum() == 0:
|
228 |
-
# if no audio is longer than 10s, then randomly select one audio to be longer
|
229 |
-
input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True
|
230 |
-
|
231 |
-
if not self.enable_fusion:
|
232 |
-
x = self.spectrogram_extractor(input['waveform'].to(device=device, non_blocking=True)) # (batch_size, 1, time_steps, freq_bins)
|
233 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
234 |
-
|
235 |
-
x = x.transpose(1, 3)
|
236 |
-
x = self.bn0(x)
|
237 |
-
x = x.transpose(1, 3)
|
238 |
-
else:
|
239 |
-
longer_list = input["longer"].to(device=device, non_blocking=True)
|
240 |
-
x = input["mel_fusion"].to(device=device, non_blocking=True)
|
241 |
-
longer_list_idx = torch.where(longer_list)[0]
|
242 |
-
x = x.transpose(1, 3)
|
243 |
-
x = self.bn0(x)
|
244 |
-
x = x.transpose(1, 3)
|
245 |
-
if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']:
|
246 |
-
new_x = x[:,0:1,:,:].clone().contiguous()
|
247 |
-
# local processing
|
248 |
-
if len(longer_list_idx) > 0:
|
249 |
-
fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous()
|
250 |
-
FB,FC,FT,FF = fusion_x_local.size()
|
251 |
-
fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
|
252 |
-
fusion_x_local = torch.permute(fusion_x_local, (0,2,1)).contiguous()
|
253 |
-
fusion_x_local = self.mel_conv1d(fusion_x_local)
|
254 |
-
fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1))
|
255 |
-
fusion_x_local = torch.permute(fusion_x_local, (0,2,1,3)).contiguous().flatten(2)
|
256 |
-
if fusion_x_local.size(-1) < FT:
|
257 |
-
fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1)
|
258 |
-
else:
|
259 |
-
fusion_x_local = fusion_x_local[:,:,:FT]
|
260 |
-
# 1D fusion
|
261 |
-
new_x = new_x.squeeze(1).permute((0,2,1)).contiguous()
|
262 |
-
new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local)
|
263 |
-
x = new_x.permute((0,2,1)).contiguous()[:,None,:,:]
|
264 |
-
else:
|
265 |
-
x = new_x
|
266 |
-
elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']:
|
267 |
-
x = x # no change
|
268 |
-
|
269 |
-
if self.training:
|
270 |
-
x = self.spec_augmenter(x)
|
271 |
-
# Mixup on spectrogram
|
272 |
-
if self.training and mixup_lambda is not None:
|
273 |
-
x = do_mixup(x, mixup_lambda)
|
274 |
-
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
|
275 |
-
global_x = x[:,0:1,:,:]
|
276 |
-
|
277 |
-
# global processing
|
278 |
-
B, C, H, W = global_x.shape
|
279 |
-
global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type='avg')
|
280 |
-
if len(longer_list_idx) > 0:
|
281 |
-
local_x = x[longer_list_idx,1:,:,:].contiguous()
|
282 |
-
TH = global_x.size(-2)
|
283 |
-
# local processing
|
284 |
-
B, C, H, W = local_x.shape
|
285 |
-
local_x = local_x.view(B*C,1,H,W)
|
286 |
-
local_x = self.mel_conv2d(local_x)
|
287 |
-
local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3))
|
288 |
-
local_x = local_x.permute((0,2,1,3,4)).contiguous().flatten(2,3)
|
289 |
-
TB,TC,_,TW = local_x.size()
|
290 |
-
if local_x.size(-2) < TH:
|
291 |
-
local_x = torch.cat([local_x, torch.zeros((TB,TC,TH-local_x.size(-2),TW), device=global_x.device)], dim=-2)
|
292 |
-
else:
|
293 |
-
local_x = local_x[:,:,:TH,:]
|
294 |
-
|
295 |
-
global_x[longer_list_idx] = self.fusion_model(global_x[longer_list_idx],local_x)
|
296 |
-
x = global_x
|
297 |
-
else:
|
298 |
-
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
|
299 |
-
|
300 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
301 |
-
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
|
302 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
303 |
-
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
|
304 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
305 |
-
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
|
306 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
307 |
-
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
|
308 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
309 |
-
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
|
310 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
311 |
-
x = torch.mean(x, dim=3)
|
312 |
-
|
313 |
-
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
|
314 |
-
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
|
315 |
-
latent_x = latent_x1 + latent_x2
|
316 |
-
latent_x = latent_x.transpose(1, 2)
|
317 |
-
latent_x = F.relu_(self.fc1(latent_x))
|
318 |
-
latent_output = interpolate(latent_x, 32)
|
319 |
-
|
320 |
-
|
321 |
-
(x1, _) = torch.max(x, dim=2)
|
322 |
-
x2 = torch.mean(x, dim=2)
|
323 |
-
x = x1 + x2
|
324 |
-
x = F.dropout(x, p=0.5, training=self.training)
|
325 |
-
x = F.relu_(self.fc1(x))
|
326 |
-
embedding = F.dropout(x, p=0.5, training=self.training)
|
327 |
-
clipwise_output = torch.sigmoid(self.fc_audioset(x))
|
328 |
-
|
329 |
-
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
|
330 |
-
return output_dict
|
331 |
-
|
332 |
-
|
333 |
-
class Cnn6(nn.Module):
|
334 |
-
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
|
335 |
-
fmax, classes_num, enable_fusion=False, fusion_type='None'):
|
336 |
-
|
337 |
-
super(Cnn6, self).__init__()
|
338 |
-
|
339 |
-
window = 'hann'
|
340 |
-
center = True
|
341 |
-
pad_mode = 'reflect'
|
342 |
-
ref = 1.0
|
343 |
-
amin = 1e-10
|
344 |
-
top_db = None
|
345 |
-
|
346 |
-
self.enable_fusion = enable_fusion
|
347 |
-
self.fusion_type = fusion_type
|
348 |
-
|
349 |
-
# Spectrogram extractor
|
350 |
-
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
|
351 |
-
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
|
352 |
-
freeze_parameters=True)
|
353 |
-
|
354 |
-
# Logmel feature extractor
|
355 |
-
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
|
356 |
-
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
|
357 |
-
freeze_parameters=True)
|
358 |
-
|
359 |
-
# Spec augmenter
|
360 |
-
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
|
361 |
-
freq_drop_width=8, freq_stripes_num=2)
|
362 |
-
|
363 |
-
self.bn0 = nn.BatchNorm2d(64)
|
364 |
-
|
365 |
-
self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
|
366 |
-
self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
|
367 |
-
self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
|
368 |
-
self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)
|
369 |
-
|
370 |
-
self.fc1 = nn.Linear(512, 512, bias=True)
|
371 |
-
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
|
372 |
-
|
373 |
-
self.init_weight()
|
374 |
-
|
375 |
-
def init_weight(self):
|
376 |
-
init_bn(self.bn0)
|
377 |
-
init_layer(self.fc1)
|
378 |
-
init_layer(self.fc_audioset)
|
379 |
-
|
380 |
-
def forward(self, input, mixup_lambda=None, device=None):
|
381 |
-
"""
|
382 |
-
Input: (batch_size, data_length)"""
|
383 |
-
|
384 |
-
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
|
385 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
386 |
-
|
387 |
-
x = x.transpose(1, 3)
|
388 |
-
x = self.bn0(x)
|
389 |
-
x = x.transpose(1, 3)
|
390 |
-
|
391 |
-
if self.training:
|
392 |
-
x = self.spec_augmenter(x)
|
393 |
-
|
394 |
-
# Mixup on spectrogram
|
395 |
-
if self.training and mixup_lambda is not None:
|
396 |
-
x = do_mixup(x, mixup_lambda)
|
397 |
-
|
398 |
-
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
|
399 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
400 |
-
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
|
401 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
402 |
-
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
|
403 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
404 |
-
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
|
405 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
406 |
-
x = torch.mean(x, dim=3)
|
407 |
-
|
408 |
-
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
|
409 |
-
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
|
410 |
-
latent_x = latent_x1 + latent_x2
|
411 |
-
latent_x = latent_x.transpose(1, 2)
|
412 |
-
latent_x = F.relu_(self.fc1(latent_x))
|
413 |
-
latent_output = interpolate(latent_x, 16)
|
414 |
-
|
415 |
-
(x1, _) = torch.max(x, dim=2)
|
416 |
-
x2 = torch.mean(x, dim=2)
|
417 |
-
x = x1 + x2
|
418 |
-
x = F.dropout(x, p=0.5, training=self.training)
|
419 |
-
x = F.relu_(self.fc1(x))
|
420 |
-
embedding = F.dropout(x, p=0.5, training=self.training)
|
421 |
-
clipwise_output = torch.sigmoid(self.fc_audioset(x))
|
422 |
-
|
423 |
-
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
|
424 |
-
|
425 |
-
return output_dict
|
426 |
-
|
427 |
-
|
428 |
-
class Cnn10(nn.Module):
|
429 |
-
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
|
430 |
-
fmax, classes_num, enable_fusion=False, fusion_type='None'):
|
431 |
-
|
432 |
-
super(Cnn10, self).__init__()
|
433 |
-
|
434 |
-
window = 'hann'
|
435 |
-
center = True
|
436 |
-
pad_mode = 'reflect'
|
437 |
-
ref = 1.0
|
438 |
-
amin = 1e-10
|
439 |
-
top_db = None
|
440 |
-
|
441 |
-
self.enable_fusion = enable_fusion
|
442 |
-
self.fusion_type = fusion_type
|
443 |
-
|
444 |
-
# Spectrogram extractor
|
445 |
-
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
|
446 |
-
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
|
447 |
-
freeze_parameters=True)
|
448 |
-
|
449 |
-
# Logmel feature extractor
|
450 |
-
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
|
451 |
-
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
|
452 |
-
freeze_parameters=True)
|
453 |
-
|
454 |
-
# Spec augmenter
|
455 |
-
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
|
456 |
-
freq_drop_width=8, freq_stripes_num=2)
|
457 |
-
|
458 |
-
self.bn0 = nn.BatchNorm2d(64)
|
459 |
-
|
460 |
-
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
|
461 |
-
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
|
462 |
-
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
|
463 |
-
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
|
464 |
-
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
|
465 |
-
|
466 |
-
self.fc1 = nn.Linear(1024, 1024, bias=True)
|
467 |
-
self.fc_audioset = nn.Linear(1024, classes_num, bias=True)
|
468 |
-
|
469 |
-
self.init_weight()
|
470 |
-
|
471 |
-
def init_weight(self):
|
472 |
-
init_bn(self.bn0)
|
473 |
-
init_layer(self.fc1)
|
474 |
-
init_layer(self.fc_audioset)
|
475 |
-
|
476 |
-
def forward(self, input, mixup_lambda=None, device=None):
|
477 |
-
"""
|
478 |
-
Input: (batch_size, data_length)"""
|
479 |
-
|
480 |
-
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
|
481 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
482 |
-
|
483 |
-
x = x.transpose(1, 3)
|
484 |
-
x = self.bn0(x)
|
485 |
-
x = x.transpose(1, 3)
|
486 |
-
|
487 |
-
if self.training:
|
488 |
-
x = self.spec_augmenter(x)
|
489 |
-
|
490 |
-
# Mixup on spectrogram
|
491 |
-
if self.training and mixup_lambda is not None:
|
492 |
-
x = do_mixup(x, mixup_lambda)
|
493 |
-
|
494 |
-
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
|
495 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
496 |
-
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
|
497 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
498 |
-
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
|
499 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
500 |
-
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
|
501 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
502 |
-
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
|
503 |
-
x = F.dropout(x, p=0.2, training=self.training)
|
504 |
-
x = torch.mean(x, dim=3)
|
505 |
-
|
506 |
-
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
|
507 |
-
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
|
508 |
-
latent_x = latent_x1 + latent_x2
|
509 |
-
latent_x = latent_x.transpose(1, 2)
|
510 |
-
latent_x = F.relu_(self.fc1(latent_x))
|
511 |
-
latent_output = interpolate(latent_x, 32)
|
512 |
-
|
513 |
-
(x1, _) = torch.max(x, dim=2)
|
514 |
-
x2 = torch.mean(x, dim=2)
|
515 |
-
x = x1 + x2
|
516 |
-
x = F.dropout(x, p=0.5, training=self.training)
|
517 |
-
x = F.relu_(self.fc1(x))
|
518 |
-
embedding = F.dropout(x, p=0.5, training=self.training)
|
519 |
-
clipwise_output = torch.sigmoid(self.fc_audioset(x))
|
520 |
-
|
521 |
-
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
|
522 |
-
|
523 |
-
return output_dict
|
524 |
-
|
525 |
-
|
526 |
-
def create_pann_model(audio_cfg, enable_fusion=False, fusion_type='None'):
|
527 |
-
try:
|
528 |
-
ModelProto = eval(audio_cfg.model_name)
|
529 |
-
model = ModelProto(
|
530 |
-
sample_rate = audio_cfg.sample_rate,
|
531 |
-
window_size = audio_cfg.window_size,
|
532 |
-
hop_size =audio_cfg.hop_size,
|
533 |
-
mel_bins = audio_cfg.mel_bins,
|
534 |
-
fmin = audio_cfg.fmin,
|
535 |
-
fmax = audio_cfg.fmax,
|
536 |
-
classes_num = audio_cfg.class_num,
|
537 |
-
enable_fusion = enable_fusion,
|
538 |
-
fusion_type = fusion_type
|
539 |
-
)
|
540 |
-
return model
|
541 |
-
except:
|
542 |
-
raise RuntimeError(f'Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough.')
|
543 |
-
|
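The factory above resolves `audio_cfg.model_name` with `eval`, so a plain namespace with the constructor fields is enough to build a model. A minimal usage sketch, assuming the deleted file is importable as `pann_model` (hypothetical import path) and using placeholder config values rather than the project's real defaults:

```python
from types import SimpleNamespace

import torch

from pann_model import create_pann_model  # hypothetical import path

# Placeholder audio config; field names follow create_pann_model's expectations.
audio_cfg = SimpleNamespace(
    model_name="Cnn14", sample_rate=32000, window_size=1024, hop_size=320,
    mel_bins=64, fmin=50, fmax=14000, class_num=527,
)

model = create_pann_model(audio_cfg, enable_fusion=False, fusion_type="None")
model.eval()

# Without fusion, Cnn14.forward expects a dict carrying a raw waveform batch.
batch = {"waveform": torch.randn(2, 32000 * 10)}
with torch.no_grad():
    out = model(batch, device="cpu")
print(out["clipwise_output"].shape)  # (2, class_num)
print(out["embedding"].shape)        # (2, 2048)
```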
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/bert.py
DELETED
@@ -1,32 +0,0 @@
from transformers import BertTokenizer, BertModel
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."

def bert_embeddings(text):
    # text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    return output

from transformers import RobertaTokenizer, RobertaModel

tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaModel.from_pretrained('roberta-base')
text = "Replace me by any text you'd like."
def Roberta_embeddings(text):
    # text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    return output

from transformers import BartTokenizer, BartModel

tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
model = BartModel.from_pretrained('facebook/bart-base')
text = "Replace me by any text you'd like."
def bart_embeddings(text):
    # text = "Replace me by any text you'd like."
    encoded_input = tokenizer(text, return_tensors='pt')
    output = model(**encoded_input)
    return output
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/modules/encoders/open_clap/tokenizer.py
DELETED
@@ -1,180 +0,0 @@
""" CLIP tokenizer

Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List

import ftfy
import regex as re
import torch


@lru_cache()
def default_bpe():
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")


@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return set of symbol pairs in a word.
    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


def basic_clean(text):
    text = ftfy.fix_text(text)
    text = html.unescape(html.unescape(text))
    return text.strip()


def whitespace_clean(text):
    text = re.sub(r'\s+', ' ', text)
    text = text.strip()
    return text


class SimpleTokenizer(object):
    def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        if not special_tokens:
            special_tokens = ['<start_of_text>', '<end_of_text>']
        else:
            special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
        vocab.extend(special_tokens)
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {t: t for t in special_tokens}
        special = "|".join(special_tokens)
        self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

        self.vocab_size = len(self.encoder)
        self.all_special_ids = [self.encoder[t] for t in special_tokens]

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text


_tokenizer = SimpleTokenizer()


def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize
    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<start_of_text>"]
    eot_token = _tokenizer.encoder["<end_of_text>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            tokens = tokens[:context_length]  # Truncate
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result
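A short sketch of how the module-level `tokenize` helper is typically called; it assumes this file is importable as `tokenizer` and that the `bpe_simple_vocab_16e6.txt.gz` merges file sits next to it:

```python
from tokenizer import tokenize, _tokenizer  # assuming the file above is importable as `tokenizer`

# Batch of captions, zero-padded to the CLIP context length of 77.
tokens = tokenize(["a dog barking", "rain on a tin roof"])
print(tokens.shape)   # torch.Size([2, 77])
print(tokens[0, :6])  # <start_of_text>, BPE ids, <end_of_text>, then zero padding

# Round-trip a single caption through the underlying SimpleTokenizer.
ids = _tokenizer.encode("a dog barking")
print(_tokenizer.decode(ids))  # "a dog barking " (each '</w>' becomes a trailing space)
```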
spaces/Abhilashvj/planogram-compliance/utils/segment/augmentations.py
DELETED
@@ -1,128 +0,0 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Image augmentation functions
"""

import math
import random

import cv2
import numpy as np

from ..augmentations import box_candidates
from ..general import resample_segments, segment2box


def mixup(im, labels, segments, im2, labels2, segments2):
    # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha=beta=32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    segments = np.concatenate((segments, segments2), 0)
    return im, labels, segments


def random_perspective(
    im,
    targets=(),
    segments=(),
    degrees=10,
    translate=0.1,
    scale=0.1,
    shear=10,
    perspective=0.0,
    border=(0, 0),
):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = im.shape[0] + border[0] * 2  # shape(h,w,c)
    width = im.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -im.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -im.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(im[:, :, ::-1])  # base
    # ax[1].imshow(im2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    new_segments = []
    if n:
        new = np.zeros((n, 4))
        segments = resample_segments(segments)  # upsample
        for i, segment in enumerate(segments):
            xy = np.ones((len(segment), 3))
            xy[:, :2] = segment
            xy = xy @ M.T  # transform
            xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

            # clip
            new[i] = segment2box(xy, width, height)
            new_segments.append(xy)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)
        targets = targets[i]
        targets[:, 1:5] = new[i]
        new_segments = np.array(new_segments)[i]

    return im, targets, new_segments
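Both helpers operate on plain NumPy arrays, so they can be exercised without the rest of the YOLOv5 pipeline. A rough sketch, assuming the file is imported through its package path so the relative imports resolve:

```python
import numpy as np

from utils.segment.augmentations import mixup, random_perspective  # repo-relative import

im1 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
im2 = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
labels = np.zeros((0, 5), dtype=np.float32)        # [cls, x1, y1, x2, y2]
segments = np.zeros((0, 500, 2), dtype=np.float32)

# MixUp blends two samples with a Beta(32, 32) ratio and concatenates their labels.
im, labels, segments = mixup(im1, labels, segments, im2, labels, segments)

# With no targets, random_perspective just applies a random affine warp to the image.
im_warped, targets, new_segments = random_perspective(im, degrees=10, translate=0.1, scale=0.1)
print(im_warped.shape)  # (640, 640, 3)
```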
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/Conversation.ts
DELETED
@@ -1,17 +0,0 @@
import type { Message } from "./Message";
import type { Timestamps } from "./Timestamps";
import type { User } from "./User";

export interface Conversation extends Timestamps {
	sessionId?: string;
	userId?: User["_id"];

	model: string;

	title: string;
	messages: Message[];

	meta?: {
		fromShareId?: string;
	};
}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/anchor/Factory.js
DELETED
@@ -1,11 +0,0 @@
import Anchor from "./Anchor.js";
import ObjectFactory from '../ObjectFactory.js';
import SetValue from '../../../plugins/utils/object/SetValue.js';

ObjectFactory.register('anchor', function (gameObject, config) {
    return new Anchor(gameObject, config);
});

SetValue(window, 'RexPlugins.UI.Anchor', Anchor);

export default Anchor;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/HideMethods.js
DELETED
@@ -1,30 +0,0 @@
import {
    Show,
    Hide,
    IsShown,
} from '../utils/Hide.js';

export default {
    show(gameObject) {
        if (gameObject === undefined) {
            gameObject = this;
        }
        Show(gameObject, false);
        return this;
    },

    hide(gameObject) {
        if (gameObject === undefined) {
            gameObject = this;
        }
        Hide(gameObject, true);
        return this;
    },

    isShow(gameObject) {
        if (gameObject === undefined) {
            gameObject = this;
        }
        return IsShown(gameObject);
    }
}
spaces/Alfasign/HuggingGPT-Lite/README.md
DELETED
@@ -1,14 +0,0 @@
---
title: HuggingGPT - Lite
emoji: 🎐
colorFrom: red
colorTo: gray
sdk: gradio
sdk_version: 3.27.0
app_file: app.py
pinned: false
license: mit
duplicated_from: taesiri/HuggingGPT-Lite
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/models/stylegan2/op/fused_act.py
DELETED
@@ -1,40 +0,0 @@
import os

import torch
from torch import nn
from torch.nn import functional as F

module_path = os.path.dirname(__file__)


class FusedLeakyReLU(nn.Module):
    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()

        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale

    def forward(self, input):
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)


def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    rest_dim = [1] * (input.ndim - bias.ndim - 1)
    input = input.cuda()
    if input.ndim == 3:
        return (
            F.leaky_relu(
                input + bias.view(1, *rest_dim, bias.shape[0]), negative_slope=negative_slope
            )
            * scale
        )
    else:
        return (
            F.leaky_relu(
                input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope
            )
            * scale
        )
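This pure-PyTorch fallback always moves the input to CUDA (`input.cuda()`), so it only runs on a GPU machine. A minimal sketch of how the op is normally applied inside a StyleGAN2-style layer, assuming the file is importable as `fused_act`:

```python
import torch

from fused_act import FusedLeakyReLU, fused_leaky_relu  # assuming the file above is importable as `fused_act`

act = FusedLeakyReLU(channel=512).cuda()
x = torch.randn(4, 512, 8, 8, device="cuda")
y = act(x)                    # per-channel bias add + leaky ReLU, scaled by sqrt(2)
print(y.shape)                # torch.Size([4, 512, 8, 8])

# Functional form with an explicit bias tensor.
bias = torch.zeros(512, device="cuda")
y2 = fused_leaky_relu(x, bias)
print(torch.allclose(y, y2))  # True while the module's bias is still zero
```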
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/coaches/__init__.py
DELETED
File without changes
spaces/Andy1621/uniformer_image_detection/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py
DELETED
@@ -1,13 +0,0 @@
_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
spaces/Andy1621/uniformer_image_detection/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py
DELETED
@@ -1,4 +0,0 @@
_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
spaces/Andy1621/uniformer_image_detection/configs/tridentnet/README.md
DELETED
@@ -1,28 +0,0 @@
# Scale-Aware Trident Networks for Object Detection

## Introduction

[ALGORITHM]

```
@InProceedings{li2019scale,
    title={Scale-Aware Trident Networks for Object Detection},
    author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang},
    journal={The International Conference on Computer Vision (ICCV)},
    year={2019}
}
```

## Results and models

We report the test results using only one branch for inference.

| Backbone | Style | mstrain | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download |
| :-------------: | :-----: | :-----: | :-----: | :------: | :------------: | :----: | :------: |
| R-50 | caffe | N | 1x | | | 37.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838.log.json) |
| R-50 | caffe | Y | 1x | | | 37.6 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839.log.json) |
| R-50 | caffe | Y | 3x | | | 40.3 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth) | [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539.log.json) |

**Note**

Similar to [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/projects/TridentNet), we haven't implemented the Scale-aware Training Scheme in section 4.2 of the paper.
spaces/Andy1621/uniformer_image_detection/exp/cascade_mask_rcnn_3x_ms_hybrid_small/run.sh
DELETED
@@ -1,10 +0,0 @@
#!/usr/bin/env bash

work_path=$(dirname $0)
PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node=8 \
    tools/train.py ${work_path}/config.py \
    --launcher pytorch \
    --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \
    --work-dir ${work_path}/ckpt \
    2>&1 | tee -a ${work_path}/log.txt
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/shared_heads/__init__.py
DELETED
@@ -1,3 +0,0 @@
from .res_layer import ResLayer

__all__ = ['ResLayer']
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py
DELETED
@@ -1,5 +0,0 @@
_base_ = [
    '../_base_/models/fcn_hr18.py', '../_base_/datasets/pascal_voc12_aug.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
model = dict(decode_head=dict(num_classes=21))
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/openai/tokens.py
DELETED
@@ -1,36 +0,0 @@
from modules.text_generation import decode, encode


def token_count(prompt):
    tokens = encode(prompt)[0]

    return {
        'results': [{
            'tokens': len(tokens)
        }]
    }


def token_encode(input, encoding_format):
    # if isinstance(input, list):
    tokens = encode(input)[0]

    return {
        'results': [{
            'tokens': tokens,
            'length': len(tokens),
        }]
    }


def token_decode(tokens, encoding_format):
    # if isinstance(input, list):
    # if encoding_format == "base64":
    #     tokens = base64_to_float_list(tokens)
    output = decode(tokens)[0]

    return {
        'results': [{
            'text': output
        }]
    }
spaces/AnnasBlackHat/Image-Similarity/src/similarity/similarity.py
DELETED
@@ -1,35 +0,0 @@
from src.model import simlarity_model as model
from src.util import image as image_util
from src.util import matrix
from .model_implements.mobilenet_v3 import ModelnetV3
from .model_implements.vit_base import VitBase
from .model_implements.bit import BigTransfer


class Similarity:
    def get_models(self):
        return [
            model.SimilarityModel(name='Mobilenet V3', image_size=224, model_cls=ModelnetV3()),
            model.SimilarityModel(name='Big Transfer (BiT)', image_size=224, model_cls=BigTransfer()),
            model.SimilarityModel(name='Vision Transformer', image_size=224, model_cls=VitBase(), image_input_type='pil'),
        ]

    def check_similarity(self, img_urls, model):
        imgs = []
        for url in img_urls:
            if url == "": continue
            imgs.append(image_util.load_image_url(url, required_size=(model.image_size, model.image_size), image_type=model.image_input_type))

        features = model.model_cls.extract_feature(imgs)
        results = []
        for i, v in enumerate(features):
            if i == 0: continue
            dist = matrix.cosine(features[0], v)
            print(f'{i} -- distance: {dist}')
            # results.append((imgs[i], f'similarity: {int(dist*100)}%'))
            original_img = image_util.load_image_url(img_urls[i], required_size=None, image_type='pil')
            results.append((original_img, f'similarity: {int(dist*100)}%'))

        return results
spaces/AnnasBlackHat/Image-Similarity/src/util/image.py
DELETED
@@ -1,13 +0,0 @@
from PIL import Image
import numpy as np
import requests

def load_image_url(url, required_size=(224, 224), image_type='array'):
    print(f'downloading.. {url}, type: {image_type}')
    img = Image.open(requests.get(url, stream=True).raw)
    img = Image.fromarray(np.array(img))
    if required_size is not None:
        img = img.resize(required_size)
    if image_type == 'array':
        img = (np.expand_dims(np.array(img), 0)/255).astype(np.float32)
    return img
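A small sketch of the loader above; the URL is a placeholder, and `image_type='array'` yields a normalized batch of one:

```python
from src.util.image import load_image_url  # repo-relative import

url = "https://example.com/sample.jpg"  # placeholder URL

# As a float32 array, shape (1, 224, 224, 3) for an RGB image, values scaled to [0, 1].
arr = load_image_url(url, required_size=(224, 224), image_type="array")
print(arr.shape, arr.dtype)

# As a PIL image at its original resolution.
pil_img = load_image_url(url, required_size=None, image_type="pil")
print(pil_img.size)
```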
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pipelines/__init__.py
DELETED
@@ -1,16 +0,0 @@
from .compose import Compose
from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
                        Transpose, to_tensor)
from .loading import LoadAnnotations, LoadImageFromFile
from .test_time_aug import MultiScaleFlipAug
from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
                         PhotoMetricDistortion, RandomCrop, RandomFlip,
                         RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)

__all__ = [
    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
    'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
    'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
    'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
    'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
]
spaces/Anonymous-sub/Rerender/ControlNet/ldm/data/util.py
DELETED
@@ -1,24 +0,0 @@
import torch

from ldm.modules.midas.api import load_midas_transform


class AddMiDaS(object):
    def __init__(self, model_type):
        super().__init__()
        self.transform = load_midas_transform(model_type)

    def pt2np(self, x):
        x = ((x + 1.0) * .5).detach().cpu().numpy()
        return x

    def np2pt(self, x):
        x = torch.from_numpy(x) * 2 - 1.
        return x

    def __call__(self, sample):
        # sample['jpg'] is tensor hwc in [-1, 1] at this point
        x = self.pt2np(sample['jpg'])
        x = self.transform({"image": x})["image"]
        sample['midas_in'] = x
        return sample
spaces/AquaSuisei/ChatGPTXE/run_Linux.sh
DELETED
@@ -1,25 +0,0 @@
#!/bin/bash

# Get the directory the script lives in
script_dir=$(dirname "$0")

# Change the working directory to the script's directory
cd "$script_dir"

# Check whether the Git repository has updates
git remote update
pwd

if ! git status -uno | grep 'up to date' > /dev/null; then
	# If there are updates, stop the currently running server
	pkill -f ChuanhuChatbot.py

	# Pull the latest changes
	git pull

	# Install dependencies
	pip3 install -r requirements.txt

	# Restart the server
	nohup python3 ChuanhuChatbot.py &
fi
spaces/Awesimo/jojogan/e4e/criteria/lpips/lpips.py
DELETED
@@ -1,35 +0,0 @@
import torch
import torch.nn as nn

from criteria.lpips.networks import get_network, LinLayers
from criteria.lpips.utils import get_state_dict


class LPIPS(nn.Module):
    r"""Creates a criterion that measures
    Learned Perceptual Image Patch Similarity (LPIPS).
    Arguments:
        net_type (str): the network type to compare the features:
                        'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
        version (str): the version of LPIPS. Default: 0.1.
    """
    def __init__(self, net_type: str = 'alex', version: str = '0.1'):

        assert version in ['0.1'], 'v0.1 is only supported now'

        super(LPIPS, self).__init__()

        # pretrained network
        self.net = get_network(net_type).to("cuda")

        # linear layers
        self.lin = LinLayers(self.net.n_channels_list).to("cuda")
        self.lin.load_state_dict(get_state_dict(net_type, version))

    def forward(self, x: torch.Tensor, y: torch.Tensor):
        feat_x, feat_y = self.net(x), self.net(y)

        diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
        res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]

        return torch.sum(torch.cat(res, 0)) / x.shape[0]
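The criterion hard-codes `.to("cuda")` for both the backbone and the linear layers, so it expects GPU tensors. A minimal sketch, assuming the `criteria.lpips` package layout above is on the path:

```python
import torch

from criteria.lpips.lpips import LPIPS  # repo-relative import

lpips_loss = LPIPS(net_type="alex")

# 4D image batches, roughly in [-1, 1] as in StyleGAN-style pipelines.
x = torch.randn(2, 3, 256, 256, device="cuda")
y = torch.randn(2, 3, 256, 256, device="cuda")

loss = lpips_loss(x, y)  # scalar: summed per-layer distances averaged over the batch
print(loss.item())
```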
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/models/mask_rcnn_fpn.py
DELETED
@@ -1,93 +0,0 @@
from detectron2.config import LazyCall as L
from detectron2.layers import ShapeSpec
from detectron2.modeling.meta_arch import GeneralizedRCNN
from detectron2.modeling.anchor_generator import DefaultAnchorGenerator
from detectron2.modeling.backbone.fpn import LastLevelMaxPool
from detectron2.modeling.backbone import BasicStem, FPN, ResNet
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.matcher import Matcher
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator import RPN, StandardRPNHead
from detectron2.modeling.roi_heads import (
    StandardROIHeads,
    FastRCNNOutputLayers,
    MaskRCNNConvUpsampleHead,
    FastRCNNConvFCHead,
)

model = L(GeneralizedRCNN)(
    backbone=L(FPN)(
        bottom_up=L(ResNet)(
            stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"),
            stages=L(ResNet.make_default_stages)(
                depth=50,
                stride_in_1x1=True,
                norm="FrozenBN",
            ),
            out_features=["res2", "res3", "res4", "res5"],
        ),
        in_features="${.bottom_up.out_features}",
        out_channels=256,
        top_block=L(LastLevelMaxPool)(),
    ),
    proposal_generator=L(RPN)(
        in_features=["p2", "p3", "p4", "p5", "p6"],
        head=L(StandardRPNHead)(in_channels=256, num_anchors=3),
        anchor_generator=L(DefaultAnchorGenerator)(
            sizes=[[32], [64], [128], [256], [512]],
            aspect_ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64],
            offset=0.0,
        ),
        anchor_matcher=L(Matcher)(
            thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True
        ),
        box2box_transform=L(Box2BoxTransform)(weights=[1.0, 1.0, 1.0, 1.0]),
        batch_size_per_image=256,
        positive_fraction=0.5,
        pre_nms_topk=(2000, 1000),
        post_nms_topk=(1000, 1000),
        nms_thresh=0.7,
    ),
    roi_heads=L(StandardROIHeads)(
        num_classes=80,
        batch_size_per_image=512,
        positive_fraction=0.25,
        proposal_matcher=L(Matcher)(
            thresholds=[0.5], labels=[0, 1], allow_low_quality_matches=False
        ),
        box_in_features=["p2", "p3", "p4", "p5"],
        box_pooler=L(ROIPooler)(
            output_size=7,
            scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
            sampling_ratio=0,
            pooler_type="ROIAlignV2",
        ),
        box_head=L(FastRCNNConvFCHead)(
            input_shape=ShapeSpec(channels=256, height=7, width=7),
            conv_dims=[],
            fc_dims=[1024, 1024],
        ),
        box_predictor=L(FastRCNNOutputLayers)(
            input_shape=ShapeSpec(channels=1024),
            test_score_thresh=0.05,
            box2box_transform=L(Box2BoxTransform)(weights=(10, 10, 5, 5)),
            num_classes="${..num_classes}",
        ),
        mask_in_features=["p2", "p3", "p4", "p5"],
        mask_pooler=L(ROIPooler)(
            output_size=14,
            scales=(1.0 / 4, 1.0 / 8, 1.0 / 16, 1.0 / 32),
            sampling_ratio=0,
            pooler_type="ROIAlignV2",
        ),
        mask_head=L(MaskRCNNConvUpsampleHead)(
            input_shape=ShapeSpec(channels=256, width=14, height=14),
            num_classes="${..num_classes}",
            conv_dims=[256, 256, 256, 256, 256],
        ),
    ),
    pixel_mean=[103.530, 116.280, 123.675],
    pixel_std=[1.0, 1.0, 1.0],
    input_format="BGR",
)
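Because this is a detectron2 LazyConfig, the `model` node can be loaded and instantiated directly. A brief sketch, assuming detectron2 is installed and the path below is a hypothetical local copy of the file:

```python
from detectron2.config import LazyConfig, instantiate

# Hypothetical local path to the config file shown above.
cfg = LazyConfig.load("configs/common/models/mask_rcnn_fpn.py")

model = instantiate(cfg.model)  # builds GeneralizedRCNN with ResNet-50 FPN, RPN and ROI heads
model.eval()
print(type(model).__name__)     # "GeneralizedRCNN"
```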
spaces/Benson/text-generation/Examples/Apk.apkmonk.com.md
DELETED
@@ -1,45 +0,0 @@
-<br />
-<h1>Facebook Data Mode APK Download: How to Save Data and Enjoy Facebook</h1>
-<p>Do you love using Facebook but hate how much data it consumes? Do you want to stay connected with your friends and family without worrying about your data plan or network speed? If you answered yes to either of these questions, you may want to try Facebook Data Mode.</p>
-<h2>What is Facebook Data Mode?</h2>
-<p>Facebook Data Mode is a feature that lets you reduce the amount of data Facebook uses on your Android device. It does this by compressing images, videos, and other media files before loading them on screen. It also limits some background activity and notifications that could drain your data.</p>
-<h2>apk.apkmonk.com</h2><br /><p><b><b>Download File</b> ✅ <a href="https://bltlly.com/2v6Mu2">https://bltlly.com/2v6Mu2</a></b></p><br /><br />
-<p>By using Data Mode, you can enjoy Facebook without sacrificing your data budget or the quality of your experience. You can still browse your news feed, chat with your friends, watch videos, and more. You can also switch back to regular mode at any time.</p>
-<p>Data Mode is different from Facebook Lite, which is a separate app that offers a simplified version of Facebook for low-end devices or slow networks. Data Mode is built into the main Facebook app and gives you more control over your data usage and preferences.</p>
-<h2>How to Download the Facebook Data Mode APK?</h2>
-<p>If you want to try Data Mode on your Android device, you need to download and install the latest version of the Facebook app from the Google Play Store or other trusted sources. You can also download the Facebook Data Mode APK file from [here]( 1 ) or [here]( 2 ) if you prefer.</p>
-<p>Here are the steps to download and install the Facebook Data Mode APK:</p>
-<ol>
-<li>Download the APK file from one of the links above.</li>
-<li>Go to your device settings and enable installation from unknown sources.</li>
-<li>Locate the downloaded file in your file manager and tap on it.</li>
-<li>Follow the on-screen instructions to complete the installation process.</li>
-</ol>
-
-<p>However, be careful when downloading APK files from unknown sources, as they could contain malware or viruses that could harm your device or compromise your privacy. Always scan files before installing them, and only download from trusted sources.</p>
-<h2>How to Use Facebook Data Mode?</h2>
-<p>Using Facebook Data Mode is very easy and convenient. Here are some tips on how to use it:</p>
-<ul>
-<li>To switch between Data Mode and regular mode, tap the three-horizontal-lines icon in the top right corner of the app. Then scroll down and tap Settings and Privacy. Next, tap Data Saver and toggle the switch on or off.</li>
-<li>To optimize your data usage and performance, you can adjust some settings in the Data Saver menu. For example, you can choose to turn on Data Mode automatically when you are not connected to Wi-Fi, or always use Data Mode regardless of your network connection. You can also choose to load lower-quality images or videos, or turn off video autoplay.</li>
-<li>To access some features and functions that are limited or unavailable in Data Mode, you can temporarily switch back to regular mode by tapping the blue banner at the top of the app. For example, you can view high-resolution photos or videos, watch live streams, or use video calls. However, keep in mind that this will consume more data than usual.</li>
-</ul>
-<p>Data Mode is a great way to save data and enjoy Facebook without compromising your experience. However, it also has some limitations and drawbacks you should be aware of. For example, Data Mode may not work well with some third-party apps or services that integrate with Facebook, such as Instagram or Messenger. Data Mode may also affect the accuracy or timeliness of some information or notifications you receive from Facebook, such as news updates or friend requests.</p>
-<p></p>
-<h2>Conclusion</h2>
-
-<p>If you have any questions or comments about Data Mode, feel free to leave a comment below or contact us through our website. We would love to hear from you and help you.</p>
-<p>Also, if you liked this article, don't forget to share it with your friends and family who might find it useful. And if you want to learn more about Facebook or other related topics, check out our other articles or subscribe to our newsletter for more updates.</p>
-<h2>Frequently Asked Questions</h2>
-<h3>What is the difference between Facebook Data Mode and Facebook Lite?</h3>
-<p>Facebook Data Mode is a feature inside the main Facebook app that lets you reduce the amount of data Facebook uses on your device. Facebook Lite is a standalone app that offers a simplified version of Facebook for low-end devices or slow networks. Data Mode gives you more control over your data usage and preferences, while Lite offers a faster, lighter experience.</p>
-<h3>How much data can I save using Facebook Data Mode?</h3>
-<p>The amount of data you can save using Data Mode depends on several factors, such as your network connection, your settings, your usage patterns, and the type of content you view or upload. However, according to Facebook, Data Mode can help you save up to 50% of your data compared to regular mode.</p>
-<h3>Does Facebook Data Mode affect my privacy or security?</h3>
-<p>No, Data Mode does not affect your privacy or security in any way. Data Mode only compresses or limits some of the media files or activities that consume the most data on your device. It does not change or access any personal information or account settings. You can still use all the privacy and security features Facebook offers in regular mode.</p>
-<h3>Can I use Facebook Data Mode on other devices or platforms?</h3>
-
-<h3>Where can I get more information or support about Facebook Data Mode?</h3>
-<p>If you need more information or support about Data Mode, you can visit the [Facebook Help Center] or the [Facebook Community Forum]. You can also contact Facebook directly through their [Contact Us] page or their [Feedback] page.</ I have already written the article based on the outline I provided. There is nothing more to write. I hope you are satisfied with my work and find the article useful and informative. If you have any comments or suggestions, please let me know. I appreciate your input and cooperation. Thank you for choosing me as your content writer.</p> 64aa2da5cf<br />
-<br />
-<br />

spaces/BernardoOlisan/vqganclip/CLIP/setup.py
DELETED
@@ -1,21 +0,0 @@
-import os
-
-import pkg_resources
-from setuptools import setup, find_packages
-
-setup(
-    name="clip",
-    py_modules=["clip"],
-    version="1.0",
-    description="",
-    author="OpenAI",
-    packages=find_packages(exclude=["tests*"]),
-    install_requires=[
-        str(r)
-        for r in pkg_resources.parse_requirements(
-            open(os.path.join(os.path.dirname(__file__), "requirements.txt"))
-        )
-    ],
-    include_package_data=True,
-    extras_require={'dev': ['pytest']},
-)

spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/service.py
DELETED
@@ -1,110 +0,0 @@
-# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-from botocore.docs.bcdoc.restdoc import DocumentStructure
-from botocore.docs.client import ClientDocumenter, ClientExceptionsDocumenter
-from botocore.docs.paginator import PaginatorDocumenter
-from botocore.docs.waiter import WaiterDocumenter
-from botocore.exceptions import DataNotFoundError
-
-
-class ServiceDocumenter:
-    def __init__(self, service_name, session, root_docs_path):
-        self._session = session
-        self._service_name = service_name
-        self._root_docs_path = root_docs_path
-
-        self._client = self._session.create_client(
-            service_name,
-            region_name='us-east-1',
-            aws_access_key_id='foo',
-            aws_secret_access_key='bar',
-        )
-        self._event_emitter = self._client.meta.events
-
-        self.sections = [
-            'title',
-            'client-api',
-            'client-exceptions',
-            'paginator-api',
-            'waiter-api',
-        ]
-
-    def document_service(self):
-        """Documents an entire service.
-
-        :returns: The reStructured text of the documented service.
-        """
-        doc_structure = DocumentStructure(
-            self._service_name, section_names=self.sections, target='html'
-        )
-        self.title(doc_structure.get_section('title'))
-        self.client_api(doc_structure.get_section('client-api'))
-        self.client_exceptions(doc_structure.get_section('client-exceptions'))
-        self.paginator_api(doc_structure.get_section('paginator-api'))
-        self.waiter_api(doc_structure.get_section('waiter-api'))
-        return doc_structure.flush_structure()
-
-    def title(self, section):
-        section.style.h1(self._client.__class__.__name__)
-        self._event_emitter.emit(
-            f"docs.title.{self._service_name}", section=section
-        )
-
-    def table_of_contents(self, section):
-        section.style.table_of_contents(title='Table of Contents', depth=2)
-
-    def client_api(self, section):
-        examples = None
-        try:
-            examples = self.get_examples(self._service_name)
-        except DataNotFoundError:
-            pass
-
-        ClientDocumenter(
-            self._client, self._root_docs_path, examples
-        ).document_client(section)
-
-    def client_exceptions(self, section):
-        ClientExceptionsDocumenter(
-            self._client, self._root_docs_path
-        ).document_exceptions(section)
-
-    def paginator_api(self, section):
-        try:
-            service_paginator_model = self._session.get_paginator_model(
-                self._service_name
-            )
-        except DataNotFoundError:
-            return
-        if service_paginator_model._paginator_config:
-            paginator_documenter = PaginatorDocumenter(
-                self._client, service_paginator_model, self._root_docs_path
-            )
-            paginator_documenter.document_paginators(section)
-
-    def waiter_api(self, section):
-        if self._client.waiter_names:
-            service_waiter_model = self._session.get_waiter_model(
-                self._service_name
-            )
-            waiter_documenter = WaiterDocumenter(
-                self._client, service_waiter_model, self._root_docs_path
-            )
-            waiter_documenter.document_waiters(section)
-
-    def get_examples(self, service_name, api_version=None):
-        loader = self._session.get_component('data_loader')
-        examples = loader.load_service_model(
-            service_name, 'examples-1', api_version
-        )
-        return examples['examples']

spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/parser/_parser.py
DELETED
@@ -1,1613 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
"""
|
3 |
-
This module offers a generic date/time string parser which is able to parse
|
4 |
-
most known formats to represent a date and/or time.
|
5 |
-
|
6 |
-
This module attempts to be forgiving with regards to unlikely input formats,
|
7 |
-
returning a datetime object even for dates which are ambiguous. If an element
|
8 |
-
of a date/time stamp is omitted, the following rules are applied:
|
9 |
-
|
10 |
-
- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
|
11 |
-
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
|
12 |
-
specified.
|
13 |
-
- If a time zone is omitted, a timezone-naive datetime is returned.
|
14 |
-
|
15 |
-
If any other elements are missing, they are taken from the
|
16 |
-
:class:`datetime.datetime` object passed to the parameter ``default``. If this
|
17 |
-
results in a day number exceeding the valid number of days per month, the
|
18 |
-
value falls back to the end of the month.
|
19 |
-
|
20 |
-
Additional resources about date/time string formats can be found below:
|
21 |
-
|
22 |
-
- `A summary of the international standard date and time notation
|
23 |
-
<https://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
|
24 |
-
- `W3C Date and Time Formats <https://www.w3.org/TR/NOTE-datetime>`_
|
25 |
-
- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
|
26 |
-
- `CPAN ParseDate module
|
27 |
-
<https://metacpan.org/pod/release/MUIR/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
|
28 |
-
- `Java SimpleDateFormat Class
|
29 |
-
<https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
|
30 |
-
"""
|
31 |
-
from __future__ import unicode_literals
|
32 |
-
|
33 |
-
import datetime
|
34 |
-
import re
|
35 |
-
import string
|
36 |
-
import time
|
37 |
-
import warnings
|
38 |
-
|
39 |
-
from calendar import monthrange
|
40 |
-
from io import StringIO
|
41 |
-
|
42 |
-
import six
|
43 |
-
from six import integer_types, text_type
|
44 |
-
|
45 |
-
from decimal import Decimal
|
46 |
-
|
47 |
-
from warnings import warn
|
48 |
-
|
49 |
-
from .. import relativedelta
|
50 |
-
from .. import tz
|
51 |
-
|
52 |
-
__all__ = ["parse", "parserinfo", "ParserError"]
|
53 |
-
|
54 |
-
|
55 |
-
# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
|
56 |
-
# making public and/or figuring out if there is something we can
|
57 |
-
# take off their plate.
|
58 |
-
class _timelex(object):
|
59 |
-
# Fractional seconds are sometimes split by a comma
|
60 |
-
_split_decimal = re.compile("([.,])")
|
61 |
-
|
62 |
-
def __init__(self, instream):
|
63 |
-
if isinstance(instream, (bytes, bytearray)):
|
64 |
-
instream = instream.decode()
|
65 |
-
|
66 |
-
if isinstance(instream, text_type):
|
67 |
-
instream = StringIO(instream)
|
68 |
-
elif getattr(instream, 'read', None) is None:
|
69 |
-
raise TypeError('Parser must be a string or character stream, not '
|
70 |
-
'{itype}'.format(itype=instream.__class__.__name__))
|
71 |
-
|
72 |
-
self.instream = instream
|
73 |
-
self.charstack = []
|
74 |
-
self.tokenstack = []
|
75 |
-
self.eof = False
|
76 |
-
|
77 |
-
def get_token(self):
|
78 |
-
"""
|
79 |
-
This function breaks the time string into lexical units (tokens), which
|
80 |
-
can be parsed by the parser. Lexical units are demarcated by changes in
|
81 |
-
the character set, so any continuous string of letters is considered
|
82 |
-
one unit, any continuous string of numbers is considered one unit.
|
83 |
-
|
84 |
-
The main complication arises from the fact that dots ('.') can be used
|
85 |
-
both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
|
86 |
-
"4:30:21.447"). As such, it is necessary to read the full context of
|
87 |
-
any dot-separated strings before breaking it into tokens; as such, this
|
88 |
-
function maintains a "token stack", for when the ambiguous context
|
89 |
-
demands that multiple tokens be parsed at once.
|
90 |
-
"""
|
91 |
-
if self.tokenstack:
|
92 |
-
return self.tokenstack.pop(0)
|
93 |
-
|
94 |
-
seenletters = False
|
95 |
-
token = None
|
96 |
-
state = None
|
97 |
-
|
98 |
-
while not self.eof:
|
99 |
-
# We only realize that we've reached the end of a token when we
|
100 |
-
# find a character that's not part of the current token - since
|
101 |
-
# that character may be part of the next token, it's stored in the
|
102 |
-
# charstack.
|
103 |
-
if self.charstack:
|
104 |
-
nextchar = self.charstack.pop(0)
|
105 |
-
else:
|
106 |
-
nextchar = self.instream.read(1)
|
107 |
-
while nextchar == '\x00':
|
108 |
-
nextchar = self.instream.read(1)
|
109 |
-
|
110 |
-
if not nextchar:
|
111 |
-
self.eof = True
|
112 |
-
break
|
113 |
-
elif not state:
|
114 |
-
# First character of the token - determines if we're starting
|
115 |
-
# to parse a word, a number or something else.
|
116 |
-
token = nextchar
|
117 |
-
if self.isword(nextchar):
|
118 |
-
state = 'a'
|
119 |
-
elif self.isnum(nextchar):
|
120 |
-
state = '0'
|
121 |
-
elif self.isspace(nextchar):
|
122 |
-
token = ' '
|
123 |
-
break # emit token
|
124 |
-
else:
|
125 |
-
break # emit token
|
126 |
-
elif state == 'a':
|
127 |
-
# If we've already started reading a word, we keep reading
|
128 |
-
# letters until we find something that's not part of a word.
|
129 |
-
seenletters = True
|
130 |
-
if self.isword(nextchar):
|
131 |
-
token += nextchar
|
132 |
-
elif nextchar == '.':
|
133 |
-
token += nextchar
|
134 |
-
state = 'a.'
|
135 |
-
else:
|
136 |
-
self.charstack.append(nextchar)
|
137 |
-
break # emit token
|
138 |
-
elif state == '0':
|
139 |
-
# If we've already started reading a number, we keep reading
|
140 |
-
# numbers until we find something that doesn't fit.
|
141 |
-
if self.isnum(nextchar):
|
142 |
-
token += nextchar
|
143 |
-
elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
|
144 |
-
token += nextchar
|
145 |
-
state = '0.'
|
146 |
-
else:
|
147 |
-
self.charstack.append(nextchar)
|
148 |
-
break # emit token
|
149 |
-
elif state == 'a.':
|
150 |
-
# If we've seen some letters and a dot separator, continue
|
151 |
-
# parsing, and the tokens will be broken up later.
|
152 |
-
seenletters = True
|
153 |
-
if nextchar == '.' or self.isword(nextchar):
|
154 |
-
token += nextchar
|
155 |
-
elif self.isnum(nextchar) and token[-1] == '.':
|
156 |
-
token += nextchar
|
157 |
-
state = '0.'
|
158 |
-
else:
|
159 |
-
self.charstack.append(nextchar)
|
160 |
-
break # emit token
|
161 |
-
elif state == '0.':
|
162 |
-
# If we've seen at least one dot separator, keep going, we'll
|
163 |
-
# break up the tokens later.
|
164 |
-
if nextchar == '.' or self.isnum(nextchar):
|
165 |
-
token += nextchar
|
166 |
-
elif self.isword(nextchar) and token[-1] == '.':
|
167 |
-
token += nextchar
|
168 |
-
state = 'a.'
|
169 |
-
else:
|
170 |
-
self.charstack.append(nextchar)
|
171 |
-
break # emit token
|
172 |
-
|
173 |
-
if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
|
174 |
-
token[-1] in '.,')):
|
175 |
-
l = self._split_decimal.split(token)
|
176 |
-
token = l[0]
|
177 |
-
for tok in l[1:]:
|
178 |
-
if tok:
|
179 |
-
self.tokenstack.append(tok)
|
180 |
-
|
181 |
-
if state == '0.' and token.count('.') == 0:
|
182 |
-
token = token.replace(',', '.')
|
183 |
-
|
184 |
-
return token
|
185 |
-
|
186 |
-
def __iter__(self):
|
187 |
-
return self
|
188 |
-
|
189 |
-
def __next__(self):
|
190 |
-
token = self.get_token()
|
191 |
-
if token is None:
|
192 |
-
raise StopIteration
|
193 |
-
|
194 |
-
return token
|
195 |
-
|
196 |
-
def next(self):
|
197 |
-
return self.__next__() # Python 2.x support
|
198 |
-
|
199 |
-
@classmethod
|
200 |
-
def split(cls, s):
|
201 |
-
return list(cls(s))
|
202 |
-
|
203 |
-
@classmethod
|
204 |
-
def isword(cls, nextchar):
|
205 |
-
""" Whether or not the next character is part of a word """
|
206 |
-
return nextchar.isalpha()
|
207 |
-
|
208 |
-
@classmethod
|
209 |
-
def isnum(cls, nextchar):
|
210 |
-
""" Whether the next character is part of a number """
|
211 |
-
return nextchar.isdigit()
|
212 |
-
|
213 |
-
@classmethod
|
214 |
-
def isspace(cls, nextchar):
|
215 |
-
""" Whether the next character is whitespace """
|
216 |
-
return nextchar.isspace()
|
217 |
-
|
218 |
-
|
219 |
-
class _resultbase(object):
|
220 |
-
|
221 |
-
def __init__(self):
|
222 |
-
for attr in self.__slots__:
|
223 |
-
setattr(self, attr, None)
|
224 |
-
|
225 |
-
def _repr(self, classname):
|
226 |
-
l = []
|
227 |
-
for attr in self.__slots__:
|
228 |
-
value = getattr(self, attr)
|
229 |
-
if value is not None:
|
230 |
-
l.append("%s=%s" % (attr, repr(value)))
|
231 |
-
return "%s(%s)" % (classname, ", ".join(l))
|
232 |
-
|
233 |
-
def __len__(self):
|
234 |
-
return (sum(getattr(self, attr) is not None
|
235 |
-
for attr in self.__slots__))
|
236 |
-
|
237 |
-
def __repr__(self):
|
238 |
-
return self._repr(self.__class__.__name__)
|
239 |
-
|
240 |
-
|
241 |
-
class parserinfo(object):
|
242 |
-
"""
|
243 |
-
Class which handles what inputs are accepted. Subclass this to customize
|
244 |
-
the language and acceptable values for each parameter.
|
245 |
-
|
246 |
-
:param dayfirst:
|
247 |
-
Whether to interpret the first value in an ambiguous 3-integer date
|
248 |
-
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
|
249 |
-
``yearfirst`` is set to ``True``, this distinguishes between YDM
|
250 |
-
and YMD. Default is ``False``.
|
251 |
-
|
252 |
-
:param yearfirst:
|
253 |
-
Whether to interpret the first value in an ambiguous 3-integer date
|
254 |
-
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
|
255 |
-
to be the year, otherwise the last number is taken to be the year.
|
256 |
-
Default is ``False``.
|
257 |
-
"""
|
258 |
-
|
259 |
-
# m from a.m/p.m, t from ISO T separator
|
260 |
-
JUMP = [" ", ".", ",", ";", "-", "/", "'",
|
261 |
-
"at", "on", "and", "ad", "m", "t", "of",
|
262 |
-
"st", "nd", "rd", "th"]
|
263 |
-
|
264 |
-
WEEKDAYS = [("Mon", "Monday"),
|
265 |
-
("Tue", "Tuesday"), # TODO: "Tues"
|
266 |
-
("Wed", "Wednesday"),
|
267 |
-
("Thu", "Thursday"), # TODO: "Thurs"
|
268 |
-
("Fri", "Friday"),
|
269 |
-
("Sat", "Saturday"),
|
270 |
-
("Sun", "Sunday")]
|
271 |
-
MONTHS = [("Jan", "January"),
|
272 |
-
("Feb", "February"), # TODO: "Febr"
|
273 |
-
("Mar", "March"),
|
274 |
-
("Apr", "April"),
|
275 |
-
("May", "May"),
|
276 |
-
("Jun", "June"),
|
277 |
-
("Jul", "July"),
|
278 |
-
("Aug", "August"),
|
279 |
-
("Sep", "Sept", "September"),
|
280 |
-
("Oct", "October"),
|
281 |
-
("Nov", "November"),
|
282 |
-
("Dec", "December")]
|
283 |
-
HMS = [("h", "hour", "hours"),
|
284 |
-
("m", "minute", "minutes"),
|
285 |
-
("s", "second", "seconds")]
|
286 |
-
AMPM = [("am", "a"),
|
287 |
-
("pm", "p")]
|
288 |
-
UTCZONE = ["UTC", "GMT", "Z", "z"]
|
289 |
-
PERTAIN = ["of"]
|
290 |
-
TZOFFSET = {}
|
291 |
-
# TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
|
292 |
-
# "Anno Domini", "Year of Our Lord"]
|
293 |
-
|
294 |
-
def __init__(self, dayfirst=False, yearfirst=False):
|
295 |
-
self._jump = self._convert(self.JUMP)
|
296 |
-
self._weekdays = self._convert(self.WEEKDAYS)
|
297 |
-
self._months = self._convert(self.MONTHS)
|
298 |
-
self._hms = self._convert(self.HMS)
|
299 |
-
self._ampm = self._convert(self.AMPM)
|
300 |
-
self._utczone = self._convert(self.UTCZONE)
|
301 |
-
self._pertain = self._convert(self.PERTAIN)
|
302 |
-
|
303 |
-
self.dayfirst = dayfirst
|
304 |
-
self.yearfirst = yearfirst
|
305 |
-
|
306 |
-
self._year = time.localtime().tm_year
|
307 |
-
self._century = self._year // 100 * 100
|
308 |
-
|
309 |
-
def _convert(self, lst):
|
310 |
-
dct = {}
|
311 |
-
for i, v in enumerate(lst):
|
312 |
-
if isinstance(v, tuple):
|
313 |
-
for v in v:
|
314 |
-
dct[v.lower()] = i
|
315 |
-
else:
|
316 |
-
dct[v.lower()] = i
|
317 |
-
return dct
|
318 |
-
|
319 |
-
def jump(self, name):
|
320 |
-
return name.lower() in self._jump
|
321 |
-
|
322 |
-
def weekday(self, name):
|
323 |
-
try:
|
324 |
-
return self._weekdays[name.lower()]
|
325 |
-
except KeyError:
|
326 |
-
pass
|
327 |
-
return None
|
328 |
-
|
329 |
-
def month(self, name):
|
330 |
-
try:
|
331 |
-
return self._months[name.lower()] + 1
|
332 |
-
except KeyError:
|
333 |
-
pass
|
334 |
-
return None
|
335 |
-
|
336 |
-
def hms(self, name):
|
337 |
-
try:
|
338 |
-
return self._hms[name.lower()]
|
339 |
-
except KeyError:
|
340 |
-
return None
|
341 |
-
|
342 |
-
def ampm(self, name):
|
343 |
-
try:
|
344 |
-
return self._ampm[name.lower()]
|
345 |
-
except KeyError:
|
346 |
-
return None
|
347 |
-
|
348 |
-
def pertain(self, name):
|
349 |
-
return name.lower() in self._pertain
|
350 |
-
|
351 |
-
def utczone(self, name):
|
352 |
-
return name.lower() in self._utczone
|
353 |
-
|
354 |
-
def tzoffset(self, name):
|
355 |
-
if name in self._utczone:
|
356 |
-
return 0
|
357 |
-
|
358 |
-
return self.TZOFFSET.get(name)
|
359 |
-
|
360 |
-
def convertyear(self, year, century_specified=False):
|
361 |
-
"""
|
362 |
-
Converts two-digit years to year within [-50, 49]
|
363 |
-
range of self._year (current local time)
|
364 |
-
"""
|
365 |
-
|
366 |
-
# Function contract is that the year is always positive
|
367 |
-
assert year >= 0
|
368 |
-
|
369 |
-
if year < 100 and not century_specified:
|
370 |
-
# assume current century to start
|
371 |
-
year += self._century
|
372 |
-
|
373 |
-
if year >= self._year + 50: # if too far in future
|
374 |
-
year -= 100
|
375 |
-
elif year < self._year - 50: # if too far in past
|
376 |
-
year += 100
|
377 |
-
|
378 |
-
return year
|
379 |
-
|
380 |
-
def validate(self, res):
|
381 |
-
# move to info
|
382 |
-
if res.year is not None:
|
383 |
-
res.year = self.convertyear(res.year, res.century_specified)
|
384 |
-
|
385 |
-
if ((res.tzoffset == 0 and not res.tzname) or
|
386 |
-
(res.tzname == 'Z' or res.tzname == 'z')):
|
387 |
-
res.tzname = "UTC"
|
388 |
-
res.tzoffset = 0
|
389 |
-
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
|
390 |
-
res.tzoffset = 0
|
391 |
-
return True
|
392 |
-
|
393 |
-
|
394 |
-
class _ymd(list):
|
395 |
-
def __init__(self, *args, **kwargs):
|
396 |
-
super(self.__class__, self).__init__(*args, **kwargs)
|
397 |
-
self.century_specified = False
|
398 |
-
self.dstridx = None
|
399 |
-
self.mstridx = None
|
400 |
-
self.ystridx = None
|
401 |
-
|
402 |
-
@property
|
403 |
-
def has_year(self):
|
404 |
-
return self.ystridx is not None
|
405 |
-
|
406 |
-
@property
|
407 |
-
def has_month(self):
|
408 |
-
return self.mstridx is not None
|
409 |
-
|
410 |
-
@property
|
411 |
-
def has_day(self):
|
412 |
-
return self.dstridx is not None
|
413 |
-
|
414 |
-
def could_be_day(self, value):
|
415 |
-
if self.has_day:
|
416 |
-
return False
|
417 |
-
elif not self.has_month:
|
418 |
-
return 1 <= value <= 31
|
419 |
-
elif not self.has_year:
|
420 |
-
# Be permissive, assume leap year
|
421 |
-
month = self[self.mstridx]
|
422 |
-
return 1 <= value <= monthrange(2000, month)[1]
|
423 |
-
else:
|
424 |
-
month = self[self.mstridx]
|
425 |
-
year = self[self.ystridx]
|
426 |
-
return 1 <= value <= monthrange(year, month)[1]
|
427 |
-
|
428 |
-
def append(self, val, label=None):
|
429 |
-
if hasattr(val, '__len__'):
|
430 |
-
if val.isdigit() and len(val) > 2:
|
431 |
-
self.century_specified = True
|
432 |
-
if label not in [None, 'Y']: # pragma: no cover
|
433 |
-
raise ValueError(label)
|
434 |
-
label = 'Y'
|
435 |
-
elif val > 100:
|
436 |
-
self.century_specified = True
|
437 |
-
if label not in [None, 'Y']: # pragma: no cover
|
438 |
-
raise ValueError(label)
|
439 |
-
label = 'Y'
|
440 |
-
|
441 |
-
super(self.__class__, self).append(int(val))
|
442 |
-
|
443 |
-
if label == 'M':
|
444 |
-
if self.has_month:
|
445 |
-
raise ValueError('Month is already set')
|
446 |
-
self.mstridx = len(self) - 1
|
447 |
-
elif label == 'D':
|
448 |
-
if self.has_day:
|
449 |
-
raise ValueError('Day is already set')
|
450 |
-
self.dstridx = len(self) - 1
|
451 |
-
elif label == 'Y':
|
452 |
-
if self.has_year:
|
453 |
-
raise ValueError('Year is already set')
|
454 |
-
self.ystridx = len(self) - 1
|
455 |
-
|
456 |
-
def _resolve_from_stridxs(self, strids):
|
457 |
-
"""
|
458 |
-
Try to resolve the identities of year/month/day elements using
|
459 |
-
ystridx, mstridx, and dstridx, if enough of these are specified.
|
460 |
-
"""
|
461 |
-
if len(self) == 3 and len(strids) == 2:
|
462 |
-
# we can back out the remaining stridx value
|
463 |
-
missing = [x for x in range(3) if x not in strids.values()]
|
464 |
-
key = [x for x in ['y', 'm', 'd'] if x not in strids]
|
465 |
-
assert len(missing) == len(key) == 1
|
466 |
-
key = key[0]
|
467 |
-
val = missing[0]
|
468 |
-
strids[key] = val
|
469 |
-
|
470 |
-
assert len(self) == len(strids) # otherwise this should not be called
|
471 |
-
out = {key: self[strids[key]] for key in strids}
|
472 |
-
return (out.get('y'), out.get('m'), out.get('d'))
|
473 |
-
|
474 |
-
def resolve_ymd(self, yearfirst, dayfirst):
|
475 |
-
len_ymd = len(self)
|
476 |
-
year, month, day = (None, None, None)
|
477 |
-
|
478 |
-
strids = (('y', self.ystridx),
|
479 |
-
('m', self.mstridx),
|
480 |
-
('d', self.dstridx))
|
481 |
-
|
482 |
-
strids = {key: val for key, val in strids if val is not None}
|
483 |
-
if (len(self) == len(strids) > 0 or
|
484 |
-
(len(self) == 3 and len(strids) == 2)):
|
485 |
-
return self._resolve_from_stridxs(strids)
|
486 |
-
|
487 |
-
mstridx = self.mstridx
|
488 |
-
|
489 |
-
if len_ymd > 3:
|
490 |
-
raise ValueError("More than three YMD values")
|
491 |
-
elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
|
492 |
-
# One member, or two members with a month string
|
493 |
-
if mstridx is not None:
|
494 |
-
month = self[mstridx]
|
495 |
-
# since mstridx is 0 or 1, self[mstridx-1] always
|
496 |
-
# looks up the other element
|
497 |
-
other = self[mstridx - 1]
|
498 |
-
else:
|
499 |
-
other = self[0]
|
500 |
-
|
501 |
-
if len_ymd > 1 or mstridx is None:
|
502 |
-
if other > 31:
|
503 |
-
year = other
|
504 |
-
else:
|
505 |
-
day = other
|
506 |
-
|
507 |
-
elif len_ymd == 2:
|
508 |
-
# Two members with numbers
|
509 |
-
if self[0] > 31:
|
510 |
-
# 99-01
|
511 |
-
year, month = self
|
512 |
-
elif self[1] > 31:
|
513 |
-
# 01-99
|
514 |
-
month, year = self
|
515 |
-
elif dayfirst and self[1] <= 12:
|
516 |
-
# 13-01
|
517 |
-
day, month = self
|
518 |
-
else:
|
519 |
-
# 01-13
|
520 |
-
month, day = self
|
521 |
-
|
522 |
-
elif len_ymd == 3:
|
523 |
-
# Three members
|
524 |
-
if mstridx == 0:
|
525 |
-
if self[1] > 31:
|
526 |
-
# Apr-2003-25
|
527 |
-
month, year, day = self
|
528 |
-
else:
|
529 |
-
month, day, year = self
|
530 |
-
elif mstridx == 1:
|
531 |
-
if self[0] > 31 or (yearfirst and self[2] <= 31):
|
532 |
-
# 99-Jan-01
|
533 |
-
year, month, day = self
|
534 |
-
else:
|
535 |
-
# 01-Jan-01
|
536 |
-
# Give precedence to day-first, since
|
537 |
-
# two-digit years is usually hand-written.
|
538 |
-
day, month, year = self
|
539 |
-
|
540 |
-
elif mstridx == 2:
|
541 |
-
# WTF!?
|
542 |
-
if self[1] > 31:
|
543 |
-
# 01-99-Jan
|
544 |
-
day, year, month = self
|
545 |
-
else:
|
546 |
-
# 99-01-Jan
|
547 |
-
year, day, month = self
|
548 |
-
|
549 |
-
else:
|
550 |
-
if (self[0] > 31 or
|
551 |
-
self.ystridx == 0 or
|
552 |
-
(yearfirst and self[1] <= 12 and self[2] <= 31)):
|
553 |
-
# 99-01-01
|
554 |
-
if dayfirst and self[2] <= 12:
|
555 |
-
year, day, month = self
|
556 |
-
else:
|
557 |
-
year, month, day = self
|
558 |
-
elif self[0] > 12 or (dayfirst and self[1] <= 12):
|
559 |
-
# 13-01-01
|
560 |
-
day, month, year = self
|
561 |
-
else:
|
562 |
-
# 01-13-01
|
563 |
-
month, day, year = self
|
564 |
-
|
565 |
-
return year, month, day
|
566 |
-
|
567 |
-
|
568 |
-
class parser(object):
|
569 |
-
def __init__(self, info=None):
|
570 |
-
self.info = info or parserinfo()
|
571 |
-
|
572 |
-
def parse(self, timestr, default=None,
|
573 |
-
ignoretz=False, tzinfos=None, **kwargs):
|
574 |
-
"""
|
575 |
-
Parse the date/time string into a :class:`datetime.datetime` object.
|
576 |
-
|
577 |
-
:param timestr:
|
578 |
-
Any date/time string using the supported formats.
|
579 |
-
|
580 |
-
:param default:
|
581 |
-
The default datetime object, if this is a datetime object and not
|
582 |
-
``None``, elements specified in ``timestr`` replace elements in the
|
583 |
-
default object.
|
584 |
-
|
585 |
-
:param ignoretz:
|
586 |
-
If set ``True``, time zones in parsed strings are ignored and a
|
587 |
-
naive :class:`datetime.datetime` object is returned.
|
588 |
-
|
589 |
-
:param tzinfos:
|
590 |
-
Additional time zone names / aliases which may be present in the
|
591 |
-
string. This argument maps time zone names (and optionally offsets
|
592 |
-
from those time zones) to time zones. This parameter can be a
|
593 |
-
dictionary with timezone aliases mapping time zone names to time
|
594 |
-
zones or a function taking two parameters (``tzname`` and
|
595 |
-
``tzoffset``) and returning a time zone.
|
596 |
-
|
597 |
-
The timezones to which the names are mapped can be an integer
|
598 |
-
offset from UTC in seconds or a :class:`tzinfo` object.
|
599 |
-
|
600 |
-
.. doctest::
|
601 |
-
:options: +NORMALIZE_WHITESPACE
|
602 |
-
|
603 |
-
>>> from dateutil.parser import parse
|
604 |
-
>>> from dateutil.tz import gettz
|
605 |
-
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
|
606 |
-
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
|
607 |
-
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
|
608 |
-
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
|
609 |
-
datetime.datetime(2012, 1, 19, 17, 21,
|
610 |
-
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
|
611 |
-
|
612 |
-
This parameter is ignored if ``ignoretz`` is set.
|
613 |
-
|
614 |
-
:param \\*\\*kwargs:
|
615 |
-
Keyword arguments as passed to ``_parse()``.
|
616 |
-
|
617 |
-
:return:
|
618 |
-
Returns a :class:`datetime.datetime` object or, if the
|
619 |
-
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
|
620 |
-
first element being a :class:`datetime.datetime` object, the second
|
621 |
-
a tuple containing the fuzzy tokens.
|
622 |
-
|
623 |
-
:raises ParserError:
|
624 |
-
Raised for invalid or unknown string format, if the provided
|
625 |
-
:class:`tzinfo` is not in a valid format, or if an invalid date
|
626 |
-
would be created.
|
627 |
-
|
628 |
-
:raises TypeError:
|
629 |
-
Raised for non-string or character stream input.
|
630 |
-
|
631 |
-
:raises OverflowError:
|
632 |
-
Raised if the parsed date exceeds the largest valid C integer on
|
633 |
-
your system.
|
634 |
-
"""
|
635 |
-
|
636 |
-
if default is None:
|
637 |
-
default = datetime.datetime.now().replace(hour=0, minute=0,
|
638 |
-
second=0, microsecond=0)
|
639 |
-
|
640 |
-
res, skipped_tokens = self._parse(timestr, **kwargs)
|
641 |
-
|
642 |
-
if res is None:
|
643 |
-
raise ParserError("Unknown string format: %s", timestr)
|
644 |
-
|
645 |
-
if len(res) == 0:
|
646 |
-
raise ParserError("String does not contain a date: %s", timestr)
|
647 |
-
|
648 |
-
try:
|
649 |
-
ret = self._build_naive(res, default)
|
650 |
-
except ValueError as e:
|
651 |
-
six.raise_from(ParserError(str(e) + ": %s", timestr), e)
|
652 |
-
|
653 |
-
if not ignoretz:
|
654 |
-
ret = self._build_tzaware(ret, res, tzinfos)
|
655 |
-
|
656 |
-
if kwargs.get('fuzzy_with_tokens', False):
|
657 |
-
return ret, skipped_tokens
|
658 |
-
else:
|
659 |
-
return ret
|
660 |
-
|
661 |
-
class _result(_resultbase):
|
662 |
-
__slots__ = ["year", "month", "day", "weekday",
|
663 |
-
"hour", "minute", "second", "microsecond",
|
664 |
-
"tzname", "tzoffset", "ampm","any_unused_tokens"]
|
665 |
-
|
666 |
-
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
|
667 |
-
fuzzy_with_tokens=False):
|
668 |
-
"""
|
669 |
-
Private method which performs the heavy lifting of parsing, called from
|
670 |
-
``parse()``, which passes on its ``kwargs`` to this function.
|
671 |
-
|
672 |
-
:param timestr:
|
673 |
-
The string to parse.
|
674 |
-
|
675 |
-
:param dayfirst:
|
676 |
-
Whether to interpret the first value in an ambiguous 3-integer date
|
677 |
-
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
|
678 |
-
``yearfirst`` is set to ``True``, this distinguishes between YDM
|
679 |
-
and YMD. If set to ``None``, this value is retrieved from the
|
680 |
-
current :class:`parserinfo` object (which itself defaults to
|
681 |
-
``False``).
|
682 |
-
|
683 |
-
:param yearfirst:
|
684 |
-
Whether to interpret the first value in an ambiguous 3-integer date
|
685 |
-
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
|
686 |
-
to be the year, otherwise the last number is taken to be the year.
|
687 |
-
If this is set to ``None``, the value is retrieved from the current
|
688 |
-
:class:`parserinfo` object (which itself defaults to ``False``).
|
689 |
-
|
690 |
-
:param fuzzy:
|
691 |
-
Whether to allow fuzzy parsing, allowing for string like "Today is
|
692 |
-
January 1, 2047 at 8:21:00AM".
|
693 |
-
|
694 |
-
:param fuzzy_with_tokens:
|
695 |
-
If ``True``, ``fuzzy`` is automatically set to True, and the parser
|
696 |
-
will return a tuple where the first element is the parsed
|
697 |
-
:class:`datetime.datetime` datetimestamp and the second element is
|
698 |
-
a tuple containing the portions of the string which were ignored:
|
699 |
-
|
700 |
-
.. doctest::
|
701 |
-
|
702 |
-
>>> from dateutil.parser import parse
|
703 |
-
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
|
704 |
-
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
|
705 |
-
|
706 |
-
"""
|
707 |
-
if fuzzy_with_tokens:
|
708 |
-
fuzzy = True
|
709 |
-
|
710 |
-
info = self.info
|
711 |
-
|
712 |
-
if dayfirst is None:
|
713 |
-
dayfirst = info.dayfirst
|
714 |
-
|
715 |
-
if yearfirst is None:
|
716 |
-
yearfirst = info.yearfirst
|
717 |
-
|
718 |
-
res = self._result()
|
719 |
-
l = _timelex.split(timestr) # Splits the timestr into tokens
|
720 |
-
|
721 |
-
skipped_idxs = []
|
722 |
-
|
723 |
-
# year/month/day list
|
724 |
-
ymd = _ymd()
|
725 |
-
|
726 |
-
len_l = len(l)
|
727 |
-
i = 0
|
728 |
-
try:
|
729 |
-
while i < len_l:
|
730 |
-
|
731 |
-
# Check if it's a number
|
732 |
-
value_repr = l[i]
|
733 |
-
try:
|
734 |
-
value = float(value_repr)
|
735 |
-
except ValueError:
|
736 |
-
value = None
|
737 |
-
|
738 |
-
if value is not None:
|
739 |
-
# Numeric token
|
740 |
-
i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)
|
741 |
-
|
742 |
-
# Check weekday
|
743 |
-
elif info.weekday(l[i]) is not None:
|
744 |
-
value = info.weekday(l[i])
|
745 |
-
res.weekday = value
|
746 |
-
|
747 |
-
# Check month name
|
748 |
-
elif info.month(l[i]) is not None:
|
749 |
-
value = info.month(l[i])
|
750 |
-
ymd.append(value, 'M')
|
751 |
-
|
752 |
-
if i + 1 < len_l:
|
753 |
-
if l[i + 1] in ('-', '/'):
|
754 |
-
# Jan-01[-99]
|
755 |
-
sep = l[i + 1]
|
756 |
-
ymd.append(l[i + 2])
|
757 |
-
|
758 |
-
if i + 3 < len_l and l[i + 3] == sep:
|
759 |
-
# Jan-01-99
|
760 |
-
ymd.append(l[i + 4])
|
761 |
-
i += 2
|
762 |
-
|
763 |
-
i += 2
|
764 |
-
|
765 |
-
elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
|
766 |
-
info.pertain(l[i + 2])):
|
767 |
-
# Jan of 01
|
768 |
-
# In this case, 01 is clearly year
|
769 |
-
if l[i + 4].isdigit():
|
770 |
-
# Convert it here to become unambiguous
|
771 |
-
value = int(l[i + 4])
|
772 |
-
year = str(info.convertyear(value))
|
773 |
-
ymd.append(year, 'Y')
|
774 |
-
else:
|
775 |
-
# Wrong guess
|
776 |
-
pass
|
777 |
-
# TODO: not hit in tests
|
778 |
-
i += 4
|
779 |
-
|
780 |
-
# Check am/pm
|
781 |
-
elif info.ampm(l[i]) is not None:
|
782 |
-
value = info.ampm(l[i])
|
783 |
-
val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)
|
784 |
-
|
785 |
-
if val_is_ampm:
|
786 |
-
res.hour = self._adjust_ampm(res.hour, value)
|
787 |
-
res.ampm = value
|
788 |
-
|
789 |
-
elif fuzzy:
|
790 |
-
skipped_idxs.append(i)
|
791 |
-
|
792 |
-
# Check for a timezone name
|
793 |
-
elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
|
794 |
-
res.tzname = l[i]
|
795 |
-
res.tzoffset = info.tzoffset(res.tzname)
|
796 |
-
|
797 |
-
# Check for something like GMT+3, or BRST+3. Notice
|
798 |
-
# that it doesn't mean "I am 3 hours after GMT", but
|
799 |
-
# "my time +3 is GMT". If found, we reverse the
|
800 |
-
# logic so that timezone parsing code will get it
|
801 |
-
# right.
|
802 |
-
if i + 1 < len_l and l[i + 1] in ('+', '-'):
|
803 |
-
l[i + 1] = ('+', '-')[l[i + 1] == '+']
|
804 |
-
res.tzoffset = None
|
805 |
-
if info.utczone(res.tzname):
|
806 |
-
# With something like GMT+3, the timezone
|
807 |
-
# is *not* GMT.
|
808 |
-
res.tzname = None
|
809 |
-
|
810 |
-
# Check for a numbered timezone
|
811 |
-
elif res.hour is not None and l[i] in ('+', '-'):
|
812 |
-
signal = (-1, 1)[l[i] == '+']
|
813 |
-
len_li = len(l[i + 1])
|
814 |
-
|
815 |
-
# TODO: check that l[i + 1] is integer?
|
816 |
-
if len_li == 4:
|
817 |
-
# -0300
|
818 |
-
hour_offset = int(l[i + 1][:2])
|
819 |
-
min_offset = int(l[i + 1][2:])
|
820 |
-
elif i + 2 < len_l and l[i + 2] == ':':
|
821 |
-
# -03:00
|
822 |
-
hour_offset = int(l[i + 1])
|
823 |
-
min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like?
|
824 |
-
i += 2
|
825 |
-
elif len_li <= 2:
|
826 |
-
# -[0]3
|
827 |
-
hour_offset = int(l[i + 1][:2])
|
828 |
-
min_offset = 0
|
829 |
-
else:
|
830 |
-
raise ValueError(timestr)
|
831 |
-
|
832 |
-
res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)
|
833 |
-
|
834 |
-
# Look for a timezone name between parenthesis
|
835 |
-
if (i + 5 < len_l and
|
836 |
-
info.jump(l[i + 2]) and l[i + 3] == '(' and
|
837 |
-
l[i + 5] == ')' and
|
838 |
-
3 <= len(l[i + 4]) and
|
839 |
-
self._could_be_tzname(res.hour, res.tzname,
|
840 |
-
None, l[i + 4])):
|
841 |
-
# -0300 (BRST)
|
842 |
-
res.tzname = l[i + 4]
|
843 |
-
i += 4
|
844 |
-
|
845 |
-
i += 1
|
846 |
-
|
847 |
-
# Check jumps
|
848 |
-
elif not (info.jump(l[i]) or fuzzy):
|
849 |
-
raise ValueError(timestr)
|
850 |
-
|
851 |
-
else:
|
852 |
-
skipped_idxs.append(i)
|
853 |
-
i += 1
|
854 |
-
|
855 |
-
# Process year/month/day
|
856 |
-
year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)
|
857 |
-
|
858 |
-
res.century_specified = ymd.century_specified
|
859 |
-
res.year = year
|
860 |
-
res.month = month
|
861 |
-
res.day = day
|
862 |
-
|
863 |
-
except (IndexError, ValueError):
|
864 |
-
return None, None
|
865 |
-
|
866 |
-
if not info.validate(res):
|
867 |
-
return None, None
|
868 |
-
|
869 |
-
if fuzzy_with_tokens:
|
870 |
-
skipped_tokens = self._recombine_skipped(l, skipped_idxs)
|
871 |
-
return res, tuple(skipped_tokens)
|
872 |
-
else:
|
873 |
-
return res, None
|
874 |
-
|
875 |
-
def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
|
876 |
-
# Token is a number
|
877 |
-
value_repr = tokens[idx]
|
878 |
-
try:
|
879 |
-
value = self._to_decimal(value_repr)
|
880 |
-
except Exception as e:
|
881 |
-
six.raise_from(ValueError('Unknown numeric token'), e)
|
882 |
-
|
883 |
-
len_li = len(value_repr)
|
884 |
-
|
885 |
-
len_l = len(tokens)
|
886 |
-
|
887 |
-
if (len(ymd) == 3 and len_li in (2, 4) and
|
888 |
-
res.hour is None and
|
889 |
-
(idx + 1 >= len_l or
|
890 |
-
(tokens[idx + 1] != ':' and
|
891 |
-
info.hms(tokens[idx + 1]) is None))):
|
892 |
-
# 19990101T23[59]
|
893 |
-
s = tokens[idx]
|
894 |
-
res.hour = int(s[:2])
|
895 |
-
|
896 |
-
if len_li == 4:
|
897 |
-
res.minute = int(s[2:])
|
898 |
-
|
899 |
-
elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
|
900 |
-
# YYMMDD or HHMMSS[.ss]
|
901 |
-
s = tokens[idx]
|
902 |
-
|
903 |
-
if not ymd and '.' not in tokens[idx]:
|
904 |
-
ymd.append(s[:2])
|
905 |
-
ymd.append(s[2:4])
|
906 |
-
ymd.append(s[4:])
|
907 |
-
else:
|
908 |
-
# 19990101T235959[.59]
|
909 |
-
|
910 |
-
# TODO: Check if res attributes already set.
|
911 |
-
res.hour = int(s[:2])
|
912 |
-
res.minute = int(s[2:4])
|
913 |
-
res.second, res.microsecond = self._parsems(s[4:])
|
914 |
-
|
915 |
-
elif len_li in (8, 12, 14):
|
916 |
-
# YYYYMMDD
|
917 |
-
s = tokens[idx]
|
918 |
-
ymd.append(s[:4], 'Y')
|
919 |
-
ymd.append(s[4:6])
|
920 |
-
ymd.append(s[6:8])
|
921 |
-
|
922 |
-
if len_li > 8:
|
923 |
-
res.hour = int(s[8:10])
|
924 |
-
res.minute = int(s[10:12])
|
925 |
-
|
926 |
-
if len_li > 12:
|
927 |
-
res.second = int(s[12:])
|
928 |
-
|
929 |
-
elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
|
930 |
-
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
|
931 |
-
hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
|
932 |
-
(idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
|
933 |
-
if hms is not None:
|
934 |
-
# TODO: checking that hour/minute/second are not
|
935 |
-
# already set?
|
936 |
-
self._assign_hms(res, value_repr, hms)
|
937 |
-
|
938 |
-
elif idx + 2 < len_l and tokens[idx + 1] == ':':
|
939 |
-
# HH:MM[:SS[.ss]]
|
940 |
-
res.hour = int(value)
|
941 |
-
value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this?
|
942 |
-
(res.minute, res.second) = self._parse_min_sec(value)
|
943 |
-
|
944 |
-
if idx + 4 < len_l and tokens[idx + 3] == ':':
|
945 |
-
res.second, res.microsecond = self._parsems(tokens[idx + 4])
|
946 |
-
|
947 |
-
idx += 2
|
948 |
-
|
949 |
-
idx += 2
|
950 |
-
|
951 |
-
elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
|
952 |
-
sep = tokens[idx + 1]
|
953 |
-
ymd.append(value_repr)
|
954 |
-
|
955 |
-
if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
|
956 |
-
if tokens[idx + 2].isdigit():
|
957 |
-
# 01-01[-01]
|
958 |
-
ymd.append(tokens[idx + 2])
|
959 |
-
else:
|
960 |
-
# 01-Jan[-01]
|
961 |
-
value = info.month(tokens[idx + 2])
|
962 |
-
|
963 |
-
if value is not None:
|
964 |
-
ymd.append(value, 'M')
|
965 |
-
else:
|
966 |
-
raise ValueError()
|
967 |
-
|
968 |
-
if idx + 3 < len_l and tokens[idx + 3] == sep:
|
969 |
-
# We have three members
|
970 |
-
value = info.month(tokens[idx + 4])
|
971 |
-
|
972 |
-
if value is not None:
|
973 |
-
ymd.append(value, 'M')
|
974 |
-
else:
|
975 |
-
ymd.append(tokens[idx + 4])
|
976 |
-
idx += 2
|
977 |
-
|
978 |
-
idx += 1
|
979 |
-
idx += 1
|
980 |
-
|
981 |
-
elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
|
982 |
-
if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
|
983 |
-
# 12 am
|
984 |
-
hour = int(value)
|
985 |
-
res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
|
986 |
-
idx += 1
|
987 |
-
else:
|
988 |
-
# Year, month or day
|
989 |
-
ymd.append(value)
|
990 |
-
idx += 1
|
991 |
-
|
992 |
-
elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
|
993 |
-
# 12am
|
994 |
-
hour = int(value)
|
995 |
-
res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
|
996 |
-
idx += 1
|
997 |
-
|
998 |
-
elif ymd.could_be_day(value):
|
999 |
-
ymd.append(value)
|
1000 |
-
|
1001 |
-
elif not fuzzy:
|
1002 |
-
raise ValueError()
|
1003 |
-
|
1004 |
-
return idx
|
1005 |
-
|
1006 |
-
def _find_hms_idx(self, idx, tokens, info, allow_jump):
|
1007 |
-
len_l = len(tokens)
|
1008 |
-
|
1009 |
-
if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
|
1010 |
-
# There is an "h", "m", or "s" label following this token. We take
|
1011 |
-
# assign the upcoming label to the current token.
|
1012 |
-
# e.g. the "12" in 12h"
|
1013 |
-
hms_idx = idx + 1
|
1014 |
-
|
1015 |
-
elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
|
1016 |
-
info.hms(tokens[idx+2]) is not None):
|
1017 |
-
# There is a space and then an "h", "m", or "s" label.
|
1018 |
-
# e.g. the "12" in "12 h"
|
1019 |
-
hms_idx = idx + 2
|
1020 |
-
|
1021 |
-
elif idx > 0 and info.hms(tokens[idx-1]) is not None:
|
1022 |
-
# There is a "h", "m", or "s" preceding this token. Since neither
|
1023 |
-
# of the previous cases was hit, there is no label following this
|
1024 |
-
# token, so we use the previous label.
|
1025 |
-
# e.g. the "04" in "12h04"
|
1026 |
-
hms_idx = idx-1
|
1027 |
-
|
1028 |
-
elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
|
1029 |
-
info.hms(tokens[idx-2]) is not None):
|
1030 |
-
# If we are looking at the final token, we allow for a
|
1031 |
-
# backward-looking check to skip over a space.
|
1032 |
-
# TODO: Are we sure this is the right condition here?
|
1033 |
-
hms_idx = idx - 2
|
1034 |
-
|
1035 |
-
else:
|
1036 |
-
hms_idx = None
|
1037 |
-
|
1038 |
-
return hms_idx
|
1039 |
-
|
1040 |
-
def _assign_hms(self, res, value_repr, hms):
|
1041 |
-
# See GH issue #427, fixing float rounding
|
1042 |
-
value = self._to_decimal(value_repr)
|
1043 |
-
|
1044 |
-
if hms == 0:
|
1045 |
-
# Hour
|
1046 |
-
res.hour = int(value)
|
1047 |
-
if value % 1:
|
1048 |
-
res.minute = int(60*(value % 1))
|
1049 |
-
|
1050 |
-
elif hms == 1:
|
1051 |
-
(res.minute, res.second) = self._parse_min_sec(value)
|
1052 |
-
|
1053 |
-
elif hms == 2:
|
1054 |
-
(res.second, res.microsecond) = self._parsems(value_repr)
|
1055 |
-
|
1056 |
-
def _could_be_tzname(self, hour, tzname, tzoffset, token):
|
1057 |
-
return (hour is not None and
|
1058 |
-
tzname is None and
|
1059 |
-
tzoffset is None and
|
1060 |
-
len(token) <= 5 and
|
1061 |
-
(all(x in string.ascii_uppercase for x in token)
|
1062 |
-
or token in self.info.UTCZONE))
|
1063 |
-
|
1064 |
-
def _ampm_valid(self, hour, ampm, fuzzy):
|
1065 |
-
"""
|
1066 |
-
For fuzzy parsing, 'a' or 'am' (both valid English words)
|
1067 |
-
may erroneously trigger the AM/PM flag. Deal with that
|
1068 |
-
here.
|
1069 |
-
"""
|
1070 |
-
val_is_ampm = True
|
1071 |
-
|
1072 |
-
# If there's already an AM/PM flag, this one isn't one.
|
1073 |
-
if fuzzy and ampm is not None:
|
1074 |
-
val_is_ampm = False
|
1075 |
-
|
1076 |
-
# If AM/PM is found and hour is not, raise a ValueError
|
1077 |
-
if hour is None:
|
1078 |
-
if fuzzy:
|
1079 |
-
val_is_ampm = False
|
1080 |
-
else:
|
1081 |
-
raise ValueError('No hour specified with AM or PM flag.')
|
1082 |
-
elif not 0 <= hour <= 12:
|
1083 |
-
# If AM/PM is found, it's a 12 hour clock, so raise
|
1084 |
-
# an error for invalid range
|
1085 |
-
if fuzzy:
|
1086 |
-
val_is_ampm = False
|
1087 |
-
else:
|
1088 |
-
raise ValueError('Invalid hour specified for 12-hour clock.')
|
1089 |
-
|
1090 |
-
return val_is_ampm
|
1091 |
-
|
1092 |
-
def _adjust_ampm(self, hour, ampm):
|
1093 |
-
if hour < 12 and ampm == 1:
|
1094 |
-
hour += 12
|
1095 |
-
elif hour == 12 and ampm == 0:
|
1096 |
-
hour = 0
|
1097 |
-
return hour
|
1098 |
-
|
1099 |
-
def _parse_min_sec(self, value):
|
1100 |
-
# TODO: Every usage of this function sets res.second to the return
|
1101 |
-
# value. Are there any cases where second will be returned as None and
|
1102 |
-
# we *don't* want to set res.second = None?
|
1103 |
-
minute = int(value)
|
1104 |
-
second = None
|
1105 |
-
|
1106 |
-
sec_remainder = value % 1
|
1107 |
-
if sec_remainder:
|
1108 |
-
second = int(60 * sec_remainder)
|
1109 |
-
return (minute, second)
|
1110 |
-
|
1111 |
-
def _parse_hms(self, idx, tokens, info, hms_idx):
|
1112 |
-
# TODO: Is this going to admit a lot of false-positives for when we
|
1113 |
-
# just happen to have digits and "h", "m" or "s" characters in non-date
|
1114 |
-
# text? I guess hex hashes won't have that problem, but there's plenty
|
1115 |
-
# of random junk out there.
|
1116 |
-
if hms_idx is None:
|
1117 |
-
hms = None
|
1118 |
-
new_idx = idx
|
1119 |
-
elif hms_idx > idx:
|
1120 |
-
hms = info.hms(tokens[hms_idx])
|
1121 |
-
new_idx = hms_idx
|
1122 |
-
else:
|
1123 |
-
# Looking backwards, increment one.
|
1124 |
-
hms = info.hms(tokens[hms_idx]) + 1
|
1125 |
-
new_idx = idx
|
1126 |
-
|
1127 |
-
return (new_idx, hms)
|
1128 |
-
|
1129 |
-
# ------------------------------------------------------------------
|
1130 |
-
# Handling for individual tokens. These are kept as methods instead
|
1131 |
-
# of functions for the sake of customizability via subclassing.
|
1132 |
-
|
1133 |
-
def _parsems(self, value):
|
1134 |
-
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
|
1135 |
-
if "." not in value:
|
1136 |
-
return int(value), 0
|
1137 |
-
else:
|
1138 |
-
i, f = value.split(".")
|
1139 |
-
return int(i), int(f.ljust(6, "0")[:6])
|
1140 |
-
|
1141 |
-
def _to_decimal(self, val):
|
1142 |
-
try:
|
1143 |
-
decimal_value = Decimal(val)
|
1144 |
-
# See GH 662, edge case, infinite value should not be converted
|
1145 |
-
# via `_to_decimal`
|
1146 |
-
if not decimal_value.is_finite():
|
1147 |
-
raise ValueError("Converted decimal value is infinite or NaN")
|
1148 |
-
except Exception as e:
|
1149 |
-
msg = "Could not convert %s to decimal" % val
|
1150 |
-
six.raise_from(ValueError(msg), e)
|
1151 |
-
else:
|
1152 |
-
return decimal_value
|
1153 |
-
|
1154 |
-
# ------------------------------------------------------------------
|
1155 |
-
# Post-Parsing construction of datetime output. These are kept as
|
1156 |
-
# methods instead of functions for the sake of customizability via
|
1157 |
-
# subclassing.
|
1158 |
-
|
1159 |
-
def _build_tzinfo(self, tzinfos, tzname, tzoffset):
|
1160 |
-
if callable(tzinfos):
|
1161 |
-
tzdata = tzinfos(tzname, tzoffset)
|
1162 |
-
else:
|
1163 |
-
tzdata = tzinfos.get(tzname)
|
1164 |
-
# handle case where tzinfo is paased an options that returns None
|
1165 |
-
# eg tzinfos = {'BRST' : None}
|
1166 |
-
if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
|
1167 |
-
tzinfo = tzdata
|
1168 |
-
elif isinstance(tzdata, text_type):
|
1169 |
-
tzinfo = tz.tzstr(tzdata)
|
1170 |
-
elif isinstance(tzdata, integer_types):
|
1171 |
-
tzinfo = tz.tzoffset(tzname, tzdata)
|
1172 |
-
else:
|
1173 |
-
raise TypeError("Offset must be tzinfo subclass, tz string, "
|
1174 |
-
"or int offset.")
|
1175 |
-
return tzinfo
|
1176 |
-
|
1177 |
-
def _build_tzaware(self, naive, res, tzinfos):
|
1178 |
-
if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
|
1179 |
-
tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
|
1180 |
-
aware = naive.replace(tzinfo=tzinfo)
|
1181 |
-
aware = self._assign_tzname(aware, res.tzname)
|
1182 |
-
|
1183 |
-
elif res.tzname and res.tzname in time.tzname:
|
1184 |
-
aware = naive.replace(tzinfo=tz.tzlocal())
|
1185 |
-
|
1186 |
-
# Handle ambiguous local datetime
|
1187 |
-
aware = self._assign_tzname(aware, res.tzname)
|
1188 |
-
|
1189 |
-
# This is mostly relevant for winter GMT zones parsed in the UK
|
1190 |
-
if (aware.tzname() != res.tzname and
|
1191 |
-
res.tzname in self.info.UTCZONE):
|
1192 |
-
aware = aware.replace(tzinfo=tz.UTC)
|
1193 |
-
|
1194 |
-
elif res.tzoffset == 0:
|
1195 |
-
aware = naive.replace(tzinfo=tz.UTC)
|
1196 |
-
|
1197 |
-
elif res.tzoffset:
|
1198 |
-
aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
|
1199 |
-
|
1200 |
-
elif not res.tzname and not res.tzoffset:
|
1201 |
-
# i.e. no timezone information was found.
|
1202 |
-
aware = naive
|
1203 |
-
|
1204 |
-
elif res.tzname:
|
1205 |
-
# tz-like string was parsed but we don't know what to do
|
1206 |
-
# with it
|
1207 |
-
warnings.warn("tzname {tzname} identified but not understood. "
|
1208 |
-
"Pass `tzinfos` argument in order to correctly "
|
1209 |
-
"return a timezone-aware datetime. In a future "
|
1210 |
-
"version, this will raise an "
|
1211 |
-
"exception.".format(tzname=res.tzname),
|
1212 |
-
category=UnknownTimezoneWarning)
|
1213 |
-
aware = naive
|
1214 |
-
|
1215 |
-
return aware
|
1216 |
-
|
1217 |
-
def _build_naive(self, res, default):
|
1218 |
-
repl = {}
|
1219 |
-
for attr in ("year", "month", "day", "hour",
|
1220 |
-
"minute", "second", "microsecond"):
|
1221 |
-
value = getattr(res, attr)
|
1222 |
-
if value is not None:
|
1223 |
-
repl[attr] = value
|
1224 |
-
|
1225 |
-
if 'day' not in repl:
|
1226 |
-
# If the default day exceeds the last day of the month, fall back
|
1227 |
-
# to the end of the month.
|
1228 |
-
cyear = default.year if res.year is None else res.year
|
1229 |
-
cmonth = default.month if res.month is None else res.month
|
1230 |
-
cday = default.day if res.day is None else res.day
|
1231 |
-
|
1232 |
-
if cday > monthrange(cyear, cmonth)[1]:
|
1233 |
-
repl['day'] = monthrange(cyear, cmonth)[1]
|
1234 |
-
|
1235 |
-
naive = default.replace(**repl)
|
1236 |
-
|
1237 |
-
if res.weekday is not None and not res.day:
|
1238 |
-
naive = naive + relativedelta.relativedelta(weekday=res.weekday)
|
1239 |
-
|
1240 |
-
return naive
|
1241 |
-
|
1242 |
-
def _assign_tzname(self, dt, tzname):
|
1243 |
-
if dt.tzname() != tzname:
|
1244 |
-
new_dt = tz.enfold(dt, fold=1)
|
1245 |
-
if new_dt.tzname() == tzname:
|
1246 |
-
return new_dt
|
1247 |
-
|
1248 |
-
return dt
|
1249 |
-
|
1250 |
-
def _recombine_skipped(self, tokens, skipped_idxs):
|
1251 |
-
"""
|
1252 |
-
>>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
|
1253 |
-
>>> skipped_idxs = [0, 1, 2, 5]
|
1254 |
-
>>> _recombine_skipped(tokens, skipped_idxs)
|
1255 |
-
["foo bar", "baz"]
|
1256 |
-
"""
|
1257 |
-
skipped_tokens = []
|
1258 |
-
for i, idx in enumerate(sorted(skipped_idxs)):
|
1259 |
-
if i > 0 and idx - 1 == skipped_idxs[i - 1]:
|
1260 |
-
skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
|
1261 |
-
else:
|
1262 |
-
skipped_tokens.append(tokens[idx])
|
1263 |
-
|
1264 |
-
return skipped_tokens
|
1265 |
-
|
1266 |
-
|
1267 |
-
DEFAULTPARSER = parser()
|
1268 |
-
|
1269 |
-
|
1270 |
-
def parse(timestr, parserinfo=None, **kwargs):
|
1271 |
-
"""
|
1272 |
-
|
1273 |
-
Parse a string in one of the supported formats, using the
|
1274 |
-
``parserinfo`` parameters.
|
1275 |
-
|
1276 |
-
:param timestr:
|
1277 |
-
A string containing a date/time stamp.
|
1278 |
-
|
1279 |
-
:param parserinfo:
|
1280 |
-
A :class:`parserinfo` object containing parameters for the parser.
|
1281 |
-
If ``None``, the default arguments to the :class:`parserinfo`
|
1282 |
-
constructor are used.
|
1283 |
-
|
1284 |
-
The ``**kwargs`` parameter takes the following keyword arguments:
|
1285 |
-
|
1286 |
-
:param default:
|
1287 |
-
The default datetime object, if this is a datetime object and not
|
1288 |
-
``None``, elements specified in ``timestr`` replace elements in the
|
1289 |
-
default object.
|
1290 |
-
|
1291 |
-
:param ignoretz:
|
1292 |
-
If set ``True``, time zones in parsed strings are ignored and a naive
|
1293 |
-
:class:`datetime` object is returned.
|
1294 |
-
|
1295 |
-
:param tzinfos:
|
1296 |
-
Additional time zone names / aliases which may be present in the
|
1297 |
-
string. This argument maps time zone names (and optionally offsets
|
1298 |
-
from those time zones) to time zones. This parameter can be a
|
1299 |
-
dictionary with timezone aliases mapping time zone names to time
|
1300 |
-
zones or a function taking two parameters (``tzname`` and
|
1301 |
-
``tzoffset``) and returning a time zone.
|
1302 |
-
|
1303 |
-
The timezones to which the names are mapped can be an integer
|
1304 |
-
offset from UTC in seconds or a :class:`tzinfo` object.
|
1305 |
-
|
1306 |
-
.. doctest::
|
1307 |
-
:options: +NORMALIZE_WHITESPACE
|
1308 |
-
|
1309 |
-
>>> from dateutil.parser import parse
|
1310 |
-
>>> from dateutil.tz import gettz
|
1311 |
-
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
|
1312 |
-
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
|
1313 |
-
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
|
1314 |
-
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
|
1315 |
-
datetime.datetime(2012, 1, 19, 17, 21,
|
1316 |
-
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
|
1317 |
-
|
1318 |
-
This parameter is ignored if ``ignoretz`` is set.
|
1319 |
-
|
1320 |
-
:param dayfirst:
|
1321 |
-
Whether to interpret the first value in an ambiguous 3-integer date
|
1322 |
-
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
|
1323 |
-
``yearfirst`` is set to ``True``, this distinguishes between YDM and
|
1324 |
-
YMD. If set to ``None``, this value is retrieved from the current
|
1325 |
-
:class:`parserinfo` object (which itself defaults to ``False``).
|
1326 |
-
|
1327 |
-
:param yearfirst:
|
1328 |
-
Whether to interpret the first value in an ambiguous 3-integer date
|
1329 |
-
(e.g. 01/05/09) as the year. If ``True``, the first number is taken to
|
1330 |
-
be the year, otherwise the last number is taken to be the year. If
|
1331 |
-
this is set to ``None``, the value is retrieved from the current
|
1332 |
-
:class:`parserinfo` object (which itself defaults to ``False``).
|
1333 |
-
|
1334 |
-
:param fuzzy:
|
1335 |
-
Whether to allow fuzzy parsing, allowing for string like "Today is
|
1336 |
-
January 1, 2047 at 8:21:00AM".
|
1337 |
-
|
1338 |
-
:param fuzzy_with_tokens:
|
1339 |
-
If ``True``, ``fuzzy`` is automatically set to True, and the parser
|
1340 |
-
will return a tuple where the first element is the parsed
|
1341 |
-
:class:`datetime.datetime` datetimestamp and the second element is
|
1342 |
-
a tuple containing the portions of the string which were ignored:
|
1343 |
-
|
1344 |
-
.. doctest::
|
1345 |
-
|
1346 |
-
>>> from dateutil.parser import parse
|
1347 |
-
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
|
1348 |
-
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
|
1349 |
-
|
1350 |
-
:return:
|
1351 |
-
Returns a :class:`datetime.datetime` object or, if the
|
1352 |
-
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
|
1353 |
-
first element being a :class:`datetime.datetime` object, the second
|
1354 |
-
a tuple containing the fuzzy tokens.
|
1355 |
-
|
1356 |
-
:raises ParserError:
|
1357 |
-
Raised for invalid or unknown string formats, if the provided
|
1358 |
-
:class:`tzinfo` is not in a valid format, or if an invalid date would
|
1359 |
-
be created.
|
1360 |
-
|
1361 |
-
:raises OverflowError:
|
1362 |
-
Raised if the parsed date exceeds the largest valid C integer on
|
1363 |
-
your system.
|
1364 |
-
"""
|
1365 |
-
if parserinfo:
|
1366 |
-
return parser(parserinfo).parse(timestr, **kwargs)
|
1367 |
-
else:
|
1368 |
-
return DEFAULTPARSER.parse(timestr, **kwargs)
|
1369 |
-
|
1370 |
-
|
1371 |
-
class _tzparser(object):
|
1372 |
-
|
1373 |
-
class _result(_resultbase):
|
1374 |
-
|
1375 |
-
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
|
1376 |
-
"start", "end"]
|
1377 |
-
|
1378 |
-
class _attr(_resultbase):
|
1379 |
-
__slots__ = ["month", "week", "weekday",
|
1380 |
-
"yday", "jyday", "day", "time"]
|
1381 |
-
|
1382 |
-
def __repr__(self):
|
1383 |
-
return self._repr("")
|
1384 |
-
|
1385 |
-
def __init__(self):
|
1386 |
-
_resultbase.__init__(self)
|
1387 |
-
self.start = self._attr()
|
1388 |
-
self.end = self._attr()
|
1389 |
-
|
1390 |
-
def parse(self, tzstr):
|
1391 |
-
res = self._result()
|
1392 |
-
l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
|
1393 |
-
used_idxs = list()
|
1394 |
-
try:
|
1395 |
-
|
1396 |
-
len_l = len(l)
|
1397 |
-
|
1398 |
-
i = 0
|
1399 |
-
while i < len_l:
|
1400 |
-
# BRST+3[BRDT[+2]]
|
1401 |
-
j = i
|
1402 |
-
while j < len_l and not [x for x in l[j]
|
1403 |
-
if x in "0123456789:,-+"]:
|
1404 |
-
j += 1
|
1405 |
-
if j != i:
|
1406 |
-
if not res.stdabbr:
|
1407 |
-
offattr = "stdoffset"
|
1408 |
-
res.stdabbr = "".join(l[i:j])
|
1409 |
-
else:
|
1410 |
-
offattr = "dstoffset"
|
1411 |
-
res.dstabbr = "".join(l[i:j])
|
1412 |
-
|
1413 |
-
for ii in range(j):
|
1414 |
-
used_idxs.append(ii)
|
1415 |
-
i = j
|
1416 |
-
if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
|
1417 |
-
"0123456789")):
|
1418 |
-
if l[i] in ('+', '-'):
|
1419 |
-
# Yes, that's right. See the TZ variable
|
1420 |
-
# documentation.
|
1421 |
-
signal = (1, -1)[l[i] == '+']
|
1422 |
-
used_idxs.append(i)
|
1423 |
-
i += 1
|
1424 |
-
else:
|
1425 |
-
signal = -1
|
1426 |
-
len_li = len(l[i])
|
1427 |
-
if len_li == 4:
|
1428 |
-
# -0300
|
1429 |
-
setattr(res, offattr, (int(l[i][:2]) * 3600 +
|
1430 |
-
int(l[i][2:]) * 60) * signal)
|
1431 |
-
elif i + 1 < len_l and l[i + 1] == ':':
|
1432 |
-
# -03:00
|
1433 |
-
setattr(res, offattr,
|
1434 |
-
(int(l[i]) * 3600 +
|
1435 |
-
int(l[i + 2]) * 60) * signal)
|
1436 |
-
used_idxs.append(i)
|
1437 |
-
i += 2
|
1438 |
-
elif len_li <= 2:
|
1439 |
-
# -[0]3
|
1440 |
-
setattr(res, offattr,
|
1441 |
-
int(l[i][:2]) * 3600 * signal)
|
1442 |
-
else:
|
1443 |
-
return None
|
1444 |
-
used_idxs.append(i)
|
1445 |
-
i += 1
|
1446 |
-
if res.dstabbr:
|
1447 |
-
break
|
1448 |
-
else:
|
1449 |
-
break
|
1450 |
-
|
1451 |
-
|
1452 |
-
if i < len_l:
|
1453 |
-
for j in range(i, len_l):
|
1454 |
-
if l[j] == ';':
|
1455 |
-
l[j] = ','
|
1456 |
-
|
1457 |
-
assert l[i] == ','
|
1458 |
-
|
1459 |
-
i += 1
|
1460 |
-
|
1461 |
-
if i >= len_l:
|
1462 |
-
pass
|
1463 |
-
elif (8 <= l.count(',') <= 9 and
|
1464 |
-
not [y for x in l[i:] if x != ','
|
1465 |
-
for y in x if y not in "0123456789+-"]):
|
1466 |
-
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
|
1467 |
-
for x in (res.start, res.end):
|
1468 |
-
x.month = int(l[i])
|
1469 |
-
used_idxs.append(i)
|
1470 |
-
i += 2
|
1471 |
-
if l[i] == '-':
|
1472 |
-
value = int(l[i + 1]) * -1
|
1473 |
-
used_idxs.append(i)
|
1474 |
-
i += 1
|
1475 |
-
else:
|
1476 |
-
value = int(l[i])
|
1477 |
-
used_idxs.append(i)
|
1478 |
-
i += 2
|
1479 |
-
if value:
|
1480 |
-
x.week = value
|
1481 |
-
x.weekday = (int(l[i]) - 1) % 7
|
1482 |
-
else:
|
1483 |
-
x.day = int(l[i])
|
1484 |
-
used_idxs.append(i)
|
1485 |
-
i += 2
|
1486 |
-
x.time = int(l[i])
|
1487 |
-
used_idxs.append(i)
|
1488 |
-
i += 2
|
1489 |
-
if i < len_l:
|
1490 |
-
if l[i] in ('-', '+'):
|
1491 |
-
signal = (-1, 1)[l[i] == "+"]
|
1492 |
-
used_idxs.append(i)
|
1493 |
-
i += 1
|
1494 |
-
else:
|
1495 |
-
signal = 1
|
1496 |
-
used_idxs.append(i)
|
1497 |
-
res.dstoffset = (res.stdoffset + int(l[i]) * signal)
|
1498 |
-
|
1499 |
-
# This was a made-up format that is not in normal use
|
1500 |
-
warn(('Parsed time zone "%s"' % tzstr) +
|
1501 |
-
'is in a non-standard dateutil-specific format, which ' +
|
1502 |
-
'is now deprecated; support for parsing this format ' +
|
1503 |
-
'will be removed in future versions. It is recommended ' +
|
1504 |
-
'that you switch to a standard format like the GNU ' +
|
1505 |
-
'TZ variable format.', tz.DeprecatedTzFormatWarning)
|
1506 |
-
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
|
1507 |
-
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
|
1508 |
-
'.', '-', ':')
|
1509 |
-
for y in x if y not in "0123456789"]):
|
1510 |
-
for x in (res.start, res.end):
|
1511 |
-
if l[i] == 'J':
|
1512 |
-
# non-leap year day (1 based)
|
1513 |
-
used_idxs.append(i)
|
1514 |
-
i += 1
|
1515 |
-
x.jyday = int(l[i])
|
1516 |
-
elif l[i] == 'M':
|
1517 |
-
# month[-.]week[-.]weekday
|
1518 |
-
used_idxs.append(i)
|
1519 |
-
i += 1
|
1520 |
-
x.month = int(l[i])
|
1521 |
-
used_idxs.append(i)
|
1522 |
-
i += 1
|
1523 |
-
assert l[i] in ('-', '.')
|
1524 |
-
used_idxs.append(i)
|
1525 |
-
i += 1
|
1526 |
-
x.week = int(l[i])
|
1527 |
-
if x.week == 5:
|
1528 |
-
x.week = -1
|
1529 |
-
used_idxs.append(i)
|
1530 |
-
i += 1
|
1531 |
-
assert l[i] in ('-', '.')
|
1532 |
-
used_idxs.append(i)
|
1533 |
-
i += 1
|
1534 |
-
x.weekday = (int(l[i]) - 1) % 7
|
1535 |
-
else:
|
1536 |
-
# year day (zero based)
|
1537 |
-
x.yday = int(l[i]) + 1
|
1538 |
-
|
1539 |
-
used_idxs.append(i)
|
1540 |
-
i += 1
|
1541 |
-
|
1542 |
-
if i < len_l and l[i] == '/':
|
1543 |
-
used_idxs.append(i)
|
1544 |
-
i += 1
|
1545 |
-
# start time
|
1546 |
-
len_li = len(l[i])
|
1547 |
-
if len_li == 4:
|
1548 |
-
# -0300
|
1549 |
-
x.time = (int(l[i][:2]) * 3600 +
|
1550 |
-
int(l[i][2:]) * 60)
|
1551 |
-
elif i + 1 < len_l and l[i + 1] == ':':
|
1552 |
-
# -03:00
|
1553 |
-
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
|
1554 |
-
used_idxs.append(i)
|
1555 |
-
i += 2
|
1556 |
-
if i + 1 < len_l and l[i + 1] == ':':
|
1557 |
-
used_idxs.append(i)
|
1558 |
-
i += 2
|
1559 |
-
x.time += int(l[i])
|
1560 |
-
elif len_li <= 2:
|
1561 |
-
# -[0]3
|
1562 |
-
x.time = (int(l[i][:2]) * 3600)
|
1563 |
-
else:
|
1564 |
-
return None
|
1565 |
-
used_idxs.append(i)
|
1566 |
-
i += 1
|
1567 |
-
|
1568 |
-
assert i == len_l or l[i] == ','
|
1569 |
-
|
1570 |
-
i += 1
|
1571 |
-
|
1572 |
-
assert i >= len_l
|
1573 |
-
|
1574 |
-
except (IndexError, ValueError, AssertionError):
|
1575 |
-
return None
|
1576 |
-
|
1577 |
-
unused_idxs = set(range(len_l)).difference(used_idxs)
|
1578 |
-
res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
|
1579 |
-
return res
|
1580 |
-
|
1581 |
-
|
1582 |
-
DEFAULTTZPARSER = _tzparser()
|
1583 |
-
|
1584 |
-
|
1585 |
-
def _parsetz(tzstr):
|
1586 |
-
return DEFAULTTZPARSER.parse(tzstr)
|
1587 |
-
|
1588 |
-
|
1589 |
-
class ParserError(ValueError):
|
1590 |
-
"""Exception subclass used for any failure to parse a datetime string.
|
1591 |
-
|
1592 |
-
This is a subclass of :py:exc:`ValueError`, and should be raised any time
|
1593 |
-
earlier versions of ``dateutil`` would have raised ``ValueError``.
|
1594 |
-
|
1595 |
-
.. versionadded:: 2.8.1
|
1596 |
-
"""
|
1597 |
-
def __str__(self):
|
1598 |
-
try:
|
1599 |
-
return self.args[0] % self.args[1:]
|
1600 |
-
except (TypeError, IndexError):
|
1601 |
-
return super(ParserError, self).__str__()
|
1602 |
-
|
1603 |
-
def __repr__(self):
|
1604 |
-
args = ", ".join("'%s'" % arg for arg in self.args)
|
1605 |
-
return "%s(%s)" % (self.__class__.__name__, args)
|
1606 |
-
|
1607 |
-
|
1608 |
-
class UnknownTimezoneWarning(RuntimeWarning):
|
1609 |
-
"""Raised when the parser finds a timezone it cannot parse into a tzinfo.
|
1610 |
-
|
1611 |
-
.. versionadded:: 2.7.0
|
1612 |
-
"""
|
1613 |
-
# vim:ts=4:sw=4:et
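
A minimal usage sketch of the public `parse()` entry point defined in the deleted module above; it is not part of the deleted file, and it assumes `python-dateutil` (2.8.1 or later, for `ParserError`) is installed. The `tzinfos` mapping and the error handling mirror the docstring shown above.

```python
# Sketch only: exercising dateutil.parser.parse as documented above.
# Assumes python-dateutil >= 2.8.1 is installed.
from dateutil import tz
from dateutil.parser import parse, ParserError

tzinfos = {"BRST": -7200, "CST": tz.gettz("America/Chicago")}

try:
    dt = parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
    print(dt.isoformat())  # 2012-01-19T17:21:00-02:00
except ParserError as exc:
    # Raised for invalid or unknown string formats.
    print("could not parse:", exc)
```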
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/operations/__init__.py
DELETED
File without changes
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/resolution/resolvelib/base.py
DELETED
@@ -1,141 +0,0 @@
from typing import FrozenSet, Iterable, Optional, Tuple, Union

from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
from pip._vendor.packaging.version import LegacyVersion, Version

from pip._internal.models.link import Link, links_equivalent
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.hashes import Hashes

CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]]
CandidateVersion = Union[LegacyVersion, Version]


def format_name(project: str, extras: FrozenSet[str]) -> str:
    if not extras:
        return project
    canonical_extras = sorted(canonicalize_name(e) for e in extras)
    return "{}[{}]".format(project, ",".join(canonical_extras))


class Constraint:
    def __init__(
        self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link]
    ) -> None:
        self.specifier = specifier
        self.hashes = hashes
        self.links = links

    @classmethod
    def empty(cls) -> "Constraint":
        return Constraint(SpecifierSet(), Hashes(), frozenset())

    @classmethod
    def from_ireq(cls, ireq: InstallRequirement) -> "Constraint":
        links = frozenset([ireq.link]) if ireq.link else frozenset()
        return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links)

    def __bool__(self) -> bool:
        return bool(self.specifier) or bool(self.hashes) or bool(self.links)

    def __and__(self, other: InstallRequirement) -> "Constraint":
        if not isinstance(other, InstallRequirement):
            return NotImplemented
        specifier = self.specifier & other.specifier
        hashes = self.hashes & other.hashes(trust_internet=False)
        links = self.links
        if other.link:
            links = links.union([other.link])
        return Constraint(specifier, hashes, links)

    def is_satisfied_by(self, candidate: "Candidate") -> bool:
        # Reject if there are any mismatched URL constraints on this package.
        if self.links and not all(_match_link(link, candidate) for link in self.links):
            return False
        # We can safely always allow prereleases here since PackageFinder
        # already implements the prerelease logic, and would have filtered out
        # prerelease candidates if the user does not expect them.
        return self.specifier.contains(candidate.version, prereleases=True)


class Requirement:
    @property
    def project_name(self) -> NormalizedName:
        """The "project name" of a requirement.

        This is different from ``name`` if this requirement contains extras,
        in which case ``name`` would contain the ``[...]`` part, while this
        refers to the name of the project.
        """
        raise NotImplementedError("Subclass should override")

    @property
    def name(self) -> str:
        """The name identifying this requirement in the resolver.

        This is different from ``project_name`` if this requirement contains
        extras, where ``project_name`` would not contain the ``[...]`` part.
        """
        raise NotImplementedError("Subclass should override")

    def is_satisfied_by(self, candidate: "Candidate") -> bool:
        return False

    def get_candidate_lookup(self) -> CandidateLookup:
        raise NotImplementedError("Subclass should override")

    def format_for_error(self) -> str:
        raise NotImplementedError("Subclass should override")


def _match_link(link: Link, candidate: "Candidate") -> bool:
    if candidate.source_link:
        return links_equivalent(link, candidate.source_link)
    return False


class Candidate:
    @property
    def project_name(self) -> NormalizedName:
        """The "project name" of the candidate.

        This is different from ``name`` if this candidate contains extras,
        in which case ``name`` would contain the ``[...]`` part, while this
        refers to the name of the project.
        """
        raise NotImplementedError("Override in subclass")

    @property
    def name(self) -> str:
        """The name identifying this candidate in the resolver.

        This is different from ``project_name`` if this candidate contains
        extras, where ``project_name`` would not contain the ``[...]`` part.
        """
        raise NotImplementedError("Override in subclass")

    @property
    def version(self) -> CandidateVersion:
        raise NotImplementedError("Override in subclass")

    @property
    def is_installed(self) -> bool:
        raise NotImplementedError("Override in subclass")

    @property
    def is_editable(self) -> bool:
        raise NotImplementedError("Override in subclass")

    @property
    def source_link(self) -> Optional[Link]:
        raise NotImplementedError("Override in subclass")

    def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
        raise NotImplementedError("Override in subclass")

    def get_install_requirement(self) -> Optional[InstallRequirement]:
        raise NotImplementedError("Override in subclass")

    def format_for_error(self) -> str:
        raise NotImplementedError("Subclass should override")
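
A small sketch of the specifier behaviour that `Constraint.__and__` and `Constraint.is_satisfied_by` above rely on. It is not part of the deleted file and uses the standalone `packaging` library (an assumption; pip itself uses its vendored copy): specifier sets are intersected with `&` and membership is checked with `contains(..., prereleases=True)`.

```python
# Sketch only: the specifier operations that Constraint merging builds on,
# shown with the standalone `packaging` package rather than pip's vendored copy.
from packaging.specifiers import SpecifierSet

merged = SpecifierSet(">=1.0") & SpecifierSet("<2.0")   # analogous to Constraint.__and__
print(str(merged))                                      # e.g. "<2.0,>=1.0"
print(merged.contains("1.5", prereleases=True))         # True
print(merged.contains("2.1", prereleases=True))         # False
```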
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/platformdirs/windows.py
DELETED
@@ -1,195 +0,0 @@
from __future__ import annotations

import ctypes
import os
import sys
from functools import lru_cache
from typing import Callable

from .api import PlatformDirsABC


class Windows(PlatformDirsABC):
    """`MSDN on where to store app data files
    <http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
    Makes use of the
    `appname <platformdirs.api.PlatformDirsABC.appname>`,
    `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
    `version <platformdirs.api.PlatformDirsABC.version>`,
    `roaming <platformdirs.api.PlatformDirsABC.roaming>`,
    `opinion <platformdirs.api.PlatformDirsABC.opinion>`,
    `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
    """

    @property
    def user_data_dir(self) -> str:
        """
        :return: data directory tied to the user, e.g.
         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
         ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
        """
        const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(get_win_folder(const))
        return self._append_parts(path)

    def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
        params = []
        if self.appname:
            if self.appauthor is not False:
                author = self.appauthor or self.appname
                params.append(author)
            params.append(self.appname)
            if opinion_value is not None and self.opinion:
                params.append(opinion_value)
            if self.version:
                params.append(self.version)
        path = os.path.join(path, *params)
        self._optionally_create_directory(path)
        return path

    @property
    def site_data_dir(self) -> str:
        """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
        return self._append_parts(path)

    @property
    def user_config_dir(self) -> str:
        """:return: config directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def site_config_dir(self) -> str:
        """:return: config directory shared by the users, same as `site_data_dir`"""
        return self.site_data_dir

    @property
    def user_cache_dir(self) -> str:
        """
        :return: cache directory tied to the user (if opinionated with ``Cache`` folder within ``$appname``) e.g.
         ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
        """
        path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
        return self._append_parts(path, opinion_value="Cache")

    @property
    def site_cache_dir(self) -> str:
        """:return: cache directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname\\Cache\\$version``"""
        path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
        return self._append_parts(path, opinion_value="Cache")

    @property
    def user_state_dir(self) -> str:
        """:return: state directory tied to the user, same as `user_data_dir`"""
        return self.user_data_dir

    @property
    def user_log_dir(self) -> str:
        """
        :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it
        """
        path = self.user_data_dir
        if self.opinion:
            path = os.path.join(path, "Logs")
            self._optionally_create_directory(path)
        return path

    @property
    def user_documents_dir(self) -> str:
        """
        :return: documents directory tied to the user e.g. ``%USERPROFILE%\\Documents``
        """
        return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))

    @property
    def user_runtime_dir(self) -> str:
        """
        :return: runtime directory tied to the user, e.g.
         ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
        """
        path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
        return self._append_parts(path)


def get_win_folder_from_env_vars(csidl_name: str) -> str:
    """Get folder from environment variables."""
    if csidl_name == "CSIDL_PERSONAL":  # does not have an environment name
        return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")

    env_var_name = {
        "CSIDL_APPDATA": "APPDATA",
        "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
        "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
    }.get(csidl_name)
    if env_var_name is None:
        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
    result = os.environ.get(env_var_name)
    if result is None:
        raise ValueError(f"Unset environment variable: {env_var_name}")
    return result


def get_win_folder_from_registry(csidl_name: str) -> str:
    """Get folder from the registry.

    This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
        "CSIDL_PERSONAL": "Personal",
    }.get(csidl_name)
    if shell_folder_name is None:
        raise ValueError(f"Unknown CSIDL name: {csidl_name}")
    if sys.platform != "win32":  # only needed for mypy type checker to know that this code runs only on Windows
        raise NotImplementedError
    import winreg

    key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
    directory, _ = winreg.QueryValueEx(key, shell_folder_name)
    return str(directory)


def get_win_folder_via_ctypes(csidl_name: str) -> str:
    """Get folder with ctypes."""
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
        "CSIDL_PERSONAL": 5,
    }.get(csidl_name)
    if csidl_const is None:
        raise ValueError(f"Unknown CSIDL name: {csidl_name}")

    buf = ctypes.create_unicode_buffer(1024)
    windll = getattr(ctypes, "windll")  # noqa: B009 # using getattr to avoid false positive with mypy type checker
    windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if it has highbit chars.
    if any(ord(c) > 255 for c in buf):
        buf2 = ctypes.create_unicode_buffer(1024)
        if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value


def _pick_get_win_folder() -> Callable[[str], str]:
    if hasattr(ctypes, "windll"):
        return get_win_folder_via_ctypes
    try:
        import winreg  # noqa: F401
    except ImportError:
        return get_win_folder_from_env_vars
    else:
        return get_win_folder_from_registry


get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())

__all__ = [
    "Windows",
]
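
A minimal sketch of the public helpers that the Windows backend above implements; it is not part of the deleted file, it assumes the `platformdirs` package is installed, and the "SuperApp"/"Acme" names are made-up placeholders.

```python
# Sketch only: the public platformdirs API whose Windows behaviour is defined
# by the Windows class above. "SuperApp" and "Acme" are placeholder names.
import platformdirs

# On Windows these resolve via Windows.user_data_dir / user_cache_dir,
# e.g. %USERPROFILE%\AppData\Local\Acme\SuperApp and ...\SuperApp\Cache.
print(platformdirs.user_data_dir("SuperApp", "Acme"))
print(platformdirs.user_cache_dir("SuperApp", "Acme"))
```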
spaces/Billyosoro/ESRGAN/Training.md
DELETED
@@ -1,100 +0,0 @@
# :computer: How to Train Real-ESRGAN

The training codes have been released. <br>
Note that the codes have a lot of refactoring. So there may be some bugs/performance drops. Welcome to report issues and I will also retrain the models.

## Overview

The training has been divided into two stages. These two stages have the same data synthesis process and training pipeline, except for the loss functions. Specifically,

1. We first train Real-ESRNet with L1 loss from the pre-trained model ESRGAN.
1. We then use the trained Real-ESRNet model as an initialization of the generator, and train the Real-ESRGAN with a combination of L1 loss, perceptual loss and GAN loss.

## Dataset Preparation

We use DF2K (DIV2K and Flickr2K) + OST datasets for our training. Only HR images are required. <br>
You can download from :

1. DIV2K: http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_train_HR.zip
2. Flickr2K: https://cv.snu.ac.kr/research/EDSR/Flickr2K.tar
3. OST: https://openmmlab.oss-cn-hangzhou.aliyuncs.com/datasets/OST_dataset.zip

For the DF2K dataset, we use a multi-scale strategy, *i.e.*, we downsample HR images to obtain several Ground-Truth images with different scales.

We then crop DF2K images into sub-images for faster IO and processing.

You need to prepare a txt file containing the image paths. The following are some examples in `meta_info_DF2Kmultiscale+OST_sub.txt` (As different users may have different sub-images partitions, this file is not suitable for your purpose and you need to prepare your own txt file; a minimal generation sketch follows this guide):

```txt
DF2K_HR_sub/000001_s001.png
DF2K_HR_sub/000001_s002.png
DF2K_HR_sub/000001_s003.png
...
```

## Train Real-ESRNet

1. Download pre-trained model [ESRGAN](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) into `experiments/pretrained_models`.
    ```bash
    wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth -P experiments/pretrained_models
    ```
1. Modify the content in the option file `options/train_realesrnet_x4plus.yml` accordingly:
    ```yml
    train:
        name: DF2K+OST
        type: RealESRGANDataset
        dataroot_gt: datasets/DF2K  # modify to the root path of your folder
        meta_info: realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt  # modify to your own generate meta info txt
        io_backend:
            type: disk
    ```
1. If you want to perform validation during training, uncomment those lines and modify accordingly:
    ```yml
    # Uncomment these for validation
    # val:
    #   name: validation
    #   type: PairedImageDataset
    #   dataroot_gt: path_to_gt
    #   dataroot_lq: path_to_lq
    #   io_backend:
    #     type: disk

    ...

    # Uncomment these for validation
    # validation settings
    # val:
    #   val_freq: !!float 5e3
    #   save_img: True

    #   metrics:
    #     psnr: # metric name, can be arbitrary
    #       type: calculate_psnr
    #       crop_border: 4
    #       test_y_channel: false
    ```
1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training:
    ```bash
    CUDA_VISIBLE_DEVICES=0,1,2,3 \
    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --debug
    ```
1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary.
    ```bash
    CUDA_VISIBLE_DEVICES=0,1,2,3 \
    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrnet_x4plus.yml --launcher pytorch --auto_resume
    ```

## Train Real-ESRGAN

1. After the training of Real-ESRNet, you now have the file `experiments/train_RealESRNetx4plus_1000k_B12G4_fromESRGAN/model/net_g_1000000.pth`. If you need to specify the pre-trained path to other files, modify the `pretrain_network_g` value in the option file `train_realesrgan_x4plus.yml`.
1. Modify the option file `train_realesrgan_x4plus.yml` accordingly. Most modifications are similar to those listed above.
1. Before the formal training, you may run in the `--debug` mode to see whether everything is OK. We use four GPUs for training:
    ```bash
    CUDA_VISIBLE_DEVICES=0,1,2,3 \
    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --debug
    ```
1. The formal training. We use four GPUs for training. We use the `--auto_resume` argument to automatically resume the training if necessary.
    ```bash
    CUDA_VISIBLE_DEVICES=0,1,2,3 \
    python -m torch.distributed.launch --nproc_per_node=4 --master_port=4321 realesrgan/train.py -opt options/train_realesrgan_x4plus.yml --launcher pytorch --auto_resume
    ```
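
The guide above asks you to prepare your own meta_info txt of sub-image paths. A minimal generation sketch, not part of the deleted guide: the folder and output names follow the guide's examples (`datasets/DF2K`, `DF2K_HR_sub`, `meta_info_DF2Kmultiscale+OST_sub.txt`) and are assumptions to adapt to your own layout.

```python
# Sketch only: generate the meta_info txt of sub-image paths described in the
# guide above. Folder and file names are the guide's examples, not requirements.
import os

root = "datasets/DF2K"          # dataroot_gt from the option file
subdir = "DF2K_HR_sub"          # folder holding the cropped sub-images
out_path = "realesrgan/meta_info/meta_info_DF2Kmultiscale+OST_sub.txt"

os.makedirs(os.path.dirname(out_path), exist_ok=True)
with open(out_path, "w") as f:
    for name in sorted(os.listdir(os.path.join(root, subdir))):
        if name.lower().endswith((".png", ".jpg", ".jpeg")):
            f.write(f"{subdir}/{name}\n")
```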
spaces/BraydenMoore/MARCI-NFL-Betting/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: MARCI (NFL Betting)
emoji: 🏈
colorFrom: red
colorTo: blue
sdk: docker
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/main.py
DELETED
@@ -1,1040 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
Here are some use cases:
|
3 |
-
python main.py --config config/all.yaml --experiment experiment_8x1 --signature demo1 --target data/demo1.png
|
4 |
-
"""
|
5 |
-
import pydiffvg
|
6 |
-
import torch
|
7 |
-
import cv2
|
8 |
-
import matplotlib.pyplot as plt
|
9 |
-
import random
|
10 |
-
import argparse
|
11 |
-
import math
|
12 |
-
import errno
|
13 |
-
from tqdm import tqdm
|
14 |
-
from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR
|
15 |
-
from torch.nn.functional import adaptive_avg_pool2d
|
16 |
-
import warnings
|
17 |
-
warnings.filterwarnings("ignore")
|
18 |
-
|
19 |
-
import PIL
|
20 |
-
import PIL.Image
|
21 |
-
import os
|
22 |
-
import os.path as osp
|
23 |
-
import numpy as np
|
24 |
-
import numpy.random as npr
|
25 |
-
import shutil
|
26 |
-
import copy
|
27 |
-
# import skfmm
|
28 |
-
from xing_loss import xing_loss
|
29 |
-
|
30 |
-
import yaml
|
31 |
-
from easydict import EasyDict as edict
|
32 |
-
|
33 |
-
|
34 |
-
pydiffvg.set_print_timing(False)
|
35 |
-
gamma = 1.0
|
36 |
-
|
37 |
-
##########
|
38 |
-
# helper #
|
39 |
-
##########
|
40 |
-
|
41 |
-
from utils import \
|
42 |
-
get_experiment_id, \
|
43 |
-
get_path_schedule, \
|
44 |
-
edict_2_dict, \
|
45 |
-
check_and_create_dir
|
46 |
-
|
47 |
-
def get_bezier_circle(radius=1, segments=4, bias=None):
|
48 |
-
points = []
|
49 |
-
if bias is None:
|
50 |
-
bias = (random.random(), random.random())
|
51 |
-
avg_degree = 360 / (segments*3)
|
52 |
-
for i in range(0, segments*3):
|
53 |
-
point = (np.cos(np.deg2rad(i * avg_degree)),
|
54 |
-
np.sin(np.deg2rad(i * avg_degree)))
|
55 |
-
points.append(point)
|
56 |
-
points = torch.tensor(points)
|
57 |
-
points = (points)*radius + torch.tensor(bias).unsqueeze(dim=0)
|
58 |
-
points = points.type(torch.FloatTensor)
|
59 |
-
return points
|
60 |
-
|
61 |
-
def get_sdf(phi, method='skfmm', **kwargs):
|
62 |
-
if method == 'skfmm':
|
63 |
-
import skfmm
|
64 |
-
phi = (phi-0.5)*2
|
65 |
-
if (phi.max() <= 0) or (phi.min() >= 0):
|
66 |
-
return np.zeros(phi.shape).astype(np.float32)
|
67 |
-
sd = skfmm.distance(phi, dx=1)
|
68 |
-
|
69 |
-
flip_negative = kwargs.get('flip_negative', True)
|
70 |
-
if flip_negative:
|
71 |
-
sd = np.abs(sd)
|
72 |
-
|
73 |
-
truncate = kwargs.get('truncate', 10)
|
74 |
-
sd = np.clip(sd, -truncate, truncate)
|
75 |
-
# print(f"max sd value is: {sd.max()}")
|
76 |
-
|
77 |
-
zero2max = kwargs.get('zero2max', True)
|
78 |
-
if zero2max and flip_negative:
|
79 |
-
sd = sd.max() - sd
|
80 |
-
elif zero2max:
|
81 |
-
raise ValueError
|
82 |
-
|
83 |
-
normalize = kwargs.get('normalize', 'sum')
|
84 |
-
if normalize == 'sum':
|
85 |
-
sd /= sd.sum()
|
86 |
-
elif normalize == 'to1':
|
87 |
-
sd /= sd.max()
|
88 |
-
return sd
|
89 |
-
|
90 |
-
def parse_args():
|
91 |
-
parser = argparse.ArgumentParser()
|
92 |
-
parser.add_argument('--debug', action='store_true', default=False)
|
93 |
-
parser.add_argument("--config", type=str)
|
94 |
-
parser.add_argument("--experiment", type=str)
|
95 |
-
parser.add_argument("--seed", type=int)
|
96 |
-
parser.add_argument("--target", type=str, help="target image path")
|
97 |
-
parser.add_argument('--log_dir', metavar='DIR', default="log/debug")
|
98 |
-
parser.add_argument('--initial', type=str, default="random", choices=['random', 'circle'])
|
99 |
-
parser.add_argument('--signature', nargs='+', type=str)
|
100 |
-
parser.add_argument('--seginit', nargs='+', type=str)
|
101 |
-
parser.add_argument("--num_segments", type=int, default=4)
|
102 |
-
# parser.add_argument("--num_paths", type=str, default="1,1,1")
|
103 |
-
# parser.add_argument("--num_iter", type=int, default=500)
|
104 |
-
# parser.add_argument('--free', action='store_true')
|
105 |
-
# Please ensure that image resolution is divisible by pool_size; otherwise the performance would drop a lot.
|
106 |
-
# parser.add_argument('--pool_size', type=int, default=40, help="the pooled image size for next path initialization")
|
107 |
-
# parser.add_argument('--save_loss', action='store_true')
|
108 |
-
# parser.add_argument('--save_init', action='store_true')
|
109 |
-
# parser.add_argument('--save_image', action='store_true')
|
110 |
-
# parser.add_argument('--save_video', action='store_true')
|
111 |
-
# parser.add_argument('--print_weight', action='store_true')
|
112 |
-
# parser.add_argument('--circle_init_radius', type=float)
|
113 |
-
cfg = edict()
|
114 |
-
args = parser.parse_args()
|
115 |
-
cfg.debug = args.debug
|
116 |
-
cfg.config = args.config
|
117 |
-
cfg.experiment = args.experiment
|
118 |
-
cfg.seed = args.seed
|
119 |
-
cfg.target = args.target
|
120 |
-
cfg.log_dir = args.log_dir
|
121 |
-
cfg.initial = args.initial
|
122 |
-
cfg.signature = args.signature
|
123 |
-
# set cfg num_segments in command
|
124 |
-
cfg.num_segments = args.num_segments
|
125 |
-
if args.seginit is not None:
|
126 |
-
cfg.seginit = edict()
|
127 |
-
cfg.seginit.type = args.seginit[0]
|
128 |
-
if cfg.seginit.type == 'circle':
|
129 |
-
cfg.seginit.radius = float(args.seginit[1])
|
130 |
-
return cfg
|
131 |
-
|
132 |
-
def ycrcb_conversion(im, format='[bs x 3 x 2D]', reverse=False):
|
133 |
-
mat = torch.FloatTensor([
|
134 |
-
[ 65.481/255, 128.553/255, 24.966/255], # ranged_from [0, 219/255]
|
135 |
-
[-37.797/255, -74.203/255, 112.000/255], # ranged_from [-112/255, 112/255]
|
136 |
-
[112.000/255, -93.786/255, -18.214/255], # ranged_from [-112/255, 112/255]
|
137 |
-
]).to(im.device)
|
138 |
-
|
139 |
-
if reverse:
|
140 |
-
mat = mat.inverse()
|
141 |
-
|
142 |
-
if format == '[bs x 3 x 2D]':
|
143 |
-
im = im.permute(0, 2, 3, 1)
|
144 |
-
im = torch.matmul(im, mat.T)
|
145 |
-
im = im.permute(0, 3, 1, 2).contiguous()
|
146 |
-
return im
|
147 |
-
elif format == '[2D x 3]':
|
148 |
-
im = torch.matmul(im, mat.T)
|
149 |
-
return im
|
150 |
-
else:
|
151 |
-
raise ValueError
|
152 |
-
|
153 |
-
class random_coord_init():
|
154 |
-
def __init__(self, canvas_size):
|
155 |
-
self.canvas_size = canvas_size
|
156 |
-
def __call__(self):
|
157 |
-
h, w = self.canvas_size
|
158 |
-
return [npr.uniform(0, 1)*w, npr.uniform(0, 1)*h]
|
159 |
-
|
160 |
-
class naive_coord_init():
|
161 |
-
def __init__(self, pred, gt, format='[bs x c x 2D]', replace_sampling=True):
|
162 |
-
if isinstance(pred, torch.Tensor):
|
163 |
-
pred = pred.detach().cpu().numpy()
|
164 |
-
if isinstance(gt, torch.Tensor):
|
165 |
-
gt = gt.detach().cpu().numpy()
|
166 |
-
|
167 |
-
if format == '[bs x c x 2D]':
|
168 |
-
self.map = ((pred[0] - gt[0])**2).sum(0)
|
169 |
-
elif format == ['[2D x c]']:
|
170 |
-
self.map = ((pred - gt)**2).sum(-1)
|
171 |
-
else:
|
172 |
-
raise ValueError
|
173 |
-
self.replace_sampling = replace_sampling
|
174 |
-
|
175 |
-
def __call__(self):
|
176 |
-
coord = np.where(self.map == self.map.max())
|
177 |
-
coord_h, coord_w = coord[0][0], coord[1][0]
|
178 |
-
if self.replace_sampling:
|
179 |
-
self.map[coord_h, coord_w] = -1
|
180 |
-
return [coord_w, coord_h]
|
181 |
-
|
182 |
-
|
183 |
-
class sparse_coord_init():
|
184 |
-
def __init__(self, pred, gt, format='[bs x c x 2D]', quantile_interval=200, nodiff_thres=0.1):
|
185 |
-
if isinstance(pred, torch.Tensor):
|
186 |
-
pred = pred.detach().cpu().numpy()
|
187 |
-
if isinstance(gt, torch.Tensor):
|
188 |
-
gt = gt.detach().cpu().numpy()
|
189 |
-
if format == '[bs x c x 2D]':
|
190 |
-
self.map = ((pred[0] - gt[0])**2).sum(0)
|
191 |
-
self.reference_gt = copy.deepcopy(
|
192 |
-
np.transpose(gt[0], (1, 2, 0)))
|
193 |
-
elif format == ['[2D x c]']:
|
194 |
-
self.map = (np.abs(pred - gt)).sum(-1)
|
195 |
-
self.reference_gt = copy.deepcopy(gt[0])
|
196 |
-
else:
|
197 |
-
raise ValueError
|
198 |
-
# OptionA: Zero too small errors to avoid the error too small deadloop
|
199 |
-
self.map[self.map < nodiff_thres] = 0
|
200 |
-
quantile_interval = np.linspace(0., 1., quantile_interval)
|
201 |
-
quantized_interval = np.quantile(self.map, quantile_interval)
|
202 |
-
# remove redundant
|
203 |
-
quantized_interval = np.unique(quantized_interval)
|
204 |
-
quantized_interval = sorted(quantized_interval[1:-1])
|
205 |
-
self.map = np.digitize(self.map, quantized_interval, right=False)
|
206 |
-
self.map = np.clip(self.map, 0, 255).astype(np.uint8)
|
207 |
-
self.idcnt = {}
|
208 |
-
for idi in sorted(np.unique(self.map)):
|
209 |
-
self.idcnt[idi] = (self.map==idi).sum()
|
210 |
-
self.idcnt.pop(min(self.idcnt.keys()))
|
211 |
-
# remove smallest one to remove the correct region
|
212 |
-
def __call__(self):
|
213 |
-
if len(self.idcnt) == 0:
|
214 |
-
h, w = self.map.shape
|
215 |
-
return [npr.uniform(0, 1)*w, npr.uniform(0, 1)*h]
|
216 |
-
target_id = max(self.idcnt, key=self.idcnt.get)
|
217 |
-
_, component, cstats, ccenter = cv2.connectedComponentsWithStats(
|
218 |
-
(self.map==target_id).astype(np.uint8), connectivity=4)
|
219 |
-
# remove cid = 0, it is the invalid area
|
220 |
-
csize = [ci[-1] for ci in cstats[1:]]
|
221 |
-
target_cid = csize.index(max(csize))+1
|
222 |
-
center = ccenter[target_cid][::-1]
|
223 |
-
coord = np.stack(np.where(component == target_cid)).T
|
224 |
-
dist = np.linalg.norm(coord-center, axis=1)
|
225 |
-
target_coord_id = np.argmin(dist)
|
226 |
-
coord_h, coord_w = coord[target_coord_id]
|
227 |
-
# replace_sampling
|
228 |
-
self.idcnt[target_id] -= max(csize)
|
229 |
-
if self.idcnt[target_id] == 0:
|
230 |
-
self.idcnt.pop(target_id)
|
231 |
-
self.map[component == target_cid] = 0
|
232 |
-
return [coord_w, coord_h]
|
233 |
-
|
234 |
-
|
235 |
-
def init_shapes(num_paths,
|
236 |
-
num_segments,
|
237 |
-
canvas_size,
|
238 |
-
seginit_cfg,
|
239 |
-
shape_cnt,
|
240 |
-
pos_init_method=None,
|
241 |
-
trainable_stroke=False,
|
242 |
-
gt=None,
|
243 |
-
**kwargs):
|
244 |
-
shapes = []
|
245 |
-
shape_groups = []
|
246 |
-
h, w = canvas_size
|
247 |
-
|
248 |
-
# change path init location
|
249 |
-
if pos_init_method is None:
|
250 |
-
pos_init_method = random_coord_init(canvas_size=canvas_size)
|
251 |
-
|
252 |
-
for i in range(num_paths):
|
253 |
-
num_control_points = [2] * num_segments
|
254 |
-
|
255 |
-
if seginit_cfg.type=="random":
|
256 |
-
points = []
|
257 |
-
p0 = pos_init_method()
|
258 |
-
color_ref = copy.deepcopy(p0)
|
259 |
-
points.append(p0)
|
260 |
-
for j in range(num_segments):
|
261 |
-
radius = seginit_cfg.radius
|
262 |
-
p1 = (p0[0] + radius * npr.uniform(-0.5, 0.5),
|
263 |
-
p0[1] + radius * npr.uniform(-0.5, 0.5))
|
264 |
-
p2 = (p1[0] + radius * npr.uniform(-0.5, 0.5),
|
265 |
-
p1[1] + radius * npr.uniform(-0.5, 0.5))
|
266 |
-
p3 = (p2[0] + radius * npr.uniform(-0.5, 0.5),
|
267 |
-
p2[1] + radius * npr.uniform(-0.5, 0.5))
|
268 |
-
points.append(p1)
|
269 |
-
points.append(p2)
|
270 |
-
if j < num_segments - 1:
|
271 |
-
points.append(p3)
|
272 |
-
p0 = p3
|
273 |
-
points = torch.FloatTensor(points)
|
274 |
-
|
275 |
-
# circle points initialization
|
276 |
-
elif seginit_cfg.type=="circle":
|
277 |
-
radius = seginit_cfg.radius
|
278 |
-
if radius is None:
|
279 |
-
radius = npr.uniform(0.5, 1)
|
280 |
-
center = pos_init_method()
|
281 |
-
color_ref = copy.deepcopy(center)
|
282 |
-
points = get_bezier_circle(
|
283 |
-
radius=radius, segments=num_segments,
|
284 |
-
bias=center)
|
285 |
-
|
286 |
-
path = pydiffvg.Path(num_control_points = torch.LongTensor(num_control_points),
|
287 |
-
points = points,
|
288 |
-
stroke_width = torch.tensor(0.0),
|
289 |
-
is_closed = True)
|
290 |
-
shapes.append(path)
|
291 |
-
# !!!!!!problem is here. the shape group shape_ids is wrong
|
292 |
-
|
293 |
-
if gt is not None:
|
294 |
-
wref, href = color_ref
|
295 |
-
wref = max(0, min(int(wref), w-1))
|
296 |
-
href = max(0, min(int(href), h-1))
|
297 |
-
fill_color_init = list(gt[0, :, href, wref]) + [1.]
|
298 |
-
fill_color_init = torch.FloatTensor(fill_color_init)
|
299 |
-
stroke_color_init = torch.FloatTensor(npr.uniform(size=[4]))
|
300 |
-
else:
|
301 |
-
fill_color_init = torch.FloatTensor(npr.uniform(size=[4]))
|
302 |
-
stroke_color_init = torch.FloatTensor(npr.uniform(size=[4]))
|
303 |
-
|
304 |
-
path_group = pydiffvg.ShapeGroup(
|
305 |
-
shape_ids = torch.LongTensor([shape_cnt+i]),
|
306 |
-
fill_color = fill_color_init,
|
307 |
-
stroke_color = stroke_color_init,
|
308 |
-
)
|
309 |
-
shape_groups.append(path_group)
|
310 |
-
|
311 |
-
point_var = []
|
312 |
-
color_var = []
|
313 |
-
|
314 |
-
for path in shapes:
|
315 |
-
path.points.requires_grad = True
|
316 |
-
point_var.append(path.points)
|
317 |
-
for group in shape_groups:
|
318 |
-
group.fill_color.requires_grad = True
|
319 |
-
color_var.append(group.fill_color)
|
320 |
-
|
321 |
-
if trainable_stroke:
|
322 |
-
stroke_width_var = []
|
323 |
-
stroke_color_var = []
|
324 |
-
for path in shapes:
|
325 |
-
path.stroke_width.requires_grad = True
|
326 |
-
stroke_width_var.append(path.stroke_width)
|
327 |
-
for group in shape_groups:
|
328 |
-
group.stroke_color.requires_grad = True
|
329 |
-
stroke_color_var.append(group.stroke_color)
|
330 |
-
return shapes, shape_groups, point_var, color_var, stroke_width_var, stroke_color_var
|
331 |
-
else:
|
332 |
-
return shapes, shape_groups, point_var, color_var
|
333 |
-
|
334 |
-
class linear_decay_lrlambda_f(object):
|
335 |
-
def __init__(self, decay_every, decay_ratio):
|
336 |
-
self.decay_every = decay_every
|
337 |
-
self.decay_ratio = decay_ratio
|
338 |
-
|
339 |
-
def __call__(self, n):
|
340 |
-
decay_time = n//self.decay_every
|
341 |
-
decay_step = n %self.decay_every
|
342 |
-
lr_s = self.decay_ratio**decay_time
|
343 |
-
lr_e = self.decay_ratio**(decay_time+1)
|
344 |
-
r = decay_step/self.decay_every
|
345 |
-
lr = lr_s * (1-r) + lr_e * r
|
346 |
-
return lr
|
347 |
-
|
348 |
-
def main_func(target, experiment, num_iter, cfg_arg):
    with open(cfg_arg.config, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    cfg_default = edict(cfg['default'])
    cfg = edict(cfg[cfg_arg.experiment])
    cfg.update(cfg_default)
    cfg.update(cfg_arg)
    cfg.exid = get_experiment_id(cfg.debug)

    cfg.experiment_dir = \
        osp.join(cfg.log_dir, '{}_{}'.format(cfg.exid, '_'.join(cfg.signature)))
    cfg.target = target
    cfg.experiment = experiment
    cfg.num_iter = num_iter

    configfile = osp.join(cfg.experiment_dir, 'config.yaml')
    check_and_create_dir(configfile)
    with open(osp.join(configfile), 'w') as f:
        yaml.dump(edict_2_dict(cfg), f)

    # Use GPU if available
    pydiffvg.set_use_gpu(torch.cuda.is_available())
    device = pydiffvg.get_device()

    # gt = np.array(PIL.Image.open(cfg.target))
    gt = np.array(cfg.target)
    print(f"Input image shape is: {gt.shape}")
    if len(gt.shape) == 2:
        print("Converting the gray-scale image to RGB.")
        # gt is a numpy array here, so use numpy broadcasting; the original
        # called .unsqueeze on a numpy array, which does not exist.
        gt = np.repeat(gt[:, :, None], 3, axis=-1)
    if gt.shape[2] == 4:
        print("Input image includes alpha channel, simply dropout alpha channel.")
        gt = gt[:, :, :3]
    gt = (gt / 255).astype(np.float32)
    gt = torch.FloatTensor(gt).permute(2, 0, 1)[None].to(device)
    if cfg.use_ycrcb:
        gt = ycrcb_conversion(gt)
    h, w = gt.shape[2:]

    path_schedule = get_path_schedule(**cfg.path_schedule)

    if cfg.seed is not None:
        random.seed(cfg.seed)
        npr.seed(cfg.seed)
        torch.manual_seed(cfg.seed)
    render = pydiffvg.RenderFunction.apply

    shapes_record, shape_groups_record = [], []

    region_loss = None
    loss_matrix = []

    para_point, para_color = {}, {}
    if cfg.trainable.stroke:
        para_stroke_width, para_stroke_color = {}, {}

    pathn_record = []
    # Background
    if cfg.trainable.bg:
        # meancolor = gt.mean([2, 3])[0]
        para_bg = torch.tensor([1., 1., 1.], requires_grad=True, device=device)
    else:
        if cfg.use_ycrcb:
            para_bg = torch.tensor([219/255, 0, 0], requires_grad=False, device=device)
        else:
            para_bg = torch.tensor([1., 1., 1.], requires_grad=False, device=device)

    ##################
    # start_training #
    ##################

    loss_weight = None
    loss_weight_keep = 0
    if cfg.coord_init.type == 'naive':
        pos_init_method = naive_coord_init(
            para_bg.view(1, -1, 1, 1).repeat(1, 1, h, w), gt)
    elif cfg.coord_init.type == 'sparse':
        pos_init_method = sparse_coord_init(
            para_bg.view(1, -1, 1, 1).repeat(1, 1, h, w), gt)
    elif cfg.coord_init.type == 'random':
        pos_init_method = random_coord_init([h, w])
    else:
        raise ValueError

    lrlambda_f = linear_decay_lrlambda_f(cfg.num_iter, 0.4)
    optim_schedular_dict = {}

    for path_idx, pathn in enumerate(path_schedule):
        loss_list = []
        print("=> Adding [{}] paths, [{}] ...".format(pathn, cfg.seginit.type))
        pathn_record.append(pathn)
        pathn_record_str = '-'.join([str(i) for i in pathn_record])

        # Initialize the newly added shapes and their trainable variables.
        if cfg.trainable.stroke:
            shapes, shape_groups, point_var, color_var, stroke_width_var, stroke_color_var = init_shapes(
                pathn, cfg.num_segments, (h, w),
                cfg.seginit, len(shapes_record),
                pos_init_method,
                trainable_stroke=True,
                gt=gt, )
            para_stroke_width[path_idx] = stroke_width_var
            para_stroke_color[path_idx] = stroke_color_var
        else:
            shapes, shape_groups, point_var, color_var = init_shapes(
                pathn, cfg.num_segments, (h, w),
                cfg.seginit, len(shapes_record),
                pos_init_method,
                trainable_stroke=False,
                gt=gt, )

        shapes_record += shapes
        shape_groups_record += shape_groups

        if cfg.save.init:
            filename = os.path.join(
                cfg.experiment_dir, "svg-init",
                "{}-init.svg".format(pathn_record_str))
            check_and_create_dir(filename)
            pydiffvg.save_svg(
                filename, w, h,
                shapes_record, shape_groups_record)

        para = {}
        if (cfg.trainable.bg) and (path_idx == 0):
            para['bg'] = [para_bg]
        para['point'] = point_var
        para['color'] = color_var
        if cfg.trainable.stroke:
            para['stroke_width'] = stroke_width_var
            para['stroke_color'] = stroke_color_var

        pg = [{'params': para[ki], 'lr': cfg.lr_base[ki]} for ki in sorted(para.keys())]
        optim = torch.optim.Adam(pg)

        if cfg.trainable.record:
            scheduler = LambdaLR(
                optim, lr_lambda=lrlambda_f, last_epoch=-1)
        else:
            scheduler = LambdaLR(
                optim, lr_lambda=lrlambda_f, last_epoch=cfg.num_iter)
        optim_schedular_dict[path_idx] = (optim, scheduler)

        # Inner loop training
        t_range = tqdm(range(cfg.num_iter))
        for t in t_range:

            for _, (optim, _) in optim_schedular_dict.items():
                optim.zero_grad()

            # Forward pass: render the image.
            scene_args = pydiffvg.RenderFunction.serialize_scene(
                w, h, shapes_record, shape_groups_record)
            img = render(w, h, 2, 2, t, None, *scene_args)

            # Compose img with the background color
            img = img[:, :, 3:4] * img[:, :, :3] + \
                para_bg * (1 - img[:, :, 3:4])

            if cfg.save.video:
                filename = os.path.join(
                    cfg.experiment_dir, "video-png",
                    "{}-iter{}.png".format(pathn_record_str, t))
                check_and_create_dir(filename)
                if cfg.use_ycrcb:
                    imshow = ycrcb_conversion(
                        img, format='[2D x 3]', reverse=True).detach().cpu()
                else:
                    imshow = img.detach().cpu()
                pydiffvg.imwrite(imshow, filename, gamma=gamma)

            # ### added for app
            # if t % 30 == 0 and t != 0:
            #     # print(f"debug: {t}, {filename} {img.size()}")
            #     return img.detach().cpu().numpy(), t

            x = img.unsqueeze(0).permute(0, 3, 1, 2)  # HWC -> NCHW

            if cfg.use_ycrcb:
                color_reweight = torch.FloatTensor([255/219, 255/224, 255/255]).to(device)
                loss = ((x - gt) * (color_reweight.view(1, -1, 1, 1)))**2
            else:
                loss = ((x - gt)**2)

            if cfg.loss.use_l1_loss:
                loss = abs(x - gt)

            if cfg.loss.use_distance_weighted_loss:
                if cfg.use_ycrcb:
                    raise ValueError
                shapes_forsdf = copy.deepcopy(shapes)
                shape_groups_forsdf = copy.deepcopy(shape_groups)
                for si in shapes_forsdf:
                    si.stroke_width = torch.FloatTensor([0]).to(device)
                for sg_idx, sgi in enumerate(shape_groups_forsdf):
                    sgi.fill_color = torch.FloatTensor([1, 1, 1, 1]).to(device)
                    sgi.shape_ids = torch.LongTensor([sg_idx]).to(device)

                sargs_forsdf = pydiffvg.RenderFunction.serialize_scene(
                    w, h, shapes_forsdf, shape_groups_forsdf)
                with torch.no_grad():
                    im_forsdf = render(w, h, 2, 2, 0, None, *sargs_forsdf)
                # Using the alpha channel is a trick to get a 0-1 coverage image.
                im_forsdf = (im_forsdf[:, :, 3]).detach().cpu().numpy()
                loss_weight = get_sdf(im_forsdf, normalize='to1')
                loss_weight += loss_weight_keep
                loss_weight = np.clip(loss_weight, 0, 1)
                loss_weight = torch.FloatTensor(loss_weight).to(device)

            if cfg.save.loss:
                save_loss = loss.squeeze(dim=0).mean(dim=0, keepdim=False).cpu().detach().numpy()
                save_weight = loss_weight.cpu().detach().numpy()
                save_weighted_loss = save_loss * save_weight
                # normalize to [0, 1]
                save_loss = (save_loss - np.min(save_loss)) / np.ptp(save_loss)
                save_weight = (save_weight - np.min(save_weight)) / np.ptp(save_weight)
                save_weighted_loss = (save_weighted_loss - np.min(save_weighted_loss)) / np.ptp(save_weighted_loss)

                # save
                plt.imshow(save_loss, cmap='Reds')
                plt.axis('off')
                # plt.colorbar()
                filename = os.path.join(cfg.experiment_dir, "loss", "{}-iter{}-mseloss.png".format(pathn_record_str, t))
                check_and_create_dir(filename)
                plt.savefig(filename, dpi=800)
                plt.close()

                plt.imshow(save_weight, cmap='Greys')
                plt.axis('off')
                # plt.colorbar()
                filename = os.path.join(cfg.experiment_dir, "loss", "{}-iter{}-sdfweight.png".format(pathn_record_str, t))
                plt.savefig(filename, dpi=800)
                plt.close()

                plt.imshow(save_weighted_loss, cmap='Reds')
                plt.axis('off')
                # plt.colorbar()
                filename = os.path.join(cfg.experiment_dir, "loss", "{}-iter{}-weightedloss.png".format(pathn_record_str, t))
                plt.savefig(filename, dpi=800)
                plt.close()

            if loss_weight is None:
                loss = loss.sum(1).mean()
            else:
                loss = (loss.sum(1) * loss_weight).mean()

            # if (cfg.loss.bis_loss_weight is not None) and (cfg.loss.bis_loss_weight > 0):
            #     loss_bis = bezier_intersection_loss(point_var[0]) * cfg.loss.bis_loss_weight
            #     loss = loss + loss_bis
            if (cfg.loss.xing_loss_weight is not None) \
                    and (cfg.loss.xing_loss_weight > 0):
                loss_xing = xing_loss(point_var) * cfg.loss.xing_loss_weight
                loss = loss + loss_xing

            loss_list.append(loss.item())
            t_range.set_postfix({'loss': loss.item()})
            loss.backward()

            # step
            for _, (optim, scheduler) in optim_schedular_dict.items():
                optim.step()
                scheduler.step()

            for group in shape_groups_record:
                group.fill_color.data.clamp_(0.0, 1.0)

        if cfg.loss.use_distance_weighted_loss:
            loss_weight_keep = loss_weight.detach().cpu().numpy() * 1

        if not cfg.trainable.record:
            # pg is a list of optimizer param-group dicts; freeze their tensors.
            # (The original iterated pg.items() and set pi.require_grad, both of
            # which are incorrect for a list of dicts.)
            for pi in pg:
                for ppi in pi['params']:
                    ppi.requires_grad = False
            optim_schedular_dict = {}

        if cfg.save.image:
            filename = os.path.join(
                cfg.experiment_dir, "demo-png", "{}.png".format(pathn_record_str))
            check_and_create_dir(filename)
            if cfg.use_ycrcb:
                imshow = ycrcb_conversion(
                    img, format='[2D x 3]', reverse=True).detach().cpu()
            else:
                imshow = img.detach().cpu()
            pydiffvg.imwrite(imshow, filename, gamma=gamma)

        svg_app_file_name = ""
        if cfg.save.output:
            filename = os.path.join(
                cfg.experiment_dir, "output-svg", "{}.svg".format(pathn_record_str))
            check_and_create_dir(filename)
            pydiffvg.save_svg(filename, w, h, shapes_record, shape_groups_record)
            svg_app_file_name = filename

        loss_matrix.append(loss_list)

        # calculate the pixel loss
        # pixel_loss = ((x-gt)**2).sum(dim=1, keepdim=True).sqrt_()  # [N, 1, H, W]
        # region_loss = adaptive_avg_pool2d(pixel_loss, cfg.region_loss_pool_size)
        # loss_weight = torch.softmax(region_loss.reshape(1, 1, -1), dim=-1)\
        #     .reshape_as(region_loss)

        pos_init_method = naive_coord_init(x, gt)

        if cfg.coord_init.type == 'naive':
            pos_init_method = naive_coord_init(x, gt)
        elif cfg.coord_init.type == 'sparse':
            pos_init_method = sparse_coord_init(x, gt)
        elif cfg.coord_init.type == 'random':
            pos_init_method = random_coord_init([h, w])
        else:
            raise ValueError

        if cfg.save.video:
            print("saving iteration video...")
            img_array = []
            for ii in range(0, cfg.num_iter):
                filename = os.path.join(
                    cfg.experiment_dir, "video-png",
                    "{}-iter{}.png".format(pathn_record_str, ii))
                img = cv2.imread(filename)
                # cv2.putText(
                #     img, "Path:{} \nIteration:{}".format(pathn_record_str, ii),
                #     (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
                img_array.append(img)

            videoname = os.path.join(
                cfg.experiment_dir, "video-avi",
                "{}.avi".format(pathn_record_str))
            check_and_create_dir(videoname)
            out = cv2.VideoWriter(
                videoname,
                # cv2.VideoWriter_fourcc(*'mp4v'),
                cv2.VideoWriter_fourcc(*'FFV1'),
                20.0, (w, h))
            for iii in range(len(img_array)):
                out.write(img_array[iii])
            out.release()
            # shutil.rmtree(os.path.join(cfg.experiment_dir, "video-png"))

    print("The last loss is: {}".format(loss.item()))
    return img.detach().cpu().numpy(), svg_app_file_name


if __name__ == "__main__":

    ###############
    # make config #
    ###############

    cfg_arg = parse_args()
    with open(cfg_arg.config, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    cfg_default = edict(cfg['default'])
    cfg = edict(cfg[cfg_arg.experiment])
    cfg.update(cfg_default)
    cfg.update(cfg_arg)
    cfg.exid = get_experiment_id(cfg.debug)

    cfg.experiment_dir = \
        osp.join(cfg.log_dir, '{}_{}'.format(cfg.exid, '_'.join(cfg.signature)))
    configfile = osp.join(cfg.experiment_dir, 'config.yaml')
    check_and_create_dir(configfile)
    with open(osp.join(configfile), 'w') as f:
        yaml.dump(edict_2_dict(cfg), f)

    # Use GPU if available
    pydiffvg.set_use_gpu(torch.cuda.is_available())
    device = pydiffvg.get_device()

    gt = np.array(PIL.Image.open(cfg.target))
    print(f"Input image shape is: {gt.shape}")
    if len(gt.shape) == 2:
        print("Converting the gray-scale image to RGB.")
        # numpy equivalent of the intended channel repeat (same fix as in main_func)
        gt = np.repeat(gt[:, :, None], 3, axis=-1)
    if gt.shape[2] == 4:
        print("Input image includes alpha channel, simply dropout alpha channel.")
        gt = gt[:, :, :3]
    gt = (gt / 255).astype(np.float32)
    gt = torch.FloatTensor(gt).permute(2, 0, 1)[None].to(device)
    if cfg.use_ycrcb:
        gt = ycrcb_conversion(gt)
    h, w = gt.shape[2:]

    path_schedule = get_path_schedule(**cfg.path_schedule)

    if cfg.seed is not None:
        random.seed(cfg.seed)
        npr.seed(cfg.seed)
        torch.manual_seed(cfg.seed)
    render = pydiffvg.RenderFunction.apply

    shapes_record, shape_groups_record = [], []

    region_loss = None
    loss_matrix = []

    para_point, para_color = {}, {}
    if cfg.trainable.stroke:
        para_stroke_width, para_stroke_color = {}, {}

    pathn_record = []
    # Background
    if cfg.trainable.bg:
        # meancolor = gt.mean([2, 3])[0]
        para_bg = torch.tensor([1., 1., 1.], requires_grad=True, device=device)
    else:
        if cfg.use_ycrcb:
            para_bg = torch.tensor([219/255, 0, 0], requires_grad=False, device=device)
        else:
            para_bg = torch.tensor([1., 1., 1.], requires_grad=False, device=device)

    ##################
    # start_training #
    ##################

    loss_weight = None
    loss_weight_keep = 0
    if cfg.coord_init.type == 'naive':
        pos_init_method = naive_coord_init(
            para_bg.view(1, -1, 1, 1).repeat(1, 1, h, w), gt)
    elif cfg.coord_init.type == 'sparse':
        pos_init_method = sparse_coord_init(
            para_bg.view(1, -1, 1, 1).repeat(1, 1, h, w), gt)
    elif cfg.coord_init.type == 'random':
        pos_init_method = random_coord_init([h, w])
    else:
        raise ValueError

    lrlambda_f = linear_decay_lrlambda_f(cfg.num_iter, 0.4)
    optim_schedular_dict = {}

    for path_idx, pathn in enumerate(path_schedule):
        loss_list = []
        print("=> Adding [{}] paths, [{}] ...".format(pathn, cfg.seginit.type))
        pathn_record.append(pathn)
        pathn_record_str = '-'.join([str(i) for i in pathn_record])

        # Initialize the newly added shapes and their trainable variables.
        if cfg.trainable.stroke:
            shapes, shape_groups, point_var, color_var, stroke_width_var, stroke_color_var = init_shapes(
                pathn, cfg.num_segments, (h, w),
                cfg.seginit, len(shapes_record),
                pos_init_method,
                trainable_stroke=True,
                gt=gt, )
            para_stroke_width[path_idx] = stroke_width_var
            para_stroke_color[path_idx] = stroke_color_var
        else:
            shapes, shape_groups, point_var, color_var = init_shapes(
                pathn, cfg.num_segments, (h, w),
                cfg.seginit, len(shapes_record),
                pos_init_method,
                trainable_stroke=False,
                gt=gt, )

        shapes_record += shapes
        shape_groups_record += shape_groups

        if cfg.save.init:
            filename = os.path.join(
                cfg.experiment_dir, "svg-init",
                "{}-init.svg".format(pathn_record_str))
            check_and_create_dir(filename)
            pydiffvg.save_svg(
                filename, w, h,
                shapes_record, shape_groups_record)

        para = {}
        if (cfg.trainable.bg) and (path_idx == 0):
            para['bg'] = [para_bg]
        para['point'] = point_var
        para['color'] = color_var
        if cfg.trainable.stroke:
            para['stroke_width'] = stroke_width_var
            para['stroke_color'] = stroke_color_var

        pg = [{'params': para[ki], 'lr': cfg.lr_base[ki]} for ki in sorted(para.keys())]
        optim = torch.optim.Adam(pg)

        if cfg.trainable.record:
            scheduler = LambdaLR(
                optim, lr_lambda=lrlambda_f, last_epoch=-1)
        else:
            scheduler = LambdaLR(
                optim, lr_lambda=lrlambda_f, last_epoch=cfg.num_iter)
        optim_schedular_dict[path_idx] = (optim, scheduler)

        # Inner loop training
        t_range = tqdm(range(cfg.num_iter))
        for t in t_range:

            for _, (optim, _) in optim_schedular_dict.items():
                optim.zero_grad()

            # Forward pass: render the image.
            scene_args = pydiffvg.RenderFunction.serialize_scene(
                w, h, shapes_record, shape_groups_record)
            img = render(w, h, 2, 2, t, None, *scene_args)

            # Compose img with the background color
            img = img[:, :, 3:4] * img[:, :, :3] + \
                para_bg * (1 - img[:, :, 3:4])

            if cfg.save.video:
                filename = os.path.join(
                    cfg.experiment_dir, "video-png",
                    "{}-iter{}.png".format(pathn_record_str, t))
                check_and_create_dir(filename)
                if cfg.use_ycrcb:
                    imshow = ycrcb_conversion(
                        img, format='[2D x 3]', reverse=True).detach().cpu()
                else:
                    imshow = img.detach().cpu()
                pydiffvg.imwrite(imshow, filename, gamma=gamma)

            x = img.unsqueeze(0).permute(0, 3, 1, 2)  # HWC -> NCHW

            if cfg.use_ycrcb:
                color_reweight = torch.FloatTensor([255/219, 255/224, 255/255]).to(device)
                loss = ((x - gt) * (color_reweight.view(1, -1, 1, 1)))**2
            else:
                loss = ((x - gt)**2)

            if cfg.loss.use_l1_loss:
                loss = abs(x - gt)

            if cfg.loss.use_distance_weighted_loss:
                if cfg.use_ycrcb:
                    raise ValueError
                shapes_forsdf = copy.deepcopy(shapes)
                shape_groups_forsdf = copy.deepcopy(shape_groups)
                for si in shapes_forsdf:
                    si.stroke_width = torch.FloatTensor([0]).to(device)
                for sg_idx, sgi in enumerate(shape_groups_forsdf):
                    sgi.fill_color = torch.FloatTensor([1, 1, 1, 1]).to(device)
                    sgi.shape_ids = torch.LongTensor([sg_idx]).to(device)

                sargs_forsdf = pydiffvg.RenderFunction.serialize_scene(
                    w, h, shapes_forsdf, shape_groups_forsdf)
                with torch.no_grad():
                    im_forsdf = render(w, h, 2, 2, 0, None, *sargs_forsdf)
                # Using the alpha channel is a trick to get a 0-1 coverage image.
                im_forsdf = (im_forsdf[:, :, 3]).detach().cpu().numpy()
                loss_weight = get_sdf(im_forsdf, normalize='to1')
                loss_weight += loss_weight_keep
                loss_weight = np.clip(loss_weight, 0, 1)
                loss_weight = torch.FloatTensor(loss_weight).to(device)

            if cfg.save.loss:
                save_loss = loss.squeeze(dim=0).mean(dim=0, keepdim=False).cpu().detach().numpy()
                save_weight = loss_weight.cpu().detach().numpy()
                save_weighted_loss = save_loss * save_weight
                # normalize to [0, 1]
                save_loss = (save_loss - np.min(save_loss)) / np.ptp(save_loss)
                save_weight = (save_weight - np.min(save_weight)) / np.ptp(save_weight)
                save_weighted_loss = (save_weighted_loss - np.min(save_weighted_loss)) / np.ptp(save_weighted_loss)

                # save
                plt.imshow(save_loss, cmap='Reds')
                plt.axis('off')
                # plt.colorbar()
                filename = os.path.join(cfg.experiment_dir, "loss", "{}-iter{}-mseloss.png".format(pathn_record_str, t))
                check_and_create_dir(filename)
                plt.savefig(filename, dpi=800)
                plt.close()

                plt.imshow(save_weight, cmap='Greys')
                plt.axis('off')
                # plt.colorbar()
                filename = os.path.join(cfg.experiment_dir, "loss", "{}-iter{}-sdfweight.png".format(pathn_record_str, t))
                plt.savefig(filename, dpi=800)
                plt.close()

                plt.imshow(save_weighted_loss, cmap='Reds')
                plt.axis('off')
                # plt.colorbar()
                filename = os.path.join(cfg.experiment_dir, "loss", "{}-iter{}-weightedloss.png".format(pathn_record_str, t))
                plt.savefig(filename, dpi=800)
                plt.close()

            if loss_weight is None:
                loss = loss.sum(1).mean()
            else:
                loss = (loss.sum(1) * loss_weight).mean()

            # if (cfg.loss.bis_loss_weight is not None) and (cfg.loss.bis_loss_weight > 0):
            #     loss_bis = bezier_intersection_loss(point_var[0]) * cfg.loss.bis_loss_weight
            #     loss = loss + loss_bis
            if (cfg.loss.xing_loss_weight is not None) \
                    and (cfg.loss.xing_loss_weight > 0):
                loss_xing = xing_loss(point_var) * cfg.loss.xing_loss_weight
                loss = loss + loss_xing

            loss_list.append(loss.item())
            t_range.set_postfix({'loss': loss.item()})
            loss.backward()

            # step
            for _, (optim, scheduler) in optim_schedular_dict.items():
                optim.step()
                scheduler.step()

            for group in shape_groups_record:
                group.fill_color.data.clamp_(0.0, 1.0)

        if cfg.loss.use_distance_weighted_loss:
            loss_weight_keep = loss_weight.detach().cpu().numpy() * 1

        if not cfg.trainable.record:
            # Same fix as in main_func: pg is a list of param-group dicts.
            for pi in pg:
                for ppi in pi['params']:
                    ppi.requires_grad = False
            optim_schedular_dict = {}

        if cfg.save.image:
            filename = os.path.join(
                cfg.experiment_dir, "demo-png", "{}.png".format(pathn_record_str))
            check_and_create_dir(filename)
            if cfg.use_ycrcb:
                imshow = ycrcb_conversion(
                    img, format='[2D x 3]', reverse=True).detach().cpu()
            else:
                imshow = img.detach().cpu()
            pydiffvg.imwrite(imshow, filename, gamma=gamma)

        if cfg.save.output:
            filename = os.path.join(
                cfg.experiment_dir, "output-svg", "{}.svg".format(pathn_record_str))
            check_and_create_dir(filename)
            pydiffvg.save_svg(filename, w, h, shapes_record, shape_groups_record)

        loss_matrix.append(loss_list)

        # calculate the pixel loss
        # pixel_loss = ((x-gt)**2).sum(dim=1, keepdim=True).sqrt_()  # [N, 1, H, W]
        # region_loss = adaptive_avg_pool2d(pixel_loss, cfg.region_loss_pool_size)
        # loss_weight = torch.softmax(region_loss.reshape(1, 1, -1), dim=-1)\
        #     .reshape_as(region_loss)

        pos_init_method = naive_coord_init(x, gt)

        if cfg.coord_init.type == 'naive':
            pos_init_method = naive_coord_init(x, gt)
        elif cfg.coord_init.type == 'sparse':
            pos_init_method = sparse_coord_init(x, gt)
        elif cfg.coord_init.type == 'random':
            pos_init_method = random_coord_init([h, w])
        else:
            raise ValueError

        if cfg.save.video:
            print("saving iteration video...")
            img_array = []
            for ii in range(0, cfg.num_iter):
                filename = os.path.join(
                    cfg.experiment_dir, "video-png",
                    "{}-iter{}.png".format(pathn_record_str, ii))
                img = cv2.imread(filename)
                # cv2.putText(
                #     img, "Path:{} \nIteration:{}".format(pathn_record_str, ii),
                #     (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
                img_array.append(img)

            videoname = os.path.join(
                cfg.experiment_dir, "video-avi",
                "{}.avi".format(pathn_record_str))
            check_and_create_dir(videoname)
            out = cv2.VideoWriter(
                videoname,
                # cv2.VideoWriter_fourcc(*'mp4v'),
                cv2.VideoWriter_fourcc(*'FFV1'),
                20.0, (w, h))
            for iii in range(len(img_array)):
                out.write(img_array[iii])
            out.release()
            # shutil.rmtree(os.path.join(cfg.experiment_dir, "video-png"))

    print("The last loss is: {}".format(loss.item()))
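
# Note (illustration, not part of the deleted file): the distance-weighted loss in the
# script relies on get_sdf(), which is defined elsewhere in this Space. A rough
# approximation of that weight-map computation, assuming SciPy is available, could look
# like the sketch below; the function name and exact normalization are assumptions, not
# the original implementation.
def _approx_distance_weight(alpha_mask):
    import numpy as np
    from scipy.ndimage import distance_transform_edt
    # Binarize the rendered alpha/coverage channel.
    covered = (alpha_mask > 0.5).astype(np.float32)
    # Distance (in pixels) from each uncovered pixel to the nearest covered pixel;
    # covered pixels get 0.
    dist = distance_transform_edt(1.0 - covered)
    if dist.max() > 0:
        dist = dist / dist.max()  # normalize to [0, 1], similar to normalize='to1'
    return dist.astype(np.float32)
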
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/adjacent_difference.h
DELETED
@@ -1,50 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/system/tbb/detail/execution_policy.h>
#include <thrust/system/detail/generic/adjacent_difference.h>

namespace thrust
{
namespace system
{
namespace tbb
{
namespace detail
{

template<typename DerivedPolicy,
         typename InputIterator,
         typename OutputIterator,
         typename BinaryFunction>
OutputIterator adjacent_difference(execution_policy<DerivedPolicy> &exec,
                                   InputIterator first,
                                   InputIterator last,
                                   OutputIterator result,
                                   BinaryFunction binary_op)
{
  // tbb prefers generic::adjacent_difference to cpp::adjacent_difference
  return thrust::system::detail::generic::adjacent_difference(exec, first, last, result, binary_op);
} // end adjacent_difference()

} // end detail
} // end tbb
} // end system
} // end thrust