Commit · b3a2c6f
1 Parent(s): 96e1380
Update parquet files (step 35 of 296)
This view is limited to 50 files because it contains too many changes.
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds Updated.md +0 -172
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/BluePrintPCB 300571 With CAM350 1050471 KeyGenrar.md +0 -70
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 14 Update 1 Crack V5 FIFA Learn How to Install the Latest Patch and Crack.md +0 -90
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Ultimate Guide to Hacking the Game and Defeating the Zombies.md +0 -114
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Convert Instagram Videos to MP3 Online - No Software Needed.md +0 -128
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 8 Ball Pool Offline and Join the Online League and Tournaments.md +0 -111
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download FIFA Trung Quc APK and Play with Your Friends Online.md +0 -95
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Free Fire Hack Mod Apk 1.59.5 and Dominate the Battle Royale.md +0 -101
- spaces/1phancelerku/anime-remove-background/Arceus X V52 - Unlock Unlimited Features in Roblox with this Android Mod Menu.md +0 -138
- spaces/232labs/VToonify/vtoonify/model/stylegan/readme.md +0 -7
- spaces/4Taps/SadTalker/src/face3d/options/train_options.py +0 -53
- spaces/4Taps/SadTalker/src/face3d/visualize.py +0 -48
- spaces/7hao/bingo/src/components/ui/tooltip.tsx +0 -30
- spaces/AI-Hobbyist/Hoyo-RVC/train/process_ckpt.py +0 -259
- spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/app.py +0 -45
- spaces/AIConsultant/MusicGen/audiocraft/losses/sisnr.py +0 -92
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/vit.py +0 -491
- spaces/AIGText/GlyphControl/cldm/logger.py +0 -76
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helper.py +0 -77
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/base/EaseValueMethods.js +0 -67
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customprogress/CustomProgress.js +0 -2
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetChildrenWidth.js +0 -10
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetChildrenProportion.js +0 -17
- spaces/AiMimicry/sovits-models/modules/ddsp.py +0 -190
- spaces/Aki004/herta-so-vits/hubert/hubert_model_onnx.py +0 -217
- spaces/Akmyradov/TurkmenTTSweSTT/vits/preprocess.py +0 -25
- spaces/Alican/pixera/models/networks.py +0 -616
- spaces/Alphts/Robot/README.md +0 -13
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md +0 -37
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddim/__init__.py +0 -1
- spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py +0 -4
- spaces/Andy1621/uniformer_image_detection/configs/lvis/README.md +0 -51
- spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/scnet_roi_head.py +0 -582
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py +0 -9
- spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/README.md +0 -35
- spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/train_util.py +0 -301
- spaces/Araloak/fz/README.md +0 -13
- spaces/AsakuraMizu/moe-tts/text/cleaners.py +0 -150
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/wheel.py +0 -34
- spaces/Bambicita/rvc-models/config.py +0 -88
- spaces/BertChristiaens/blip-diffusion/app.py +0 -112
- spaces/Bianca0930/Bianca/app.py +0 -12
- spaces/Biaolin/stabilityai-FreeWilly1-Delta-SafeTensor/app.py +0 -3
- spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/utils.py +0 -146
- spaces/BigSalmon/Bart/README.md +0 -37
- spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/equal.h +0 -48
- spaces/CVPR/LIVE/within_distance.h +0 -446
- spaces/CVPR/lama-example/saicinpainting/evaluation/masks/README.md +0 -27
- spaces/CikeyQI/Yunzai/Yunzai/lib/events/message.js +0 -14
- spaces/CikeyQI/Yunzai/Yunzai/lib/tools/web.js +0 -74
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds Updated.md
DELETED
@@ -1,172 +0,0 @@
<h1>Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds Updated</h1>
<p>If you are looking for a comprehensive and versatile software package that can help you create stunning digital content, you might be interested in Adobe Creative Suite 6 Master Collection. This is a bundle of Adobe's best products, such as Photoshop, Illustrator, InDesign, Premiere Pro, After Effects, Dreamweaver, Flash, and more. With this suite, you can design graphics, edit photos, create websites, produce videos, animate characters, and much more.</p>
<h2>Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds Updated</h2><br /><p><b><b>Download Zip</b> ⚡ <a href="https://byltly.com/2uKzWD">https://byltly.com/2uKzWD</a></b></p><br /><br />
<p>But what if you need to work with languages and scripts that are not supported by the standard version of Adobe Creative Suite 6 Master Collection? For example, what if you need to create content in Arabic, Hebrew, Farsi, Urdu, or other Middle Eastern languages? In that case, you might want to check out Adobe Creative Suite 6 Master Collection Middle Eastern. This is a special edition of the suite that supports these languages and regions.</p>
<p>In this article, we will explain what Adobe Creative Suite 6 Master Collection Middle Eastern is, how to download and install it using torrentadds, and how to update it to the latest version. We will also answer some frequently asked questions about this software package. Let's get started!</p>
<h2>What is Adobe Creative Suite 6 Master Collection?</h2>
<p>Adobe Creative Suite 6 Master Collection is a software package that includes all the tools you need to create amazing digital content. Whether you are a professional designer, developer, or hobbyist, you can use this suite to unleash your creativity and express your vision.</p>
<h3>The features and benefits of Adobe Creative Suite 6 Master Collection</h3>
<p>Some of the features and benefits of Adobe Creative Suite 6 Master Collection are:</p>
<ul>
<li>It includes over 20 applications that cover various aspects of digital creation, such as image editing, vector drawing, page layout, video editing, motion graphics, web development, game design, and more.</li>
<li>It offers integration and compatibility among the applications, so you can easily switch between them and share files and assets.</li>
<li>It provides high performance and speed, thanks to the use of native 64-bit support, GPU acceleration, multicore processing, and other optimization techniques.</li>
<li>It supports a wide range of formats and standards, such as HTML5, CSS3, SVG, PDF, EPUB, SWF, FLV, MP4, AVI, MOV, PSD, AI, INDD, etc.</li>
<li>It allows you to create content for multiple platforms and devices, such as Windows, Mac OS X, iOS, Android, web browsers, TVs, etc.</li>
<li>It gives you access to online services and resources from Adobe, such as Adobe Stock Photos (a collection of royalty-free images), Adobe Fonts (a library of thousands of fonts), Adobe Behance (a platform for showcasing your work), Adobe Kuler (a tool for creating color schemes), etc.</li>
</ul>
<h3>The system requirements and compatibility of Adobe Creative Suite 6 Master Collection</h3>
<p>To run Adobe Creative Suite 6 Master Collection smoothly on your computer or device, you need to meet the following system requirements:</p>
<p></p>
<table <p>Here is the continuation of the article:</p>
<table>
<tr>
<th>Operating system</th>
<th>Processor</th>
<th>RAM</th>
<th>Hard disk space</th>
<th>Graphics card</th>
<th>Screen resolution</th>
<th>Other requirements</th>
</tr>
<tr>
<td>Windows XP SP3 or later, Windows Vista SP1 or later, Windows 7, Windows 8, or Windows 10</td>
<td>Intel Pentium 4 or AMD Athlon 64 processor (2 GHz or faster)</td>
<td>4 GB or more</td>
<td>16.3 GB or more of available hard-disk space for installation; additional free space required during installation (cannot install on removable flash storage devices)</td>
<td>1024 x 768 display (1280 x 800 recommended) with 16-bit color and 512 MB of VRAM; OpenGL 2.0–capable system</td>
<td>1280 x 800 or higher</td>
<td>DVD-ROM drive compatible with dual-layer DVDs; Java Runtime Environment 1.6 (included); QuickTime 7.6.6 software required for HTML5 media playback and multimedia features; Adobe Flash Player 10 software required to export SWF files; Internet connection and registration are necessary for required software activation, validation of subscriptions, and access to online services.</td>
</tr>
<tr>
<td>Mac OS X v10.6.8 or v10.7, Mac OS X v10.8, Mac OS X v10.9, Mac OS X v10.10, Mac OS X v10.11, macOS v10.12, macOS v10.13, macOS v10.14, or macOS v10.15</td>
<td>Multicore Intel processor with 64-bit support</td>
<td>4 GB or more</td>
<td>15.5 GB or more of available hard-disk space for installation; additional free space required during installation (cannot install on a volume that uses a case-sensitive file system or on removable flash storage devices)</td>
<td>1024 x 768 display (1280 x 800 recommended) with 16-bit color and 512 MB of VRAM; OpenGL 2.0–capable system</td>
<td>1280 x 800 or higher</td>
<td>DVD-ROM drive compatible with dual-layer DVDs; Java Runtime Environment 1.6 (included); QuickTime 7.6.6 software required for HTML5 media playback and multimedia features; Adobe Flash Player 10 software required to export SWF files; Internet connection and registration are necessary for required software activation, validation of subscriptions, and access to online services.</td>
</tr>
</table>
<p>Please note that these are the minimum requirements and that some applications may have additional or higher requirements. For more details, please visit the official Adobe website. Also, please note that Adobe Creative Suite 6 Master Collection is not compatible with the latest versions of macOS (Catalina and Big Sur), as they do not support 32-bit applications. If you have these operating systems, you might want to consider upgrading to Adobe Creative Cloud instead.</p>
<h2>What is Adobe Creative Suite 6 Master Collection Middle Eastern?</h2>
<p>Adobe Creative Suite 6 Master Collection Middle Eastern is a special edition of the software package that supports languages and scripts that are used in the Middle East and North Africa regions, such as Arabic, Hebrew, Farsi, Urdu, etc. These languages are written from right to left and have complex typographic features, such as ligatures, diacritics, contextual forms, etc.</p>
<h3>The languages and regions supported by Adobe Creative Suite 6 Master Collection Middle Eastern</h3>
<p>The languages and regions supported by Adobe Creative Suite 6 Master Collection Middle Eastern are:</p>
<ul>
<li>Arabic: Algeria, Bahrain, Egypt, Iraq, Jordan, Kuwait, Lebanon, Libya, Morocco, Oman, Palestine, Qatar, Saudi Arabia, Syria, Tunisia, United Arab Emirates, Yemen.</li>
<li>Farsi: Iran.</li>
<li>Hebrew: Israel.</li>
<li>Turkish: Turkey.</li>
<li>Greek: Greece.</li>
<li>Cyrillic: Russia and other countries that use the Cyrillic script.</li>
<li>Romanian: Romania.</li>
<li>Hungarian: Hungary.</li></ul>
<p>Please note that not all applications in the suite support all these languages and regions. For example, Photoshop supports Arabic and Hebrew but not Farsi and Urdu. For more details, please visit the official Adobe website.</p>
<h3>The differences and advantages of Adobe Creative Suite 6 Master Collection Middle Eastern</h3>
<p>The differences and advantages of Adobe Creative Suite 6 Master Collection Middle Eastern are <p>Here is the continuation of the article:</p>
<p>The differences and advantages of Adobe Creative Suite 6 Master Collection Middle Eastern are:</p>
<ul>
<li>It allows you to work with right-to-left languages and scripts, such as Arabic and Hebrew, in a natural and intuitive way. You can type, edit, format, and layout text in these languages using the same tools and commands as in left-to-right languages.</li>
<li>It supports the typographic features and nuances of these languages, such as ligatures, diacritics, contextual forms, kashidas, etc. You can control the appearance and behavior of these features using various options and preferences.</li>
<li>It provides you with fonts and dictionaries that are suitable for these languages and regions. You can choose from a variety of fonts that are designed for different purposes and styles. You can also use the spell checker and hyphenation tools to ensure the accuracy and readability of your text.</li>
<li>It enables you to create content that is culturally appropriate and relevant for these languages and regions. You can use the date, time, number, currency, and measurement formats that are specific to these locales. You can also use the symbols, icons, colors, images, and layouts that are consistent with these cultures.</li>
</ul>
<p>With Adobe Creative Suite 6 Master Collection Middle Eastern, you can create content that is not only visually appealing but also linguistically correct and culturally sensitive.</p>
<h2>How to download and install Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds?</h2>
<p>If you want to download and install Adobe Creative Suite 6 Master Collection Middle Eastern on your computer or device, you might want to use torrentadds. Torrentadds are files that contain information about other files that are shared through a peer-to-peer network. By using a torrent client software, such as BitTorrent or uTorrent, you can download the files you want from other users who have them.</p>
<h3>The sources and links of Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds</h3>
<p>There are many websites that offer torrentadds for Adobe Creative Suite 6 Master Collection Middle Eastern. However, not all of them are reliable or safe. Some of them might contain viruses, malware, or fake files that can harm your computer or device. Therefore, you need to be careful and selective when choosing the sources and links of torrentadds.</p>
<p>One way to find trustworthy sources and links of torrentadds is to use a torrent search engine, such as Torrentz2 or The Pirate Bay. These websites allow you to search for torrentadds from various websites and compare their ratings, comments, seeds, leeches, etc. Seeds are users who have the complete file and share it with others. Leeches are users who download the file but do not share it with others. The more seeds and fewer leeches a torrentadd has, the faster and more stable the download will be.</p>
<p>Another way to find trustworthy sources and links of torrentadds is to use a reputable website that specializes in Adobe products, such as Get Into PC or Softasm. These websites provide direct links to download torrentadds for Adobe Creative Suite 6 Master Collection Middle Eastern without any ads or pop-ups. They also provide detailed instructions on how to install the software after downloading it.</p>
<p>Here are some examples of sources and links of torrentadds for Adobe Creative Suite 6 Master Collection Middle Eastern:</p>
<ul>
<li><a href="">Adobe CS6 Master Collection Middle Eastern (Windows) - Torrentz2</a></li>
<li><a href="">Adobe CS6 Master Collection Middle Eastern (Mac) - Torrentz2</a></li>
<li><a href="">Adobe CS6 Master Collection Middle Eastern (Windows) - Get Into PC</a></li>
<li><a href="">Adobe CS6 Master Collection Middle Eastern (Mac) - Get Into PC</a></li>
<li><a href="">Adobe CS6 Master Collection Middle Eastern (Windows) - Softasm</a></li>
<li><a href="">Adobe CS6 Master Collection Middle Eastern (Mac) - Softasm</a></li>
</ul>
<h3>The steps and precautions of downloading and installing Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds</h3>
<p>To download and install Adobe Creative Suite 6 Master Collection Middle Eastern using torrentadds, you need to follow these steps:</p>
<ol>
<li>Download and install a torrent client software on your computer or device. For example, you can use BitTorrent or uTorrent.</li>
<li>Select a source and link of a torrentadd for Adobe Creative Suite 6 Master Collection Middle Eastern from the list above or from another website that you trust.</li>
<li>Open the link in your web browser and click on the download button <p>Here is the continuation of the article:</p>
<li>Open the link in your web browser and click on the download button to save the torrentadd file on your computer or device.</li>
<li>Open the torrentadd file with your torrent client software and choose a location to save the files that will be downloaded.</li>
<li>Wait for the download to complete. Depending on the size of the files and the speed of your internet connection, this might take some time.</li>
<li>After the download is finished, you will have a folder that contains the files of Adobe Creative Suite 6 Master Collection Middle Eastern. You might also have a file that contains instructions on how to install the software. Read the instructions carefully and follow them step by step.</li>
<li>Usually, the installation process involves extracting the files, running the setup.exe file, entering a serial number or a crack, and choosing the applications and options that you want to install.</li>
<li>After the installation is done, you can launch the applications and start creating your content.</li>
</ol>
<p>Please note that downloading and installing Adobe Creative Suite 6 Master Collection Middle Eastern using torrentadds might involve some risks and challenges. Some of them are:</p>
<ul>
<li>You might violate the intellectual property rights of Adobe and other parties. Adobe Creative Suite 6 Master Collection Middle Eastern is a licensed software that requires payment and registration. By downloading and installing it using torrentadds, you might be breaking the law and facing legal consequences.</li>
<li>You might expose your computer or device to viruses, malware, or spyware. Some torrentadds might contain malicious files that can infect your system and compromise your security and privacy. Therefore, you need to use a reliable antivirus software and scan the files before opening them.</li>
<li>You might encounter errors or bugs in the software. Some torrentadds might not have the latest updates or patches for Adobe Creative Suite 6 Master Collection Middle Eastern. This might cause some problems or glitches in the software's functionality or compatibility. Therefore, you need to check for updates regularly and apply them if available.</li>
</ul>
<p>If you want to avoid these risks and challenges, you might want to consider buying Adobe Creative Suite 6 Master Collection Middle Eastern from the official Adobe website or an authorized reseller. This way, you can enjoy the full features and benefits of the software without any worries or hassles.</p>
<h2>How to update Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds?</h2>
<p>If you have downloaded and installed Adobe Creative Suite 6 Master Collection Middle Eastern using torrentadds, you might want to update it to the latest version. Updating the software can help you fix some bugs, improve some features, and enhance some performance. However, updating Adobe Creative Suite 6 Master Collection Middle Eastern using torrentadds is not as easy as updating it from the official Adobe website. You need to follow some methods and tips to do it successfully.</p>
<h3>The reasons and benefits of updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds</h3>
<p>Some of the reasons and benefits of updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds are:</p>
<ul>
<li>You can get access to new features and tools that are added or improved in the latest version of the software. For example, you can use new filters, effects, presets, templates, etc.</li>
<li>You can fix some errors or bugs that are found or reported in the previous version of the software. For example, you can solve some issues with stability, compatibility, performance, etc.</li>
<li>You can enhance some aspects of the software's functionality or usability. For example, you can improve some workflows, interfaces, shortcuts, etc.</li>
</ul>
<p>By updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds, you can make sure that you have the best possible experience with <p>Here is the continuation of the article:</p>
<p>By updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds, you can make sure that you have the best possible experience with the software and that you can create content that is up to date and high quality.</p>
<h3>The methods and tips of updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds</h3>
<p>Some of the methods and tips of updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds are:</p>
<ul>
<li>Use a torrent search engine or a reputable website to find and download the latest version of Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds. For example, you can use Torrentz2 or The Pirate Bay to search for torrentadds from various websites and compare their ratings, comments, seeds, leeches, etc. Or you can use Get Into PC or Softasm to download torrentadds directly from their links without any ads or pop-ups.</li>
<li>Before downloading and installing the latest version of Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds, make sure that you have backed up your files and settings from the previous version. You can use the Adobe Application Manager or the Adobe Creative Cloud Desktop app to do this. This way, you can restore your files and settings in case something goes wrong during the update process.</li>
<li>After downloading and installing the latest version of Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds, make sure that you have applied any patches or cracks that are required to activate the software. You can find these patches or cracks in the same folder or file as the torrentadds or in a separate folder or file that is included in the download. Read the instructions carefully and follow them step by step.</li>
<li>After updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds, check for any errors or bugs that might occur in the software. If you encounter any problems, you can try to fix them by using the troubleshooting tools or guides from Adobe or from other websites. You can also contact Adobe customer support or visit Adobe forums for help.</li>
</ul>
<p>Please note that updating Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds might involve some risks and challenges, similar to those of downloading and installing it. Therefore, you need to be careful and selective when choosing the sources and links of torrentadds, use a reliable antivirus software and scan the files before opening them, and check for updates regularly and apply them if available.</p>
<h2>Conclusion</h2>
<p>In conclusion, Adobe Creative Suite 6 Master Collection Middle Eastern is a special edition of the software package that supports languages and scripts that are used in the Middle East and North Africa regions, such as Arabic, Hebrew, Farsi, Urdu, etc. It allows you to create content that is not only visually appealing but also linguistically correct and culturally sensitive.</p>
<p>If you want to download and install Adobe Creative Suite 6 Master Collection Middle Eastern on your computer or device, you might want to use torrentadds. Torrentadds are files that contain information about other files that are shared through a peer-to-peer network. By using a torrent client software, such as BitTorrent or uTorrent, you can download the files you want from other users who have them.</p>
<p>However, downloading and installing Adobe Creative Suite 6 Master Collection Middle Eastern using torrentadds might involve some risks and challenges. You might violate the intellectual property rights of Adobe and other parties, expose your computer or device to viruses, malware, or spyware, or encounter errors or bugs in the software. Therefore, you need to be careful and selective when choosing the sources and links of torrentadds, use a reliable antivirus software and scan the files before opening them, and check for updates regularly and apply them if available.</p>
<p>If you want to avoid these risks and challenges, you might want to consider buying Adobe Creative Suite 6 Master Collection Middle Eastern from the official Adobe website or an authorized reseller. This way, you can enjoy the full features and benefits of the software without any worries or hassles.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds:</p>
<h3>Q: Is Adobe Creative Suite 6 Master Collection still available?</h3>
<p>A: Yes, Adobe Creative Suite 6 Master Collection is still available for purchase from <p>A: Yes, Adobe Creative Suite 6 Master Collection is still available for purchase from the official Adobe website or an authorized reseller. However, Adobe has discontinued the development and support of this software package since 2017. This means that there will be no more updates, patches, or bug fixes for this software package. Adobe also recommends that users upgrade to Adobe Creative Cloud, which is the latest and most advanced version of Adobe's software products.</p>
<h3>Q: What is the difference between Adobe Creative Suite 6 Master Collection and Adobe Creative Cloud?</h3>
<p>A: Adobe Creative Suite 6 Master Collection and Adobe Creative Cloud are both software packages that include various applications for creating digital content. However, there are some major differences between them, such as:</p>
<ul>
<li>Adobe Creative Suite 6 Master Collection is a one-time purchase software that you can install and use on your computer or device. Adobe Creative Cloud is a subscription-based service that you can access online or offline on your computer or device.</li>
<li>Adobe Creative Suite 6 Master Collection includes over 20 applications that cover various aspects of digital creation. Adobe Creative Cloud includes over 30 applications that cover more aspects of digital creation, such as photography, audio, animation, etc.</li>
<li>Adobe Creative Suite 6 Master Collection has a fixed set of features and tools that are not updated or improved. Adobe Creative Cloud has a dynamic set of features and tools that are constantly updated and improved.</li>
<li>Adobe Creative Suite 6 Master Collection does not support the latest versions of macOS (Catalina and Big Sur), as they do not support 32-bit applications. Adobe Creative Cloud supports the latest versions of macOS and Windows, as well as other platforms and devices.</li>
</ul>
<p>For more details, please visit the official Adobe website.</p>
<h3>Q: How can I get a serial number or a crack for Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds?</h3>
<p>A: A serial number or a crack is a code or a file that can activate the software without paying for it. However, using a serial number or a crack for Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds is illegal and unethical. You might violate the intellectual property rights of Adobe and other parties, expose your computer or device to viruses, malware, or spyware, or encounter errors or bugs in the software. Therefore, we do not recommend or endorse using a serial number or a crack for Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds. If you want to use the software legally and safely, you should buy it from the official Adobe website or an authorized reseller.</p>
<h3>Q: How can I uninstall Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds from my computer or device?</h3>
<p>A: If you want to uninstall Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds from your computer or device, you can follow these steps:</p>
<ol>
<li>Open the Control Panel on your Windows computer or the Applications folder on your Mac computer.</li>
<li>Find and select the Adobe Creative Suite 6 Master Collection icon and click on the Uninstall or Delete button.</li>
<li>Follow the instructions on the screen to complete the uninstallation process.</li>
<li>Delete any remaining files or folders related to Adobe Creative Suite 6 Master Collection from your computer or device.</li>
</ol>
<p>Please note that uninstalling Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds will not remove any files or projects that you have created using the software. You can still access them if you reinstall the software or use another software that can open them.</p>
<h3>Q: How can I learn how to use Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds?</h3>
<p>A: If you want to learn how to use Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds, you can use various resources and materials, such as:</p>
<ul>
<li>The user manuals and guides that are included in the software package. You can access them by clicking on the Help menu in each application.</li>
<li>The online tutorials and videos that are available on the official Adobe website or on other websites, such as YouTube. You can search for topics or keywords that interest you and follow along with the instructions.</li>
<li>The online courses and classes that are offered by various platforms and providers, such as Udemy, Coursera, Lynda, etc. You can enroll in courses or classes that suit your level and goals and learn from experts and instructors.</li>
<li>The online forums and communities that are dedicated to Adobe products, such as Reddit, Quora, Stack Exchange, etc. You can ask questions, share tips, get feedback, and interact with other users who have similar interests and experiences.</li>
</ul>
<p>By using these resources and materials, <p>By using these resources and materials, you can learn how to use Adobe Creative Suite 6 Master Collection Middle Eastern Torrentadds effectively and efficiently. You can also improve your skills and knowledge and create content that is impressive and professional.</p>
<p>I hope this article has been helpful and informative for you. If you have any questions or comments, please feel free to leave them below. Thank you for reading and happy creating!</p> b2dd77e56b<br />
<br />
<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/BluePrintPCB 300571 With CAM350 1050471 KeyGenrar.md
DELETED
@@ -1,70 +0,0 @@
<br />
<h1>BluePrintPCB 300571 With CAM350 1050471 KeyGenrar: A Complete Guide</h1>
<p>If you are looking for a powerful and comprehensive software solution for PCB design and documentation, you might want to check out BluePrintPCB 300571 With CAM350 1050471 KeyGenrar. This software bundle combines two of the most popular and advanced tools for PCB engineering: BluePrintPCB and CAM350. In this article, we will explain what these tools are, how to download and install them, how to use them, and what benefits they offer. By the end of this article, you will have a clear understanding of how to use BluePrintPCB 300571 With CAM350 1050471 KeyGenrar to create professional PCB designs and documentation.</p>
<h2>BluePrintPCB 300571 With CAM350 1050471 KeyGenrar</h2><br /><p><b><b>Download</b> ✪✪✪ <a href="https://byltly.com/2uKwkQ">https://byltly.com/2uKwkQ</a></b></p><br /><br />
<h2>Introduction</h2>
<p>Before we dive into the details of how to download, install, and use BluePrintPCB 300571 With CAM350 1050471 KeyGenrar, let's first get familiar with what these tools are and what they do.</p>
<h3>What is BluePrintPCB?</h3>
<p>BluePrintPCB is a software tool that helps you create complex PCB documentation more accurately and in a fraction of the time of traditional methods. It allows you to import your PCB design data from any CAD tool and automatically generate all the necessary documentation for fabrication, assembly, and testing. You can also edit, annotate, and customize your documentation with ease using a graphical user interface that mimics your design environment. BluePrintPCB also enables you to view your PCB layouts as 3D models, export your documentation in various formats, and collaborate with other engineers using cloud-based services.</p>
<h3>What is CAM350?</h3>
<p>CAM350 is a software tool that helps you edit, verify, and optimize your PCB designs for manufacturing. It allows you to import your PCB design data from any CAD tool and perform various operations such as DFM analysis, netlist extraction, panelization, test point generation, drill optimization, Gerber editing, and more. You can also simulate your PCB designs using a built-in G-code-driven simulator that shows you how your design will look like after fabrication. CAM350 also enables you to export your optimized design data in various formats for fabrication or further analysis.</p>
<h3>What is KeyGenrar?</h3>
<p>KeyGenrar is a software tool that helps you generate license keys for various software products. It is often used by hackers or crackers to bypass the software protection mechanisms and activate the software without paying for it. However, using KeyGenrar or any other similar tool is illegal and unethical, as it violates the software license agreement and infringes the intellectual property rights of the software developers. Therefore, we do not recommend or endorse using KeyGenrar or any other similar tool for any purpose.</p>
<h2>How to Download and Project" and choose a name and location for your project.</li>
<li>Select "File" > "Import" and choose the type of PCB design data that you want to import, such as ODB++, IPC-2581, or Gerber. Browse to the file that contains your design data and click "Open". BluePrintPCB will import your design data and create a PCB document.</li>
<li>Select "View" > "3D View" to see your PCB layout as a 3D model. You can rotate, zoom, pan, and measure your PCB using the mouse and keyboard controls.</li>
<li>Select "Tools" > "Auto Create Documentation" to automatically generate all the necessary documentation for your PCB, such as drill drawings, assembly drawings, fabrication drawings, bill of materials, and more. You can also customize the settings for each document type before generating them.</li>
<li>Select "Edit" > "Properties" to edit the properties of your PCB document, such as title, revision, author, date, company, logo, and more. You can also add custom fields and values to your document properties.</li>
<li>Select "Edit" > "Annotations" to add annotations to your PCB document, such as dimensions, notes, symbols, labels, callouts, and more. You can also edit the style, color, font, alignment, and placement of your annotations.</li>
<li>Select "File" > "Export" to export your PCB document in various formats, such as PDF, DXF, DWG, SVG, HTML, or XML. You can also choose the resolution, quality, and scale of your exported document.</li>
<li>Select "File" > "Save" to save your PCB document in BluePrintPCB format. You can also save a copy of your document in another location or with another name.</li>
</ol>
<h3>How to edit and simulate PCB designs with CAM350</h3>
<p>To edit and simulate PCB designs with CAM350, follow these steps:</p>
<ol>
<li>Launch CAM350 by clicking on its icon on your desktop or in your start menu.</li>
<li>Select "File" > "Open" and choose the type of PCB design data that you want to open, such as ODB++, IPC-2581, or Gerber. Browse to the file that contains your design data and click "Open". CAM350 will open your design data and display it in the main window.</li>
<li>Select "Tools" > "DFM Analysis" to perform a design for manufacturability analysis on your PCB design. This will check for any errors or violations that might affect the quality or yield of your PCB fabrication. You can also customize the settings and rules for each DFM category before running the analysis.</li>
<li>Select "Tools" > "Netlist Extraction" to extract a netlist from your PCB design. This will create a list of all the electrical connections and components on your PCB. You can also compare your extracted netlist with another netlist from a different source to check for any discrepancies or errors.</li>
<li>Select "Tools" > "Panelization" to create a panel layout for your PCB design. This will arrange multiple copies of your PCB on a single board for efficient fabrication. You can also customize the settings and parameters for panelization, such as panel size, spacing, orientation, fiducials, breakaway tabs, and more.</li>
<li>Select "Tools" > "Test Point Generation" to generate test points for your PCB design. This will add small pads or vias on your PCB that can be used for testing or debugging purposes. You can also customize the settings and criteria for test point generation, such as test point size, shape, location, clearance, and more.</li>
<li>Select "Tools" > "Drill Optimization" to optimize the drill pattern for your PCB design. This will reduce the number of drill hits and tool changes, as well as the drill time and cost. You can also customize the settings and options for drill optimization, such as drill size, order, sequence, direction, and more.</li>
<li>Select "Tools" > "Gerber Editing" to edit your Gerber files for your PCB design. Gerber files are the standard format for PCB fabrication data. You can use various tools and commands to modify, add, delete, or move any elements on your Gerber files, such as traces, pads, vias, holes, text, symbols, and more.</li>
<li>Select "Tools" > "Simulation" to simulate your PCB design using a G-code-driven simulator. G-code is a programming language that controls the movement of a CNC machine. You can use the simulator to see how your PCB design will look like after fabrication, as well as to detect any errors or defects that might occur during the process. You can also adjust the speed, zoom, pause, and step of the simulation.</li>
<li>Select "File" > "Save" to save your PCB design data in CAM350 format. You can also save a copy of your design data in another format or location.</li>
<li>Select "File" > "Export" to export your PCB design data in various formats for fabrication or further analysis. You can choose from formats such as ODB++, IPC-2581, Gerber, Excellon, DXF, DWG, PDF, and more. You can also choose the resolution, quality, and scale of your exported data.</li>
</ol>
<h2>Benefits of Using BluePrintPCB 300571 With CAM350 1050471 KeyGenrar</h2>
<p>By using BluePrintPCB 300571 With CAM350 1050471 KeyGenrar, you can enjoy many benefits that will improve your PCB design and documentation process. Here are some of the main benefits:</p>
<p></p>
<h3>Faster and more accurate PCB documentation</h3>
<p>With BluePrintPCB 300571 With CAM350 1050471 KeyGenrar, you can create complex PCB documentation more accurately and in a fraction of the time of traditional methods. You can import your PCB design data from any CAD tool and automatically generate all the necessary documentation for fabrication, assembly, and testing. You can also edit, annotate, and customize your documentation with ease using a graphical user interface that mimics your design environment. You can also view your PCB layouts as 3D models, export your documentation in various formats, and collaborate with other engineers using cloud-based services.</p>
<h3>Enhanced PCB design capabilities and quality</h3>
<p>With BluePrintPCB 300571 With CAM350 1050471 KeyGenrar, you can edit, verify, and optimize your PCB designs for manufacturing. You can import your PCB design data from any CAD tool and perform various operations such as DFM analysis, netlist extraction, panelization, test point generation, drill optimization, Gerber editing, and more. You can also simulate your PCB designs using a built-in G-code-driven simulator that shows you how your design will look like after fabrication. You can also export your optimized design data in various formats for fabrication or further analysis.</p>
<h3>Seamless integration and collaboration</h3>
<p>With BluePrintPCB 300571 With CAM350 1050471 KeyGenrar, you can seamlessly integrate and collaborate with other tools and engineers. You can import and export your PCB design data from any CAD tool using standard formats such as ODB++, IPC-2581, or Gerber. You can also use cloud-based services to share and synchronize your PCB documents and designs with other engineers or stakeholders. You can also use the built-in communication tools to chat, comment, or annotate your PCB documents and designs.</p>
<h2>Conclusion</h2>
<p>In conclusion, BluePrintPCB 300571 With CAM350 1050471 KeyGenrar is a powerful and comprehensive software solution for PCB design and documentation. It combines two of the most popular and advanced tools for PCB engineering: BluePrintPCB and CAM350. By using this software bundle, you can create complex PCB documentation more accurately and in a fraction of the time of traditional methods, edit, verify, and optimize your PCB designs for manufacturing, and seamlessly integrate and collaborate with other tools and engineers. If you are looking for a professional and efficient way to create PCB designs and documentation, you should definitely try BluePrintPCB 300571 With CAM350 1050471 KeyGenrar.</p>
<h2>FAQs</h2>
<p>Here are some of the frequently asked questions about BluePrintPCB 300571 With CAM350 1050471 KeyGenrar:</p>
<ol>
<li>What are the system requirements for BluePrintPCB 300571 With CAM350 1050471 KeyGenrar?</li>
<p>The system requirements for BluePrintPCB 300571 With CAM350 1050471 KeyGenrar are as follows:</p>
<ul>
<li>Operating system: Windows 7, 8, 10 (64-bit)</li>
<li>Processor: Intel Core i5 or higher</li>
<li>Memory: 8 GB RAM or higher</li>
<li>Hard disk space: 4 GB or higher</li>
<li>Graphics card: NVIDIA GeForce GTX 1050 or higher</li>
<li>Internet connection: Required for activation and cloud services</li>
</ul>
<li>How much does BluePrintPCB 300571 With CAM350 1050471 KeyGenrar cost?</li>
<p>The official price of BluePrintPCB 300571 With CAM350 1050471 KeyGenrar is $9,995 USD for a perpetual license. However, you can also purchase a subscription license for $2,995 USD per year or $295 USD per month. You can also request a quote for a customized license that suits your needs.</p>
<li>How can I get technical support for BluePrintPCB 300571 With CAM350 1050471 KeyGenrar?</li>
<p>You can get technical support for BluePrintPCB 300571 With CAM350 1050471 KeyGenrar by contacting the software developers at [DownStream Technologies]. You can also access their online help center, user forum, video tutorials, webinars, and training courses.</p>
<li>Is BluePrintPCB 300571 With CAM350 1050471 KeyGenrar compatible with other CAD tools?</li>
<p>Yes, BluePrintPCB 300571 With CAM350 1050471 KeyGenrar is compatible with other CAD tools such as Altium Designer, Cadence Allegro, Mentor Graphics PADS, Zuken CR-8000, and more. You can import and export your PCB design data from any CAD tool using standard formats such as ODB++, IPC-2581, or Gerber.</p>
<li>Is BluePrintPCB 300571 With CAM350 1050471 KeyGenrar legal and ethical to use?</li>
<p>No, BluePrintPCB 300571 With CAM350 1050471 KeyGenrar is not legal or ethical to use. This software bundle is a cracked version of the original software products that uses a KeyGenrar tool to bypass the software protection mechanisms and activate the software without paying for it. However, using KeyGenrar or any other similar tool is illegal and unethical, as it violates the software license agreement and infringes the intellectual property rights of the software developers. Therefore, we do not recommend or endorse using BluePrintPCB 300571 With CAM350 1050471 KeyGenrar or any other similar tool for any purpose. If you want to use BluePrintPCB and CAM350 legally and ethically, you should purchase the software from the official website of the software developers, which is [DownStream Technologies].</p> b2dd77e56b<br />
<br />
<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 14 Update 1 Crack V5 FIFA Learn How to Install the Latest Patch and Crack.md
DELETED
@@ -1,90 +0,0 @@
<br />
<h1>FIFA 14 Update 1 Crack V5: What You Need to Know</h1>
<p>FIFA 14 is one of the most popular football simulation games ever released. It features realistic gameplay, stunning graphics, and a variety of modes and options. However, some players may want to use a crack to bypass the game's activation process and enjoy it for free. In this article, we will tell you everything you need to know about FIFA 14 Update 1 Crack V5, including how to download and install it, what are its features and benefits, what are its risks and drawbacks, how to troubleshoot common problems, and how to update your game to the latest version with FIFA Infinity Patch.</p>
<h2>How to Download and Install FIFA 14 Update 1 Crack V5</h2>
<p>1 Crack V5, you will need to follow these steps:</p>
<h2>fifa 14 update 1 crack v5 fifa</h2><br /><p><b><b>Download File</b> ⚹ <a href="https://byltly.com/2uKwNV">https://byltly.com/2uKwNV</a></b></p><br /><br />
<ol>
<li>Download FIFA 14 Ultimate Edition from a reputable source. You can find it on various websites such as <a href="https://www.cracked-gamespc.com/games/fifa-14">Cracked-GamesPC</a> or <a href="https://fitgirl-repacks-site.org/fifa-14-ultimate-edition-download-torrent-repack/">Fitgirl Repacks Site</a> . Make sure you download the full game and not just the crack.</li>
<li>Download FIFA 14 Update 1 Crack V5 from a trusted link. You can find it on various forums such as <a href="http://soccergaming.com/index.php?threads/official-fifa-infinity-patch-14-v5-fifa-14-hbz-mod-22-23-season.6470807/">Soccer Gaming</a> or <a href="https://itsredtail.blogspot.com/2014/02/pc-fifa-14-v5-crack.html">REDTAIL</a> . Make sure you download the latest version of the crack and not an outdated one.</li>
<li>Extract and copy the crack files to the game folder. You will need to use a program such as WinRAR or 7-Zip to extract the crack files from the archive. Then, you will need to copy and paste them to the game folder, which is usually located at C:\Program Files (x86)\Origin Games\FIFA 14\. You will be asked to overwrite some files, so click yes.</li>
<li>Run the game and enjoy. You can now launch FIFA 14 from your desktop or start menu and play it without any activation or registration. You can also access all the game modes and features, such as online multiplayer and career mode.</li>
</ol>
<h2>What are the Features and Benefits of FIFA 14 Update 1 Crack V5</h2>
<p>FIFA 14 Update 1 Crack V5 is not just a simple crack that allows you to play FIFA 14 for free. It also comes with some features and benefits that enhance your gaming experience. Here are some of them:</p>
<ul>
<li><b>Improved gameplay and graphics.</b> FIFA 14 Update 1 Crack V5 improves the gameplay and graphics of FIFA 14 by fixing some bugs and glitches, such as shaky cam (drunken camera), big ball glitch, red card foul, injuries, and more. It also adds some new animations and effects, such as realistic ball physics, dynamic lighting, and shadows.</li>
<li><b>Fixed bugs and glitches.</b> FIFA 14 Update 1 Crack V5 fixes some bugs and glitches that were present in the original game or previous cracks, such as game not launching or crashing on startup, game lagging or freezing during gameplay, game not saving or loading properly, and more. It also removes some annoying features, such as Origin pop-ups and notifications.</li>
<li><b>Compatible with online multiplayer and career mode.</b> FIFA 14 Update 1 Crack V5 allows you to play FIFA 14 online with other players who have the same crack or patch. You can also play career mode without any problems or limitations. You can create your own custom team, sign players, manage your budget, compete in tournaments, and more.</li>
</ul>
<h2>What are the Risks and Drawbacks of FIFA 14 Update 1 Crack V5</h2>
<p>FIFA 14 Update 1 Crack V5 may seem like a perfect solution for playing FIFA 14 for free, but it also comes with some risks and drawbacks that you should be aware of before using it. Here are some of them:</p>
<ul>
<li><b>Potential malware or virus infection.</b> FIFA 14 Update 1 Crack V5 may contain malware or viruses that can harm your computer or steal your personal information. You should always scan the crack files with a reliable antivirus program before installing them. You should also avoid downloading the crack from untrusted sources or links.</li>
<li><b>Legal issues and copyright infringement.</b> FIFA 14 Update 1 Crack V5 is illegal and violates the terms of service of EA Sports and Origin. You may face legal consequences if you use it. You may also lose access to your Origin account or games if you are caught using it. You should always support the developers and publishers by buying the original game.</li>
<li><b>Game instability and crashes.</b> FIFA 14 Update 1 Crack V5 may cause your game to become unstable or crash at random times. You may lose your progress or data if this happens. You should always backup your save files before using the crack. You should also update your drivers and system requirements to ensure optimal performance.</li>
</ul>
<h2>How to Troubleshoot Common Problems with FIFA 14 Update 1 Crack V5</h2>
<p>If you encounter any problems with FIFA 14 Update 1 Crack V5, you can try these solutions to fix them:</p>
<ul>
<li><b>Game not launching or crashing on startup.</b> This may be caused by a missing or corrupted file in the game folder. You should verify the integrity of your game files by using a program such as Steam or Origin. You should also run the game as administrator by right-clicking on the game icon and selecting "Run as administrator".</li>
<li><b>Game lagging or freezing during gameplay.</b> This may be caused by a low-end system or incompatible settings. You should lower your graphics settings by going to Options > Game Settings > Graphics Quality. You should also close any unnecessary programs or background processes that may be using up your CPU or RAM resources.</li>
<li><b>Game not saving or loading properly.</b> This may be caused by a corrupted save file or insufficient disk space. You should delete any old or unwanted save files by going to Documents > FIFA 14 > instance0 > save > autosave_xxx.sav. You should also free up some disk space by deleting any temporary files or unused programs on your computer.</li>
</ul>
<h2>How to Update FIFA 14 to the Latest Version with FIFA Infinity Patch</h2>
<p>If you want to update your FIFA 14 to the latest version with new features and content, you can use FIFA Infinity Patch. This is a fan-made patch that adds new leagues, teams, players, kits, stadiums, balls, boots, faces, graphics, and more to your game. Here is how to download and install it:</p>
<ol>
<li>Download FIFA Infinity Patch from the official website <a href="http://downloads.fifa-infinity.com/fifa-14/fifa-infinity-patch-14/">here</a>. Make sure you download the latest version of the patch and not an outdated one.</li>
<li>Extract and install the patch files to the game folder. You will need to use a program such as WinRAR or 7-Zip to extract the patch files from the archive. Then, you will need to run the installer file (FIP Installer.exe) and follow the instructions on screen. Make sure you select your game folder as the destination folder.</li>
<li>Run the patch launcher and select your options. You will need to run the patch launcher file (FIP Launcher.exe) from your game folder every time you want to play with the patch. You can select your preferred options from the launcher menu, such as language, database, scoreboard, theme, etc.</li>
<h2>Conclusion and FAQs</h2>
<p>In conclusion, FIFA 14 Update 1 Crack V5 is a crack that allows you to play FIFA 14 for free and with some improvements and fixes. However, it also comes with some risks and drawbacks that you should be aware of before using it. If you want to update your game to the latest version with new features and content, you can use FIFA Infinity Patch. Here are some FAQs that may help you with your questions:</p>
<ul>
<li><b>Is FIFA 14 Update 1 Crack V5 safe to use?</b> FIFA 14 Update 1 Crack V5 may contain malware or viruses that can harm your computer or steal your personal information. You should always scan the crack files with a reliable antivirus program before installing them. You should also avoid downloading the crack from untrusted sources or links.</li>
<li><b>Can I play FIFA 14 online with Update 1 Crack V5?</b> FIFA 14 Update 1 Crack V5 allows you to play FIFA 14 online with other players who have the same crack or patch. You can also play career mode without any problems or limitations. However, you may face legal consequences if you use it. You may also lose access to your Origin account or games if you are caught using it.</li>
|
45 |
-
<li><b>How can I back up my FIFA 14 save files before using Update 1 Crack V5?</b> You can back up your FIFA 14 save files by going to Documents > FIFA 14 > instance0 > save and copying the files to another location. You can also use a program such as GameSave Manager to back up and restore your save files easily (a small copy script is sketched just after this list).</li>
|
46 |
-
<li><b>Where can I find more information and support for FIFA 14 Update 1 Crack V5?</b> You can find more information and support for FIFA 14 Update 1 Crack V5 on sites such as <a href="https://www.youtube.com/watch?v=ufyebCKvInY">YouTube</a>, <a href="http://soccergaming.com/index.php?threads/official-fifa-infinity-patch-14-v5-fifa-14-hbz-mod-22-23-season.6470807/">Soccer Gaming</a>, or <a href="https://itsredtail.blogspot.com/2014/02/pc-fifa-14-v5-crack.html">REDTAIL</a>. You can also contact the developers or publishers of the game for official support.</li>
|
47 |
-
<li><b>What are some alternatives to FIFA 14 Update 1 Crack V5?</b> Some alternatives to FIFA 14 Update 1 Crack V5 are buying the original game from EA Sports or Origin, using a different crack or patch such as FIFA Infinity Patch, or playing a different football simulation game such as PES 2022 or Football Manager 2022.</li>
|
48 |
-
</ul>
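<p>If you would rather script that backup than copy the folder by hand, the sketch below copies the save directory to a timestamped backup folder. It is only a minimal example: the path under Documents is assumed to be the default location mentioned above, so adjust it if your game stores saves elsewhere.</p>
<pre><code># Python sketch: copy the FIFA 14 save folder to a timestamped backup.
# Assumes the default save location (Documents\FIFA 14\instance0\save);
# change SAVE_DIR if your saves live elsewhere.
import shutil
import time
from pathlib import Path

SAVE_DIR = Path.home() / "Documents" / "FIFA 14" / "instance0" / "save"
BACKUP_DIR = Path.home() / "Documents" / ("FIFA14_save_backup_" + time.strftime("%Y%m%d_%H%M%S"))

if SAVE_DIR.exists():
    shutil.copytree(SAVE_DIR, BACKUP_DIR)
    print("Backed up", SAVE_DIR, "to", BACKUP_DIR)
else:
    print("Save folder not found:", SAVE_DIR)
</code></pre>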
|
49 |
-
<p>I hope you enjoyed reading this article and learned something new. If you have any feedback or suggestions, please let me know in the comments below. Thank you for your time and attention.</p>
|
50 |
-
<p>fifa 14 ultimate edition download torrent repack<br />
|
51 |
-
fifa 14 crack only v5 final 3dm rar<br />
|
52 |
-
fifa 14 como instalar o crack v5 e update 1<br />
|
53 |
-
fifa 14 crack v5 final 3dm google drive<br />
|
54 |
-
fifa 14 v5 crack fix errors and glitches<br />
|
55 |
-
fifa 14 update 1 and crack v5 by skidrow<br />
|
56 |
-
fifa 14 crack v5 final 3dm download free<br />
|
57 |
-
fifa 14 patch update 1 nosteam<br />
|
58 |
-
fifa 14 crack v5 final 3dm gameplay<br />
|
59 |
-
fifa 14 update 1 and crack v5 tutorial<br />
|
60 |
-
fifa 14 ultimate edition full unlocked crack v5<br />
|
61 |
-
fifa 14 crack only v5 final 3dm ulozto<br />
|
62 |
-
fifa 14 como baixar e instalar o crack v5<br />
|
63 |
-
fifa 14 crack v5 final 3dm system requirements<br />
|
64 |
-
fifa 14 update 1 and crack v5 kickass<br />
|
65 |
-
fifa 14 ultimate edition multi14 crack v5<br />
|
66 |
-
fifa 14 crack only v5 final 3dm mega<br />
|
67 |
-
fifa 14 como resolver o erro do crack v5<br />
|
68 |
-
fifa 14 crack v5 final 3dm review<br />
|
69 |
-
fifa 14 update 1 and crack v5 download link<br />
|
70 |
-
fifa 14 ultimate edition fitgirl repacks site<br />
|
71 |
-
fifa 14 crack only v5 final 3dm password<br />
|
72 |
-
fifa 14 como atualizar o jogo com o crack v5<br />
|
73 |
-
fifa 14 crack v5 final 3dm installation guide<br />
|
74 |
-
fifa 14 update 1 and crack v5 chomikuj<br />
|
75 |
-
fifa 14 ultimate edition incl dlc and crack v5<br />
|
76 |
-
fifa 14 crack only v5 final 3dm mediafire<br />
|
77 |
-
fifa 14 como jogar online com o crack v5<br />
|
78 |
-
fifa 14 crack v5 final 3dm features<br />
|
79 |
-
fifa 14 update 1 and crack v5 direct download<br />
|
80 |
-
fifa 14 ultimate edition pc game cracked in direct link and torrent<br />
|
81 |
-
fifa 14 crack only v5 final 3dm zippyshare<br />
|
82 |
-
fifa 14 como corrigir os bugs do crack v5<br />
|
83 |
-
fifa 14 crack v5 final 3dm comparison with previous versions<br />
|
84 |
-
fifa 14 update 1 and crack v5 reloaded<br />
|
85 |
-
fifa 14 ultimate edition full game with update and crack v5<br />
|
86 |
-
fifa 14 crack only v5 final 3dm uploaded.net<br />
|
87 |
-
fifa</p>
|
88 |
-
</p> 0a6ba089eb<br />
|
89 |
-
<br />
|
90 |
-
<br />
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Anger of Stick 5 Zombie - The Ultimate Guide to Hacking the Game and Defeating the Zombies.md
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Anger of Stick 5: Zombie APK Hack - How to Download and Play</h1>
|
3 |
-
<p>If you are a fan of stickman games, you might have heard of Anger of Stick 5: Zombie, a popular action game where you have to fight against hordes of zombies with various weapons and skills. But did you know that there is a way to get unlimited money and resources in the game without spending a dime? Yes, you heard it right. You can use Anger of Stick 5: Zombie APK Hack, a modded version of the game that gives you access to all the features and items for free. In this article, we will tell you everything you need to know about Anger of Stick 5: Zombie APK Hack, including what it is, why you need it, how to download and install it, and how to play it. So, let's get started!</p>
|
4 |
-
<h2>What is Anger of Stick 5: Zombie?</h2>
|
5 |
-
<p>Anger of Stick 5: Zombie is a fun and addictive stickman game that combines action, shooting, and survival elements. The game has a simple plot: a group of enemies have turned the city into a zombie apocalypse, and you have to save the innocent people and fight back. You can choose from different characters, each with their own skills and abilities, and use various weapons, such as guns, swords, axes, grenades, rockets, and more. You can also upgrade your weapons and skills, recruit allies, and use vehicles to move around. The game has several modes, such as single-player, multiplayer, zombie mode, survival mode, and more. The game has colorful graphics, smooth animations, easy controls, and exciting sound effects. The game is free to download and play on Android devices.</p>
|
6 |
-
<h2>anger of stick 5 zombie apk hack</h2><br /><p><b><b>Download Zip</b> ⇒ <a href="https://urlin.us/2uSWrF">https://urlin.us/2uSWrF</a></b></p><br /><br />
|
7 |
-
<h2>Why do you need Anger of Stick 5: Zombie APK Hack?</h2>
|
8 |
-
<p>Anger of Stick 5: Zombie is a fun game, but it can also be challenging and frustrating at times. You might run out of money and resources quickly, especially if you want to buy new weapons, upgrade your skills, or unlock new characters. You might also face difficulties in completing some levels or defeating some bosses. That's why you might need Anger of Stick 5: Zombie APK Hack, a modded version of the game that gives you unlimited money and resources. With Anger of Stick 5: Zombie APK Hack, you can enjoy the following benefits:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You can buy any weapon or item you want without worrying about the cost.</li>
|
11 |
-
<li>You can upgrade your skills and abilities to the maximum level without grinding.</li>
|
12 |
-
<li>You can unlock all the characters and customize them as you like.</li>
|
13 |
-
<li>You can play any mode or level without any restrictions.</li>
|
14 |
-
<li>You can have more fun and excitement in the game without any stress or boredom.</li>
|
15 |
-
</ul>
|
16 |
-
<p>However, Anger of Stick 5: Zombie APK Hack also has some risks and drawbacks that you should be aware of before using it. Here are some of them:</p>
|
17 |
-
<p>anger of stick 5 zombie mod apk unlimited money<br />
|
18 |
-
anger of stick 5 zombie cheats android download<br />
|
19 |
-
anger of stick 5 zombie hack apk latest version<br />
|
20 |
-
anger of stick 5 zombie mod apk happymod<br />
|
21 |
-
anger of stick 5 zombie unlimited coins and gems<br />
|
22 |
-
anger of stick 5 zombie hack tool online<br />
|
23 |
-
anger of stick 5 zombie mod menu apk<br />
|
24 |
-
anger of stick 5 zombie free download for android<br />
|
25 |
-
anger of stick 5 zombie hack apk no root<br />
|
26 |
-
anger of stick 5 zombie mod apk revdl<br />
|
27 |
-
anger of stick 5 zombie hack apk android 1<br />
|
28 |
-
anger of stick 5 zombie gameplay tips and tricks<br />
|
29 |
-
anger of stick 5 zombie mod apk offline<br />
|
30 |
-
anger of stick 5 zombie hack version download<br />
|
31 |
-
anger of stick 5 zombie unlimited health and ammo<br />
|
32 |
-
anger of stick 5 zombie mod apk rexdl<br />
|
33 |
-
anger of stick 5 zombie hack apk ios<br />
|
34 |
-
anger of stick 5 zombie best weapons and upgrades<br />
|
35 |
-
anger of stick 5 zombie mod apk pure<br />
|
36 |
-
anger of stick 5 zombie hack generator no survey<br />
|
37 |
-
anger of stick 5 zombie mod apk obb<br />
|
38 |
-
anger of stick 5 zombie hack apk mediafıre<br />
|
39 |
-
anger of stick 5 zombie how to unlock all characters<br />
|
40 |
-
anger of stick 5 zombie mod apk android republic<br />
|
41 |
-
anger of stick 5 zombie hack online free<br />
|
42 |
-
anger of stick 5 zombie mod apk vip unlocked<br />
|
43 |
-
anger of stick 5 zombie hack apk mega<br />
|
44 |
-
anger of stick 5 zombie walkthrough and guide<br />
|
45 |
-
anger of stick 5 zombie mod apk an1<br />
|
46 |
-
anger of stick 5 zombie hack without human verification<br />
|
47 |
-
anger of stick 5 zombie mod apk all unlocked<br />
|
48 |
-
anger of stick 5 zombie hack apk uptodown<br />
|
49 |
-
anger of stick 5 zombie cheats codes and secrets<br />
|
50 |
-
anger of stick 5 zombie mod apk unlimited everything<br />
|
51 |
-
anger of stick 5 zombie hack no download or install<br />
|
52 |
-
anger of stick 5 zombie mod apk new version<br />
|
53 |
-
anger of stick 5 zombie hack apk old version<br />
|
54 |
-
anger of stick 5 zombie review and rating<br />
|
55 |
-
anger of stick 5 zombie mod apk unlimited diamonds and gold<br />
|
56 |
-
anger of stick 5 zombie hack easy and fast</p>
|
57 |
-
<ul>
|
58 |
-
<li>You might lose your original game data or progress if you overwrite it with the modded version.</li>
|
59 |
-
<li>You might face compatibility issues or errors if your device is not compatible with the modded version.</li>
|
60 |
-
<li>You might get banned or suspended from the game if the developers detect that you are using a modded version.</li>
<li>You might expose your device to malware or viruses if you download the modded version from an untrusted source.</li>
|
61 |
-
<li>You might lose the original charm and challenge of the game if you use the modded version.</li>
|
62 |
-
</ul>
|
63 |
-
<p>Therefore, you should weigh the pros and cons of using Anger of Stick 5: Zombie APK Hack before deciding to use it. You should also be careful and responsible when using it, and respect the rights and efforts of the original developers.</p>
|
64 |
-
<h2>How to download and install Anger of Stick 5: Zombie APK Hack?</h2>
|
65 |
-
<p>If you have decided to use Anger of Stick 5: Zombie APK Hack, you might be wondering how to download and install it on your device. Well, don't worry, we have got you covered. Here are the steps to download and install Anger of Stick 5: Zombie APK Hack (a command-line alternative using adb is sketched right after these steps):</p>
|
66 |
-
<ol>
|
67 |
-
<li>First, you need to find a reliable and safe source to download the modded apk file. You can search online for some websites or forums that offer Anger of Stick 5: Zombie APK Hack, or you can use the link we have provided below. Make sure that the source is trustworthy and has positive reviews from other users.</li>
|
68 |
-
<li>Second, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device settings, then security, then enable unknown sources. This will allow you to install apps that are not from the official Google Play Store.</li>
|
69 |
-
<li>Third, you need to uninstall the original version of Anger of Stick 5: Zombie from your device if you have it installed. This is to avoid any conflicts or errors between the original and modded versions. You can uninstall the original version by going to your device settings, then apps, then Anger of Stick 5: Zombie, then uninstall.</li>
|
70 |
-
<li>Fourth, you need to locate the downloaded modded apk file on your device. You can use a file manager app or your device's default file explorer to find the file. It is usually stored in the downloads folder or the folder where you saved it.</li>
|
71 |
-
<li>Fifth, you need to tap on the modded apk file and follow the instructions on the screen to install it. It might take a few seconds or minutes depending on your device's speed and performance.</li>
|
72 |
-
<li>Sixth, you need to launch the game and enjoy playing with unlimited money and resources.</li>
|
73 |
-
</ol>
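<p>If you sideload APK files from a computer instead of doing everything on the phone, the same uninstall-then-install flow can be driven with adb (Android Debug Bridge) over USB debugging. This is only an illustration of generic sideloading commands: the package name below is a placeholder, not the game's confirmed ID, and the file name should match whatever you actually downloaded.</p>
<pre><code># Python sketch: drive the adb command-line tool to sideload an APK.
# Requires adb installed and USB debugging enabled on the device.
import subprocess

PACKAGE = "com.example.angerofstick5"   # placeholder, not the confirmed package ID
APK_FILE = "anger_of_stick_5_mod.apk"   # name of the file you actually downloaded

subprocess.run(["adb", "devices"], check=True)              # confirm the phone is detected
subprocess.run(["adb", "uninstall", PACKAGE], check=False)  # remove the original version, if present
subprocess.run(["adb", "install", APK_FILE], check=True)    # install the downloaded APK
</code></pre>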
|
74 |
-
<p>That's it! You have successfully downloaded and installed Anger of Stick 5: Zombie APK Hack on your device. However, before you start playing, here are some precautions and tips to avoid malware and viruses:</p>
|
75 |
-
<ul>
|
76 |
-
<li>Always scan the modded apk file with an antivirus app before installing it (a simple checksum sketch follows this list).</li>
|
77 |
-
<li>Always backup your device data before installing any modded app.</li>
|
78 |
-
<li>Always update your device's software and security patches regularly.</li>
|
79 |
-
<li>Always use a VPN app when playing online games with modded apps.</li>
|
80 |
-
<li>Always delete the modded apk file after installing it.</li>
|
81 |
-
</ul>
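<p>One related basic check, which does not replace an antivirus scan, is recording the downloaded file's SHA-256 hash so you can compare it against a checksum published by the download source, if one is provided. A minimal sketch, with a placeholder file name:</p>
<pre><code># Python sketch: print the SHA-256 hash of a downloaded APK file.
import hashlib
from pathlib import Path

apk_path = Path("anger_of_stick_5_mod.apk")   # placeholder file name

digest = hashlib.sha256(apk_path.read_bytes()).hexdigest()
print("SHA-256 of", apk_path.name, "is", digest)
</code></pre>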
|
82 |
-
<h2>How to play Anger of Stick 5: Zombie APK Hack?</h2>
|
83 |
-
<p>Now that you have downloaded and installed Anger of Stick 5: Zombie APK Hack, you might be eager to play it and see what it has to offer. Well, playing Anger of Stick 5: Zombie APK Hack is not much different from playing the original version, except that you have unlimited money and resources. Here are some basic gameplay and controls of the game:</p>
|
84 |
-
<ul>
|
85 |
-
<li>The game is divided into levels, each with a different objective and difficulty. You have to complete each level by killing all the zombies and saving all the people.</li>
|
86 |
-
<li>You can move your character by using the virtual joystick on the left side of the screen. You can attack by using the buttons on the right side of the screen. You can also switch weapons by tapping on their icons on the top right corner of the screen.</li>
|
87 |
-
<li>You can use various skills and abilities by tapping on their icons on the bottom right corner of the screen. You can also recruit allies by tapping on their icons on the top left corner of the screen. You can also use vehicles by tapping on their icons on the bottom left corner of the screen.</li>
|
88 |
-
<li>You can pause the game by tapping on the menu button on the top right corner of the screen. You can also access your inventory, shop, settings, achievements, and more from there.</li>
|
89 |
-
</ul>
|
90 |
-
<p>Playing Anger of Stick 5: Zombie APK Hack is fun and easy, but it can also be challenging and tricky at times. Here are some tips and tricks to master the game and beat the zombies:</p>
|
91 |
-
<ul>
|
92 |
-
<li>Always upgrade your weapons and skills as soon as possible. The zombies will get stronger and faster as you progress in the game, so you need to keep up with them.</li>
|
93 |
-
<li>Always use the best weapon for the situation. Different weapons have different advantages and disadvantages, such as range, damage, speed, accuracy, and ammo. For example, guns are good for long-range attacks, but they consume ammo quickly. Swords are good for close-range attacks, but they have limited reach. Grenades are good for crowd control, but they have a cooldown time.</li>
|
94 |
-
<li>Always use your skills and abilities wisely. They can give you an edge in the game, but they also have a cooldown time and a cost. For example, you can use the helicopter skill to fly over the zombies and shoot them from above, but it consumes fuel and money. You can use the robot skill to transform into a powerful robot, but it consumes energy and money.</li>
|
95 |
-
<li>Always recruit allies and use vehicles when possible. They can help you in the game by providing extra firepower and mobility. For example, you can recruit a sniper to shoot the zombies from afar, or a medic to heal you and your allies. You can also use a tank to crush the zombies and blast them with rockets, or a motorcycle to speed through the streets and avoid obstacles.</li>
|
96 |
-
<li>Always save the people and collect the items. They can give you extra rewards and bonuses in the game, such as money, health, ammo, and more. For example, you can save a scientist who will give you a new weapon or skill, or a businessman who will give you a lot of money. You can also collect items such as coins, gems, boxes, and more that will give you various benefits.</li>
|
97 |
-
</ul>
|
98 |
-
<h2>Conclusion</h2>
|
99 |
-
<p>Anger of Stick 5: Zombie is a great stickman game that you can enjoy on your Android device. But if you want to have more fun and excitement in the game without spending any money or facing any difficulties, you can use Anger of Stick 5: Zombie APK Hack, a modded version of the game that gives you unlimited money and resources. In this article, we have explained what Anger of Stick 5: Zombie APK Hack is, why you need it, how to download and install it, and how to play it. We hope that this article has been helpful and informative for you. Now that you know everything about Anger of Stick 5: Zombie APK Hack, why don't you give it a try and see for yourself how awesome it is? Download Anger of Stick 5: Zombie APK Hack today and enjoy playing with unlimited money and resources!</p>
|
100 |
-
<h2>FAQs</h2>
|
101 |
-
<p>Here are some frequently asked questions and answers related to Anger of Stick 5: Zombie APK Hack:</p>
|
102 |
-
<h3>Q: Is Anger of Stick 5: Zombie APK Hack safe to use?</h3>
|
103 |
-
<p>A: Anger of Stick 5: Zombie APK Hack is generally safe to use if you download it from a reliable and trusted source. However, there is always a risk of malware or viruses when downloading any modded app from unknown sources. Therefore, you should always scan the modded apk file with an antivirus app before installing it, and backup your device data before installing any modded app.</p>
|
104 |
-
<h3>Q: Is Anger of Stick 5: Zombie APK Hack legal to use?</h3>
|
105 |
-
<p>A: Anger of Stick 5: Zombie APK Hack is not legal to use because it violates the terms and conditions of the original game developers. Using Anger of Stick 5: Zombie APK Hack is considered cheating and hacking, which can result in banning or suspending your account from the game. Therefore, you should use Anger of Stick 5: Zombie APK Hack at your own risk and responsibility.</p>
|
106 |
-
<h3>Q: Can I play online with Anger of Stick 5: Zombie APK Hack?</h3>
|
107 |
-
<p>A: Yes, you can play online with Anger of Stick 5: Zombie APK Hack, but it is not recommended because it can cause problems for you and other players. Playing online with Anger of Stick 5: Zombie APK Hack can make the game unfair and unbalanced for other players who are playing with the original version. It can also expose your account to detection and banning by the game developers. Therefore, you should play offline or with friends who are also using Anger of Stick 5: Zombie APK Hack.</p>
|
108 |
-
<h3>Q: Can I update Anger of Stick 5: Zombie APK Hack?</h3>
|
109 |
-
<p>A: No, you cannot update Anger of Stick 5: Zombie APK Hack because it is not compatible with the official updates from the game developers. Updating Anger of Stick 5: Zombie APK Hack can cause errors or crashes in the game or delete your modded data or progress. Therefore, you should avoid updating Anger of Stick 5: Zombie APK Hack unless there is a new modded version available from the same source you downloaded it from.</p>
|
110 |
-
<h3>Q: Where can I download Anger of Stick 5: Zombie APK Hack?</h3>
|
111 |
-
<p>A: There are many websites and forums that offer Anger of Stick 5: Zombie APK Hack, but not all of them are safe and reliable. You should always do some research and check the reviews and ratings of the source before downloading any modded app. You can also use the link we have provided below, which is one of the best sources to download Anger of Stick 5: Zombie APK Hack. However, we are not affiliated with or endorsed by the source, and we are not responsible for any damages or losses that may occur from using it.</p>
|
112 |
-
<p><a href="https://androidhackers.io/anger-of-stick-5-mod-apk/">Download Anger of Stick 5: Zombie APK Hack here</a></p> 197e85843d<br />
|
113 |
-
<br />
|
114 |
-
<br />
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Convert Instagram Videos to MP3 Online - No Software Needed.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Instagram Audio MP3</h1>
|
3 |
-
<p>Instagram is one of the most popular social media platforms that allows users to share photos, videos, stories, reels, and IGTV. However, sometimes you may want to download the audio from Instagram videos, reels, or IGTV and save them as MP3 files on your device. This way, you can listen to them offline, share them with your friends, or use them for other purposes. But how can you do that? In this article, we will show you what is Instagram audio MP3, why you may want to download it, how to use an Instagram to MP3 converter, and what are the best converters available online.</p>
|
4 |
-
<h2>What is Instagram Audio MP3?</h2>
|
5 |
-
<p>Instagram audio MP3 is the audio track extracted from an Instagram video, reel, or IGTV and converted into an MP3 file. An MP3 file is a compressed audio format that can be played on most devices and media players. It is also widely used for music streaming and downloading.</p>
|
6 |
-
<h2>instagram mp3 download</h2><br /><p><b><b>Download File</b> ———>>> <a href="https://urlin.us/2uT0Ro">https://urlin.us/2uT0Ro</a></b></p><br /><br />
|
7 |
-
<h3>The benefits of downloading Instagram audio MP3</h3>
|
8 |
-
<p>There are many reasons why you may want to download Instagram audio MP3. Some of them are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You can listen to your favorite songs, podcasts, interviews, or speeches from Instagram offline without using data or Wi-Fi.</li>
|
11 |
-
<li>You can create your own playlists or mixtapes with the Instagram audio MP3 files.</li>
|
12 |
-
<li>You can share the Instagram audio MP3 files with your friends via email, WhatsApp, Telegram, or other platforms.</li>
|
13 |
-
<li>You can use the Instagram audio MP3 files as ringtones, alarms, or notifications on your phone.</li>
|
14 |
-
<li>You can edit, remix, or mashup the Instagram audio MP3 files with other tools or software.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>The challenges of downloading Instagram audio MP3</h3>
|
17 |
-
<p>However, downloading Instagram audio MP3 is not as easy as it sounds. There are some challenges that you may face when trying to do that. Some of them are:</p>
|
18 |
-
<p>instagram video to mp3 converter<br />
|
19 |
-
download instagram reels audio mp3<br />
|
20 |
-
instagram mp3 downloader online<br />
|
21 |
-
how to save instagram music as mp3<br />
|
22 |
-
instagram to mp3 free download<br />
|
23 |
-
download instagram stories sound mp3<br />
|
24 |
-
instagram live to mp3 converter<br />
|
25 |
-
download instagram igtv audio mp3<br />
|
26 |
-
instagram link to mp3 downloader<br />
|
27 |
-
how to convert instagram video to mp3<br />
|
28 |
-
download instagram posts audio mp3<br />
|
29 |
-
instagram music to mp3 online<br />
|
30 |
-
instagram reels to mp3 converter online<br />
|
31 |
-
how to download instagram songs as mp3<br />
|
32 |
-
instagram video downloader with mp3<br />
|
33 |
-
download instagram stories music mp3<br />
|
34 |
-
instagram live downloader mp3<br />
|
35 |
-
download instagram igtv video mp3<br />
|
36 |
-
instagram link downloader mp3<br />
|
37 |
-
how to save instagram video as mp3<br />
|
38 |
-
download instagram posts video mp3<br />
|
39 |
-
instagram music downloader mp3<br />
|
40 |
-
instagram reels downloader mp3 online<br />
|
41 |
-
how to convert instagram songs to mp3<br />
|
42 |
-
instagram video converter to mp3 online<br />
|
43 |
-
download instagram stories video and audio mp3<br />
|
44 |
-
instagram live converter to mp3 online<br />
|
45 |
-
download instagram igtv video and audio mp3<br />
|
46 |
-
instagram link converter to mp3 online<br />
|
47 |
-
how to extract audio from instagram video as mp3<br />
|
48 |
-
download instagram posts video and audio mp3<br />
|
49 |
-
instagram music converter to mp3 online<br />
|
50 |
-
instagram reels converter to mp3 online free<br />
|
51 |
-
how to download audio from instagram reels as mp3<br />
|
52 |
-
best instagram video to mp3 converter online<br />
|
53 |
-
download high quality instagram stories audio mp3<br />
|
54 |
-
best instagram live to mp3 converter online<br />
|
55 |
-
download high quality instagram igtv audio mp3<br />
|
56 |
-
best instagram link to mp3 converter online<br />
|
57 |
-
how to get audio from instagram video as mp3 file<br />
|
58 |
-
best instagram posts video to mp3 converter online <br />
|
59 |
-
best instagram music to mp3 converter online <br />
|
60 |
-
best instagram reels to mp3 converter online free <br />
|
61 |
-
how to download high quality audio from instagram reels as mp3 <br />
|
62 |
-
fastest instagram video to mp3 converter online <br />
|
63 |
-
fastest instagram stories audio downloader as mp3 <br />
|
64 |
-
fastest instagram live to mp3 converter online <br />
|
65 |
-
fastest instagram igtv audio downloader as mp3 <br />
|
66 |
-
fastest instagram link to mp3 converter online</p>
|
67 |
-
<ul>
|
68 |
-
<li>Instagram does not provide a direct option to download the audio from its videos, reels, or IGTV. You have to use a third-party tool or website to do that.</li>
|
69 |
-
<li>Some of the third-party tools or websites may not be safe, reliable, or user-friendly. They may contain malware, viruses, ads, pop-ups, or other unwanted elements.</li>
|
70 |
-
<li>Some of the third-party tools or websites may not support all kinds of Instagram links or formats. They may have limitations on the number, size, length, or quality of the files you can download.</li>
|
71 |
-
<li>Some of the third-party tools or websites may not respect the intellectual property rights of the original creators of the Instagram content. They may violate the terms and conditions of Instagram or infringe the copyrights of the owners.</li>
|
72 |
-
</ul>
|
73 |
-
<h2>How to use an Instagram to MP3 converter?</h2>
|
74 |
-
<p>An Instagram to MP3 converter is a tool or website that allows you to download the audio from an Instagram video, reel, or IGTV and save it as an MP3 file on your device. The process is usually simple and straightforward. Here are the common steps that you need to follow:</p>
|
75 |
-
<h3>Step 1: Copy the Instagram link</h3>
|
76 |
-
<p>The first step is to copy the link of the Instagram video, reel, or IGTV that you want to download. You can do that by tapping on the three dots icon on the top right corner of the post and selecting "Copy Link".</p>
<h3>Step 2: Paste the link into the converter</h3>
|
77 |
-
<p>The next step is to paste the link into the Instagram to MP3 converter that you have chosen. You can do that by clicking on the input box and pressing Ctrl+V on your keyboard or right-clicking and selecting "Paste". Alternatively, you can also drag and drop the link into the converter.</p>
|
78 |
-
<h3>Step 3: Choose the output format and quality</h3>
|
79 |
-
<p>The third step is to choose the output format and quality that you want for your Instagram audio MP3 file. Most converters will offer you different options to customize your download, such as MP3, M4A, WAV, FLAC, etc. You can also select the bitrate, sample rate, volume, or channel of your file. Generally, the higher the quality, the larger the file size.</p>
|
80 |
-
<h3>Step 4: Download and enjoy the Instagram audio MP3</h3>
|
81 |
-
<p>The final step is to download and enjoy the Instagram audio MP3 file that you have converted. You can do that by clicking on the download button or link that the converter will provide you. Depending on your browser settings, you may need to choose a destination folder or confirm the download. Once the download is complete, you can play, share, or edit the Instagram audio MP3 file as you wish.</p>
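<p>If you are comfortable with the command line, the same extraction can also be done locally instead of through a web converter. The sketch below uses the open-source yt-dlp tool, which is not one of the converters reviewed in this article; it assumes yt-dlp and FFmpeg are installed and that the post is publicly accessible (some Instagram links still require logging in).</p>
<pre><code># Python sketch: call the yt-dlp command-line tool to extract MP3 audio
# from a public Instagram post URL. Requires yt-dlp and FFmpeg installed.
import subprocess

post_url = "https://www.instagram.com/reel/EXAMPLE/"   # placeholder URL

subprocess.run(
    ["yt-dlp", "-x", "--audio-format", "mp3", "-o", "%(title)s.%(ext)s", post_url],
    check=True,
)
</code></pre>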
|
82 |
-
<h2>What are the best Instagram to MP3 converters?</h2>
|
83 |
-
<p>There are many Instagram to MP3 converters available online, but not all of them are equally good. Some of them may have more features, faster speed, better quality, or easier interface than others. To help you choose the best one for your needs, we have reviewed some of the most popular and reliable ones below.</p>
|
84 |
-
<h3>OKmusi Instagram Link Downloader</h3>
|
85 |
-
<p>One of the best Instagram to MP3 converters that we recommend is OKmusi Instagram Link Downloader. This is a free and powerful online tool that allows you to download any Instagram video, reel, or IGTV as an MP3 file with high quality and fast speed. It also supports other social media platforms, such as YouTube, Facebook, Twitter, TikTok, etc.</p>
|
86 |
-
<h4>Features of OKmusi Instagram Link Downloader</h4>
|
87 |
-
<p>Some of the features that make OKmusi Instagram Link Downloader stand out are:</p>
|
88 |
-
<ul>
|
89 |
-
<li>It is 100% free and safe to use. There is no registration, installation, or subscription required.</li>
|
90 |
-
<li>It is compatible with all browsers and devices. You can use it on Windows, Mac, Linux, Android, iOS, etc.</li>
|
91 |
-
<li>It is easy and intuitive to use. You just need to copy and paste the link and click on download.</li>
|
92 |
-
<li>It supports multiple output formats and qualities. You can choose from MP3, M4A, WAV, FLAC, etc., and from 128kbps to 320kbps.</li>
|
93 |
-
<li>It has a batch download feature that allows you to download multiple files at once.</li>
|
94 |
-
<li>It has a built-in player that allows you to preview the files before downloading.</li>
|
95 |
-
</ul>
|
96 |
-
<h4>Pros and cons of OKmusi Instagram Link Downloader</h4>
|
97 |
-
<p>Some of the pros and cons of OKmusi Instagram Link Downloader are:</p>
|
98 |
-
<table>
|
99 |
-
<tr><th>Pros</th><th>Cons</th></tr>
|
100 |
-
<tr><td>Free and safe</td><td>May have ads or pop-ups</td></tr>
|
101 |
-
<tr><td>Compatible and easy</td><td>May not support some rare formats</td></tr>
|
102 |
-
<tr><td>Versatile and fast</td><td>May have some bugs or errors</td></tr>
|
103 |
-
<tr><td>Premium and convenient</td><td>May not respect some intellectual property rights</td></tr>
|
104 |
-
</table>
|
105 |
-
<h3>Other alternatives to OKmusi Instagram Link Downloader</h3>
|
106 |
-
<p>If you are not satisfied with OKmusi Instagram Link Downloader or want to try other options, here are some other alternatives that you can consider:</p>
|
107 |
-
<ul>
|
108 |
-
<li>[Snappea Online Downloader]: This is another free and powerful online tool that allows you to download any Instagram video, reel, or IGTV as an MP3 file with high quality and fast speed. It also supports other social media platforms, such as YouTube, Facebook, Twitter, TikTok, etc.</li>
|
109 |
-
<li>[4K Video Downloader]: This is a premium and professional desktop software that allows you to download any Instagram video, reel, or IGTV as an MP3 file with high quality and fast speed. It also supports other video platforms, such as YouTube, Vimeo, Dailymotion, etc. It has more advanced features, such as subtitles, playlists, channels, smart mode, etc.</li>
|
110 |
-
<li>[iTubeGo Instagram Downloader]: This is another premium and professional desktop software that allows you to download any Instagram video, reel, or IGTV as an MP3 file with high quality and fast speed. It also supports other video and audio platforms, such as YouTube, Spotify, SoundCloud, etc. It has more features, such as cut, merge, convert, edit, etc.</li>
|
111 |
-
</ul>
|
112 |
-
<h2>Conclusion</h2>
|
113 |
-
<p>In conclusion, downloading Instagram audio MP3 is a great way to enjoy the content from Instagram offline, share it with your friends, or use it for other purposes. However, you need to use a third-party tool or website to do that, as Instagram does not provide a direct option to download the audio from its videos, reels, or IGTV. We have shown you what is Instagram audio MP3, why you may want to download it, how to use an Instagram to MP3 converter, and what are the best converters available online. We hope this article has been helpful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.</p>
|
114 |
-
<h2>FAQs</h2>
|
115 |
-
<p>Here are some of the frequently asked questions about downloading Instagram audio MP3:</p>
|
116 |
-
<ol>
|
117 |
-
<li>Is it legal to download Instagram audio MP3?</li>
|
118 |
-
<p>It depends on the content and the purpose of your download. Generally, it is legal to download Instagram audio MP3 for personal use only, as long as you do not distribute, sell, or modify the original content without the permission of the owner. However, some content may be protected by intellectual property rights or other laws that prohibit downloading or copying without authorization. Therefore, you should always respect the rights of the original creators and follow the terms and conditions of Instagram when downloading Instagram audio MP3.</p>
|
119 |
-
<li>Is it safe to download Instagram audio MP3?</li>
|
120 |
-
<p>It depends on the tool or website that you use to download Instagram audio MP3. Some of them may be safe, reliable, and user-friendly, while others may be unsafe, unreliable, or user-unfriendly. Some of them may contain malware, viruses, ads, pop-ups, or other unwanted elements that may harm your device or compromise your privacy. Therefore, you should always use a trusted and reputable tool or website to download Instagram audio MP3, such as the ones we have recommended in this article.</p>
|
121 |
-
<li>How to download Instagram audio MP3 on iPhone or iPad?</li>
|
122 |
-
<p>Unfortunately, most of the online tools or websites that allow you to download Instagram audio MP3 do not work on iPhone or iPad, as they require a browser that supports downloading files, such as Chrome or Firefox. However, there are some workarounds that you can try, such as using a file manager app, a cloud service app, or a screen recorder app. You can find more details on how to do that in this article: [How to Download Instagram Videos on iPhone].</p>
|
123 |
-
<li>How to download Instagram audio MP3 on Android?</li>
|
124 |
-
<p>Downloading Instagram audio MP3 on Android is much easier than on iPhone or iPad, as most of the online tools or websites that allow you to download Instagram audio MP3 work on Android browsers, such as Chrome or Firefox. You just need to follow the same steps that we have shown you in this article: copy the link, paste it into the converter, choose the format and quality, and download the file. You can also use some of the desktop software that we have recommended in this article, such as 4K Video Downloader or iTubeGo Instagram Downloader, and transfer the files to your Android device via USB cable or Wi-Fi.</p>
|
125 |
-
<li>How to download Instagram audio MP3 on PC or Mac?</li>
|
126 |
-
<p>Downloading Instagram audio MP3 on PC or Mac is also very easy, as most of the online tools or websites that allow you to download Instagram audio MP3 work on PC or Mac browsers, such as Chrome or Firefox. You just need to follow the same steps that we have shown you in this article: copy the link, paste it into the converter, choose the format and quality, and download the file. You can also use some of the desktop software that we have recommended in this article, such as 4K Video Downloader or iTubeGo Instagram Downloader, and install them on your PC or Mac.</p> 197e85843d<br />
|
127 |
-
<br />
|
128 |
-
<br />
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 8 Ball Pool Offline and Join the Online League and Tournaments.md
DELETED
@@ -1,111 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download 8 Ball Pool Offline and Enjoy the Best Billiards Game on Your Device</h1>
|
3 |
-
<p>If you love playing billiards, you must have heard of 8 ball pool, the most popular and realistic pool game on the internet. But did you know that you can also play 8 ball pool offline, without any internet connection or waiting for opponents? In this article, we will show you how to download 8 ball pool offline and enjoy the best billiards game on your device.</p>
|
4 |
-
<h2>Benefits of playing 8 ball pool offline</h2>
|
5 |
-
<p>Playing 8 ball pool offline has many advantages over playing online. Here are some of them:</p>
|
6 |
-
<h2>download 8 ball pool offline</h2><br /><p><b><b>DOWNLOAD</b> ✫✫✫ <a href="https://urlin.us/2uSTyr">https://urlin.us/2uSTyr</a></b></p><br /><br />
|
7 |
-
<ul>
|
8 |
-
<li><b>No internet required:</b> You can play 8 ball pool offline anytime, anywhere, without worrying about your data usage or wifi connection. You can also save your battery life and avoid interruptions from calls or messages.</li>
|
9 |
-
<li><b>No waiting for opponents:</b> You can play 8 ball pool offline against bots with high level artificial intelligence. You don't have to wait for other players to join or finish their games. You can also choose the difficulty level and the game mode (8 ball or 9 ball) according to your preference.</li>
|
10 |
-
<li><b>No ads:</b> You can play 8 ball pool offline without any annoying ads popping up on your screen. You can also avoid in-app purchases and enjoy the game for free.</li>
|
11 |
-
<li><b>No pressure:</b> You can play 8 ball pool offline at your own pace and without any stress. You don't have to worry about losing coins or ranking points. You can also practice your skills and improve your game without any competition.</li>
|
12 |
-
</ul>
|
13 |
-
<h2>How to download 8 ball pool offline for Android</h2>
|
14 |
-
<p>If you have an Android device, you can download 8 ball pool offline from the Google Play Store. Here are the steps to follow:</p>
|
15 |
-
<ol>
|
16 |
-
<li>Open the Google Play Store app on your device and search for "8 Ball Billiards Offline Pool".</li>
|
17 |
-
<li>Select the app with the icon of a green cue stick and a yellow background. It is developed by SNG Games.</li>
|
18 |
-
<li>Tap on "Install" and wait for the app to download and install on your device.</li>
|
19 |
-
<li>Once the app is installed, tap on "Open" and enjoy playing 8 ball pool offline.</li>
|
20 |
-
</ol>
|
21 |
-
<p>You can also scan this QR code with your device's camera to go directly to the app's page on the Google Play Store:</p>
|
22 |
-
<img src="https://chart.googleapis.com/chart?cht=qr&chl=https%3A%2F%2Fplay.google.com%2Fstore%2Fapps%2Fdetails%3Fid%3Dcom.SNG.Pool.Billiard&chs=180x180&choe=UTF-8&chld=L|2" alt="QR code">
|
23 |
-
<h2>How to download 8 ball pool offline for iOS</h2>
|
24 |
-
<p>If you have an iOS device, you can download 8 ball pool offline from the App Store. Here are the steps to follow:</p>
|
25 |
-
<ol>
|
26 |
-
<li>Open the App Store app on your device and search for "Pool Break Lite".</li>
|
27 |
-
<li>Select the app with the icon of a blue cue stick and a red background. It is developed by Kinetic Bytes.</li>
|
28 |
-
<li>Tap on "Get" and wait for the app to download and install on your device.</li>
|
29 |
-
<li>Once the app is installed, tap on "Open" and enjoy playing 8 ball pool offline.</li>
|
30 |
-
</ol>
|
31 |
-
<p>You can also scan this QR code with your device's camera to go directly to the app's page on the App Store:</p>
|
32 |
-
<img src="https://chart.googleapis.com/chart?cht=qr&chl=https%3A%2F%2Fapps.apple.com%2Fus%2Fapp%2Fpool-break-lite%2Fid367509043&chs=180x180&choe=UTF-8&chld=L|2" alt="QR code">
|
33 |
-
<h2>How to download 8 ball pool offline for PC</h2>
|
34 |
-
<p>If you have a PC, you can download 8 ball pool offline from the Microsoft Store. Here are the steps to follow:</p>
|
35 |
-
<ol>
|
36 |
-
<li>Open the Microsoft Store app on your PC and search for "8 Ball Pool Offline".</li>
|
37 |
-
<li>Select the app with the icon of a white cue stick and a black background. It is developed by Game Developer.</li>
|
38 |
-
<li>Click on "Get" and wait for the app to download and install on your PC.</li>
|
39 |
-
<li>Once the app is installed, click on "Launch" and enjoy playing 8 ball pool offline.</li>
|
40 |
-
</ol>
|
41 |
-
<p>You can also click on this link to go directly to the app's page on the Microsoft Store:</p>
|
42 |
-
<p>How to download 8 ball pool offline for free<br />
|
43 |
-
Download 8 ball pool offline mod apk<br />
|
44 |
-
Download 8 ball pool offline version for android<br />
|
45 |
-
Download 8 ball pool offline game for pc<br />
|
46 |
-
Download 8 ball pool offline without internet<br />
|
47 |
-
Download 8 ball pool offline unlimited coins<br />
|
48 |
-
Download 8 ball pool offline hack<br />
|
49 |
-
Download 8 ball pool offline latest update<br />
|
50 |
-
Download 8 ball pool offline with friends<br />
|
51 |
-
Download 8 ball pool offline multiplayer<br />
|
52 |
-
Download 8 ball pool offline no ads<br />
|
53 |
-
Download 8 ball pool offline cheat<br />
|
54 |
-
Download 8 ball pool offline full version<br />
|
55 |
-
Download 8 ball pool offline for windows 10<br />
|
56 |
-
Download 8 ball pool offline for ios<br />
|
57 |
-
Download 8 ball pool offline for laptop<br />
|
58 |
-
Download 8 ball pool offline for mac<br />
|
59 |
-
Download 8 ball pool offline pro<br />
|
60 |
-
Download 8 ball pool offline premium<br />
|
61 |
-
Download 8 ball pool offline cracked<br />
|
62 |
-
Download 8 ball pool offline best graphics<br />
|
63 |
-
Download 8 ball pool offline realistic physics<br />
|
64 |
-
Download 8 ball pool offline tournaments<br />
|
65 |
-
Download 8 ball pool offline levels<br />
|
66 |
-
Download 8 ball pool offline challenges<br />
|
67 |
-
Download 8 ball pool offline tips and tricks<br />
|
68 |
-
Download 8 ball pool offline guide<br />
|
69 |
-
Download 8 ball pool offline review<br />
|
70 |
-
Download 8 ball pool offline gameplay<br />
|
71 |
-
Download 8 ball pool offline features<br />
|
72 |
-
Download 8 ball pool offline comparison<br />
|
73 |
-
Download 8 ball pool offline alternatives<br />
|
74 |
-
Download 8 ball pool offline ranking system<br />
|
75 |
-
Download 8 ball pool offline custom cues<br />
|
76 |
-
Download 8 ball pool offline rewards and prizes<br />
|
77 |
-
Download 8 ball pool offline fun and addictive<br />
|
78 |
-
Download 8 ball pool offline easy and simple<br />
|
79 |
-
Download 8 ball pool offline fast and smooth<br />
|
80 |
-
Download 8 ball pool offline high quality and performance<br />
|
81 |
-
Download 8 ball pool offline low size and storage<br />
|
82 |
-
Download 8 ball pool offline safe and secure<br />
|
83 |
-
Download 8 ball pool offline compatible and supported devices<br />
|
84 |
-
Download 8 ball pool offline online mode option<br />
|
85 |
-
Download 8 ball pool offline editor's choice app store google play store</p>
|
86 |
-
<a href="https://www.microsoft.com/en-us/p/8-ball-pool-offline/9nblggh4vz0w">https://www.microsoft.com/en-us/p/8-ball-pool-offline/9nblggh4vz0w</a>
|
87 |
-
<h2>Tips and tricks to improve your skills in 8 ball pool offline</h2>
|
88 |
-
<p>Playing 8 ball pool offline is not only fun, but also a great way to practice your skills and improve your game. Here are some tips and tricks to help you become a better player:</p>
|
89 |
-
<ul>
|
90 |
-
<li><b>How to aim:</b> To aim accurately, you need to align your cue stick with the cue ball and the target ball. You can use the guideline that shows the direction and angle of your shot. You can also adjust the power of your shot by dragging the power bar on the bottom of the screen.</li>
|
91 |
-
<li><b>How to use spin:</b> To use spin, you need to tap on the cue ball icon on the top right corner of the screen. You can then drag your finger on the cue ball to apply different types of spin, such as top spin, back spin, left spin, or right spin. Spin can help you control the cue ball's movement and position after hitting the target ball.</li>
|
92 |
-
<li><b>How to break:</b> To break, you need to hit the rack of balls with enough power and accuracy. You can aim for the center of the first ball or slightly off-center to create more movement. You can also use spin to influence the direction of the cue ball after hitting the rack. A good break can give you an advantage in the game.</li>
|
93 |
-
</ul>
|
94 |
-
<h2>Conclusion</h2>
|
95 |
-
<p>Playing 8 ball pool offline is a great way to enjoy the best billiards game on your device without any internet connection or waiting for opponents. You can download 8 ball pool offline for Android, iOS, or PC from their respective stores and start playing right away. You can also improve your skills and have fun by following our tips and tricks. We hope you found this article helpful and informative. Now go ahead and try 8 ball pool offline for yourself!</p>
|
96 |
-
<h2>FAQs</h2>
|
97 |
-
<p>Here are some frequently asked questions about 8 ball pool offline:</p>
|
98 |
-
<ol>
|
99 |
-
<li><b>Q: Is 8 ball pool offline free?</b>
|
100 |
-
A: Yes, 8 ball pool offline is free to download and play. You don't need to pay anything or make any in-app purchases.</li>
|
101 |
-
<li><b>Q: Can I play 8 ball pool offline with friends?</b>
|
102 |
-
A: Yes, you can play 8 ball pool offline with friends by using the local multiplayer mode. You can either play on the same device or connect two devices via Bluetooth or wifi.</li>
|
103 |
-
<li><b>Q: Can I play 8 ball pool offline online?</b>
|
104 |
-
A: No, you cannot play 8 ball pool offline online. If you want to play online, you need to download 8 ball pool online from Miniclip or Facebook.</li>
|
105 |
-
<li><b>Q: How do I update 8 ball pool offline?</b>
|
106 |
-
A: To update 8 ball pool offline, you need to check for updates on your device's store app. If there is an update available, you can download and install it.</li>
|
107 |
-
<li><b>Q: How do I uninstall 8 ball pool offline?</b>
|
108 |
-
A: To uninstall 8 ball pool offline, you need to go to your device's settings app and find the app manager. Then you can select 8 ball pool offline and tap on "Uninstall". You can also long-press the app icon on your home screen and tap on "Uninstall".</li>
|
109 |
-
</ol></p> 197e85843d<br />
|
110 |
-
<br />
|
111 |
-
<br />
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download FIFA Trung Quc APK and Play with Your Friends Online.md
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>FIFA Trung Quốc APK: A Mobile Soccer Game for Android Users</h1>
|
3 |
-
<p>If you are a fan of soccer and want to experience the thrill of playing with your favorite teams and players on your mobile device, you might want to check out FIFA Trung Quốc APK. This is a mobile version of FIFA World Cup 2022™ that lets you relive the official tournament with any of the 32 qualified nations or rewrite history with 15 non-qualified nations. You can also build your ultimate team with over 15,000 authentic soccer stars from various leagues and compete against other players in different modes. In this article, we will tell you more about FIFA Trung Quốc APK and how you can download it for free.</p>
|
4 |
-
<h2>fifa trung quốc apk</h2><br /><p><b><b>Download Zip</b> ✅ <a href="https://urlin.us/2uSWxW">https://urlin.us/2uSWxW</a></b></p><br /><br />
|
5 |
-
<h2>Features of FIFA Trung Quốc APK</h2>
|
6 |
-
<p>FIFA Trung Quốc APK has many features that make it one of the best mobile soccer games available. Here are some of them:</p>
|
7 |
-
<ul>
|
8 |
-
<li><b>World Cup Mode:</b> You can play through the entire tournament with any of the 32 qualified nations or choose from 15 non-qualified nations to create your own scenario. You can also enjoy authentic World Cup kits, badges, stadiums, and commentary.</li>
|
9 |
-
<li><b>Ultimate Team Mode:</b> You can build your dream team with over 15,000 players from various leagues, including Premier League, La Liga, Bundesliga, Serie A, Ligue 1, and more. You can also train your players, increase their stats and OVR, and customize your formation and tactics.</li>
|
10 |
-
<li><b>PvP Modes:</b> You can challenge other players in various modes, such as Head-to-Head, VS Attack, and Manager Mode. You can also join leagues and tournaments to earn rewards and climb the leaderboards.</li>
|
11 |
-
<li><b>Icons and Heroes:</b> You can add some of the legendary soccer players to your team, such as Paolo Maldini, Ronaldinho, Zidane, Beckham, Ronaldo, and more. You can also celebrate some of the memorable moments from fan-favorite players with new Heroes cards.</li>
|
12 |
-
<li><b>Next-Level Soccer Simulation:</b> You can experience realistic graphics, animations, physics, and sound effects in FIFA Trung Quốc APK. The game also supports up to 60 fps on compatible devices and has new upgraded stadiums and classic FIFA venues.</li>
|
13 |
-
</ul>
|
14 |
-
<h2>Tips and Tricks for FIFA Trung Quốc APK</h2>
|
15 |
-
<p>If you want to improve your skills and performance in FIFA Trung Quốc APK, here are some tips and tricks that might help you:</p>
|
16 |
-
<ul>
|
17 |
-
<li><b>Learn the Controls:</b> The game has simple swipe and tap controls for shooting, passing, dribbling, tackling, and sprinting. You can also use the joystick to move your players and the buttons to perform skill moves. You can adjust the control settings in the options menu.</li>
|
18 |
-
<li><b>Use Chemistry:</b> Chemistry is an important factor that affects your team's performance in Ultimate Team mode. Chemistry is based on factors such as nationality, league, club, position, and formation. You can increase your chemistry by using players that have links with each other or by applying chemistry styles.</li>
|
19 |
-
<li><b>Complete Live Events:</b> Live events are bite-sized challenges that offer various scenarios and rewards. You can earn coins, gems, player items, training XP, skill boosts, and more by completing live events. You can also participate in seasonal events that correspond with real-world tournaments.</li>
|
20 |
-
<li><b>Use Plans:</b> Plans are a way to trade your players and tokens for better rewards. You can access plans in the Ultimate Team menu and select the ones that suit your needs. You can also use the auto-fill feature to quickly fill the plan slots with the required items.</li>
|
21 |
-
<li><b>Upgrade Your Team:</b> You can upgrade your team by using coins, gems, training XP, skill boosts, and player items. You can also use the market to buy and sell players and tokens. You can also use the quick sell feature to get rid of unwanted items for coins.</li>
|
22 |
-
</ul>
|
23 |
-
<h2>Reviews and Ratings of FIFA Trung Quốc APK</h2>
|
24 |
-
<p>FIFA Trung Quốc APK has received positive reviews and ratings from players and critics alike. The game has a 4.5-star rating on Google Play Store and a 4.7-star rating on App Store. Here are some of the comments from the users:</p>
|
25 |
-
<blockquote>
|
26 |
-
<p>"This is the best soccer game I have ever played on my phone. The graphics are amazing, the gameplay is smooth, and the modes are fun. I love playing World Cup mode with my favorite team and players. I also like building my ultimate team and competing with other players online."</p>
|
27 |
-
<p>"I have been playing FIFA games since I was a kid and this one is no exception. It has everything I want in a mobile soccer game. The controls are easy to use, the features are rich, and the content is updated regularly. I especially enjoy playing with icons and heroes from different eras."</p>
|
28 |
-
<p>tải fifa trung quốc apk<br />
|
29 |
-
fifa trung quốc apk mod<br />
|
30 |
-
fifa trung quốc apk 2023<br />
|
31 |
-
fifa trung quốc apk offline<br />
|
32 |
-
fifa trung quốc apk hack<br />
|
33 |
-
fifa trung quốc apk full<br />
|
34 |
-
fifa trung quốc apk mới nhất<br />
|
35 |
-
fifa trung quốc apk cho android<br />
|
36 |
-
fifa trung quốc apk không cần vpn<br />
|
37 |
-
fifa trung quốc apk free download<br />
|
38 |
-
cách tải fifa trung quốc apk<br />
|
39 |
-
fifa trung quốc apk obb<br />
|
40 |
-
fifa trung quốc apk data<br />
|
41 |
-
fifa trung quốc apk online<br />
|
42 |
-
fifa trung quốc apk 2022<br />
|
43 |
-
hướng dẫn tải fifa trung quốc apk<br />
|
44 |
-
fifa trung quốc apk ios<br />
|
45 |
-
fifa trung quốc apk phiên bản mới<br />
|
46 |
-
fifa trung quốc apk đồ họa cao<br />
|
47 |
-
fifa trung quốc apk tiếng việt<br />
|
48 |
-
link tải fifa trung quốc apk<br />
|
49 |
-
fifa trung quốc apk 2021<br />
|
50 |
-
fifa trung quốc apk update<br />
|
51 |
-
fifa trung quốc apk mien phi<br />
|
52 |
-
fifa trung quốc apk tinhte<br />
|
53 |
-
cài đặt fifa trung quốc apk<br />
|
54 |
-
fifa trung quốc apk 2020<br />
|
55 |
-
fifa trung quốc apk 2019<br />
|
56 |
-
fifa trung quốc apk 2018<br />
|
57 |
-
fifa trung quốc apk 2017<br />
|
58 |
-
download fifa trung quốc apk<br />
|
59 |
-
giới thiệu fifa trung quốc apk<br />
|
60 |
-
đánh giá fifa trung quốc apk<br />
|
61 |
-
so sánh fifa trung quốc apk và fifa mobile<br />
|
62 |
-
lỗi khi tải fifa trung quốc apk<br />
|
63 |
-
cách chơi fifa trung quốc apk<br />
|
64 |
-
cách cập nhật fifa trung quốc apk<br />
|
65 |
-
cách hack fifa trung quốc apk<br />
|
66 |
-
cách mod fifa trung quốc apk<br />
|
67 |
-
cách offline fifa trung quốc apk<br />
|
68 |
-
những tính năng mới của fifa trung quốc apk<br />
|
69 |
-
những điểm hấp dẫn của fifa trung quốc apk<br />
|
70 |
-
những điểm yếu của fifa trung quốc apk<br />
|
71 |
-
những mẹo hay khi chơi fifa trung quốc apk<br />
|
72 |
-
những lưu ý khi tải và cài đặt fifa trung quốc apk</p>
|
73 |
-
<p>"This game is awesome. It has a lot of variety and challenge for soccer fans of all levels. The World Cup mode is very realistic and immersive. The ultimate team mode is very addictive and rewarding. The PvP modes are very competitive and exciting. The icons and heroes are very cool and nostalgic."</p>
|
74 |
-
</blockquote>
|
75 |
-
<h2>Conclusion</h2>
|
76 |
-
<p>FIFA Trung Quốc APK is a mobile soccer game that lets you play with your favorite teams and players on your Android device. You can relive the official World Cup 2022™ tournament or create your own scenario with 15 non-qualified nations. You can also build your dream team with over 15,000 authentic soccer stars and compete against other players in various modes. You can also enjoy realistic graphics, animations, physics, and sound effects in FIFA Trung Quốc APK.</p>
|
77 |
-
<p>If you are interested in FIFA Trung Quốc APK, you can download it for free from the link below. You will need an Android device with at least 4 GB of RAM and 2 GB of free storage space to run the game smoothly. You will also need an internet connection to access some of the features and content of the game.</p>
|
78 |
-
<p>Download FIFA Trung Quốc APK here: [text]</p>
|
79 |
-
<h2>FAQs</h2>
|
80 |
-
<p>Here are some of the frequently asked questions about FIFA Trung Quốc APK:</p>
|
81 |
-
<ul>
|
82 |
-
<li><b>Q: Is FIFA Trung Quốc APK safe to download?</b></li>
|
83 |
-
<li>A: Yes, FIFA Trung Quốc APK is safe to download from the official link provided in this article. The game does not contain any viruses, malware, or spyware that could harm your device or data.</li>
|
84 |
-
<li><b>Q: Is FIFA Trung Quốc APK free to play?</b></li>
|
85 |
-
<li>A: Yes, FIFA Trung Quốc APK is free to play, but it contains some in-app purchases that can enhance your gaming experience. You can buy coins, gems, player items, skill boosts, and more with real money or earn them by playing the game.</li>
|
86 |
-
<li><b>Q: How can I change the language of FIFA Trung Quốc APK?</b></li>
|
87 |
-
<li>A: You can change the language of FIFA Trung Quốc APK by going to the settings menu and selecting the language option. You can choose from English, Chinese, Vietnamese, Thai, Indonesian, Malay, Korean, Japanese, Arabic, Turkish, Russian, Portuguese, Spanish, French, German, Italian, Dutch, Polish, Swedish, Norwegian, Danish, Finnish, Greek, Romanian, Hungarian, Czech, Slovakian, Croatian, Slovenian, and Bulgarian.</li>
|
88 |
-
<li><b>Q: How can I contact the developers of FIFA Trung Quốc APK?</b></li>
|
89 |
-
<li>A: You can contact the developers of FIFA Trung Quốc APK by sending an email to [email] or by visiting their official website at [website]. You can also follow them on their social media accounts at [Facebook], [Twitter], [Instagram], and [YouTube].</li>
|
90 |
-
<li><b>Q: How can I update FIFA Trung Quốc APK?</b></li>
|
91 |
-
<li>A: You can update FIFA Trung Quốc APK by downloading the latest version from the link provided in this article or by checking the Google Play Store or App Store for updates. You can also enable the auto-update feature in your device settings to get the latest updates automatically.</li>
|
92 |
-
</ul>
|
93 |
-
<p>I hope you enjoyed reading this article and learned something new about FIFA Trung Quốc APK. If you have any questions, comments, or feedback, please feel free to leave them below. Thank you for your time and attention.</p> 197e85843d<br />
|
94 |
-
<br />
|
95 |
-
<br />
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Garena Free Fire Hack Mod Apk 1.59.5 and Dominate the Battle Royale.md
DELETED
@@ -1,101 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Garena Free Fire Hack Mod APK 1.59.5(Unlimited Diamonds)</h1>
|
3 |
-
<p>If you are a fan of battle royale games, you must have heard of Garena Free Fire, one of the most popular and downloaded games on Android and iOS devices. But do you know that there is a hack mod version of this game that can give you unlimited diamonds, health, characters, and other advantages? In this article, we will tell you everything you need to know about Garena Free Fire Hack Mod APK 1.59.5(Unlimited Diamonds), how to download and install it, and what features it offers. Read on to find out more.</p>
|
4 |
-
<h2>What is Garena Free Fire?</h2>
|
5 |
-
<p>Garena Free Fire is a multiplayer online battle royale game developed by 111 Dots Studio and published by Garena for Android and iOS devices. It was released in 2017 and has since gained over 500 million downloads on Google Play Store alone. The game is set in a remote island where 50 players parachute down and fight for survival against each other. The last player or team standing wins the match.</p>
|
6 |
-
<h2>download garena free fire hack mod apk 1.59.5(unlimited diamonds)</h2><br /><p><b><b>Download File</b> ••• <a href="https://urlin.us/2uSTUq">https://urlin.us/2uSTUq</a></b></p><br /><br />
|
7 |
-
<h3>Features of Garena Free Fire</h3>
|
8 |
-
<p>Some of the features of Garena Free Fire are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>10-minute matches with fast-paced gameplay.</li>
|
11 |
-
<li>Various modes such as solo, duo, squad, clash squad, ranked, and custom.</li>
|
12 |
-
<li>Different maps such as Bermuda, Purgatory, Kalahari, and Bermuda Remastered.</li>
|
13 |
-
<li>A wide range of weapons, vehicles, items, and characters to choose from.</li>
|
14 |
-
<li>In-game voice chat and guild system.</li>
|
15 |
-
<li>Regular updates with new content and events.</li>
|
16 |
-
</ul>
|
17 |
-
<h4>How to play Garena Free Fire</h4>
|
18 |
-
<p>To play Garena Free Fire, you need to follow these steps:</p>
|
19 |
-
<ol>
|
20 |
-
<li>Download and install the game from Google Play Store or App Store.</li>
|
21 |
-
<li>Create an account or log in with your existing one.</li>
|
22 |
-
<li>Select a mode and a map to play.</li>
|
23 |
-
<li>Wait for the match to start and jump from the plane.</li>
|
24 |
-
<li>Loot weapons, items, and vehicles from buildings or crates.</li>
|
25 |
-
<li>Fight against other players and avoid the shrinking safe zone.</li>
|
26 |
-
<li>Survive till the end and win the match.</li>
|
27 |
-
</ol>
|
28 |
-
<h2>What is Garena Free Fire Hack Mod APK?</h2>
|
29 |
-
<p>Garena Free Fire Hack Mod APK is a modified version of the original game that gives you access to various hack features that are not available in the official version. These features can help you to get an edge over your opponents and enjoy the game more. However, using this mod apk may also result in some risks such as getting banned from the game or getting infected by malware. So use it at your own discretion.</p>
|
30 |
-
<h3>Features of Garena Free Fire Hack Mod APK</h3>
|
31 |
-
<p>Some of the features of Garena Free Fire Hack Mod APK are:</p>
|
32 |
-
<h4>Unlimited health</h4>
|
33 |
-
<p>This feature allows you to have unlimited health in the game. This means that you will not die even if you get shot or fall from a height. You can also heal yourself instantly without using any medkits or bandages. This will make you invincible in the game and help you to win every match easily.</p>
|
34 |
-
<h4>All characters unlocked</h4>
|
35 |
-
<p>This feature allows you to unlock all the characters in the game without spending any diamonds or coins. You can choose any character you want from the store and customize their appearance, skills, and outfits. You can also switch between different characters in the game and use their unique abilities to your advantage.</p>
|
36 |
-
<h4>No fog</h4>
|
37 |
-
<p>This feature allows you to remove the fog from the game. This means that you will have a clear vision of the map and the enemies. You can spot and shoot them from a long distance without any difficulty. You can also avoid being ambushed or sniped by other players who are hiding in the fog.</p>
|
38 |
-
<p>How to download garena free fire hack mod apk 1.59.5 with unlimited diamonds and coins<br />
|
39 |
-
Garena free fire hack mod apk 1.59.5 latest version unlimited diamonds and health<br />
|
40 |
-
Download garena free fire hack mod apk 1.59.5 for android no root unlimited diamonds<br />
|
41 |
-
Garena free fire hack mod apk 1.59.5 gameplay unlimited diamonds and aimbot<br />
|
42 |
-
Download garena free fire hack mod apk 1.59.5 from happymod.com unlimited diamonds<br />
|
43 |
-
Garena free fire hack mod apk 1.59.5 review unlimited diamonds and esp<br />
|
44 |
-
Download garena free fire hack mod apk 1.59.5 for ios unlimited diamonds and wallhack<br />
|
45 |
-
Garena free fire hack mod apk 1.59.5 features unlimited diamonds and antiban<br />
|
46 |
-
Download garena free fire hack mod apk 1.59.5 for pc unlimited diamonds and speed<br />
|
47 |
-
Garena free fire hack mod apk 1.59.5 update unlimited diamonds and auto headshot<br />
|
48 |
-
Download garena free fire hack mod apk 1.59.5 offline unlimited diamonds and skins<br />
|
49 |
-
Garena free fire hack mod apk 1.59.5 download link unlimited diamonds and weapons<br />
|
50 |
-
Download garena free fire hack mod apk 1.59.5 online unlimited diamonds and rank<br />
|
51 |
-
Garena free fire hack mod apk 1.59.5 tutorial unlimited diamonds and characters<br />
|
52 |
-
Download garena free fire hack mod apk 1.59.5 without verification unlimited diamonds and gold<br />
|
53 |
-
Garena free fire hack mod apk 1.59.5 installation unlimited diamonds and bundles<br />
|
54 |
-
Download garena free fire hack mod apk 1.59.5 for free unlimited diamonds and emotes<br />
|
55 |
-
Garena free fire hack mod apk 1.59.5 generator unlimited diamonds and redeem codes<br />
|
56 |
-
Download garena free fire hack mod apk 1.59.5 mega mod unlimited diamonds and pets<br />
|
57 |
-
Garena free fire hack mod apk 1.59.5 obb file unlimited diamonds and gloo wall<br />
|
58 |
-
Download garena free fire hack mod apk 1.59.5 zip file unlimited diamonds and elite pass<br />
|
59 |
-
Garena free fire hack mod apk 1.59.5 direct download unlimited diamonds and vouchers<br />
|
60 |
-
Download garena free fire hack mod apk 1.59.5 mediafire unlimited diamonds and magic cube<br />
|
61 |
-
Garena free fire hack mod apk 1.59.5 best settings unlimited diamonds and sensitivity<br />
|
62 |
-
Download garena free fire hack mod apk 1.59.5 new update unlimited diamonds and events</p>
|
63 |
-
<h4>No grass</h4>
|
64 |
-
<p>This feature allows you to remove the grass from the game. This means that you will have a smooth and fast gameplay without any lag or glitches. You can also see the enemies and items more easily on the ground without any obstruction. You can also move and run faster without being slowed down by the grass.</p>
|
65 |
-
<h4>Unlimited diamonds</h4>
|
66 |
-
<p>This feature allows you to have unlimited diamonds in the game. Diamonds are the premium currency of the game that can be used to buy various items, characters, outfits, crates, and more. You can also use diamonds to spin the lucky wheel and get rare rewards. With unlimited diamonds, you can buy anything you want in the game without spending any real money.</p>
|
67 |
-
<h4>Customize character</h4>
|
68 |
-
<p>This feature allows you to customize your character in the game. You can change their hair, skin, eyes, clothes, accessories, and more. You can also create your own unique style and personality for your character. You can also save your customizations and use them in different matches.</p>
|
69 |
-
<h2>How to download and install Garena Free Fire Hack Mod APK?</h2>
|
70 |
-
<p>If you want to download and install Garena Free Fire Hack Mod APK, you need to follow these steps:</p>
|
71 |
-
<h3>Download link and requirements</h3>
|
72 |
-
<p>The download link for Garena Free Fire Hack Mod APK 1.59.5(Unlimited Diamonds) is [here]. The file size is about 700 MB and you need to have at least 2 GB of free space on your device. You also need to have Android 4.1 or higher version to run this mod apk.</p>
|
73 |
-
<h3>Installation steps</h3>
|
74 |
-
<ol>
|
75 |
-
<li>Before installing the mod apk, you need to uninstall the original game from your device.</li>
|
76 |
-
<li>Then, you need to enable the unknown sources option on your device settings. This will allow you to install apps from third-party sources.</li>
|
77 |
-
<li>Next, you need to download the mod apk file from the link given above and save it on your device.</li>
|
78 |
-
<li>After that, you need to locate the file and tap on it to start the installation process.</li>
|
79 |
-
<li>Follow the instructions on the screen and wait for the installation to complete.</li>
|
80 |
-
<li>Once done, you can launch the game from your app drawer or home screen.</li>
|
81 |
-
<li>Enjoy playing Garena Free Fire Hack Mod APK with unlimited diamonds and other features.</li>
|
82 |
-
</ol>
|
83 |
-
<h2>Conclusion and FAQs</h2>
|
84 |
-
<p>In conclusion, Garena Free Fire Hack Mod APK is a modified version of the original game that gives you access to various hack features that are not available in the official version. These features can help you to get an edge over your opponents and enjoy the game more. However, using this mod apk may also result in some risks such as getting banned from the game or getting infected by malware. So use it at your own discretion.</p>
|
85 |
-
<p>Here are some FAQs about Garena Free Fire Hack Mod APK:</p>
|
86 |
-
<table>
|
87 |
-
<tr><td><b>Q: Is Garena Free Fire Hack Mod APK safe to use?</b></td><td><b>A: There is no guarantee that Garena Free Fire Hack Mod APK is safe to use. It may contain viruses or malware that can harm your device or steal your personal information. It may also violate the terms and conditions of the game and get you banned from playing it. So use it at your own risk.</b></td></tr>
|
88 |
-
<tr><td><b>Q: How can I update Garena Free Fire Hack Mod APK?</b></td><td><b>A: To update Garena Free Fire Hack Mod APK, you need to download the latest version of the mod apk file from a reliable source and install it over the existing one. However, you may lose your progress and data if you do this. So it is better to backup your data before updating.</b></td></tr>
|
89 |
-
<tr><td><b>Q: Can I play Garena Free Fire Hack Mod APK with my friends?</b></td><td><b>A: Yes, you can play Garena Free Fire Hack Mod APK with your friends who are also using the same mod apk version. However, you may not be able to play with your friends who are using the official version of the game as they may have different servers and versions.</b></td></tr> <tr><td><b>Q: How can I get more diamonds in Garena Free Fire Hack Mod APK?</b></td><td><b>A: You can get more diamonds in Garena Free Fire Hack Mod APK by using the unlimited diamonds feature. This feature allows you to have unlimited diamonds in the game that you can use to buy anything you want. You can also earn more diamonds by completing missions, events, and achievements in the game.</b></td></tr>
|
90 |
-
<tr><td><b>Q: What are the disadvantages of using Garena Free Fire Hack Mod APK?</b></td><td><b>A: Some of the disadvantages of using Garena Free Fire Hack Mod APK are:</b>
|
91 |
-
<ul>
|
92 |
-
<li>You may lose the fun and challenge of playing the game as it becomes too easy and boring.</li>
|
93 |
-
<li>You may face technical issues such as crashes, errors, bugs, or lag while playing the game.</li>
|
94 |
-
<li>You may get detected by the anti-cheat system of the game and get banned from playing it.</li>
|
95 |
-
<li>You may expose your device and data to security risks such as viruses or malware.</li>
|
96 |
-
</ul>
|
97 |
-
</td></tr>
|
98 |
-
</table>
|
99 |
-
<p>I hope this article has helped you to learn more about Garena Free Fire Hack Mod APK 1.59.5(Unlimited Diamonds) and how to download and install it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy gaming!</p> 197e85843d<br />
|
100 |
-
<br />
|
101 |
-
<br />
|
spaces/1phancelerku/anime-remove-background/Arceus X V52 - Unlock Unlimited Features in Roblox with this Android Mod Menu.md
DELETED
@@ -1,138 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Arceus X V52 Download: How to Get the Best Roblox Mod Menu for Android</h1>
|
3 |
-
<p>If you are a fan of Roblox and you want to enhance your gaming experience on your Android device, you might be interested in downloading Arceus X V52, the latest version of the best Roblox mod menu for Android. In this article, we will tell you what Arceus X is, how to download it, how to use it, and what are some alternatives to it.</p>
|
4 |
-
<h2>What is Arceus X?</h2>
|
5 |
-
<p>Arceus X is the first and one of the most widely used Roblox mod menus/exploits developed specifically for Android. It allows you to use features such as Android LuaU Execution, Infinite Jump, Super Speed, Btools, Script Hub, and more. Arceus X APK is developed using Node.js, C++, and Java. It is an Android application with a floating menu for executing scripts while you are in the game.</p>
|
6 |
-
<h2>arceus x v52 download</h2><br /><p><b><b>Download Zip</b> ✓ <a href="https://jinyurl.com/2uNMVb">https://jinyurl.com/2uNMVb</a></b></p><br /><br />
|
7 |
-
<h3>Features of Arceus X</h3>
|
8 |
-
<p>Some of the features that Arceus X offers are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Android LuaU Execution: You can run LuaU scripts on your Android device without any limitations.</li>
|
11 |
-
<li>Infinite Jump: You can jump as high as you want in any game.</li>
|
12 |
-
<li>Super Speed: You can move faster than normal in any game.</li>
|
13 |
-
<li>Btools: You can delete, copy, or move any object in any game.</li>
|
14 |
-
<li>Script Hub: You can access a collection of scripts for various games from the app.</li>
|
15 |
-
<li>More!: You can also use features such as Fly, Noclip, God Mode, ESP, Aimbot, and more.</li>
|
16 |
-
</ul>
|
17 |
-
<h3>Benefits of using Arceus X</h3>
|
18 |
-
<p>Some of the benefits that Arceus X provides are:</p>
|
19 |
-
<ul>
|
20 |
-
<li>You can have more fun and excitement in playing Roblox games on your Android device.</li>
|
21 |
-
<li>You can explore new possibilities and challenges in Roblox games that you couldn't before.</li>
|
22 |
-
<li>You can impress your friends and other players with your skills and abilities.</li>
|
23 |
-
<li>You can save time and money by not having to buy Robux or premium items.</li>
|
24 |
-
</ul>
|
25 |
-
<h2>How to download Arceus X V52?</h2>
|
26 |
-
<p>If you want to download Arceus X V52, the latest version of the app, you need to follow these steps:</p>
|
27 |
-
<h3>Steps to download Arceus X V52</h3>
|
28 |
-
<ol>
|
29 |
-
<li>Click on this link to go to the official website of Arceus X.</li>
|
30 |
-
<li>Scroll down and click on the "Download Now" button.</li>
|
31 |
-
<li>You will be redirected to a linkvertise page where you need to complete some tasks such as watching a video or installing an app.</li>
|
32 |
-
<li>After completing the tasks, you will get a key that you need to copy and paste in the app.</li>
|
33 |
-
<li>You will then be able to download the Arceus X V52 APK file on your device.</li>
|
34 |
-
<li>Install the app and open it. You will need to enter the key that you got from linkvertise.</li>
|
35 |
-
<li>Congratulations! You have successfully downloaded Arceus X V52 on your device.</li>
|
36 |
-
</ol>
|
37 |
-
<h3>Tips to avoid linkvertise ads</h3>
|
38 |
-
<p>If you don't want to deal with linkvertise ads, you can follow these tips:</p>
|
39 |
-
<ul>
|
40 |
-
<li>You can use an ad blocker or a VPN app on your device to bypass linkvertise ads.</li>
|
41 |
-
<li>You can use a different browser or device to access the link.</li>
|
42 |
-
<li>You can wait for a few minutes or hours and try again later.</li>
|
43 |
-
</ul>
|
44 |
-
<h2>How to use Arceus X V52?</h2>
|
45 |
-
<p>Once you have downloaded and installed Arceus X V52 on your device, you can use it to mod Roblox games on your Android device. Here is how to use it:</p>
|
46 |
-
<h3>How to open the floating menu</h3>
|
47 |
-
<p>To open the floating menu of Arceus X, you need to do the following:</p>
|
48 |
-
<p>arceus x v52 apk download<br />
|
49 |
-
arceus x v52 roblox mod menu<br />
|
50 |
-
arceus x v52 public beta download<br />
|
51 |
-
arceus x v52 android roblox exploit<br />
|
52 |
-
arceus x v52 script executor for roblox<br />
|
53 |
-
arceus x v52 latest version download<br />
|
54 |
-
arceus x v52 free roblox cheats<br />
|
55 |
-
arceus x v52 no linkvertise download<br />
|
56 |
-
arceus x v52 features and overview<br />
|
57 |
-
arceus x v52 how to use guide<br />
|
58 |
-
arceus x v52 luau execution in roblox<br />
|
59 |
-
arceus x v52 infinite jump and speed hack<br />
|
60 |
-
arceus x v52 btools and script hub<br />
|
61 |
-
arceus x v52 hydrogen alternative download<br />
|
62 |
-
arceus x v52 jjsploit comparison review<br />
|
63 |
-
arceus x v52 ios and mac release date<br />
|
64 |
-
arceus x v52 net energy gain in roblox<br />
|
65 |
-
arceus x v52 holy grail fusion experiment<br />
|
66 |
-
arceus x v52 execute pc scripts in android<br />
|
67 |
-
arceus x v52 best roblox mod menu for android<br />
|
68 |
-
arceus x v52 adopt me script download<br />
|
69 |
-
arceus x v52 anime adventures script download<br />
|
70 |
-
arceus x v52 bedwars script download<br />
|
71 |
-
arceus x v52 blox fruit script download<br />
|
72 |
-
arceus x v52 brookhaven rp script download<br />
|
73 |
-
arceus x v52 doors script download<br />
|
74 |
-
arceus x v52 livetopia script download<br />
|
75 |
-
arceus x v52 my restaurant script download<br />
|
76 |
-
arceus x v52 pet simulator x script download<br />
|
77 |
-
arceus x v52 safe and secure download link<br />
|
78 |
-
arceus x v52 verification process bypass<br />
|
79 |
-
arceus x v52 get key without human verification<br />
|
80 |
-
arceus x v52 update log and changelog<br />
|
81 |
-
arceus x v52 support and feedback forum<br />
|
82 |
-
arceus x v52 testimonials and reviews from users<br />
|
83 |
-
arceus x v52 pros and cons analysis<br />
|
84 |
-
arceus x v52 frequently asked questions and answers<br />
|
85 |
-
arceus x v52 troubleshooting and error fixing guide<br />
|
86 |
-
arceus x v52 compatibility and requirements list<br />
|
87 |
-
arceus x v52 development team and credits page<br />
|
88 |
-
arceus x v52 donation and sponsorship options<br />
|
89 |
-
arceus x v52 official website and social media links<br />
|
90 |
-
arceus x v52 discord server and community chat<br />
|
91 |
-
arceus x v52 youtube channel and video tutorials<br />
|
92 |
-
arceus x v52 blog and news updates page <br />
|
93 |
-
arceus x v52 coupon code and discount offer <br />
|
94 |
-
arceus x v52 affiliate program and referral link <br />
|
95 |
-
arceus x v52 premium version and benefits</p>
|
96 |
-
<ol>
|
97 |
-
<li>Open the Arceus X app and enter the key that you got from linkvertise.</li>
|
98 |
-
<li>Tap on the "Start" button and wait for the app to load.</li>
|
99 |
-
<li>Open Roblox and join any game that you want to mod.</li>
|
100 |
-
<li>Tap on the Arceus X icon that appears on your screen. This will open the floating menu of Arceus X.</li>
|
101 |
-
<li>You can drag the menu around or resize it as you wish.</li>
|
102 |
-
</ol>
|
103 |
-
<h3>How to execute scripts in Roblox</h3>
|
104 |
-
<p>To execute scripts in Roblox using Arceus X, you need to do the following:</p>
|
105 |
-
<ol>
|
106 |
-
<li>Open the floating menu of Arceus X and tap on the "Script Hub" button.</li>
|
107 |
-
<li>You will see a list of scripts for various games that you can use. You can also search for a specific script using the search bar.</li>
|
108 |
-
<li>Tap on the script that you want to use and then tap on the "Execute" button.</li>
|
109 |
-
<li>The script will run in the background and you will see a notification on your screen.</li>
|
110 |
-
<li>You can now enjoy the features of the script in your game.</li>
|
111 |
-
</ol>
|
112 |
-
<h2>Alternatives to Arceus X V52</h2>
|
113 |
-
<p>If you are looking for some alternatives to Arceus X V52, you can try these options:</p>
|
114 |
-
<h3>Hydrogen</h3>
|
115 |
-
<p>Hydrogen is another Roblox mod menu for Android that offers features such as LuaU Execution, Script Hub, Infinite Jump, Fly, Noclip, and more. It is also easy to use and has a user-friendly interface. You can download Hydrogen from this link.</p>
|
116 |
-
<h3>Pokemon ROM hacks</h3>
|
117 |
-
<p>If you are a fan of Pokemon games, you can also try some Pokemon ROM hacks, which are modified versions of the original games with new features, graphics, stories, and gameplay. Some of the best Pokemon ROM hacks are Pokemon Gaia, Pokemon Glazed, Pokemon Light Platinum, and Pokemon Ash Gray. You can download these ROM hacks from this link.</p>
|
118 |
-
<h2>Conclusion</h2>
|
119 |
-
<p>In conclusion, Arceus X V52 is one of the best Roblox mod menus for Android that allows you to use various features and scripts in Roblox games. It is easy to download and use, and it provides a lot of fun and excitement. However, you should be careful when using it as it may violate the terms of service of Roblox and get you banned. You should also be aware of the linkvertise ads that you need to complete before downloading it. If you don't like Arceus X V52, you can also try some alternatives such as Hydrogen or Pokemon ROM hacks.</p>
|
120 |
-
<h2>FAQs</h2>
|
121 |
-
<ul>
|
122 |
-
<li><b>Is Arceus X V52 safe?</b></li>
|
123 |
-
<p>Arceus X V52 is safe to use as long as you download it from the official website and don't use it maliciously or excessively. However, it may trigger some antivirus programs or get detected by Roblox's anti-cheat system. Therefore, you should use it at your own risk and discretion.</p>
|
124 |
-
<li><b>Is Arceus X V52 free?</b></li>
|
125 |
-
<p>Arceus X V52 is free to download and use, but you need to complete some linkvertise tasks before downloading it. These tasks may include watching a video, installing an app, or completing a survey. You can also pay a small fee to skip these tasks and get a direct download link.</p>
|
126 |
-
<li><b>Can I use Arceus X V52 on PC?</b></li>
|
127 |
-
<p>No, Arceus X V52 is only compatible with Android devices. If you want to use a Roblox mod menu on PC, you need to use a different exploit such as JJSploit or Synapse X.</p>
|
128 |
-
<li><b>Can I update Arceus X V52?</b></li>
|
129 |
-
<p>Yes, you can update Arceus X V52 whenever there is a new version available. You just need to follow the same steps as before and download the latest APK file from the website. You may need to complete some linkvertise tasks again or pay a fee to get the updated version.</p>
|
130 |
-
<li><b>What are some of the best scripts for Arceus X V52?</b></li>
|
131 |
-
<p>Some of the best scripts for Arceus X V52 are:</p>
|
132 |
-
<ul>
|
133 |
-
<li>Da Hood GUI: This script allows you to use features such as God Mode, Auto Farm, Teleport, Kill All, and more in the game Da Hood.</li>
|
134 |
-
<li>Project XL GUI: This script allows you to use features such as Auto Farm, Infinite Stamina, Infinite Mana, and more in the game Project XL.</li>
|
135 |
-
<li>Shindo Life GUI: This script allows you to use features such as Auto Farm, Infinite Spins, Infinite Scrolls, and more in the game Shindo Life.</li>
|
136 |
-
</ul></p> 401be4b1e0<br />
|
137 |
-
<br />
|
138 |
-
<br />
|
spaces/232labs/VToonify/vtoonify/model/stylegan/readme.md
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
# StyleGAN 2 in PyTorch
|
2 |
-
|
3 |
-
Implementation of Analyzing and Improving the Image Quality of StyleGAN (https://arxiv.org/abs/1912.04958) in PyTorch
|
4 |
-
|
5 |
-
Fork from [https://github.com/rosinality/stylegan2-pytorch](https://github.com/rosinality/stylegan2-pytorch)
|
6 |
-
|
7 |
-
In VToonify, we modify it to accept z+ latent codes.
|
spaces/4Taps/SadTalker/src/face3d/options/train_options.py
DELETED
@@ -1,53 +0,0 @@
|
|
1 |
-
"""This script contains the training options for Deep3DFaceRecon_pytorch
|
2 |
-
"""
|
3 |
-
|
4 |
-
from .base_options import BaseOptions
|
5 |
-
from util import util
|
6 |
-
|
7 |
-
class TrainOptions(BaseOptions):
|
8 |
-
"""This class includes training options.
|
9 |
-
|
10 |
-
It also includes shared options defined in BaseOptions.
|
11 |
-
"""
|
12 |
-
|
13 |
-
def initialize(self, parser):
|
14 |
-
parser = BaseOptions.initialize(self, parser)
|
15 |
-
# dataset parameters
|
16 |
-
# for train
|
17 |
-
parser.add_argument('--data_root', type=str, default='./', help='dataset root')
|
18 |
-
parser.add_argument('--flist', type=str, default='datalist/train/masks.txt', help='list of mask names of training set')
|
19 |
-
parser.add_argument('--batch_size', type=int, default=32)
|
20 |
-
parser.add_argument('--dataset_mode', type=str, default='flist', help='chooses how datasets are loaded. [None | flist]')
|
21 |
-
parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
|
22 |
-
parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
|
23 |
-
parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
|
24 |
-
parser.add_argument('--preprocess', type=str, default='shift_scale_rot_flip', help='scaling and cropping of images at load time [shift_scale_rot_flip | shift_scale | shift | shift_rot_flip ]')
|
25 |
-
parser.add_argument('--use_aug', type=util.str2bool, nargs='?', const=True, default=True, help='whether use data augmentation')
|
26 |
-
|
27 |
-
# for val
|
28 |
-
parser.add_argument('--flist_val', type=str, default='datalist/val/masks.txt', help='list of mask names of val set')
|
29 |
-
parser.add_argument('--batch_size_val', type=int, default=32)
|
30 |
-
|
31 |
-
|
32 |
-
# visualization parameters
|
33 |
-
parser.add_argument('--display_freq', type=int, default=1000, help='frequency of showing training results on screen')
|
34 |
-
parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
|
35 |
-
|
36 |
-
# network saving and loading parameters
|
37 |
-
parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
|
38 |
-
parser.add_argument('--save_epoch_freq', type=int, default=1, help='frequency of saving checkpoints at the end of epochs')
|
39 |
-
parser.add_argument('--evaluation_freq', type=int, default=5000, help='evaluation freq')
|
40 |
-
parser.add_argument('--save_by_iter', action='store_true', help='whether saves model by iteration')
|
41 |
-
parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
|
42 |
-
parser.add_argument('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
|
43 |
-
parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
|
44 |
-
parser.add_argument('--pretrained_name', type=str, default=None, help='resume training from another checkpoint')
|
45 |
-
|
46 |
-
# training parameters
|
47 |
-
parser.add_argument('--n_epochs', type=int, default=20, help='number of epochs with the initial learning rate')
|
48 |
-
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate for adam')
|
49 |
-
parser.add_argument('--lr_policy', type=str, default='step', help='learning rate policy. [linear | step | plateau | cosine]')
|
50 |
-
parser.add_argument('--lr_decay_epochs', type=int, default=10, help='multiply by a gamma every lr_decay_epochs epoches')
|
51 |
-
|
52 |
-
self.isTrain = True
|
53 |
-
return parser
|
spaces/4Taps/SadTalker/src/face3d/visualize.py
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
# check the sync of 3dmm feature and the audio
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
from src.face3d.models.bfm import ParametricFaceModel
|
5 |
-
from src.face3d.models.facerecon_model import FaceReconModel
|
6 |
-
import torch
|
7 |
-
import subprocess, platform
|
8 |
-
import scipy.io as scio
|
9 |
-
from tqdm import tqdm
|
10 |
-
|
11 |
-
# draft
|
12 |
-
def gen_composed_video(args, device, first_frame_coeff, coeff_path, audio_path, save_path, exp_dim=64):
|
13 |
-
|
14 |
-
coeff_first = scio.loadmat(first_frame_coeff)['full_3dmm']
|
15 |
-
|
16 |
-
coeff_pred = scio.loadmat(coeff_path)['coeff_3dmm']
|
17 |
-
|
18 |
-
coeff_full = np.repeat(coeff_first, coeff_pred.shape[0], axis=0) # 257
|
19 |
-
|
20 |
-
coeff_full[:, 80:144] = coeff_pred[:, 0:64]
|
21 |
-
coeff_full[:, 224:227] = coeff_pred[:, 64:67] # 3 dim translation
|
22 |
-
coeff_full[:, 254:] = coeff_pred[:, 67:] # 3 dim translation
|
23 |
-
|
24 |
-
tmp_video_path = '/tmp/face3dtmp.mp4'
|
25 |
-
|
26 |
-
facemodel = FaceReconModel(args)
|
27 |
-
|
28 |
-
video = cv2.VideoWriter(tmp_video_path, cv2.VideoWriter_fourcc(*'mp4v'), 25, (224, 224))
|
29 |
-
|
30 |
-
for k in tqdm(range(coeff_pred.shape[0]), 'face3d rendering:'):
|
31 |
-
cur_coeff_full = torch.tensor(coeff_full[k:k+1], device=device)
|
32 |
-
|
33 |
-
facemodel.forward(cur_coeff_full, device)
|
34 |
-
|
35 |
-
predicted_landmark = facemodel.pred_lm # TODO.
|
36 |
-
predicted_landmark = predicted_landmark.cpu().numpy().squeeze()
|
37 |
-
|
38 |
-
rendered_img = facemodel.pred_face
|
39 |
-
rendered_img = 255. * rendered_img.cpu().numpy().squeeze().transpose(1,2,0)
|
40 |
-
out_img = rendered_img[:, :, :3].astype(np.uint8)
|
41 |
-
|
42 |
-
video.write(np.uint8(out_img[:,:,::-1]))
|
43 |
-
|
44 |
-
video.release()
|
45 |
-
|
46 |
-
command = 'ffmpeg -v quiet -y -i {} -i {} -strict -2 -q:v 1 {}'.format(audio_path, tmp_video_path, save_path)
|
47 |
-
subprocess.call(command, shell=platform.system() != 'Windows')
|
48 |
-
|
|
spaces/7hao/bingo/src/components/ui/tooltip.tsx
DELETED
@@ -1,30 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import * as React from 'react'
|
4 |
-
import * as TooltipPrimitive from '@radix-ui/react-tooltip'
|
5 |
-
|
6 |
-
import { cn } from '@/lib/utils'
|
7 |
-
|
8 |
-
const TooltipProvider = TooltipPrimitive.Provider
|
9 |
-
|
10 |
-
const Tooltip = TooltipPrimitive.Root
|
11 |
-
|
12 |
-
const TooltipTrigger = TooltipPrimitive.Trigger
|
13 |
-
|
14 |
-
const TooltipContent = React.forwardRef<
|
15 |
-
React.ElementRef<typeof TooltipPrimitive.Content>,
|
16 |
-
React.ComponentPropsWithoutRef<typeof TooltipPrimitive.Content>
|
17 |
-
>(({ className, sideOffset = 4, ...props }, ref) => (
|
18 |
-
<TooltipPrimitive.Content
|
19 |
-
ref={ref}
|
20 |
-
sideOffset={sideOffset}
|
21 |
-
className={cn(
|
22 |
-
'z-50 overflow-hidden rounded-md border bg-popover px-3 py-1.5 text-xs font-medium text-popover-foreground shadow-md animate-in fade-in-50 data-[side=bottom]:slide-in-from-top-1 data-[side=left]:slide-in-from-right-1 data-[side=right]:slide-in-from-left-1 data-[side=top]:slide-in-from-bottom-1',
|
23 |
-
className
|
24 |
-
)}
|
25 |
-
{...props}
|
26 |
-
/>
|
27 |
-
))
|
28 |
-
TooltipContent.displayName = TooltipPrimitive.Content.displayName
|
29 |
-
|
30 |
-
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }
|
spaces/AI-Hobbyist/Hoyo-RVC/train/process_ckpt.py
DELETED
@@ -1,259 +0,0 @@
|
|
1 |
-
import torch, traceback, os, pdb, sys
|
2 |
-
|
3 |
-
now_dir = os.getcwd()
|
4 |
-
sys.path.append(now_dir)
|
5 |
-
from collections import OrderedDict
|
6 |
-
from i18n import I18nAuto
|
7 |
-
|
8 |
-
i18n = I18nAuto()
|
9 |
-
|
10 |
-
|
11 |
-
def savee(ckpt, sr, if_f0, name, epoch, version, hps):
|
12 |
-
try:
|
13 |
-
opt = OrderedDict()
|
14 |
-
opt["weight"] = {}
|
15 |
-
for key in ckpt.keys():
|
16 |
-
if "enc_q" in key:
|
17 |
-
continue
|
18 |
-
opt["weight"][key] = ckpt[key].half()
|
19 |
-
opt["config"] = [
|
20 |
-
hps.data.filter_length // 2 + 1,
|
21 |
-
32,
|
22 |
-
hps.model.inter_channels,
|
23 |
-
hps.model.hidden_channels,
|
24 |
-
hps.model.filter_channels,
|
25 |
-
hps.model.n_heads,
|
26 |
-
hps.model.n_layers,
|
27 |
-
hps.model.kernel_size,
|
28 |
-
hps.model.p_dropout,
|
29 |
-
hps.model.resblock,
|
30 |
-
hps.model.resblock_kernel_sizes,
|
31 |
-
hps.model.resblock_dilation_sizes,
|
32 |
-
hps.model.upsample_rates,
|
33 |
-
hps.model.upsample_initial_channel,
|
34 |
-
hps.model.upsample_kernel_sizes,
|
35 |
-
hps.model.spk_embed_dim,
|
36 |
-
hps.model.gin_channels,
|
37 |
-
hps.data.sampling_rate,
|
38 |
-
]
|
39 |
-
opt["info"] = "%sepoch" % epoch
|
40 |
-
opt["sr"] = sr
|
41 |
-
opt["f0"] = if_f0
|
42 |
-
opt["version"] = version
|
43 |
-
torch.save(opt, "weights/%s.pth" % name)
|
44 |
-
return "Success."
|
45 |
-
except:
|
46 |
-
return traceback.format_exc()
|
47 |
-
|
48 |
-
|
49 |
-
def show_info(path):
|
50 |
-
try:
|
51 |
-
a = torch.load(path, map_location="cpu")
|
52 |
-
return "模型信息:%s\n采样率:%s\n模型是否输入音高引导:%s\n版本:%s" % (
|
53 |
-
a.get("info", "None"),
|
54 |
-
a.get("sr", "None"),
|
55 |
-
a.get("f0", "None"),
|
56 |
-
a.get("version", "None"),
|
57 |
-
)
|
58 |
-
except:
|
59 |
-
return traceback.format_exc()
|
60 |
-
|
61 |
-
|
62 |
-
def extract_small_model(path, name, sr, if_f0, info, version):
|
63 |
-
try:
|
64 |
-
ckpt = torch.load(path, map_location="cpu")
|
65 |
-
if "model" in ckpt:
|
66 |
-
ckpt = ckpt["model"]
|
67 |
-
opt = OrderedDict()
|
68 |
-
opt["weight"] = {}
|
69 |
-
for key in ckpt.keys():
|
70 |
-
if "enc_q" in key:
|
71 |
-
continue
|
72 |
-
opt["weight"][key] = ckpt[key].half()
|
73 |
-
if sr == "40k":
|
74 |
-
opt["config"] = [
|
75 |
-
1025,
|
76 |
-
32,
|
77 |
-
192,
|
78 |
-
192,
|
79 |
-
768,
|
80 |
-
2,
|
81 |
-
6,
|
82 |
-
3,
|
83 |
-
0,
|
84 |
-
"1",
|
85 |
-
[3, 7, 11],
|
86 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
87 |
-
[10, 10, 2, 2],
|
88 |
-
512,
|
89 |
-
[16, 16, 4, 4],
|
90 |
-
109,
|
91 |
-
256,
|
92 |
-
40000,
|
93 |
-
]
|
94 |
-
elif sr == "48k":
|
95 |
-
if(version=="v1"):
|
96 |
-
opt["config"] = [
|
97 |
-
1025,
|
98 |
-
32,
|
99 |
-
192,
|
100 |
-
192,
|
101 |
-
768,
|
102 |
-
2,
|
103 |
-
6,
|
104 |
-
3,
|
105 |
-
0,
|
106 |
-
"1",
|
107 |
-
[3, 7, 11],
|
108 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
109 |
-
[10, 6, 2, 2, 2],
|
110 |
-
512,
|
111 |
-
[16, 16, 4, 4, 4],
|
112 |
-
109,
|
113 |
-
256,
|
114 |
-
48000,
|
115 |
-
]
|
116 |
-
else:
|
117 |
-
opt["config"] = [
|
118 |
-
1025,
|
119 |
-
32,
|
120 |
-
192,
|
121 |
-
192,
|
122 |
-
768,
|
123 |
-
2,
|
124 |
-
6,
|
125 |
-
3,
|
126 |
-
0,
|
127 |
-
"1",
|
128 |
-
[3, 7, 11],
|
129 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
130 |
-
[12,10,2,2],
|
131 |
-
512,
|
132 |
-
[24,20,4,4],
|
133 |
-
109,
|
134 |
-
256,
|
135 |
-
48000,
|
136 |
-
]
|
137 |
-
elif sr == "32k":
|
138 |
-
if(version=="v1"):
|
139 |
-
opt["config"] = [
|
140 |
-
513,
|
141 |
-
32,
|
142 |
-
192,
|
143 |
-
192,
|
144 |
-
768,
|
145 |
-
2,
|
146 |
-
6,
|
147 |
-
3,
|
148 |
-
0,
|
149 |
-
"1",
|
150 |
-
[3, 7, 11],
|
151 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
152 |
-
[10, 4, 2, 2, 2],
|
153 |
-
512,
|
154 |
-
[16, 16, 4, 4, 4],
|
155 |
-
109,
|
156 |
-
256,
|
157 |
-
32000,
|
158 |
-
]
|
159 |
-
else:
|
160 |
-
opt["config"] = [
|
161 |
-
513,
|
162 |
-
32,
|
163 |
-
192,
|
164 |
-
192,
|
165 |
-
768,
|
166 |
-
2,
|
167 |
-
6,
|
168 |
-
3,
|
169 |
-
0,
|
170 |
-
"1",
|
171 |
-
[3, 7, 11],
|
172 |
-
[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
|
173 |
-
[10,8,2,2],
|
174 |
-
512,
|
175 |
-
[20,16,4,4],
|
176 |
-
109,
|
177 |
-
256,
|
178 |
-
32000,
|
179 |
-
]
|
180 |
-
if info == "":
|
181 |
-
info = "Extracted model."
|
182 |
-
opt["info"] = info
|
183 |
-
opt["version"] = version
|
184 |
-
opt["sr"] = sr
|
185 |
-
opt["f0"] = int(if_f0)
|
186 |
-
torch.save(opt, "weights/%s.pth" % name)
|
187 |
-
return "Success."
|
188 |
-
except:
|
189 |
-
return traceback.format_exc()
|
190 |
-
|
191 |
-
|
192 |
-
def change_info(path, info, name):
|
193 |
-
try:
|
194 |
-
ckpt = torch.load(path, map_location="cpu")
|
195 |
-
ckpt["info"] = info
|
196 |
-
if name == "":
|
197 |
-
name = os.path.basename(path)
|
198 |
-
torch.save(ckpt, "weights/%s" % name)
|
199 |
-
return "Success."
|
200 |
-
except:
|
201 |
-
return traceback.format_exc()
|
202 |
-
|
203 |
-
|
204 |
-
def merge(path1, path2, alpha1, sr, f0, info, name, version):
|
205 |
-
try:
|
206 |
-
|
207 |
-
def extract(ckpt):
|
208 |
-
a = ckpt["model"]
|
209 |
-
opt = OrderedDict()
|
210 |
-
opt["weight"] = {}
|
211 |
-
for key in a.keys():
|
212 |
-
if "enc_q" in key:
|
213 |
-
continue
|
214 |
-
opt["weight"][key] = a[key]
|
215 |
-
return opt
|
216 |
-
|
217 |
-
ckpt1 = torch.load(path1, map_location="cpu")
|
218 |
-
ckpt2 = torch.load(path2, map_location="cpu")
|
219 |
-
cfg = ckpt1["config"]
|
220 |
-
if "model" in ckpt1:
|
221 |
-
ckpt1 = extract(ckpt1)
|
222 |
-
else:
|
223 |
-
ckpt1 = ckpt1["weight"]
|
224 |
-
if "model" in ckpt2:
|
225 |
-
ckpt2 = extract(ckpt2)
|
226 |
-
else:
|
227 |
-
ckpt2 = ckpt2["weight"]
|
228 |
-
if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())):
|
229 |
-
return "Fail to merge the models. The model architectures are not the same."
|
230 |
-
opt = OrderedDict()
|
231 |
-
opt["weight"] = {}
|
232 |
-
for key in ckpt1.keys():
|
233 |
-
# try:
|
234 |
-
if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape:
|
235 |
-
min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0])
|
236 |
-
opt["weight"][key] = (
|
237 |
-
alpha1 * (ckpt1[key][:min_shape0].float())
|
238 |
-
+ (1 - alpha1) * (ckpt2[key][:min_shape0].float())
|
239 |
-
).half()
|
240 |
-
else:
|
241 |
-
opt["weight"][key] = (
|
242 |
-
alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float())
|
243 |
-
).half()
|
244 |
-
# except:
|
245 |
-
# pdb.set_trace()
|
246 |
-
opt["config"] = cfg
|
247 |
-
"""
|
248 |
-
if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000]
|
249 |
-
elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000]
|
250 |
-
elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000]
|
251 |
-
"""
|
252 |
-
opt["sr"] = sr
|
253 |
-
opt["f0"] = 1 if f0 == i18n("是") else 0
|
254 |
-
opt["version"] = version
|
255 |
-
opt["info"] = info
|
256 |
-
torch.save(opt, "weights/%s.pth" % name)
|
257 |
-
return "Success."
|
258 |
-
except:
|
259 |
-
return traceback.format_exc()
|
spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/app.py
DELETED
@@ -1,45 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import gradio as gr
|
3 |
-
import IPython
|
4 |
-
import streamlit as st
|
5 |
-
import streamlit.components.v1 as components
|
6 |
-
from IPython.display import IFrame
|
7 |
-
|
8 |
-
src='' # URL parameter to change the iframe url
|
9 |
-
def SetIframeURL(option_selected):
|
10 |
-
if (option_selected=='Collager'):
|
11 |
-
src='https://www.artbreeder.com/'
|
12 |
-
if (option_selected=='Midjourney'):
|
13 |
-
src='https://www.midjourney.com/'
|
14 |
-
if (option_selected=='DreamStudio'):
|
15 |
-
src='https://beta.dreamstudio.ai/'
|
16 |
-
if (option_selected=='NightCafe'):
|
17 |
-
src='https://creator.nightcafe.studio/'
|
18 |
-
if (option_selected=='RunwayML'):
|
19 |
-
src='https://app.runwayml.com/'
|
20 |
-
if (option_selected=='ArtFromTextandImages'):
|
21 |
-
src='https://huggingface.co/spaces/awacke1/Art-from-Text-and-Images'
|
22 |
-
if (option_selected=='Boomy'):
|
23 |
-
src='https://boomy.com/'
|
24 |
-
|
25 |
-
width = st.sidebar.slider("Width", 200, 1500, 800, 100)
|
26 |
-
height = st.sidebar.slider("Height", 200, 1500, 900, 100)
|
27 |
-
st.components.v1.iframe(src, width, height, scrolling=True)
|
28 |
-
|
29 |
-
try:
|
30 |
-
options = ['Midjourney', 'RunwayML', 'Boomy']
|
31 |
-
query_params = st.experimental_get_query_params()
|
32 |
-
query_option = query_params['option'][0] #throws an exception when visiting http://host:port
|
33 |
-
option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option))
|
34 |
-
if option_selected:
|
35 |
-
st.experimental_set_query_params(option=option_selected)
|
36 |
-
SetIframeURL(option_selected)
|
37 |
-
except:
|
38 |
-
options = ['Midjourney', 'RunwayML', 'Boomy']
|
39 |
-
st.experimental_set_query_params(option=options[1]) # defaults to 1
|
40 |
-
query_params = st.experimental_get_query_params()
|
41 |
-
query_option = query_params['option'][0]
|
42 |
-
option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option))
|
43 |
-
if option_selected:
|
44 |
-
st.experimental_set_query_params(option=option_selected)
|
45 |
-
SetIframeURL(option_selected)
|
spaces/AIConsultant/MusicGen/audiocraft/losses/sisnr.py
DELETED
@@ -1,92 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
import math
|
8 |
-
import typing as tp
|
9 |
-
|
10 |
-
import torch
|
11 |
-
from torch import nn
|
12 |
-
from torch.nn import functional as F
|
13 |
-
|
14 |
-
|
15 |
-
def _unfold(a: torch.Tensor, kernel_size: int, stride: int) -> torch.Tensor:
|
16 |
-
"""Given input of size [*OT, T], output Tensor of size [*OT, F, K]
|
17 |
-
with K the kernel size, by extracting frames with the given stride.
|
18 |
-
This will pad the input so that `F = ceil(T / K)`.
|
19 |
-
see https://github.com/pytorch/pytorch/issues/60466
|
20 |
-
"""
|
21 |
-
*shape, length = a.shape
|
22 |
-
n_frames = math.ceil(length / stride)
|
23 |
-
tgt_length = (n_frames - 1) * stride + kernel_size
|
24 |
-
a = F.pad(a, (0, tgt_length - length))
|
25 |
-
strides = list(a.stride())
|
26 |
-
assert strides[-1] == 1, "data should be contiguous"
|
27 |
-
strides = strides[:-1] + [stride, 1]
|
28 |
-
return a.as_strided([*shape, n_frames, kernel_size], strides)
|
29 |
-
|
30 |
-
|
31 |
-
def _center(x: torch.Tensor) -> torch.Tensor:
|
32 |
-
return x - x.mean(-1, True)
|
33 |
-
|
34 |
-
|
35 |
-
def _norm2(x: torch.Tensor) -> torch.Tensor:
|
36 |
-
return x.pow(2).sum(-1, True)
|
37 |
-
|
38 |
-
|
39 |
-
class SISNR(nn.Module):
|
40 |
-
"""SISNR loss.
|
41 |
-
|
42 |
-
Input should be [B, C, T], output is scalar.
|
43 |
-
|
44 |
-
Args:
|
45 |
-
sample_rate (int): Sample rate.
|
46 |
-
segment (float or None): Evaluate on chunks of that many seconds. If None, evaluate on
|
47 |
-
entire audio only.
|
48 |
-
overlap (float): Overlap between chunks, i.e. 0.5 = 50 % overlap.
|
49 |
-
epsilon (float): Epsilon value for numerical stability.
|
50 |
-
"""
|
51 |
-
def __init__(
|
52 |
-
self,
|
53 |
-
sample_rate: int = 16000,
|
54 |
-
segment: tp.Optional[float] = 20,
|
55 |
-
overlap: float = 0.5,
|
56 |
-
epsilon: float = torch.finfo(torch.float32).eps,
|
57 |
-
):
|
58 |
-
super().__init__()
|
59 |
-
self.sample_rate = sample_rate
|
60 |
-
self.segment = segment
|
61 |
-
self.overlap = overlap
|
62 |
-
self.epsilon = epsilon
|
63 |
-
|
64 |
-
def forward(self, out_sig: torch.Tensor, ref_sig: torch.Tensor) -> torch.Tensor:
|
65 |
-
B, C, T = ref_sig.shape
|
66 |
-
assert ref_sig.shape == out_sig.shape
|
67 |
-
|
68 |
-
if self.segment is None:
|
69 |
-
frame = T
|
70 |
-
stride = T
|
71 |
-
else:
|
72 |
-
frame = int(self.segment * self.sample_rate)
|
73 |
-
stride = int(frame * (1 - self.overlap))
|
74 |
-
|
75 |
-
epsilon = self.epsilon * frame # make epsilon prop to frame size.
|
76 |
-
|
77 |
-
gt = _unfold(ref_sig, frame, stride)
|
78 |
-
est = _unfold(out_sig, frame, stride)
|
79 |
-
if self.segment is None:
|
80 |
-
assert gt.shape[-1] == 1
|
81 |
-
|
82 |
-
gt = _center(gt)
|
83 |
-
est = _center(est)
|
84 |
-
dot = torch.einsum("bcft,bcft->bcf", gt, est)
|
85 |
-
|
86 |
-
proj = dot[:, :, :, None] * gt / (epsilon + _norm2(gt))
|
87 |
-
noise = est - proj
|
88 |
-
|
89 |
-
sisnr = 10 * (
|
90 |
-
torch.log10(epsilon + _norm2(proj)) - torch.log10(epsilon + _norm2(noise))
|
91 |
-
)
|
92 |
-
return -1 * sisnr[..., 0].mean()
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/midas/midas/vit.py
DELETED
@@ -1,491 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import timm
|
4 |
-
import types
|
5 |
-
import math
|
6 |
-
import torch.nn.functional as F
|
7 |
-
|
8 |
-
|
9 |
-
class Slice(nn.Module):
|
10 |
-
def __init__(self, start_index=1):
|
11 |
-
super(Slice, self).__init__()
|
12 |
-
self.start_index = start_index
|
13 |
-
|
14 |
-
def forward(self, x):
|
15 |
-
return x[:, self.start_index :]
|
16 |
-
|
17 |
-
|
18 |
-
class AddReadout(nn.Module):
|
19 |
-
def __init__(self, start_index=1):
|
20 |
-
super(AddReadout, self).__init__()
|
21 |
-
self.start_index = start_index
|
22 |
-
|
23 |
-
def forward(self, x):
|
24 |
-
if self.start_index == 2:
|
25 |
-
readout = (x[:, 0] + x[:, 1]) / 2
|
26 |
-
else:
|
27 |
-
readout = x[:, 0]
|
28 |
-
return x[:, self.start_index :] + readout.unsqueeze(1)
|
29 |
-
|
30 |
-
|
31 |
-
class ProjectReadout(nn.Module):
|
32 |
-
def __init__(self, in_features, start_index=1):
|
33 |
-
super(ProjectReadout, self).__init__()
|
34 |
-
self.start_index = start_index
|
35 |
-
|
36 |
-
self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
|
37 |
-
|
38 |
-
def forward(self, x):
|
39 |
-
readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
|
40 |
-
features = torch.cat((x[:, self.start_index :], readout), -1)
|
41 |
-
|
42 |
-
return self.project(features)
|
43 |
-
|
44 |
-
|
45 |
-
class Transpose(nn.Module):
|
46 |
-
def __init__(self, dim0, dim1):
|
47 |
-
super(Transpose, self).__init__()
|
48 |
-
self.dim0 = dim0
|
49 |
-
self.dim1 = dim1
|
50 |
-
|
51 |
-
def forward(self, x):
|
52 |
-
x = x.transpose(self.dim0, self.dim1)
|
53 |
-
return x
|
54 |
-
|
55 |
-
|
56 |
-
def forward_vit(pretrained, x):
|
57 |
-
b, c, h, w = x.shape
|
58 |
-
|
59 |
-
glob = pretrained.model.forward_flex(x)
|
60 |
-
|
61 |
-
layer_1 = pretrained.activations["1"]
|
62 |
-
layer_2 = pretrained.activations["2"]
|
63 |
-
layer_3 = pretrained.activations["3"]
|
64 |
-
layer_4 = pretrained.activations["4"]
|
65 |
-
|
66 |
-
layer_1 = pretrained.act_postprocess1[0:2](layer_1)
|
67 |
-
layer_2 = pretrained.act_postprocess2[0:2](layer_2)
|
68 |
-
layer_3 = pretrained.act_postprocess3[0:2](layer_3)
|
69 |
-
layer_4 = pretrained.act_postprocess4[0:2](layer_4)
|
70 |
-
|
71 |
-
unflatten = nn.Sequential(
|
72 |
-
nn.Unflatten(
|
73 |
-
2,
|
74 |
-
torch.Size(
|
75 |
-
[
|
76 |
-
h // pretrained.model.patch_size[1],
|
77 |
-
w // pretrained.model.patch_size[0],
|
78 |
-
]
|
79 |
-
),
|
80 |
-
)
|
81 |
-
)
|
82 |
-
|
83 |
-
if layer_1.ndim == 3:
|
84 |
-
layer_1 = unflatten(layer_1)
|
85 |
-
if layer_2.ndim == 3:
|
86 |
-
layer_2 = unflatten(layer_2)
|
87 |
-
if layer_3.ndim == 3:
|
88 |
-
layer_3 = unflatten(layer_3)
|
89 |
-
if layer_4.ndim == 3:
|
90 |
-
layer_4 = unflatten(layer_4)
|
91 |
-
|
92 |
-
layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
|
93 |
-
layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
|
94 |
-
layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
|
95 |
-
layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
|
96 |
-
|
97 |
-
return layer_1, layer_2, layer_3, layer_4
|
98 |
-
|
99 |
-
|
100 |
-
def _resize_pos_embed(self, posemb, gs_h, gs_w):
|
101 |
-
posemb_tok, posemb_grid = (
|
102 |
-
posemb[:, : self.start_index],
|
103 |
-
posemb[0, self.start_index :],
|
104 |
-
)
|
105 |
-
|
106 |
-
gs_old = int(math.sqrt(len(posemb_grid)))
|
107 |
-
|
108 |
-
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
|
109 |
-
posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
|
110 |
-
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
|
111 |
-
|
112 |
-
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
|
113 |
-
|
114 |
-
return posemb
|
115 |
-
|
116 |
-
|
117 |
-
def forward_flex(self, x):
|
118 |
-
b, c, h, w = x.shape
|
119 |
-
|
120 |
-
pos_embed = self._resize_pos_embed(
|
121 |
-
self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
|
122 |
-
)
|
123 |
-
|
124 |
-
B = x.shape[0]
|
125 |
-
|
126 |
-
if hasattr(self.patch_embed, "backbone"):
|
127 |
-
x = self.patch_embed.backbone(x)
|
128 |
-
if isinstance(x, (list, tuple)):
|
129 |
-
x = x[-1] # last feature if backbone outputs list/tuple of features
|
130 |
-
|
131 |
-
x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
|
132 |
-
|
133 |
-
if getattr(self, "dist_token", None) is not None:
|
134 |
-
cls_tokens = self.cls_token.expand(
|
135 |
-
B, -1, -1
|
136 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
137 |
-
dist_token = self.dist_token.expand(B, -1, -1)
|
138 |
-
x = torch.cat((cls_tokens, dist_token, x), dim=1)
|
139 |
-
else:
|
140 |
-
cls_tokens = self.cls_token.expand(
|
141 |
-
B, -1, -1
|
142 |
-
) # stole cls_tokens impl from Phil Wang, thanks
|
143 |
-
x = torch.cat((cls_tokens, x), dim=1)
|
144 |
-
|
145 |
-
x = x + pos_embed
|
146 |
-
x = self.pos_drop(x)
|
147 |
-
|
148 |
-
for blk in self.blocks:
|
149 |
-
x = blk(x)
|
150 |
-
|
151 |
-
x = self.norm(x)
|
152 |
-
|
153 |
-
return x
|
154 |
-
|
155 |
-
|
156 |
-
activations = {}
|
157 |
-
|
158 |
-
|
159 |
-
def get_activation(name):
|
160 |
-
def hook(model, input, output):
|
161 |
-
activations[name] = output
|
162 |
-
|
163 |
-
return hook
|
164 |
-
|
165 |
-
|
166 |
-
def get_readout_oper(vit_features, features, use_readout, start_index=1):
|
167 |
-
if use_readout == "ignore":
|
168 |
-
readout_oper = [Slice(start_index)] * len(features)
|
169 |
-
elif use_readout == "add":
|
170 |
-
readout_oper = [AddReadout(start_index)] * len(features)
|
171 |
-
elif use_readout == "project":
|
172 |
-
readout_oper = [
|
173 |
-
ProjectReadout(vit_features, start_index) for out_feat in features
|
174 |
-
]
|
175 |
-
else:
|
176 |
-
assert (
|
177 |
-
False
|
178 |
-
), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
|
179 |
-
|
180 |
-
return readout_oper
|
181 |
-
|
182 |
-
|
183 |
-
def _make_vit_b16_backbone(
|
184 |
-
model,
|
185 |
-
features=[96, 192, 384, 768],
|
186 |
-
size=[384, 384],
|
187 |
-
hooks=[2, 5, 8, 11],
|
188 |
-
vit_features=768,
|
189 |
-
use_readout="ignore",
|
190 |
-
start_index=1,
|
191 |
-
):
|
192 |
-
pretrained = nn.Module()
|
193 |
-
|
194 |
-
pretrained.model = model
|
195 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
196 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
197 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
198 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
199 |
-
|
200 |
-
pretrained.activations = activations
|
201 |
-
|
202 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
203 |
-
|
204 |
-
# 32, 48, 136, 384
|
205 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
206 |
-
readout_oper[0],
|
207 |
-
Transpose(1, 2),
|
208 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
209 |
-
nn.Conv2d(
|
210 |
-
in_channels=vit_features,
|
211 |
-
out_channels=features[0],
|
212 |
-
kernel_size=1,
|
213 |
-
stride=1,
|
214 |
-
padding=0,
|
215 |
-
),
|
216 |
-
nn.ConvTranspose2d(
|
217 |
-
in_channels=features[0],
|
218 |
-
out_channels=features[0],
|
219 |
-
kernel_size=4,
|
220 |
-
stride=4,
|
221 |
-
padding=0,
|
222 |
-
bias=True,
|
223 |
-
dilation=1,
|
224 |
-
groups=1,
|
225 |
-
),
|
226 |
-
)
|
227 |
-
|
228 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
229 |
-
readout_oper[1],
|
230 |
-
Transpose(1, 2),
|
231 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
232 |
-
nn.Conv2d(
|
233 |
-
in_channels=vit_features,
|
234 |
-
out_channels=features[1],
|
235 |
-
kernel_size=1,
|
236 |
-
stride=1,
|
237 |
-
padding=0,
|
238 |
-
),
|
239 |
-
nn.ConvTranspose2d(
|
240 |
-
in_channels=features[1],
|
241 |
-
out_channels=features[1],
|
242 |
-
kernel_size=2,
|
243 |
-
stride=2,
|
244 |
-
padding=0,
|
245 |
-
bias=True,
|
246 |
-
dilation=1,
|
247 |
-
groups=1,
|
248 |
-
),
|
249 |
-
)
|
250 |
-
|
251 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
252 |
-
readout_oper[2],
|
253 |
-
Transpose(1, 2),
|
254 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
255 |
-
nn.Conv2d(
|
256 |
-
in_channels=vit_features,
|
257 |
-
out_channels=features[2],
|
258 |
-
kernel_size=1,
|
259 |
-
stride=1,
|
260 |
-
padding=0,
|
261 |
-
),
|
262 |
-
)
|
263 |
-
|
264 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
265 |
-
readout_oper[3],
|
266 |
-
Transpose(1, 2),
|
267 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
268 |
-
nn.Conv2d(
|
269 |
-
in_channels=vit_features,
|
270 |
-
out_channels=features[3],
|
271 |
-
kernel_size=1,
|
272 |
-
stride=1,
|
273 |
-
padding=0,
|
274 |
-
),
|
275 |
-
nn.Conv2d(
|
276 |
-
in_channels=features[3],
|
277 |
-
out_channels=features[3],
|
278 |
-
kernel_size=3,
|
279 |
-
stride=2,
|
280 |
-
padding=1,
|
281 |
-
),
|
282 |
-
)
|
283 |
-
|
284 |
-
pretrained.model.start_index = start_index
|
285 |
-
pretrained.model.patch_size = [16, 16]
|
286 |
-
|
287 |
-
# We inject this function into the VisionTransformer instances so that
|
288 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
289 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
290 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
291 |
-
_resize_pos_embed, pretrained.model
|
292 |
-
)
|
293 |
-
|
294 |
-
return pretrained
|
295 |
-
|
296 |
-
|
297 |
-
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
|
298 |
-
model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
|
299 |
-
|
300 |
-
hooks = [5, 11, 17, 23] if hooks == None else hooks
|
301 |
-
return _make_vit_b16_backbone(
|
302 |
-
model,
|
303 |
-
features=[256, 512, 1024, 1024],
|
304 |
-
hooks=hooks,
|
305 |
-
vit_features=1024,
|
306 |
-
use_readout=use_readout,
|
307 |
-
)
|
308 |
-
|
309 |
-
|
310 |
-
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
|
311 |
-
model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
|
312 |
-
|
313 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
314 |
-
return _make_vit_b16_backbone(
|
315 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
316 |
-
)
|
317 |
-
|
318 |
-
|
319 |
-
def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
|
320 |
-
model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
|
321 |
-
|
322 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
323 |
-
return _make_vit_b16_backbone(
|
324 |
-
model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
|
325 |
-
)
|
326 |
-
|
327 |
-
|
328 |
-
def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
|
329 |
-
model = timm.create_model(
|
330 |
-
"vit_deit_base_distilled_patch16_384", pretrained=pretrained
|
331 |
-
)
|
332 |
-
|
333 |
-
hooks = [2, 5, 8, 11] if hooks == None else hooks
|
334 |
-
return _make_vit_b16_backbone(
|
335 |
-
model,
|
336 |
-
features=[96, 192, 384, 768],
|
337 |
-
hooks=hooks,
|
338 |
-
use_readout=use_readout,
|
339 |
-
start_index=2,
|
340 |
-
)
|
341 |
-
|
342 |
-
|
343 |
-
def _make_vit_b_rn50_backbone(
|
344 |
-
model,
|
345 |
-
features=[256, 512, 768, 768],
|
346 |
-
size=[384, 384],
|
347 |
-
hooks=[0, 1, 8, 11],
|
348 |
-
vit_features=768,
|
349 |
-
use_vit_only=False,
|
350 |
-
use_readout="ignore",
|
351 |
-
start_index=1,
|
352 |
-
):
|
353 |
-
pretrained = nn.Module()
|
354 |
-
|
355 |
-
pretrained.model = model
|
356 |
-
|
357 |
-
if use_vit_only == True:
|
358 |
-
pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
|
359 |
-
pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
|
360 |
-
else:
|
361 |
-
pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
|
362 |
-
get_activation("1")
|
363 |
-
)
|
364 |
-
pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
|
365 |
-
get_activation("2")
|
366 |
-
)
|
367 |
-
|
368 |
-
pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
|
369 |
-
pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
|
370 |
-
|
371 |
-
pretrained.activations = activations
|
372 |
-
|
373 |
-
readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
|
374 |
-
|
375 |
-
if use_vit_only == True:
|
376 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
377 |
-
readout_oper[0],
|
378 |
-
Transpose(1, 2),
|
379 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
380 |
-
nn.Conv2d(
|
381 |
-
in_channels=vit_features,
|
382 |
-
out_channels=features[0],
|
383 |
-
kernel_size=1,
|
384 |
-
stride=1,
|
385 |
-
padding=0,
|
386 |
-
),
|
387 |
-
nn.ConvTranspose2d(
|
388 |
-
in_channels=features[0],
|
389 |
-
out_channels=features[0],
|
390 |
-
kernel_size=4,
|
391 |
-
stride=4,
|
392 |
-
padding=0,
|
393 |
-
bias=True,
|
394 |
-
dilation=1,
|
395 |
-
groups=1,
|
396 |
-
),
|
397 |
-
)
|
398 |
-
|
399 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
400 |
-
readout_oper[1],
|
401 |
-
Transpose(1, 2),
|
402 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
403 |
-
nn.Conv2d(
|
404 |
-
in_channels=vit_features,
|
405 |
-
out_channels=features[1],
|
406 |
-
kernel_size=1,
|
407 |
-
stride=1,
|
408 |
-
padding=0,
|
409 |
-
),
|
410 |
-
nn.ConvTranspose2d(
|
411 |
-
in_channels=features[1],
|
412 |
-
out_channels=features[1],
|
413 |
-
kernel_size=2,
|
414 |
-
stride=2,
|
415 |
-
padding=0,
|
416 |
-
bias=True,
|
417 |
-
dilation=1,
|
418 |
-
groups=1,
|
419 |
-
),
|
420 |
-
)
|
421 |
-
else:
|
422 |
-
pretrained.act_postprocess1 = nn.Sequential(
|
423 |
-
nn.Identity(), nn.Identity(), nn.Identity()
|
424 |
-
)
|
425 |
-
pretrained.act_postprocess2 = nn.Sequential(
|
426 |
-
nn.Identity(), nn.Identity(), nn.Identity()
|
427 |
-
)
|
428 |
-
|
429 |
-
pretrained.act_postprocess3 = nn.Sequential(
|
430 |
-
readout_oper[2],
|
431 |
-
Transpose(1, 2),
|
432 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
433 |
-
nn.Conv2d(
|
434 |
-
in_channels=vit_features,
|
435 |
-
out_channels=features[2],
|
436 |
-
kernel_size=1,
|
437 |
-
stride=1,
|
438 |
-
padding=0,
|
439 |
-
),
|
440 |
-
)
|
441 |
-
|
442 |
-
pretrained.act_postprocess4 = nn.Sequential(
|
443 |
-
readout_oper[3],
|
444 |
-
Transpose(1, 2),
|
445 |
-
nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
|
446 |
-
nn.Conv2d(
|
447 |
-
in_channels=vit_features,
|
448 |
-
out_channels=features[3],
|
449 |
-
kernel_size=1,
|
450 |
-
stride=1,
|
451 |
-
padding=0,
|
452 |
-
),
|
453 |
-
nn.Conv2d(
|
454 |
-
in_channels=features[3],
|
455 |
-
out_channels=features[3],
|
456 |
-
kernel_size=3,
|
457 |
-
stride=2,
|
458 |
-
padding=1,
|
459 |
-
),
|
460 |
-
)
|
461 |
-
|
462 |
-
pretrained.model.start_index = start_index
|
463 |
-
pretrained.model.patch_size = [16, 16]
|
464 |
-
|
465 |
-
# We inject this function into the VisionTransformer instances so that
|
466 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
467 |
-
pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
|
468 |
-
|
469 |
-
# We inject this function into the VisionTransformer instances so that
|
470 |
-
# we can use it with interpolated position embeddings without modifying the library source.
|
471 |
-
pretrained.model._resize_pos_embed = types.MethodType(
|
472 |
-
_resize_pos_embed, pretrained.model
|
473 |
-
)
|
474 |
-
|
475 |
-
return pretrained
|
476 |
-
|
477 |
-
|
478 |
-
def _make_pretrained_vitb_rn50_384(
|
479 |
-
pretrained, use_readout="ignore", hooks=None, use_vit_only=False
|
480 |
-
):
|
481 |
-
model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
|
482 |
-
|
483 |
-
hooks = [0, 1, 8, 11] if hooks == None else hooks
|
484 |
-
return _make_vit_b_rn50_backbone(
|
485 |
-
model,
|
486 |
-
features=[256, 512, 768, 768],
|
487 |
-
size=[384, 384],
|
488 |
-
hooks=hooks,
|
489 |
-
use_vit_only=use_vit_only,
|
490 |
-
use_readout=use_readout,
|
491 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGText/GlyphControl/cldm/logger.py
DELETED
@@ -1,76 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
import torchvision
|
6 |
-
from PIL import Image
|
7 |
-
from pytorch_lightning.callbacks import Callback
|
8 |
-
from pytorch_lightning.utilities.distributed import rank_zero_only
|
9 |
-
|
10 |
-
|
11 |
-
class ImageLogger(Callback):
|
12 |
-
def __init__(self, batch_frequency=2000, max_images=4, clamp=True, increase_log_steps=True,
|
13 |
-
rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False,
|
14 |
-
log_images_kwargs=None):
|
15 |
-
super().__init__()
|
16 |
-
self.rescale = rescale
|
17 |
-
self.batch_freq = batch_frequency
|
18 |
-
self.max_images = max_images
|
19 |
-
if not increase_log_steps:
|
20 |
-
self.log_steps = [self.batch_freq]
|
21 |
-
self.clamp = clamp
|
22 |
-
self.disabled = disabled
|
23 |
-
self.log_on_batch_idx = log_on_batch_idx
|
24 |
-
self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {}
|
25 |
-
self.log_first_step = log_first_step
|
26 |
-
|
27 |
-
@rank_zero_only
|
28 |
-
def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
|
29 |
-
root = os.path.join(save_dir, "image_log", split)
|
30 |
-
for k in images:
|
31 |
-
grid = torchvision.utils.make_grid(images[k], nrow=4)
|
32 |
-
if self.rescale:
|
33 |
-
grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w
|
34 |
-
grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
|
35 |
-
grid = grid.numpy()
|
36 |
-
grid = (grid * 255).astype(np.uint8)
|
37 |
-
filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx)
|
38 |
-
path = os.path.join(root, filename)
|
39 |
-
os.makedirs(os.path.split(path)[0], exist_ok=True)
|
40 |
-
Image.fromarray(grid).save(path)
|
41 |
-
|
42 |
-
def log_img(self, pl_module, batch, batch_idx, split="train"):
|
43 |
-
check_idx = batch_idx # if self.log_on_batch_idx else pl_module.global_step
|
44 |
-
if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0
|
45 |
-
hasattr(pl_module, "log_images") and
|
46 |
-
callable(pl_module.log_images) and
|
47 |
-
self.max_images > 0):
|
48 |
-
logger = type(pl_module.logger)
|
49 |
-
|
50 |
-
is_train = pl_module.training
|
51 |
-
if is_train:
|
52 |
-
pl_module.eval()
|
53 |
-
|
54 |
-
with torch.no_grad():
|
55 |
-
images = pl_module.log_images(batch, split=split, **self.log_images_kwargs)
|
56 |
-
|
57 |
-
for k in images:
|
58 |
-
N = min(images[k].shape[0], self.max_images)
|
59 |
-
images[k] = images[k][:N]
|
60 |
-
if isinstance(images[k], torch.Tensor):
|
61 |
-
images[k] = images[k].detach().cpu()
|
62 |
-
if self.clamp:
|
63 |
-
images[k] = torch.clamp(images[k], -1., 1.)
|
64 |
-
|
65 |
-
self.log_local(pl_module.logger.save_dir, split, images,
|
66 |
-
pl_module.global_step, pl_module.current_epoch, batch_idx)
|
67 |
-
|
68 |
-
if is_train:
|
69 |
-
pl_module.train()
|
70 |
-
|
71 |
-
def check_frequency(self, check_idx):
|
72 |
-
return check_idx % self.batch_freq == 0
|
73 |
-
|
74 |
-
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
|
75 |
-
if not self.disabled:
|
76 |
-
self.log_img(pl_module, batch, batch_idx, split="train")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/helper.py
DELETED
@@ -1,77 +0,0 @@
|
|
1 |
-
from __future__ import annotations
|
2 |
-
|
3 |
-
import asyncio
|
4 |
-
import sys
|
5 |
-
from asyncio import AbstractEventLoop
|
6 |
-
from os import path
|
7 |
-
from typing import Dict, List
|
8 |
-
import browser_cookie3
|
9 |
-
|
10 |
-
# Change event loop policy on windows
|
11 |
-
if sys.platform == 'win32':
|
12 |
-
if isinstance(
|
13 |
-
asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy
|
14 |
-
):
|
15 |
-
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
|
16 |
-
|
17 |
-
# Local Cookie Storage
|
18 |
-
_cookies: Dict[str, Dict[str, str]] = {}
|
19 |
-
|
20 |
-
# If event loop is already running, handle nested event loops
|
21 |
-
# If "nest_asyncio" is installed, patch the event loop.
|
22 |
-
def get_event_loop() -> AbstractEventLoop:
|
23 |
-
try:
|
24 |
-
asyncio.get_running_loop()
|
25 |
-
except RuntimeError:
|
26 |
-
try:
|
27 |
-
return asyncio.get_event_loop()
|
28 |
-
except RuntimeError:
|
29 |
-
asyncio.set_event_loop(asyncio.new_event_loop())
|
30 |
-
return asyncio.get_event_loop()
|
31 |
-
try:
|
32 |
-
event_loop = asyncio.get_event_loop()
|
33 |
-
if not hasattr(event_loop.__class__, "_nest_patched"):
|
34 |
-
import nest_asyncio
|
35 |
-
nest_asyncio.apply(event_loop)
|
36 |
-
return event_loop
|
37 |
-
except ImportError:
|
38 |
-
raise RuntimeError(
|
39 |
-
'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.'
|
40 |
-
)
|
41 |
-
|
42 |
-
|
43 |
-
# Load cookies for a domain from all supported browsers.
|
44 |
-
# Cache the results in the "_cookies" variable.
|
45 |
-
def get_cookies(cookie_domain: str) -> Dict[str, str]:
|
46 |
-
if cookie_domain not in _cookies:
|
47 |
-
_cookies[cookie_domain] = {}
|
48 |
-
try:
|
49 |
-
for cookie in browser_cookie3.load(cookie_domain):
|
50 |
-
_cookies[cookie_domain][cookie.name] = cookie.value
|
51 |
-
except:
|
52 |
-
pass
|
53 |
-
return _cookies[cookie_domain]
|
54 |
-
|
55 |
-
|
56 |
-
def format_prompt(messages: List[Dict[str, str]], add_special_tokens=False) -> str:
|
57 |
-
if add_special_tokens or len(messages) > 1:
|
58 |
-
formatted = "\n".join(
|
59 |
-
[
|
60 |
-
"%s: %s" % ((message["role"]).capitalize(), message["content"])
|
61 |
-
for message in messages
|
62 |
-
]
|
63 |
-
)
|
64 |
-
return f"{formatted}\nAssistant:"
|
65 |
-
else:
|
66 |
-
return messages[0]["content"]
|
67 |
-
|
68 |
-
|
69 |
-
def get_browser(user_data_dir: str = None):
|
70 |
-
from undetected_chromedriver import Chrome
|
71 |
-
from platformdirs import user_config_dir
|
72 |
-
|
73 |
-
if not user_data_dir:
|
74 |
-
user_data_dir = user_config_dir("g4f")
|
75 |
-
user_data_dir = path.join(user_data_dir, "Default")
|
76 |
-
|
77 |
-
return Chrome(user_data_dir=user_data_dir)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/base/EaseValueMethods.js
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
import EaseValueTask from '../../../plugins/utils/ease/EaseValueTask.js';
|
2 |
-
|
3 |
-
var Start = function (duration) {
|
4 |
-
if (!this.easeValueTask) {
|
5 |
-
this.easeValueTask = new EaseValueTask(this, { eventEmitter: null });
|
6 |
-
}
|
7 |
-
|
8 |
-
if (duration !== undefined) {
|
9 |
-
this.duration = duration;
|
10 |
-
this.easeValueTask.stop(); // Will restart with new duration
|
11 |
-
}
|
12 |
-
|
13 |
-
// Won't restart if easeValueTask is running
|
14 |
-
if (this.easeValueTask.isRunning) {
|
15 |
-
return this;
|
16 |
-
}
|
17 |
-
|
18 |
-
// Start easeValueTask
|
19 |
-
this.easeValueTask.restart({
|
20 |
-
key: 'value',
|
21 |
-
from: 0, to: 1,
|
22 |
-
duration: this.duration,
|
23 |
-
ease: this.ease,
|
24 |
-
repeat: -1, // -1: infinity
|
25 |
-
|
26 |
-
delay: this.delay,
|
27 |
-
repeatDelay: this.repeatDelay
|
28 |
-
});
|
29 |
-
|
30 |
-
this.setDirty();
|
31 |
-
|
32 |
-
return this;
|
33 |
-
}
|
34 |
-
|
35 |
-
var Stop = function () {
|
36 |
-
if (!this.easeValueTask) {
|
37 |
-
return this;
|
38 |
-
}
|
39 |
-
this.easeValueTask.stop();
|
40 |
-
this.setDirty();
|
41 |
-
return this;
|
42 |
-
}
|
43 |
-
|
44 |
-
var Pause = function () {
|
45 |
-
if (!this.easeValueTask) {
|
46 |
-
return this;
|
47 |
-
}
|
48 |
-
this.easeValueTask.pause();
|
49 |
-
this.setDirty();
|
50 |
-
return this;
|
51 |
-
}
|
52 |
-
|
53 |
-
var Resume = function () {
|
54 |
-
if (!this.easeValueTask) {
|
55 |
-
return this;
|
56 |
-
}
|
57 |
-
this.easeValueTask.pause();
|
58 |
-
this.setDirty();
|
59 |
-
return this;
|
60 |
-
}
|
61 |
-
|
62 |
-
export default {
|
63 |
-
start: Start,
|
64 |
-
stop: Stop,
|
65 |
-
pause: Pause,
|
66 |
-
resume: Resume
|
67 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/customprogress/CustomProgress.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import CustomProgress from '../../../plugins/customprogress.js';
|
2 |
-
export default CustomProgress;
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthsizer/GetChildrenWidth.js
DELETED
@@ -1,10 +0,0 @@
|
|
1 |
-
var GetChildrenWidth = function () {
|
2 |
-
if (this.rexSizer.hidden) {
|
3 |
-
return 0;
|
4 |
-
}
|
5 |
-
|
6 |
-
// Before RunChildrenWrap
|
7 |
-
return this.maxChildWidth + this.space.left + this.space.right;
|
8 |
-
}
|
9 |
-
|
10 |
-
export default GetChildrenWidth;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetChildrenProportion.js
DELETED
@@ -1,17 +0,0 @@
|
|
1 |
-
var GetChildrenProportion = function () {
|
2 |
-
var result = 0;
|
3 |
-
var children = this.sizerChildren;
|
4 |
-
var child, proportion;
|
5 |
-
for (var i = 0, cnt = children.length; i < cnt; i++) {
|
6 |
-
child = children[i];
|
7 |
-
if (child.rexSizer.hidden) {
|
8 |
-
continue;
|
9 |
-
}
|
10 |
-
proportion = child.rexSizer.proportion;
|
11 |
-
if (proportion > 0) {
|
12 |
-
result += proportion;
|
13 |
-
}
|
14 |
-
}
|
15 |
-
return result;
|
16 |
-
}
|
17 |
-
export default GetChildrenProportion;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AiMimicry/sovits-models/modules/ddsp.py
DELETED
@@ -1,190 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
from torch.nn import functional as F
|
4 |
-
import torch.fft as fft
|
5 |
-
import numpy as np
|
6 |
-
import librosa as li
|
7 |
-
import math
|
8 |
-
from scipy.signal import get_window
|
9 |
-
|
10 |
-
|
11 |
-
def safe_log(x):
|
12 |
-
return torch.log(x + 1e-7)
|
13 |
-
|
14 |
-
|
15 |
-
@torch.no_grad()
|
16 |
-
def mean_std_loudness(dataset):
|
17 |
-
mean = 0
|
18 |
-
std = 0
|
19 |
-
n = 0
|
20 |
-
for _, _, l in dataset:
|
21 |
-
n += 1
|
22 |
-
mean += (l.mean().item() - mean) / n
|
23 |
-
std += (l.std().item() - std) / n
|
24 |
-
return mean, std
|
25 |
-
|
26 |
-
|
27 |
-
def multiscale_fft(signal, scales, overlap):
|
28 |
-
stfts = []
|
29 |
-
for s in scales:
|
30 |
-
S = torch.stft(
|
31 |
-
signal,
|
32 |
-
s,
|
33 |
-
int(s * (1 - overlap)),
|
34 |
-
s,
|
35 |
-
torch.hann_window(s).to(signal),
|
36 |
-
True,
|
37 |
-
normalized=True,
|
38 |
-
return_complex=True,
|
39 |
-
).abs()
|
40 |
-
stfts.append(S)
|
41 |
-
return stfts
|
42 |
-
|
43 |
-
|
44 |
-
def resample(x, factor: int):
|
45 |
-
batch, frame, channel = x.shape
|
46 |
-
x = x.permute(0, 2, 1).reshape(batch * channel, 1, frame)
|
47 |
-
|
48 |
-
window = torch.hann_window(
|
49 |
-
factor * 2,
|
50 |
-
dtype=x.dtype,
|
51 |
-
device=x.device,
|
52 |
-
).reshape(1, 1, -1)
|
53 |
-
y = torch.zeros(x.shape[0], x.shape[1], factor * x.shape[2]).to(x)
|
54 |
-
y[..., ::factor] = x
|
55 |
-
y[..., -1:] = x[..., -1:]
|
56 |
-
y = torch.nn.functional.pad(y, [factor, factor])
|
57 |
-
y = torch.nn.functional.conv1d(y, window)[..., :-1]
|
58 |
-
|
59 |
-
y = y.reshape(batch, channel, factor * frame).permute(0, 2, 1)
|
60 |
-
|
61 |
-
return y
|
62 |
-
|
63 |
-
|
64 |
-
def upsample(signal, factor):
|
65 |
-
signal = signal.permute(0, 2, 1)
|
66 |
-
signal = nn.functional.interpolate(signal, size=signal.shape[-1] * factor)
|
67 |
-
return signal.permute(0, 2, 1)
|
68 |
-
|
69 |
-
|
70 |
-
def remove_above_nyquist(amplitudes, pitch, sampling_rate):
|
71 |
-
n_harm = amplitudes.shape[-1]
|
72 |
-
pitches = pitch * torch.arange(1, n_harm + 1).to(pitch)
|
73 |
-
aa = (pitches < sampling_rate / 2).float() + 1e-4
|
74 |
-
return amplitudes * aa
|
75 |
-
|
76 |
-
|
77 |
-
def scale_function(x):
|
78 |
-
return 2 * torch.sigmoid(x) ** (math.log(10)) + 1e-7
|
79 |
-
|
80 |
-
|
81 |
-
def extract_loudness(signal, sampling_rate, block_size, n_fft=2048):
|
82 |
-
S = li.stft(
|
83 |
-
signal,
|
84 |
-
n_fft=n_fft,
|
85 |
-
hop_length=block_size,
|
86 |
-
win_length=n_fft,
|
87 |
-
center=True,
|
88 |
-
)
|
89 |
-
S = np.log(abs(S) + 1e-7)
|
90 |
-
f = li.fft_frequencies(sampling_rate, n_fft)
|
91 |
-
a_weight = li.A_weighting(f)
|
92 |
-
|
93 |
-
S = S + a_weight.reshape(-1, 1)
|
94 |
-
|
95 |
-
S = np.mean(S, 0)[..., :-1]
|
96 |
-
|
97 |
-
return S
|
98 |
-
|
99 |
-
|
100 |
-
def extract_pitch(signal, sampling_rate, block_size):
|
101 |
-
length = signal.shape[-1] // block_size
|
102 |
-
f0 = crepe.predict(
|
103 |
-
signal,
|
104 |
-
sampling_rate,
|
105 |
-
step_size=int(1000 * block_size / sampling_rate),
|
106 |
-
verbose=1,
|
107 |
-
center=True,
|
108 |
-
viterbi=True,
|
109 |
-
)
|
110 |
-
f0 = f0[1].reshape(-1)[:-1]
|
111 |
-
|
112 |
-
if f0.shape[-1] != length:
|
113 |
-
f0 = np.interp(
|
114 |
-
np.linspace(0, 1, length, endpoint=False),
|
115 |
-
np.linspace(0, 1, f0.shape[-1], endpoint=False),
|
116 |
-
f0,
|
117 |
-
)
|
118 |
-
|
119 |
-
return f0
|
120 |
-
|
121 |
-
|
122 |
-
def mlp(in_size, hidden_size, n_layers):
|
123 |
-
channels = [in_size] + (n_layers) * [hidden_size]
|
124 |
-
net = []
|
125 |
-
for i in range(n_layers):
|
126 |
-
net.append(nn.Linear(channels[i], channels[i + 1]))
|
127 |
-
net.append(nn.LayerNorm(channels[i + 1]))
|
128 |
-
net.append(nn.LeakyReLU())
|
129 |
-
return nn.Sequential(*net)
|
130 |
-
|
131 |
-
|
132 |
-
def gru(n_input, hidden_size):
|
133 |
-
return nn.GRU(n_input * hidden_size, hidden_size, batch_first=True)
|
134 |
-
|
135 |
-
|
136 |
-
def harmonic_synth(pitch, amplitudes, sampling_rate):
|
137 |
-
n_harmonic = amplitudes.shape[-1]
|
138 |
-
omega = torch.cumsum(2 * math.pi * pitch / sampling_rate, 1)
|
139 |
-
omegas = omega * torch.arange(1, n_harmonic + 1).to(omega)
|
140 |
-
signal = (torch.sin(omegas) * amplitudes).sum(-1, keepdim=True)
|
141 |
-
return signal
|
142 |
-
|
143 |
-
|
144 |
-
def amp_to_impulse_response(amp, target_size):
|
145 |
-
amp = torch.stack([amp, torch.zeros_like(amp)], -1)
|
146 |
-
amp = torch.view_as_complex(amp)
|
147 |
-
amp = fft.irfft(amp)
|
148 |
-
|
149 |
-
filter_size = amp.shape[-1]
|
150 |
-
|
151 |
-
amp = torch.roll(amp, filter_size // 2, -1)
|
152 |
-
win = torch.hann_window(filter_size, dtype=amp.dtype, device=amp.device)
|
153 |
-
|
154 |
-
amp = amp * win
|
155 |
-
|
156 |
-
amp = nn.functional.pad(amp, (0, int(target_size) - int(filter_size)))
|
157 |
-
amp = torch.roll(amp, -filter_size // 2, -1)
|
158 |
-
|
159 |
-
return amp
|
160 |
-
|
161 |
-
|
162 |
-
def fft_convolve(signal, kernel):
|
163 |
-
signal = nn.functional.pad(signal, (0, signal.shape[-1]))
|
164 |
-
kernel = nn.functional.pad(kernel, (kernel.shape[-1], 0))
|
165 |
-
|
166 |
-
output = fft.irfft(fft.rfft(signal) * fft.rfft(kernel))
|
167 |
-
output = output[..., output.shape[-1] // 2:]
|
168 |
-
|
169 |
-
return output
|
170 |
-
|
171 |
-
|
172 |
-
def init_kernels(win_len, win_inc, fft_len, win_type=None, invers=False):
|
173 |
-
if win_type == 'None' or win_type is None:
|
174 |
-
window = np.ones(win_len)
|
175 |
-
else:
|
176 |
-
window = get_window(win_type, win_len, fftbins=True) # **0.5
|
177 |
-
|
178 |
-
N = fft_len
|
179 |
-
fourier_basis = np.fft.rfft(np.eye(N))[:win_len]
|
180 |
-
real_kernel = np.real(fourier_basis)
|
181 |
-
imag_kernel = np.imag(fourier_basis)
|
182 |
-
kernel = np.concatenate([real_kernel, imag_kernel], 1).T
|
183 |
-
|
184 |
-
if invers:
|
185 |
-
kernel = np.linalg.pinv(kernel).T
|
186 |
-
|
187 |
-
kernel = kernel * window
|
188 |
-
kernel = kernel[:, None, :]
|
189 |
-
return torch.from_numpy(kernel.astype(np.float32)), torch.from_numpy(window[None, :, None].astype(np.float32))
|
190 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aki004/herta-so-vits/hubert/hubert_model_onnx.py
DELETED
@@ -1,217 +0,0 @@
|
|
1 |
-
import copy
|
2 |
-
import random
|
3 |
-
from typing import Optional, Tuple
|
4 |
-
|
5 |
-
import torch
|
6 |
-
import torch.nn as nn
|
7 |
-
import torch.nn.functional as t_func
|
8 |
-
from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present
|
9 |
-
|
10 |
-
|
11 |
-
class Hubert(nn.Module):
|
12 |
-
def __init__(self, num_label_embeddings: int = 100, mask: bool = True):
|
13 |
-
super().__init__()
|
14 |
-
self._mask = mask
|
15 |
-
self.feature_extractor = FeatureExtractor()
|
16 |
-
self.feature_projection = FeatureProjection()
|
17 |
-
self.positional_embedding = PositionalConvEmbedding()
|
18 |
-
self.norm = nn.LayerNorm(768)
|
19 |
-
self.dropout = nn.Dropout(0.1)
|
20 |
-
self.encoder = TransformerEncoder(
|
21 |
-
nn.TransformerEncoderLayer(
|
22 |
-
768, 12, 3072, activation="gelu", batch_first=True
|
23 |
-
),
|
24 |
-
12,
|
25 |
-
)
|
26 |
-
self.proj = nn.Linear(768, 256)
|
27 |
-
|
28 |
-
self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_())
|
29 |
-
self.label_embedding = nn.Embedding(num_label_embeddings, 256)
|
30 |
-
|
31 |
-
def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
|
32 |
-
mask = None
|
33 |
-
if self.training and self._mask:
|
34 |
-
mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2)
|
35 |
-
x[mask] = self.masked_spec_embed.to(x.dtype)
|
36 |
-
return x, mask
|
37 |
-
|
38 |
-
def encode(
|
39 |
-
self, x: torch.Tensor, layer: Optional[int] = None
|
40 |
-
) -> Tuple[torch.Tensor, torch.Tensor]:
|
41 |
-
x = self.feature_extractor(x)
|
42 |
-
x = self.feature_projection(x.transpose(1, 2))
|
43 |
-
x, mask = self.mask(x)
|
44 |
-
x = x + self.positional_embedding(x)
|
45 |
-
x = self.dropout(self.norm(x))
|
46 |
-
x = self.encoder(x, output_layer=layer)
|
47 |
-
return x, mask
|
48 |
-
|
49 |
-
def logits(self, x: torch.Tensor) -> torch.Tensor:
|
50 |
-
logits = torch.cosine_similarity(
|
51 |
-
x.unsqueeze(2),
|
52 |
-
self.label_embedding.weight.unsqueeze(0).unsqueeze(0),
|
53 |
-
dim=-1,
|
54 |
-
)
|
55 |
-
return logits / 0.1
|
56 |
-
|
57 |
-
|
58 |
-
class HubertSoft(Hubert):
|
59 |
-
def __init__(self):
|
60 |
-
super().__init__()
|
61 |
-
|
62 |
-
def units(self, wav: torch.Tensor) -> torch.Tensor:
|
63 |
-
wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2))
|
64 |
-
x, _ = self.encode(wav)
|
65 |
-
return self.proj(x)
|
66 |
-
|
67 |
-
def forward(self, x):
|
68 |
-
return self.units(x)
|
69 |
-
|
70 |
-
class FeatureExtractor(nn.Module):
|
71 |
-
def __init__(self):
|
72 |
-
super().__init__()
|
73 |
-
self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False)
|
74 |
-
self.norm0 = nn.GroupNorm(512, 512)
|
75 |
-
self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
76 |
-
self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
77 |
-
self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
78 |
-
self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False)
|
79 |
-
self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False)
|
80 |
-
self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False)
|
81 |
-
|
82 |
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
83 |
-
x = t_func.gelu(self.norm0(self.conv0(x)))
|
84 |
-
x = t_func.gelu(self.conv1(x))
|
85 |
-
x = t_func.gelu(self.conv2(x))
|
86 |
-
x = t_func.gelu(self.conv3(x))
|
87 |
-
x = t_func.gelu(self.conv4(x))
|
88 |
-
x = t_func.gelu(self.conv5(x))
|
89 |
-
x = t_func.gelu(self.conv6(x))
|
90 |
-
return x
|
91 |
-
|
92 |
-
|
93 |
-
class FeatureProjection(nn.Module):
|
94 |
-
def __init__(self):
|
95 |
-
super().__init__()
|
96 |
-
self.norm = nn.LayerNorm(512)
|
97 |
-
self.projection = nn.Linear(512, 768)
|
98 |
-
self.dropout = nn.Dropout(0.1)
|
99 |
-
|
100 |
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
101 |
-
x = self.norm(x)
|
102 |
-
x = self.projection(x)
|
103 |
-
x = self.dropout(x)
|
104 |
-
return x
|
105 |
-
|
106 |
-
|
107 |
-
class PositionalConvEmbedding(nn.Module):
|
108 |
-
def __init__(self):
|
109 |
-
super().__init__()
|
110 |
-
self.conv = nn.Conv1d(
|
111 |
-
768,
|
112 |
-
768,
|
113 |
-
kernel_size=128,
|
114 |
-
padding=128 // 2,
|
115 |
-
groups=16,
|
116 |
-
)
|
117 |
-
self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2)
|
118 |
-
|
119 |
-
def forward(self, x: torch.Tensor) -> torch.Tensor:
|
120 |
-
x = self.conv(x.transpose(1, 2))
|
121 |
-
x = t_func.gelu(x[:, :, :-1])
|
122 |
-
return x.transpose(1, 2)
|
123 |
-
|
124 |
-
|
125 |
-
class TransformerEncoder(nn.Module):
|
126 |
-
def __init__(
|
127 |
-
self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int
|
128 |
-
) -> None:
|
129 |
-
super(TransformerEncoder, self).__init__()
|
130 |
-
self.layers = nn.ModuleList(
|
131 |
-
[copy.deepcopy(encoder_layer) for _ in range(num_layers)]
|
132 |
-
)
|
133 |
-
self.num_layers = num_layers
|
134 |
-
|
135 |
-
def forward(
|
136 |
-
self,
|
137 |
-
src: torch.Tensor,
|
138 |
-
mask: torch.Tensor = None,
|
139 |
-
src_key_padding_mask: torch.Tensor = None,
|
140 |
-
output_layer: Optional[int] = None,
|
141 |
-
) -> torch.Tensor:
|
142 |
-
output = src
|
143 |
-
for layer in self.layers[:output_layer]:
|
144 |
-
output = layer(
|
145 |
-
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask
|
146 |
-
)
|
147 |
-
return output
|
148 |
-
|
149 |
-
|
150 |
-
def _compute_mask(
|
151 |
-
shape: Tuple[int, int],
|
152 |
-
mask_prob: float,
|
153 |
-
mask_length: int,
|
154 |
-
device: torch.device,
|
155 |
-
min_masks: int = 0,
|
156 |
-
) -> torch.Tensor:
|
157 |
-
batch_size, sequence_length = shape
|
158 |
-
|
159 |
-
if mask_length < 1:
|
160 |
-
raise ValueError("`mask_length` has to be bigger than 0.")
|
161 |
-
|
162 |
-
if mask_length > sequence_length:
|
163 |
-
raise ValueError(
|
164 |
-
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
|
165 |
-
)
|
166 |
-
|
167 |
-
# compute number of masked spans in batch
|
168 |
-
num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random())
|
169 |
-
num_masked_spans = max(num_masked_spans, min_masks)
|
170 |
-
|
171 |
-
# make sure num masked indices <= sequence_length
|
172 |
-
if num_masked_spans * mask_length > sequence_length:
|
173 |
-
num_masked_spans = sequence_length // mask_length
|
174 |
-
|
175 |
-
# SpecAugment mask to fill
|
176 |
-
mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool)
|
177 |
-
|
178 |
-
# uniform distribution to sample from, make sure that offset samples are < sequence_length
|
179 |
-
uniform_dist = torch.ones(
|
180 |
-
(batch_size, sequence_length - (mask_length - 1)), device=device
|
181 |
-
)
|
182 |
-
|
183 |
-
# get random indices to mask
|
184 |
-
mask_indices = torch.multinomial(uniform_dist, num_masked_spans)
|
185 |
-
|
186 |
-
# expand masked indices to masked spans
|
187 |
-
mask_indices = (
|
188 |
-
mask_indices.unsqueeze(dim=-1)
|
189 |
-
.expand((batch_size, num_masked_spans, mask_length))
|
190 |
-
.reshape(batch_size, num_masked_spans * mask_length)
|
191 |
-
)
|
192 |
-
offsets = (
|
193 |
-
torch.arange(mask_length, device=device)[None, None, :]
|
194 |
-
.expand((batch_size, num_masked_spans, mask_length))
|
195 |
-
.reshape(batch_size, num_masked_spans * mask_length)
|
196 |
-
)
|
197 |
-
mask_idxs = mask_indices + offsets
|
198 |
-
|
199 |
-
# scatter indices to mask
|
200 |
-
mask = mask.scatter(1, mask_idxs, True)
|
201 |
-
|
202 |
-
return mask
|
203 |
-
|
204 |
-
|
205 |
-
def hubert_soft(
|
206 |
-
path: str,
|
207 |
-
) -> HubertSoft:
|
208 |
-
r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`.
|
209 |
-
Args:
|
210 |
-
path (str): path of a pretrained model
|
211 |
-
"""
|
212 |
-
hubert = HubertSoft()
|
213 |
-
checkpoint = torch.load(path)
|
214 |
-
consume_prefix_in_state_dict_if_present(checkpoint, "module.")
|
215 |
-
hubert.load_state_dict(checkpoint)
|
216 |
-
hubert.eval()
|
217 |
-
return hubert
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Akmyradov/TurkmenTTSweSTT/vits/preprocess.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
import argparse
|
2 |
-
import text
|
3 |
-
from utils import load_filepaths_and_text
|
4 |
-
|
5 |
-
if __name__ == '__main__':
|
6 |
-
parser = argparse.ArgumentParser()
|
7 |
-
parser.add_argument("--out_extension", default="cleaned")
|
8 |
-
parser.add_argument("--text_index", default=1, type=int)
|
9 |
-
parser.add_argument("--filelists", nargs="+", default=["filelists/ljs_audio_text_val_filelist.txt", "filelists/ljs_audio_text_test_filelist.txt"])
|
10 |
-
parser.add_argument("--text_cleaners", nargs="+", default=["english_cleaners2"])
|
11 |
-
|
12 |
-
args = parser.parse_args()
|
13 |
-
|
14 |
-
|
15 |
-
for filelist in args.filelists:
|
16 |
-
print("START:", filelist)
|
17 |
-
filepaths_and_text = load_filepaths_and_text(filelist)
|
18 |
-
for i in range(len(filepaths_and_text)):
|
19 |
-
original_text = filepaths_and_text[i][args.text_index]
|
20 |
-
cleaned_text = text._clean_text(original_text, args.text_cleaners)
|
21 |
-
filepaths_and_text[i][args.text_index] = cleaned_text
|
22 |
-
|
23 |
-
new_filelist = filelist + "." + args.out_extension
|
24 |
-
with open(new_filelist, "w", encoding="utf-8") as f:
|
25 |
-
f.writelines(["|".join(x) + "\n" for x in filepaths_and_text])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alican/pixera/models/networks.py
DELETED
@@ -1,616 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
from torch.nn import init
|
4 |
-
import functools
|
5 |
-
from torch.optim import lr_scheduler
|
6 |
-
|
7 |
-
|
8 |
-
###############################################################################
|
9 |
-
# Helper Functions
|
10 |
-
###############################################################################
|
11 |
-
|
12 |
-
|
13 |
-
class Identity(nn.Module):
|
14 |
-
def forward(self, x):
|
15 |
-
return x
|
16 |
-
|
17 |
-
|
18 |
-
def get_norm_layer(norm_type='instance'):
|
19 |
-
"""Return a normalization layer
|
20 |
-
|
21 |
-
Parameters:
|
22 |
-
norm_type (str) -- the name of the normalization layer: batch | instance | none
|
23 |
-
|
24 |
-
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
|
25 |
-
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
|
26 |
-
"""
|
27 |
-
if norm_type == 'batch':
|
28 |
-
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
|
29 |
-
elif norm_type == 'instance':
|
30 |
-
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
|
31 |
-
elif norm_type == 'none':
|
32 |
-
def norm_layer(x):
|
33 |
-
return Identity()
|
34 |
-
else:
|
35 |
-
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
|
36 |
-
return norm_layer
|
37 |
-
|
38 |
-
|
39 |
-
def get_scheduler(optimizer, opt):
|
40 |
-
"""Return a learning rate scheduler
|
41 |
-
|
42 |
-
Parameters:
|
43 |
-
optimizer -- the optimizer of the network
|
44 |
-
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
|
45 |
-
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
|
46 |
-
|
47 |
-
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
|
48 |
-
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
|
49 |
-
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
|
50 |
-
See https://pytorch.org/docs/stable/optim.html for more details.
|
51 |
-
"""
|
52 |
-
if opt.lr_policy == 'linear':
|
53 |
-
def lambda_rule(epoch):
|
54 |
-
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
|
55 |
-
return lr_l
|
56 |
-
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
|
57 |
-
elif opt.lr_policy == 'step':
|
58 |
-
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
|
59 |
-
elif opt.lr_policy == 'plateau':
|
60 |
-
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
|
61 |
-
elif opt.lr_policy == 'cosine':
|
62 |
-
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
|
63 |
-
else:
|
64 |
-
return NotImplementedError('learning rate policy [%s] is not implemented', opt.lr_policy)
|
65 |
-
return scheduler
|
66 |
-
|
67 |
-
|
68 |
-
def init_weights(net, init_type='normal', init_gain=0.02):
|
69 |
-
"""Initialize network weights.
|
70 |
-
|
71 |
-
Parameters:
|
72 |
-
net (network) -- network to be initialized
|
73 |
-
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
|
74 |
-
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
|
75 |
-
|
76 |
-
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
|
77 |
-
work better for some applications. Feel free to try yourself.
|
78 |
-
"""
|
79 |
-
def init_func(m): # define the initialization function
|
80 |
-
classname = m.__class__.__name__
|
81 |
-
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
|
82 |
-
if init_type == 'normal':
|
83 |
-
init.normal_(m.weight.data, 0.0, init_gain)
|
84 |
-
elif init_type == 'xavier':
|
85 |
-
init.xavier_normal_(m.weight.data, gain=init_gain)
|
86 |
-
elif init_type == 'kaiming':
|
87 |
-
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
|
88 |
-
elif init_type == 'orthogonal':
|
89 |
-
init.orthogonal_(m.weight.data, gain=init_gain)
|
90 |
-
else:
|
91 |
-
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
|
92 |
-
if hasattr(m, 'bias') and m.bias is not None:
|
93 |
-
init.constant_(m.bias.data, 0.0)
|
94 |
-
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
|
95 |
-
init.normal_(m.weight.data, 1.0, init_gain)
|
96 |
-
init.constant_(m.bias.data, 0.0)
|
97 |
-
|
98 |
-
print('initialize network with %s' % init_type)
|
99 |
-
net.apply(init_func) # apply the initialization function <init_func>
|
100 |
-
|
101 |
-
|
102 |
-
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
|
103 |
-
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
|
104 |
-
Parameters:
|
105 |
-
net (network) -- the network to be initialized
|
106 |
-
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
|
107 |
-
gain (float) -- scaling factor for normal, xavier and orthogonal.
|
108 |
-
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
|
109 |
-
|
110 |
-
Return an initialized network.
|
111 |
-
"""
|
112 |
-
if len(gpu_ids) > 0:
|
113 |
-
assert(torch.cuda.is_available())
|
114 |
-
net.to(gpu_ids[0])
|
115 |
-
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
|
116 |
-
init_weights(net, init_type, init_gain=init_gain)
|
117 |
-
return net
|
118 |
-
|
119 |
-
|
120 |
-
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
|
121 |
-
"""Create a generator
|
122 |
-
|
123 |
-
Parameters:
|
124 |
-
input_nc (int) -- the number of channels in input images
|
125 |
-
output_nc (int) -- the number of channels in output images
|
126 |
-
ngf (int) -- the number of filters in the last conv layer
|
127 |
-
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
|
128 |
-
norm (str) -- the name of normalization layers used in the network: batch | instance | none
|
129 |
-
use_dropout (bool) -- if use dropout layers.
|
130 |
-
init_type (str) -- the name of our initialization method.
|
131 |
-
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
|
132 |
-
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
|
133 |
-
|
134 |
-
Returns a generator
|
135 |
-
|
136 |
-
Our current implementation provides two types of generators:
|
137 |
-
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
|
138 |
-
The original U-Net paper: https://arxiv.org/abs/1505.04597
|
139 |
-
|
140 |
-
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
|
141 |
-
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
|
142 |
-
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
|
143 |
-
|
144 |
-
|
145 |
-
The generator has been initialized by <init_net>. It uses RELU for non-linearity.
|
146 |
-
"""
|
147 |
-
net = None
|
148 |
-
norm_layer = get_norm_layer(norm_type=norm)
|
149 |
-
|
150 |
-
if netG == 'resnet_9blocks':
|
151 |
-
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
|
152 |
-
elif netG == 'resnet_6blocks':
|
153 |
-
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
|
154 |
-
elif netG == 'unet_128':
|
155 |
-
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
156 |
-
elif netG == 'unet_256':
|
157 |
-
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
|
158 |
-
else:
|
159 |
-
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
|
160 |
-
return init_net(net, init_type, init_gain, gpu_ids)
|
161 |
-
|
162 |
-
|
163 |
-
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
|
164 |
-
"""Create a discriminator
|
165 |
-
|
166 |
-
Parameters:
|
167 |
-
input_nc (int) -- the number of channels in input images
|
168 |
-
ndf (int) -- the number of filters in the first conv layer
|
169 |
-
netD (str) -- the architecture's name: basic | n_layers | pixel
|
170 |
-
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
|
171 |
-
norm (str) -- the type of normalization layers used in the network.
|
172 |
-
init_type (str) -- the name of the initialization method.
|
173 |
-
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
|
174 |
-
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
|
175 |
-
|
176 |
-
Returns a discriminator
|
177 |
-
|
178 |
-
Our current implementation provides three types of discriminators:
|
179 |
-
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
|
180 |
-
It can classify whether 70×70 overlapping patches are real or fake.
|
181 |
-
Such a patch-level discriminator architecture has fewer parameters
|
182 |
-
than a full-image discriminator and can work on arbitrarily-sized images
|
183 |
-
in a fully convolutional fashion.
|
184 |
-
|
185 |
-
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
|
186 |
-
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
|
187 |
-
|
188 |
-
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
|
189 |
-
It encourages greater color diversity but has no effect on spatial statistics.
|
190 |
-
|
191 |
-
The discriminator has been initialized by <init_net>. It uses Leakly RELU for non-linearity.
|
192 |
-
"""
|
193 |
-
net = None
|
194 |
-
norm_layer = get_norm_layer(norm_type=norm)
|
195 |
-
|
196 |
-
if netD == 'basic': # default PatchGAN classifier
|
197 |
-
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
|
198 |
-
elif netD == 'n_layers': # more options
|
199 |
-
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
|
200 |
-
elif netD == 'pixel': # classify if each pixel is real or fake
|
201 |
-
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
|
202 |
-
else:
|
203 |
-
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
|
204 |
-
return init_net(net, init_type, init_gain, gpu_ids)
|
205 |
-
|
206 |
-
|
207 |
-
##############################################################################
|
208 |
-
# Classes
|
209 |
-
##############################################################################
|
210 |
-
class GANLoss(nn.Module):
|
211 |
-
"""Define different GAN objectives.
|
212 |
-
|
213 |
-
The GANLoss class abstracts away the need to create the target label tensor
|
214 |
-
that has the same size as the input.
|
215 |
-
"""
|
216 |
-
|
217 |
-
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
|
218 |
-
""" Initialize the GANLoss class.
|
219 |
-
|
220 |
-
Parameters:
|
221 |
-
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
|
222 |
-
target_real_label (bool) - - label for a real image
|
223 |
-
target_fake_label (bool) - - label of a fake image
|
224 |
-
|
225 |
-
Note: Do not use sigmoid as the last layer of Discriminator.
|
226 |
-
LSGAN needs no sigmoid. vanilla GANs will handle it with BCEWithLogitsLoss.
|
227 |
-
"""
|
228 |
-
super(GANLoss, self).__init__()
|
229 |
-
self.register_buffer('real_label', torch.tensor(target_real_label))
|
230 |
-
self.register_buffer('fake_label', torch.tensor(target_fake_label))
|
231 |
-
self.gan_mode = gan_mode
|
232 |
-
if gan_mode == 'lsgan':
|
233 |
-
self.loss = nn.MSELoss()
|
234 |
-
elif gan_mode == 'vanilla':
|
235 |
-
self.loss = nn.BCEWithLogitsLoss()
|
236 |
-
elif gan_mode in ['wgangp']:
|
237 |
-
self.loss = None
|
238 |
-
else:
|
239 |
-
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
|
240 |
-
|
241 |
-
def get_target_tensor(self, prediction, target_is_real):
|
242 |
-
"""Create label tensors with the same size as the input.
|
243 |
-
|
244 |
-
Parameters:
|
245 |
-
prediction (tensor) - - tpyically the prediction from a discriminator
|
246 |
-
target_is_real (bool) - - if the ground truth label is for real images or fake images
|
247 |
-
|
248 |
-
Returns:
|
249 |
-
A label tensor filled with ground truth label, and with the size of the input
|
250 |
-
"""
|
251 |
-
|
252 |
-
if target_is_real:
|
253 |
-
target_tensor = self.real_label
|
254 |
-
else:
|
255 |
-
target_tensor = self.fake_label
|
256 |
-
return target_tensor.expand_as(prediction)
|
257 |
-
|
258 |
-
def __call__(self, prediction, target_is_real):
|
259 |
-
"""Calculate loss given Discriminator's output and grount truth labels.
|
260 |
-
|
261 |
-
Parameters:
|
262 |
-
prediction (tensor) - - tpyically the prediction output from a discriminator
|
263 |
-
target_is_real (bool) - - if the ground truth label is for real images or fake images
|
264 |
-
|
265 |
-
Returns:
|
266 |
-
the calculated loss.
|
267 |
-
"""
|
268 |
-
if self.gan_mode in ['lsgan', 'vanilla']:
|
269 |
-
target_tensor = self.get_target_tensor(prediction, target_is_real)
|
270 |
-
loss = self.loss(prediction, target_tensor)
|
271 |
-
elif self.gan_mode == 'wgangp':
|
272 |
-
if target_is_real:
|
273 |
-
loss = -prediction.mean()
|
274 |
-
else:
|
275 |
-
loss = prediction.mean()
|
276 |
-
return loss
|
277 |
-
|
278 |
-
|
279 |
-
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
|
280 |
-
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
|
281 |
-
|
282 |
-
Arguments:
|
283 |
-
netD (network) -- discriminator network
|
284 |
-
real_data (tensor array) -- real images
|
285 |
-
fake_data (tensor array) -- generated images from the generator
|
286 |
-
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
|
287 |
-
type (str) -- if we mix real and fake data or not [real | fake | mixed].
|
288 |
-
        constant (float)            -- the constant used in formula ( ||gradient||_2 - constant)^2
        lambda_gp (float)           -- weight for this loss

    Returns the gradient penalty loss
    """
    if lambda_gp > 0.0:
        if type == 'real':   # either use real images, fake images, or a linear interpolation of two.
            interpolatesv = real_data
        elif type == 'fake':
            interpolatesv = fake_data
        elif type == 'mixed':
            alpha = torch.rand(real_data.shape[0], 1, device=device)
            alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
            interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
        else:
            raise NotImplementedError('{} not implemented'.format(type))
        interpolatesv.requires_grad_(True)
        disc_interpolates = netD(interpolatesv)
        gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
                                        grad_outputs=torch.ones(disc_interpolates.size()).to(device),
                                        create_graph=True, retain_graph=True, only_inputs=True)
        gradients = gradients[0].view(real_data.size(0), -1)  # flat the data
        gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp  # added eps
        return gradient_penalty, gradients
    else:
        return 0.0, None


class ResnetGenerator(nn.Module):
    """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.

    We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
    """

    def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
        """Construct a Resnet-based generator

        Parameters:
            input_nc (int)      -- the number of channels in input images
            output_nc (int)     -- the number of channels in output images
            ngf (int)           -- the number of filters in the last conv layer
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers
            n_blocks (int)      -- the number of ResNet blocks
            padding_type (str)  -- the name of padding layer in conv layers: reflect | replicate | zero
        """
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.ReLU(True)]

        n_downsampling = 2
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2),
                      nn.ReLU(True)]

        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks

            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]

        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1,
                                         bias=use_bias),
                      norm_layer(int(ngf * mult / 2)),
                      nn.ReLU(True)]
        model += [nn.ReflectionPad2d(3)]
        model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model += [nn.Tanh()]

        self.model = nn.Sequential(*model)

    def forward(self, input):
        """Standard forward"""
        return self.model(input)


class ResnetBlock(nn.Module):
    """Define a Resnet block"""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block

        A resnet block is a conv block with skip connections
        We construct a conv block with build_conv_block function,
        and implement skip connections in <forward> function.
        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct a convolutional block.

        Parameters:
            dim (int)           -- the number of channels in the conv layer.
            padding_type (str)  -- the name of padding layer: reflect | replicate | zero
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
            use_bias (bool)     -- if the conv layer uses bias or not

        Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
        """
        conv_block = []
        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]

        p = 0
        if padding_type == 'reflect':
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]

        return nn.Sequential(*conv_block)

    def forward(self, x):
        """Forward function (with skip connections)"""
        out = x + self.conv_block(x)  # add skip connections
        return out


class UnetGenerator(nn.Module):
    """Create a Unet-based generator"""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet generator
        Parameters:
            input_nc (int)  -- the number of channels in input images
            output_nc (int) -- the number of channels in output images
            num_downs (int) -- the number of downsamplings in UNet. For example, # if |num_downs| == 7,
                               image of size 128x128 will become of size 1x1 # at the bottleneck
            ngf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer

        We construct the U-Net from the innermost layer to the outermost layer.
        It is a recursive process.
        """
        super(UnetGenerator, self).__init__()
        # construct unet structure
        unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True)  # add the innermost layer
        for i in range(num_downs - 5):  # add intermediate layers with ngf * 8 filters
            unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
        # gradually reduce the number of filters from ngf * 8 to ngf
        unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer)  # add the outermost layer

    def forward(self, input):
        """Standard forward"""
        return self.model(input)


class UnetSkipConnectionBlock(nn.Module):
    """Defines the Unet submodule with skip connection.
        X -------------------identity----------------------
        |-- downsampling -- |submodule| -- upsampling --|
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Construct a Unet submodule with skip connections.

        Parameters:
            outer_nc (int) -- the number of filters in the outer conv layer
            inner_nc (int) -- the number of filters in the inner conv layer
            input_nc (int) -- the number of channels in input images/features
            submodule (UnetSkipConnectionBlock) -- previously defined submodules
            outermost (bool)    -- if this module is the outermost module
            innermost (bool)    -- if this module is the innermost module
            norm_layer          -- normalization layer
            use_dropout (bool)  -- if use dropout layers.
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc
        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            if use_dropout:
                model = down + [submodule] + up + [nn.Dropout(0.5)]
            else:
                model = down + [submodule] + up

        self.model = nn.Sequential(*model)

    def forward(self, x):
        if self.outermost:
            return self.model(x)
        else:   # add skip connections
            return torch.cat([x, self.model(x)], 1)


class NLayerDiscriminator(nn.Module):
    """Defines a PatchGAN discriminator"""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Construct a PatchGAN discriminator

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            n_layers (int)  -- the number of conv layers in the discriminator
            norm_layer      -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kw = 4
        padw = 1
        sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
        nf_mult = 1
        nf_mult_prev = 1
        for n in range(1, n_layers):  # gradually increase the number of filters
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]

        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]

        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]  # output 1 channel prediction map
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        """Standard forward."""
        return self.model(input)


class PixelDiscriminator(nn.Module):
    """Defines a 1x1 PatchGAN discriminator (pixelGAN)"""

    def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
        """Construct a 1x1 PatchGAN discriminator

        Parameters:
            input_nc (int)  -- the number of channels in input images
            ndf (int)       -- the number of filters in the last conv layer
            norm_layer      -- normalization layer
        """
        super(PixelDiscriminator, self).__init__()
        if type(norm_layer) == functools.partial:  # no need to use bias as BatchNorm2d has affine parameters
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        self.net = [
            nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
            norm_layer(ndf * 2),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]

        self.net = nn.Sequential(*self.net)

    def forward(self, input):
        """Standard forward."""
        return self.net(input)
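
A minimal usage sketch of the deleted modules above, added for illustration only. It assumes the gradient-penalty helper defined earlier in this file is named `cal_gradient_penalty` with the signature suggested by its docstring, and the batch shapes, hyper-parameters, and WGAN-GP-style discriminator loss are illustrative assumptions, not part of the original repository.

```python
# Sketch: wiring the deleted generator/discriminator modules together.
# All shapes, hyper-parameters, and the loss formulation are assumptions.
import functools
import torch
import torch.nn as nn

norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
netG = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, norm_layer=norm_layer, n_blocks=9)
netD = NLayerDiscriminator(input_nc=3, ndf=64, n_layers=3, norm_layer=norm_layer)

real = torch.randn(2, 3, 256, 256)   # dummy image batch
fake = netG(real)                    # translated images

# Gradient penalty on a real/fake interpolation, as in WGAN-GP
# (assumes the helper name/signature from the docstring above).
gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device='cpu',
                             type='mixed', constant=1.0, lambda_gp=10.0)
loss_D = netD(fake.detach()).mean() - netD(real).mean() + gp
```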
spaces/Alphts/Robot/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Robot
emoji: 🚀
colorFrom: indigo
colorTo: indigo
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/stable_diffusion/ldm3d_diffusion.md
DELETED
@@ -1,37 +0,0 @@
<!--Copyright 2023 The Intel Labs Team Authors and HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-to-(RGB, depth)

LDM3D was proposed in [LDM3D: Latent Diffusion Model for 3D](https://huggingface.co/papers/2305.10853) by Gabriela Ben Melech Stan, Diana Wofk, Scottie Fox, Alex Redden, Will Saxton, Jean Yu, Estelle Aflalo, Shao-Yen Tseng, Fabio Nonato, Matthias Muller, and Vasudev Lal. LDM3D generates an image and a depth map from a given text prompt, unlike existing text-to-image diffusion models such as [Stable Diffusion](./stable_diffusion/overview) which only generate an image. With almost the same number of parameters, LDM3D learns a latent space that can compress both the RGB images and the depth maps.

The abstract from the paper is:

*This research paper proposes a Latent Diffusion Model for 3D (LDM3D) that generates both image and depth map data from a given text prompt, allowing users to generate RGBD images from text prompts. The LDM3D model is fine-tuned on a dataset of tuples containing an RGB image, depth map and caption, and validated through extensive experiments. We also develop an application called DepthFusion, which uses the generated RGB images and depth maps to create immersive and interactive 360-degree-view experiences using TouchDesigner. This technology has the potential to transform a wide range of industries, from entertainment and gaming to architecture and design. Overall, this paper presents a significant contribution to the field of generative AI and computer vision, and showcases the potential of LDM3D and DepthFusion to revolutionize content creation and digital experiences. A short video summarizing the approach can be found at [this url](https://t.ly/tdi2).*

<Tip>

Make sure to check out the Stable Diffusion [Tips](overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently!

</Tip>

## StableDiffusionLDM3DPipeline

[[autodoc]] StableDiffusionLDM3DPipeline
	- all
	- __call__

## StableDiffusionPipelineOutput

[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
	- all
	- __call__
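
A brief usage sketch of the pipeline documented above, added for illustration. The checkpoint name `Intel/ldm3d-4c`, the prompt, and the `rgb`/`depth` output fields are assumptions to verify against the current diffusers documentation rather than content from the deleted page.

```python
# Sketch: generating an RGB image and a depth map from one prompt with LDM3D.
import torch
from diffusers import StableDiffusionLDM3DPipeline

pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

output = pipe("a photo of an astronaut riding a horse on mars")
rgb_image, depth_image = output.rgb[0], output.depth[0]  # assumed output field names
rgb_image.save("astronaut_rgb.png")
depth_image.save("astronaut_depth.png")
```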
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddim/__init__.py
DELETED
@@ -1 +0,0 @@
from .pipeline_ddim import DDIMPipeline
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
DELETED
@@ -1,4 +0,0 @@
_base_ = './mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py'
# learning policy
lr_config = dict(step=[20, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
spaces/Andy1621/uniformer_image_detection/configs/lvis/README.md
DELETED
@@ -1,51 +0,0 @@
# LVIS dataset

## Introduction

[DATASET]

```latex
@inproceedings{gupta2019lvis,
  title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation},
  author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross},
  booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
  year={2019}
}
```

## Common Setting

* Please follow [install guide](../../docs/install.md#install-mmdetection) to install open-mmlab forked cocoapi first.
* Run following scripts to install our forked lvis-api (a config sketch for the oversample strategy follows the tables below).

```shell
# mmlvis is fully compatible with official lvis
pip install mmlvis
```

or

```shell
pip install -r requirements/optional.txt
```

* All experiments use oversample strategy [here](../../docs/tutorials/new_dataset.md#class-balanced-dataset) with oversample threshold `1e-3`.
* The size of LVIS v0.5 is half of COCO, so schedule `2x` in LVIS is roughly the same iterations as `1x` in COCO.

## Results and models of LVIS v0.5

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: |:--------: |
| R-50-FPN | pytorch | 2x | - | - | 26.1 | 25.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis-dbd06831.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_20200531_160435.log.json) |
| R-101-FPN | pytorch | 2x | - | - | 27.1 | 27.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis-54582ee2.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_20200601_134748.log.json) |
| X-101-32x4d-FPN | pytorch | 2x | - | - | 26.7 | 26.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis-3cf55ea2.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_20200531_221749.log.json) |
| X-101-64x4d-FPN | pytorch | 2x | - | - | 26.4 | 26.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis-1c99a5ad.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_20200601_194651.log.json) |

## Results and models of LVIS v1

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
| R-50-FPN | pytorch | 1x | 9.1 | - | 22.5 | 21.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-aa78ac3d.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_061305.log.json) |
| R-101-FPN | pytorch | 1x | 10.8 | - | 24.6 | 23.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-ec55ce32.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_070959.log.json) |
| X-101-32x4d-FPN | pytorch | 1x | 11.8 | - | 26.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-ebbc5c81.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_071317.log.json) |
| X-101-64x4d-FPN | pytorch | 1x | 14.6 | - | 27.2 | 25.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-43d9edfe.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200830_060206.log.json) |
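
The oversample strategy mentioned in the Common Setting section above is normally expressed as a dataset wrapper in an mmdetection config. The fragment below is a hypothetical illustration (not part of the deleted README); the dataset type, annotation paths, and image prefix are placeholder assumptions.

```python
# Sketch: class-balanced oversampling with threshold 1e-3 in an mmdet config.
data = dict(
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',      # wrapper that repeats rare-class images
        oversample_thr=1e-3,              # the 1e-3 threshold used by these experiments
        dataset=dict(
            type='LVISV05Dataset',        # placeholder wrapped dataset
            ann_file='data/lvis_v0.5/annotations/lvis_v0.5_train.json',
            img_prefix='data/lvis_v0.5/train2017/')))
```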
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/scnet_roi_head.py
DELETED
@@ -1,582 +0,0 @@
import torch
import torch.nn.functional as F

from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
                        merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from .cascade_roi_head import CascadeRoIHead


@HEADS.register_module()
class SCNetRoIHead(CascadeRoIHead):
    """RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_.

    Args:
        num_stages (int): number of cascade stages.
        stage_loss_weights (list): loss weight of cascade stages.
        semantic_roi_extractor (dict): config to init semantic roi extractor.
        semantic_head (dict): config to init semantic head.
        feat_relay_head (dict): config to init feature_relay_head.
        glbctx_head (dict): config to init global context head.
    """

    def __init__(self,
                 num_stages,
                 stage_loss_weights,
                 semantic_roi_extractor=None,
                 semantic_head=None,
                 feat_relay_head=None,
                 glbctx_head=None,
                 **kwargs):
        super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights,
                                           **kwargs)
        assert self.with_bbox and self.with_mask
        assert not self.with_shared_head  # shared head is not supported

        if semantic_head is not None:
            self.semantic_roi_extractor = build_roi_extractor(
                semantic_roi_extractor)
            self.semantic_head = build_head(semantic_head)

        if feat_relay_head is not None:
            self.feat_relay_head = build_head(feat_relay_head)

        if glbctx_head is not None:
            self.glbctx_head = build_head(glbctx_head)

    def init_mask_head(self, mask_roi_extractor, mask_head):
        """Initialize ``mask_head``"""
        if mask_roi_extractor is not None:
            self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
        self.mask_head = build_head(mask_head)

    def init_weights(self, pretrained):
        """Initialize the weights in head.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """
        for i in range(self.num_stages):
            if self.with_bbox:
                self.bbox_roi_extractor[i].init_weights()
                self.bbox_head[i].init_weights()
        if self.with_mask:
            self.mask_roi_extractor.init_weights()
            self.mask_head.init_weights()
        if self.with_semantic:
            self.semantic_head.init_weights()
        if self.with_glbctx:
            self.glbctx_head.init_weights()
        if self.with_feat_relay:
            self.feat_relay_head.init_weights()

    @property
    def with_semantic(self):
        """bool: whether the head has semantic head"""
        return hasattr(self,
                       'semantic_head') and self.semantic_head is not None

    @property
    def with_feat_relay(self):
        """bool: whether the head has feature relay head"""
        return (hasattr(self, 'feat_relay_head')
                and self.feat_relay_head is not None)

    @property
    def with_glbctx(self):
        """bool: whether the head has global context head"""
        return hasattr(self, 'glbctx_head') and self.glbctx_head is not None

    def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
        """Fuse global context feats with roi feats."""
        assert roi_feats.size(0) == rois.size(0)
        img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()
        fused_feats = torch.zeros_like(roi_feats)
        for img_id in img_inds:
            inds = (rois[:, 0] == img_id.item())
            fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
        return fused_feats

    def _slice_pos_feats(self, feats, sampling_results):
        """Get features from pos rois."""
        num_rois = [res.bboxes.size(0) for res in sampling_results]
        num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results]
        inds = torch.zeros(sum(num_rois), dtype=torch.bool)
        start = 0
        for i in range(len(num_rois)):
            start = 0 if i == 0 else start + num_rois[i - 1]
            stop = start + num_pos_rois[i]
            inds[start:stop] = 1
        sliced_feats = feats[inds]
        return sliced_feats

    def _bbox_forward(self,
                      stage,
                      x,
                      rois,
                      semantic_feat=None,
                      glbctx_feat=None):
        """Box head forward function used in both training and testing."""
        bbox_roi_extractor = self.bbox_roi_extractor[stage]
        bbox_head = self.bbox_head[stage]
        bbox_feats = bbox_roi_extractor(
            x[:len(bbox_roi_extractor.featmap_strides)], rois)
        if self.with_semantic and semantic_feat is not None:
            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                             rois)
            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
                bbox_semantic_feat = F.adaptive_avg_pool2d(
                    bbox_semantic_feat, bbox_feats.shape[-2:])
            bbox_feats += bbox_semantic_feat
        if self.with_glbctx and glbctx_feat is not None:
            bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
        cls_score, bbox_pred, relayed_feat = bbox_head(
            bbox_feats, return_shared_feat=True)

        bbox_results = dict(
            cls_score=cls_score,
            bbox_pred=bbox_pred,
            relayed_feat=relayed_feat)
        return bbox_results

    def _mask_forward(self,
                      x,
                      rois,
                      semantic_feat=None,
                      glbctx_feat=None,
                      relayed_feat=None):
        """Mask head forward function used in both training and testing."""
        mask_feats = self.mask_roi_extractor(
            x[:self.mask_roi_extractor.num_inputs], rois)
        if self.with_semantic and semantic_feat is not None:
            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
                                                             rois)
            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
                mask_semantic_feat = F.adaptive_avg_pool2d(
                    mask_semantic_feat, mask_feats.shape[-2:])
            mask_feats += mask_semantic_feat
        if self.with_glbctx and glbctx_feat is not None:
            mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
        if self.with_feat_relay and relayed_feat is not None:
            mask_feats = mask_feats + relayed_feat
        mask_pred = self.mask_head(mask_feats)
        mask_results = dict(mask_pred=mask_pred)

        return mask_results

    def _bbox_forward_train(self,
                            stage,
                            x,
                            sampling_results,
                            gt_bboxes,
                            gt_labels,
                            rcnn_train_cfg,
                            semantic_feat=None,
                            glbctx_feat=None):
        """Run forward function and calculate loss for box head in training."""
        bbox_head = self.bbox_head[stage]
        rois = bbox2roi([res.bboxes for res in sampling_results])
        bbox_results = self._bbox_forward(
            stage,
            x,
            rois,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat)

        bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
                                             gt_labels, rcnn_train_cfg)
        loss_bbox = bbox_head.loss(bbox_results['cls_score'],
                                   bbox_results['bbox_pred'], rois,
                                   *bbox_targets)

        bbox_results.update(
            loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
        return bbox_results

    def _mask_forward_train(self,
                            x,
                            sampling_results,
                            gt_masks,
                            rcnn_train_cfg,
                            semantic_feat=None,
                            glbctx_feat=None,
                            relayed_feat=None):
        """Run forward function and calculate loss for mask head in
        training."""
        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
        mask_results = self._mask_forward(
            x,
            pos_rois,
            semantic_feat=semantic_feat,
            glbctx_feat=glbctx_feat,
            relayed_feat=relayed_feat)

        mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
                                                  rcnn_train_cfg)
        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
        loss_mask = self.mask_head.loss(mask_results['mask_pred'],
                                        mask_targets, pos_labels)

        mask_results = loss_mask
        return mask_results

    def forward_train(self,
                      x,
                      img_metas,
                      proposal_list,
                      gt_bboxes,
                      gt_labels,
                      gt_bboxes_ignore=None,
                      gt_masks=None,
                      gt_semantic_seg=None):
        """
        Args:
            x (list[Tensor]): list of multi-level img features.

            img_metas (list[dict]): list of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmdet/datasets/pipelines/formatting.py:Collect`.

            proposal_list (list[Tensors]): list of region proposals.

            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.

            gt_labels (list[Tensor]): class indices corresponding to each box

            gt_bboxes_ignore (None, list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.

            gt_masks (None, Tensor) : true segmentation masks for each box
                used if the architecture supports a segmentation task.

            gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
                used if the architecture supports semantic segmentation task.

        Returns:
            dict[str, Tensor]: a dictionary of loss components
        """
        losses = dict()

        # semantic segmentation branch
        if self.with_semantic:
            semantic_pred, semantic_feat = self.semantic_head(x)
            loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
            losses['loss_semantic_seg'] = loss_seg
        else:
            semantic_feat = None

        # global context branch
        if self.with_glbctx:
            mc_pred, glbctx_feat = self.glbctx_head(x)
            loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
            losses['loss_glbctx'] = loss_glbctx
        else:
            glbctx_feat = None

        for i in range(self.num_stages):
            self.current_stage = i
            rcnn_train_cfg = self.train_cfg[i]
            lw = self.stage_loss_weights[i]

            # assign gts and sample proposals
            sampling_results = []
            bbox_assigner = self.bbox_assigner[i]
            bbox_sampler = self.bbox_sampler[i]
            num_imgs = len(img_metas)
            if gt_bboxes_ignore is None:
                gt_bboxes_ignore = [None for _ in range(num_imgs)]

            for j in range(num_imgs):
                assign_result = bbox_assigner.assign(proposal_list[j],
                                                     gt_bboxes[j],
                                                     gt_bboxes_ignore[j],
                                                     gt_labels[j])
                sampling_result = bbox_sampler.sample(
                    assign_result,
                    proposal_list[j],
                    gt_bboxes[j],
                    gt_labels[j],
                    feats=[lvl_feat[j][None] for lvl_feat in x])
                sampling_results.append(sampling_result)

            bbox_results = \
                self._bbox_forward_train(
                    i, x, sampling_results, gt_bboxes, gt_labels,
                    rcnn_train_cfg, semantic_feat, glbctx_feat)
            roi_labels = bbox_results['bbox_targets'][0]

            for name, value in bbox_results['loss_bbox'].items():
                losses[f's{i}.{name}'] = (
                    value * lw if 'loss' in name else value)

            # refine boxes
            if i < self.num_stages - 1:
                pos_is_gts = [res.pos_is_gt for res in sampling_results]
                with torch.no_grad():
                    proposal_list = self.bbox_head[i].refine_bboxes(
                        bbox_results['rois'], roi_labels,
                        bbox_results['bbox_pred'], pos_is_gts, img_metas)

        if self.with_feat_relay:
            relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],
                                                 sampling_results)
            relayed_feat = self.feat_relay_head(relayed_feat)
        else:
            relayed_feat = None

        mask_results = self._mask_forward_train(x, sampling_results, gt_masks,
                                                rcnn_train_cfg, semantic_feat,
                                                glbctx_feat, relayed_feat)
        mask_lw = sum(self.stage_loss_weights)
        losses['loss_mask'] = mask_lw * mask_results['loss_mask']

        return losses

    def simple_test(self, x, proposal_list, img_metas, rescale=False):
        """Test without augmentation."""
        if self.with_semantic:
            _, semantic_feat = self.semantic_head(x)
        else:
            semantic_feat = None

        if self.with_glbctx:
            mc_pred, glbctx_feat = self.glbctx_head(x)
        else:
            glbctx_feat = None

        num_imgs = len(proposal_list)
        img_shapes = tuple(meta['img_shape'] for meta in img_metas)
        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)

        # "ms" in variable names means multi-stage
        ms_scores = []
        rcnn_test_cfg = self.test_cfg

        rois = bbox2roi(proposal_list)
        for i in range(self.num_stages):
            bbox_head = self.bbox_head[i]
            bbox_results = self._bbox_forward(
                i,
                x,
                rois,
                semantic_feat=semantic_feat,
                glbctx_feat=glbctx_feat)
            # split batch bbox prediction back to each image
            cls_score = bbox_results['cls_score']
            bbox_pred = bbox_results['bbox_pred']
            num_proposals_per_img = tuple(len(p) for p in proposal_list)
            rois = rois.split(num_proposals_per_img, 0)
            cls_score = cls_score.split(num_proposals_per_img, 0)
            bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
            ms_scores.append(cls_score)

            if i < self.num_stages - 1:
                bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
                rois = torch.cat([
                    bbox_head.regress_by_class(rois[i], bbox_label[i],
                                               bbox_pred[i], img_metas[i])
                    for i in range(num_imgs)
                ])

        # average scores of each image by stages
        cls_score = [
            sum([score[i] for score in ms_scores]) / float(len(ms_scores))
            for i in range(num_imgs)
        ]

        # apply bbox post-processing to each image individually
        det_bboxes = []
        det_labels = []
        for i in range(num_imgs):
            det_bbox, det_label = self.bbox_head[-1].get_bboxes(
                rois[i],
                cls_score[i],
                bbox_pred[i],
                img_shapes[i],
                scale_factors[i],
                rescale=rescale,
                cfg=rcnn_test_cfg)
            det_bboxes.append(det_bbox)
            det_labels.append(det_label)
        det_bbox_results = [
            bbox2result(det_bboxes[i], det_labels[i],
                        self.bbox_head[-1].num_classes)
            for i in range(num_imgs)
        ]

        if self.with_mask:
            if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
                mask_classes = self.mask_head.num_classes
                det_segm_results = [[[] for _ in range(mask_classes)]
                                    for _ in range(num_imgs)]
            else:
                if rescale and not isinstance(scale_factors[0], float):
                    scale_factors = [
                        torch.from_numpy(scale_factor).to(det_bboxes[0].device)
                        for scale_factor in scale_factors
                    ]
                _bboxes = [
                    det_bboxes[i][:, :4] *
                    scale_factors[i] if rescale else det_bboxes[i]
                    for i in range(num_imgs)
                ]
                mask_rois = bbox2roi(_bboxes)

                # get relay feature on mask_rois
                bbox_results = self._bbox_forward(
                    -1,
                    x,
                    mask_rois,
                    semantic_feat=semantic_feat,
                    glbctx_feat=glbctx_feat)
                relayed_feat = bbox_results['relayed_feat']
                relayed_feat = self.feat_relay_head(relayed_feat)

                mask_results = self._mask_forward(
                    x,
                    mask_rois,
                    semantic_feat=semantic_feat,
                    glbctx_feat=glbctx_feat,
                    relayed_feat=relayed_feat)
                mask_pred = mask_results['mask_pred']

                # split batch mask prediction back to each image
                num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
                mask_preds = mask_pred.split(num_bbox_per_img, 0)

                # apply mask post-processing to each image individually
                det_segm_results = []
                for i in range(num_imgs):
                    if det_bboxes[i].shape[0] == 0:
                        det_segm_results.append(
                            [[] for _ in range(self.mask_head.num_classes)])
                    else:
                        segm_result = self.mask_head.get_seg_masks(
                            mask_preds[i], _bboxes[i], det_labels[i],
                            self.test_cfg, ori_shapes[i], scale_factors[i],
                            rescale)
                        det_segm_results.append(segm_result)

        # return results
        if self.with_mask:
            return list(zip(det_bbox_results, det_segm_results))
        else:
            return det_bbox_results

    def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
        if self.with_semantic:
            semantic_feats = [
                self.semantic_head(feat)[1] for feat in img_feats
            ]
        else:
            semantic_feats = [None] * len(img_metas)

        if self.with_glbctx:
            glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats]
        else:
            glbctx_feats = [None] * len(img_metas)

        rcnn_test_cfg = self.test_cfg
        aug_bboxes = []
        aug_scores = []
        for x, img_meta, semantic_feat, glbctx_feat in zip(
                img_feats, img_metas, semantic_feats, glbctx_feats):
            # only one image in the batch
            img_shape = img_meta[0]['img_shape']
            scale_factor = img_meta[0]['scale_factor']
            flip = img_meta[0]['flip']

            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
                                     scale_factor, flip)
            # "ms" in variable names means multi-stage
            ms_scores = []

            rois = bbox2roi([proposals])
            for i in range(self.num_stages):
                bbox_head = self.bbox_head[i]
                bbox_results = self._bbox_forward(
                    i,
                    x,
                    rois,
                    semantic_feat=semantic_feat,
                    glbctx_feat=glbctx_feat)
                ms_scores.append(bbox_results['cls_score'])
                if i < self.num_stages - 1:
                    bbox_label = bbox_results['cls_score'].argmax(dim=1)
                    rois = bbox_head.regress_by_class(
                        rois, bbox_label, bbox_results['bbox_pred'],
                        img_meta[0])

            cls_score = sum(ms_scores) / float(len(ms_scores))
            bboxes, scores = self.bbox_head[-1].get_bboxes(
                rois,
                cls_score,
                bbox_results['bbox_pred'],
                img_shape,
                scale_factor,
                rescale=False,
                cfg=None)
            aug_bboxes.append(bboxes)
            aug_scores.append(scores)

        # after merging, bboxes will be rescaled to the original image size
        merged_bboxes, merged_scores = merge_aug_bboxes(
            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
                                                rcnn_test_cfg.score_thr,
                                                rcnn_test_cfg.nms,
                                                rcnn_test_cfg.max_per_img)

        det_bbox_results = bbox2result(det_bboxes, det_labels,
                                       self.bbox_head[-1].num_classes)

        if self.with_mask:
            if det_bboxes.shape[0] == 0:
                det_segm_results = [[]
                                    for _ in range(self.mask_head.num_classes)]
            else:
                aug_masks = []
                for x, img_meta, semantic_feat, glbctx_feat in zip(
                        img_feats, img_metas, semantic_feats, glbctx_feats):
                    img_shape = img_meta[0]['img_shape']
                    scale_factor = img_meta[0]['scale_factor']
                    flip = img_meta[0]['flip']
                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
                                           scale_factor, flip)
                    mask_rois = bbox2roi([_bboxes])
                    # get relay feature on mask_rois
                    bbox_results = self._bbox_forward(
                        -1,
                        x,
                        mask_rois,
                        semantic_feat=semantic_feat,
                        glbctx_feat=glbctx_feat)
                    relayed_feat = bbox_results['relayed_feat']
                    relayed_feat = self.feat_relay_head(relayed_feat)
                    mask_results = self._mask_forward(
                        x,
                        mask_rois,
                        semantic_feat=semantic_feat,
                        glbctx_feat=glbctx_feat,
                        relayed_feat=relayed_feat)
                    mask_pred = mask_results['mask_pred']
                    aug_masks.append(mask_pred.sigmoid().cpu().numpy())
                merged_masks = merge_aug_masks(aug_masks, img_metas,
                                               self.test_cfg)
                ori_shape = img_metas[0][0]['ori_shape']
                det_segm_results = self.mask_head.get_seg_masks(
                    merged_masks,
                    det_bboxes,
                    det_labels,
                    rcnn_test_cfg,
                    ori_shape,
                    scale_factor=1.0,
                    rescale=False)
            return [(det_bbox_results, det_segm_results)]
        else:
            return [det_bbox_results]
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py
DELETED
@@ -1,9 +0,0 @@
_base_ = './fcn_hr18_512x512_20k_voc12aug.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w18_small',
    backbone=dict(
        extra=dict(
            stage1=dict(num_blocks=(2, )),
            stage2=dict(num_blocks=(2, 2)),
            stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
            stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
spaces/Andy1621/uniformer_image_segmentation/configs/sem_fpn/README.md
DELETED
@@ -1,35 +0,0 @@
# Panoptic Feature Pyramid Networks

## Introduction

<!-- [ALGORITHM] -->

```latex
@article{Kirillov_2019,
   title={Panoptic Feature Pyramid Networks},
   ISBN={9781728132938},
   url={http://dx.doi.org/10.1109/CVPR.2019.00656},
   DOI={10.1109/cvpr.2019.00656},
   journal={2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
   publisher={IEEE},
   author={Kirillov, Alexander and Girshick, Ross and He, Kaiming and Dollar, Piotr},
   year={2019},
   month={Jun}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| FPN | R-50 | 512x1024 | 80000 | 2.8 | 13.54 | 74.52 | 76.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes-20200717_021437.log.json) |
| FPN | R-101 | 512x1024 | 80000 | 3.9 | 10.29 | 75.80 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes-20200717_012416.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| FPN | R-50 | 512x512 | 160000 | 4.9 | 55.77 | 37.49 | 39.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k-20200718_131734.log.json) |
| FPN | R-101 | 512x512 | 160000 | 5.9 | 40.58 | 39.35 | 40.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth) | [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k-20200718_131734.log.json) |
spaces/Anonymous-123/ImageNet-Editing/editing_diffusion/guided_diffusion/guided_diffusion/train_util.py
DELETED
@@ -1,301 +0,0 @@
import copy
import functools
import os

import blobfile as bf
import torch as th
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW

from . import dist_util, logger
from .fp16_util import MixedPrecisionTrainer
from .nn import update_ema
from .resample import LossAwareSampler, UniformSampler

# For ImageNet experiments, this was a good default value.
# We found that the lg_loss_scale quickly climbed to
# 20-21 within the first ~1K steps of training.
INITIAL_LOG_LOSS_SCALE = 20.0


class TrainLoop:
    def __init__(
        self,
        *,
        model,
        diffusion,
        data,
        batch_size,
        microbatch,
        lr,
        ema_rate,
        log_interval,
        save_interval,
        resume_checkpoint,
        use_fp16=False,
        fp16_scale_growth=1e-3,
        schedule_sampler=None,
        weight_decay=0.0,
        lr_anneal_steps=0,
    ):
        self.model = model
        self.diffusion = diffusion
        self.data = data
        self.batch_size = batch_size
        self.microbatch = microbatch if microbatch > 0 else batch_size
        self.lr = lr
        self.ema_rate = (
            [ema_rate]
            if isinstance(ema_rate, float)
            else [float(x) for x in ema_rate.split(",")]
        )
        self.log_interval = log_interval
        self.save_interval = save_interval
        self.resume_checkpoint = resume_checkpoint
        self.use_fp16 = use_fp16
        self.fp16_scale_growth = fp16_scale_growth
        self.schedule_sampler = schedule_sampler or UniformSampler(diffusion)
        self.weight_decay = weight_decay
        self.lr_anneal_steps = lr_anneal_steps

        self.step = 0
        self.resume_step = 0
        self.global_batch = self.batch_size * dist.get_world_size()

        self.sync_cuda = th.cuda.is_available()

        self._load_and_sync_parameters()
        self.mp_trainer = MixedPrecisionTrainer(
            model=self.model,
            use_fp16=self.use_fp16,
            fp16_scale_growth=fp16_scale_growth,
        )

        self.opt = AdamW(
            self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
        )
        if self.resume_step:
            self._load_optimizer_state()
            # Model was resumed, either due to a restart or a checkpoint
            # being specified at the command line.
            self.ema_params = [
                self._load_ema_parameters(rate) for rate in self.ema_rate
            ]
        else:
            self.ema_params = [
                copy.deepcopy(self.mp_trainer.master_params)
                for _ in range(len(self.ema_rate))
            ]

        if th.cuda.is_available():
            self.use_ddp = True
            self.ddp_model = DDP(
                self.model,
                device_ids=[dist_util.dev()],
                output_device=dist_util.dev(),
                broadcast_buffers=False,
                bucket_cap_mb=128,
                find_unused_parameters=False,
            )
        else:
            if dist.get_world_size() > 1:
                logger.warn(
                    "Distributed training requires CUDA. "
                    "Gradients will not be synchronized properly!"
                )
            self.use_ddp = False
            self.ddp_model = self.model

    def _load_and_sync_parameters(self):
        resume_checkpoint = find_resume_checkpoint() or self.resume_checkpoint

        if resume_checkpoint:
            self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
            if dist.get_rank() == 0:
                logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
                self.model.load_state_dict(
                    dist_util.load_state_dict(
                        resume_checkpoint, map_location=dist_util.dev()
                    )
                )

        dist_util.sync_params(self.model.parameters())

    def _load_ema_parameters(self, rate):
        ema_params = copy.deepcopy(self.mp_trainer.master_params)

        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        ema_checkpoint = find_ema_checkpoint(main_checkpoint, self.resume_step, rate)
        if ema_checkpoint:
            if dist.get_rank() == 0:
                logger.log(f"loading EMA from checkpoint: {ema_checkpoint}...")
                state_dict = dist_util.load_state_dict(
                    ema_checkpoint, map_location=dist_util.dev()
                )
                ema_params = self.mp_trainer.state_dict_to_master_params(state_dict)

        dist_util.sync_params(ema_params)
        return ema_params

    def _load_optimizer_state(self):
        main_checkpoint = find_resume_checkpoint() or self.resume_checkpoint
        opt_checkpoint = bf.join(
            bf.dirname(main_checkpoint), f"opt{self.resume_step:06}.pt"
        )
        if bf.exists(opt_checkpoint):
            logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
            state_dict = dist_util.load_state_dict(
                opt_checkpoint, map_location=dist_util.dev()
            )
            self.opt.load_state_dict(state_dict)

    def run_loop(self):
        while (
            not self.lr_anneal_steps
            or self.step + self.resume_step < self.lr_anneal_steps
        ):
            batch, cond = next(self.data)
            self.run_step(batch, cond)
            if self.step % self.log_interval == 0:
                logger.dumpkvs()
            if self.step % self.save_interval == 0:
                self.save()
                # Run for a finite amount of time in integration tests.
                if os.environ.get("DIFFUSION_TRAINING_TEST", "") and self.step > 0:
                    return
            self.step += 1
        # Save the last checkpoint if it wasn't already saved.
        if (self.step - 1) % self.save_interval != 0:
            self.save()

    def run_step(self, batch, cond):
        self.forward_backward(batch, cond)
        took_step = self.mp_trainer.optimize(self.opt)
        if took_step:
            self._update_ema()
        self._anneal_lr()
        self.log_step()

    def forward_backward(self, batch, cond):
        self.mp_trainer.zero_grad()
        for i in range(0, batch.shape[0], self.microbatch):
            micro = batch[i : i + self.microbatch].to(dist_util.dev())
            micro_cond = {
                k: v[i : i + self.microbatch].to(dist_util.dev())
                for k, v in cond.items()
            }
            last_batch = (i + self.microbatch) >= batch.shape[0]
            t, weights = self.schedule_sampler.sample(micro.shape[0], dist_util.dev())

            compute_losses = functools.partial(
                self.diffusion.training_losses,
                self.ddp_model,
                micro,
                t,
                model_kwargs=micro_cond,
            )

            if last_batch or not self.use_ddp:
                losses = compute_losses()
            else:
                with self.ddp_model.no_sync():
                    losses = compute_losses()

            if isinstance(self.schedule_sampler, LossAwareSampler):
                self.schedule_sampler.update_with_local_losses(
                    t, losses["loss"].detach()
                )

            loss = (losses["loss"] * weights).mean()
            log_loss_dict(
                self.diffusion, t, {k: v * weights for k, v in losses.items()}
            )
            self.mp_trainer.backward(loss)

    def _update_ema(self):
        for rate, params in zip(self.ema_rate, self.ema_params):
            update_ema(params, self.mp_trainer.master_params, rate=rate)

    def _anneal_lr(self):
        if not self.lr_anneal_steps:
            return
        frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
        lr = self.lr * (1 - frac_done)
        for param_group in self.opt.param_groups:
            param_group["lr"] = lr

    def log_step(self):
        logger.logkv("step", self.step + self.resume_step)
        logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)

    def save(self):
        def save_checkpoint(rate, params):
            state_dict = self.mp_trainer.master_params_to_state_dict(params)
            if dist.get_rank() == 0:
                logger.log(f"saving model {rate}...")
                if not rate:
                    filename = f"model{(self.step+self.resume_step):06d}.pt"
                else:
                    filename = f"ema_{rate}_{(self.step+self.resume_step):06d}.pt"
                with bf.BlobFile(bf.join(get_blob_logdir(), filename), "wb") as f:
                    th.save(state_dict, f)

        save_checkpoint(0, self.mp_trainer.master_params)
        for rate, params in zip(self.ema_rate, self.ema_params):
            save_checkpoint(rate, params)

        if dist.get_rank() == 0:
            with bf.BlobFile(
                bf.join(get_blob_logdir(), f"opt{(self.step+self.resume_step):06d}.pt"),
                "wb",
            ) as f:
                th.save(self.opt.state_dict(), f)

        dist.barrier()


def parse_resume_step_from_filename(filename):
    """
    Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
    checkpoint's number of steps.
    """
    split = filename.split("model")
    if len(split) < 2:
        return 0
    split1 = split[-1].split(".")[0]
    try:
        return int(split1)
    except ValueError:
        return 0


def get_blob_logdir():
    # You can change this to be a separate path to save checkpoints to
    # a blobstore or some external drive.
    return logger.get_dir()


def find_resume_checkpoint():
    # On your infrastructure, you may want to override this to automatically
    # discover the latest checkpoint on your blob storage, etc.
    return None


def find_ema_checkpoint(main_checkpoint, step, rate):
    if main_checkpoint is None:
        return None
    filename = f"ema_{rate}_{(step):06d}.pt"
    path = bf.join(bf.dirname(main_checkpoint), filename)
    if bf.exists(path):
        return path
    return None


def log_loss_dict(diffusion, ts, losses):
    for key, values in losses.items():
        logger.logkv_mean(key, values.mean().item())
        # Log the quantiles (four quartiles, in particular).
        for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
            quartile = int(4 * sub_t / diffusion.num_timesteps)
            logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
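The TrainLoop above is normally driven by a small launcher script. The sketch below shows how such a driver is typically wired up; the script_util, image_datasets and resample helpers are assumed to exist as in the upstream guided_diffusion package and are not part of the deleted file, so treat this as an illustrative sketch rather than the Space's actual entry point.

```python
# Minimal driver sketch (assumed guided_diffusion-style helpers; adjust imports to your checkout).
from guided_diffusion import dist_util, logger
from guided_diffusion.image_datasets import load_data
from guided_diffusion.resample import create_named_schedule_sampler
from guided_diffusion.script_util import create_model_and_diffusion, model_and_diffusion_defaults
from guided_diffusion.train_util import TrainLoop

dist_util.setup_dist()                       # initialise torch.distributed
logger.configure()                           # log dir is also what get_blob_logdir() returns

model, diffusion = create_model_and_diffusion(**model_and_diffusion_defaults())
model.to(dist_util.dev())

data = load_data(data_dir="./images", batch_size=8, image_size=64, class_cond=False)

TrainLoop(
    model=model,
    diffusion=diffusion,
    data=data,                               # iterator yielding (batch, cond) pairs
    batch_size=8,
    microbatch=-1,                           # <= 0 means "no microbatching"
    lr=1e-4,
    ema_rate="0.9999",
    log_interval=10,
    save_interval=10_000,
    resume_checkpoint="",
    use_fp16=False,
    schedule_sampler=create_named_schedule_sampler("uniform", diffusion),
).run_loop()
```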
spaces/Araloak/fz/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Fz
emoji: 🏆
colorFrom: gray
colorTo: blue
sdk: gradio
sdk_version: 3.27.0
app_file: app.py
pinned: false
license: openrail
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AsakuraMizu/moe-tts/text/cleaners.py
DELETED
@@ -1,150 +0,0 @@
import re
import pyopenjtalk

pyopenjtalk._lazy_init()


def japanese_cleaners(text):
    from text.japanese import japanese_to_romaji_with_accent
    text = japanese_to_romaji_with_accent(text)
    text = re.sub(r'([A-Za-z])$', r'\1.', text)
    return text


def japanese_cleaners2(text):
    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')


def korean_cleaners(text):
    '''Pipeline for Korean text'''
    from text.korean import latin_to_hangul, number_to_hangul, divide_hangul
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    text = divide_hangul(text)
    text = re.sub(r'([\u3131-\u3163])$', r'\1.', text)
    return text


def chinese_cleaners(text):
    '''Pipeline for Chinese text'''
    from text.mandarin import number_to_chinese, chinese_to_bopomofo, latin_to_bopomofo
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    text = re.sub(r'([ˉˊˇˋ˙])$', r'\1。', text)
    return text


def zh_ja_mixture_cleaners(text):
    from text.mandarin import chinese_to_romaji
    from text.japanese import japanese_to_romaji_with_accent
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_romaji(x.group(1)) + ' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_romaji_with_accent(
        x.group(1)).replace('ts', 'ʦ').replace('u', 'ɯ').replace('...', '…') + ' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def sanskrit_cleaners(text):
    text = text.replace('॥', '।').replace('ॐ', 'ओम्')
    if text[-1] != '।':
        text += ' ।'
    return text


def cjks_cleaners(text):
    from text.mandarin import chinese_to_lazy_ipa
    from text.japanese import japanese_to_ipa
    from text.korean import korean_to_lazy_ipa
    from text.sanskrit import devanagari_to_ipa
    from text.english import english_to_lazy_ipa
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_lazy_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[KO\](.*?)\[KO\]',
                  lambda x: korean_to_lazy_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[SA\](.*?)\[SA\]',
                  lambda x: devanagari_to_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_lazy_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def cjke_cleaners(text):
    from text.mandarin import chinese_to_lazy_ipa
    from text.japanese import japanese_to_ipa
    from text.korean import korean_to_ipa
    from text.english import english_to_ipa2
    text = re.sub(r'\[ZH\](.*?)\[ZH\]', lambda x: chinese_to_lazy_ipa(x.group(1)).replace(
        'ʧ', 'tʃ').replace('ʦ', 'ts').replace('ɥan', 'ɥæn') + ' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]', lambda x: japanese_to_ipa(x.group(1)).replace('ʧ', 'tʃ').replace(
        'ʦ', 'ts').replace('ɥan', 'ɥæn').replace('ʥ', 'dz') + ' ', text)
    text = re.sub(r'\[KO\](.*?)\[KO\]',
                  lambda x: korean_to_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]', lambda x: english_to_ipa2(x.group(1)).replace('ɑ', 'a').replace(
        'ɔ', 'o').replace('ɛ', 'e').replace('ɪ', 'i').replace('ʊ', 'u') + ' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def cjke_cleaners2(text):
    from text.mandarin import chinese_to_ipa
    from text.japanese import japanese_to_ipa2
    from text.korean import korean_to_ipa
    from text.english import english_to_ipa2
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa2(x.group(1)) + ' ', text)
    text = re.sub(r'\[KO\](.*?)\[KO\]',
                  lambda x: korean_to_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_ipa2(x.group(1)) + ' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def thai_cleaners(text):
    from text.thai import num_to_thai, latin_to_thai
    text = num_to_thai(text)
    text = latin_to_thai(text)
    return text


def shanghainese_cleaners(text):
    from text.shanghainese import shanghainese_to_ipa
    text = shanghainese_to_ipa(text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text


def chinese_dialect_cleaners(text):
    from text.mandarin import chinese_to_ipa2
    from text.japanese import japanese_to_ipa3
    from text.shanghainese import shanghainese_to_ipa
    from text.cantonese import cantonese_to_ipa
    from text.english import english_to_lazy_ipa2
    from text.ngu_dialect import ngu_dialect_to_ipa
    text = re.sub(r'\[ZH\](.*?)\[ZH\]',
                  lambda x: chinese_to_ipa2(x.group(1)) + ' ', text)
    text = re.sub(r'\[JA\](.*?)\[JA\]',
                  lambda x: japanese_to_ipa3(x.group(1)).replace('Q', 'ʔ') + ' ', text)
    text = re.sub(r'\[SH\](.*?)\[SH\]', lambda x: shanghainese_to_ipa(x.group(1)).replace('1', '˥˧').replace('5',
                  '˧˧˦').replace(
        '6', '˩˩˧').replace('7', '˥').replace('8', '˩˨').replace('ᴀ', 'ɐ').replace('ᴇ', 'e') + ' ', text)
    text = re.sub(r'\[GD\](.*?)\[GD\]',
                  lambda x: cantonese_to_ipa(x.group(1)) + ' ', text)
    text = re.sub(r'\[EN\](.*?)\[EN\]',
                  lambda x: english_to_lazy_ipa2(x.group(1)) + ' ', text)
    text = re.sub(r'\[([A-Z]{2})\](.*?)\[\1\]', lambda x: ngu_dialect_to_ipa(x.group(2), x.group(
        1)).replace('ʣ', 'dz').replace('ʥ', 'dʑ').replace('ʦ', 'ts').replace('ʨ', 'tɕ') + ' ', text)
    text = re.sub(r'\s+$', '', text)
    text = re.sub(r'([^\.,!\?\-…~])$', r'\1.', text)
    return text
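All of the cleaners above share one contract: they take a raw string, optionally containing [ZH]…[ZH]-style language tags, and return a phonemized string. A minimal dispatch sketch is shown below; the _clean_text wrapper is an assumption for illustration (mirroring the usual VITS-style frontend) and requires the surrounding text package and its dependencies such as pyopenjtalk to be installed.

```python
# Minimal dispatch sketch (assumed wrapper, not part of the deleted module).
from text import cleaners

def _clean_text(text, cleaner_names):
    for name in cleaner_names:
        cleaner = getattr(cleaners, name, None)
        if cleaner is None:
            raise ValueError(f"Unknown cleaner: {name}")
        text = cleaner(text)
    return text

# Mixed Chinese/Japanese/English input, each span wrapped in its language tag.
print(_clean_text("[ZH]你好[ZH][JA]こんにちは[JA][EN]hello[EN]", ["cjke_cleaners2"]))
```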
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/wheel.py
DELETED
@@ -1,34 +0,0 @@
from pip._vendor.packaging.utils import canonicalize_name

from pip._internal.distributions.base import AbstractDistribution
from pip._internal.index.package_finder import PackageFinder
from pip._internal.metadata import (
    BaseDistribution,
    FilesystemWheel,
    get_wheel_distribution,
)


class WheelDistribution(AbstractDistribution):
    """Represents a wheel distribution.

    This does not need any preparation as wheels can be directly unpacked.
    """

    def get_metadata_distribution(self) -> BaseDistribution:
        """Loads the metadata from the wheel file into memory and returns a
        Distribution that uses it, not relying on the wheel file or
        requirement.
        """
        assert self.req.local_file_path, "Set as part of preparation during download"
        assert self.req.name, "Wheels are never unnamed"
        wheel = FilesystemWheel(self.req.local_file_path)
        return get_wheel_distribution(wheel, canonicalize_name(self.req.name))

    def prepare_distribution_metadata(
        self,
        finder: PackageFinder,
        build_isolation: bool,
        check_build_deps: bool,
    ) -> None:
        pass
spaces/Bambicita/rvc-models/config.py
DELETED
@@ -1,88 +0,0 @@
######################## Hardware settings ########################

# Set to cuda:x, cpu or mps; x is the GPU index. Only NVIDIA GPUs / Apple Silicon are supported for acceleration.
device = "cuda:0"

# For NVIDIA 9/10/20/30/40-series GPUs just leave this True; it does not affect quality, and >=20-series cards get a speedup.
is_half = True

# 0 (default) uses all threads; set a number to limit CPU usage.
n_cpu = 0

######################## Hardware settings ########################


################## Parameter handling logic below, do not modify ##################

######################## Command-line arguments ########################
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=7865, help="Listen port")
parser.add_argument("--pycmd", type=str, default="python", help="Python command")
parser.add_argument("--colab", action="store_true", help="Launch in colab")
parser.add_argument(
    "--noparallel", action="store_true", help="Disable parallel processing"
)
parser.add_argument(
    "--noautoopen", action="store_true", help="Do not open in browser automatically"
)
cmd_opts, unknown = parser.parse_known_args()

python_cmd = cmd_opts.pycmd
listen_port = cmd_opts.port
iscolab = cmd_opts.colab
noparallel = cmd_opts.noparallel
noautoopen = cmd_opts.noautoopen
######################## Command-line arguments ########################

import sys
import torch


# has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
# check `getattr` and try it for compatibility
def has_mps() -> bool:
    if sys.platform != "darwin":
        return False
    else:
        if not getattr(torch, "has_mps", False):
            return False
        try:
            torch.zeros(1).to(torch.device("mps"))
            return True
        except Exception:
            return False


if not torch.cuda.is_available():
    if has_mps():
        print("没有发现支持的N卡, 使用MPS进行推理")
        device = "mps"
    else:
        print("没有发现支持的N卡, 使用CPU进行推理")
        device = "cpu"
        is_half = False

if device not in ["cpu", "mps"]:
    gpu_name = torch.cuda.get_device_name(int(device.split(":")[-1]))
    if "16" in gpu_name or "MX" in gpu_name:
        print("16系显卡/MX系显卡强制单精度")
        is_half = False

from multiprocessing import cpu_count

if n_cpu == 0:
    n_cpu = cpu_count()
if is_half:
    # Settings for 6 GB of VRAM
    x_pad = 3
    x_query = 10
    x_center = 60
    x_max = 65
else:
    # Settings for 5 GB of VRAM
    x_pad = 1
    x_query = 6
    x_center = 38
    x_max = 41
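Since everything in this module is resolved at import time, downstream code only needs to import it and read the module-level globals. The consumer function below is a hypothetical illustration, not part of the repository.

```python
# Hypothetical consumer sketch: inference code just reads the resolved globals.
import config

def describe_runtime():
    print(f"device={config.device}, half precision={config.is_half}, workers={config.n_cpu}")
    print(f"pipeline windows: pad={config.x_pad}, query={config.x_query}, "
          f"center={config.x_center}, max={config.x_max}")

describe_runtime()
```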
spaces/BertChristiaens/blip-diffusion/app.py
DELETED
@@ -1,112 +0,0 @@
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from PIL import Image
from typing import Union
import random
import numpy as np
import os
import time

st.set_page_config(layout="wide")


def create_edit_existing_image_tab():
    st.write("# Edit existing image")

    cols = st.columns(2)
    with cols[0]:
        image_source = st.file_uploader("Upload source image", type=["png", "jpg", "jpeg", "webp"], key="upload_source_edit_existing_image")
        st.text_input("Source object", key="text_input_source_edit_existing_image")
        st.image('content/dog.png')
    with cols[1]:
        image_target = st.file_uploader("Upload target image", type=["png", "jpg", "jpeg", "webp"], key="upload_target_edit_existing_image")
        st.text_input("Target object", key="text_input_target_edit_existing_image")
        st.image('content/cat-sofa.png')

    st.text_input("Prompt", key="text_input_prompt_edit_existing_image")
    st.text_input("Negative prompt", key="text_input_negative_prompt_edit_existing_image")
    st.button("Generate", key="button_generate_edit_existing_image")

    st.write("## Result")
    st.image('content/after_editing.png')


def create_edit_generated_image_tab():
    st.write("# Edit generated image")

    cols = st.columns(2)
    with cols[0]:
        image_source = st.file_uploader("Upload source image", type=["png", "jpg", "jpeg", "webp"], key="upload_source_edit_generated_image")
        st.text_input("Target object", key="text_input_source_edit_generated_image")
        st.text_input("Prompt", key="text_input_prompt_edit_generated_image")
        st.text_input("Negative prompt", key="text_input_negative_prompt_edit_generated_image")
        if image_source:
            st.button("Generate", key="button_generate_edit_generated_image")
    with cols[1]:
        st.image('content/dog.png')

    st.write("## Result")
    cols_result = st.columns(2)
    with cols_result[0]:
        st.write("### Generated image before editing")
        st.image('content/before_editing_generated.png')
    with cols_result[1]:
        st.write("### Generated image after editing")
        st.image('content/after_editing_generated.png')


def create_zero_shot_generation_tab():
    st.write("# Zero-shot generation")


def create_zero_shot_stylization_tab():
    st.write("# Zero-shot stylization")


def create_home_tab():
    st.write("# Home of BLIP-Diffusion")
    st.write("Welcome to the demo application of BLIP-Diffusion")

    st.write("Project page is [here](https://dxli94.github.io/BLIP-Diffusion-website/.)")
    st.write("Github page is [here](https://github.com/salesforce/LAVIS/tree/main/projects/blip-diffusion)")
    st.write("Paper is [here](https://arxiv.org/abs/2305.14720)")

    st.image('content/teaser-website.png')


def main():

    with st.sidebar:
        st.title("Navigation")
        st.slider("Guidance scale", 0.0, 20.0, 7.5, 0.1)
        st.slider("Inference steps", 5, 40, 20, 1)
        st.number_input("Seed", 0, 100000, 0, 1)

    tab_names = ["Home", "Edit existing image", "Edit generated image", "Zero-shot generation", "Zero-shot stylization"]

    (home_tab,
     edit_existing_image_tab,
     edit_generated_image_tab,
     zero_shot_generation_tab,
     zero_shot_stylization_tab) = st.tabs(tab_names)

    with home_tab:
        create_home_tab()

    with edit_existing_image_tab:
        create_edit_existing_image_tab()

    with edit_generated_image_tab:
        create_edit_generated_image_tab()

    with zero_shot_generation_tab:
        create_zero_shot_generation_tab()

    with zero_shot_stylization_tab:
        create_zero_shot_stylization_tab()


if __name__ == "__main__":
    main()
spaces/Bianca0930/Bianca/app.py
DELETED
@@ -1,12 +0,0 @@
import gradio as gr
from gradio.mix import Parallel

title = "My First Text Generation"
description = "Input text."

mode11 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
mode12 = gr.Interface.load("huggingface/gpt2")
mode13 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-125M")

Parallel(mode11, mode12, mode13, title=title, description=description).launch()
spaces/Biaolin/stabilityai-FreeWilly1-Delta-SafeTensor/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/stabilityai/FreeWilly1-Delta-SafeTensor").launch()
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/utils.py
DELETED
@@ -1,146 +0,0 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# https://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import inspect

import jmespath


def get_resource_ignore_params(params):
    """Helper method to determine which parameters to ignore for actions

    :returns: A list of the parameter names that does not need to be
        included in a resource's method call for documentation purposes.
    """
    ignore_params = []
    for param in params:
        result = jmespath.compile(param.target)
        current = result.parsed
        # Use JMESPath to find the left most element in the target expression
        # which will be the parameter to ignore in the action call.
        while current['children']:
            current = current['children'][0]
        # Make sure the parameter we are about to ignore is a field.
        # If it is not, we should ignore the result to avoid false positives.
        if current['type'] == 'field':
            ignore_params.append(current['value'])
    return ignore_params


def is_resource_action(action_handle):
    return inspect.isfunction(action_handle)


def get_resource_public_actions(resource_class):
    resource_class_members = inspect.getmembers(resource_class)
    resource_methods = {}
    for name, member in resource_class_members:
        if not name.startswith('_'):
            if not name[0].isupper():
                if not name.startswith('wait_until'):
                    if is_resource_action(member):
                        resource_methods[name] = member
    return resource_methods


def get_identifier_values_for_example(identifier_names):
    return ','.join([f'\'{identifier}\'' for identifier in identifier_names])


def get_identifier_args_for_signature(identifier_names):
    return ','.join(identifier_names)


def get_identifier_description(resource_name, identifier_name):
    return (
        f"The {resource_name}'s {identifier_name} identifier. "
        f"This **must** be set."
    )


def add_resource_type_overview(
    section, resource_type, description, intro_link=None
):
    section.style.new_line()
    section.style.h3(resource_type)
    section.style.new_line()
    section.style.new_line()
    section.write(description)
    section.style.new_line()
    if intro_link is not None:
        section.write(
            f'For more information about {resource_type.lower()} refer to the '
            f':ref:`Resources Introduction Guide<{intro_link}>`.'
        )
        section.style.new_line()


class DocumentModifiedShape:
    def __init__(
        self, shape_name, new_type, new_description, new_example_value
    ):
        self._shape_name = shape_name
        self._new_type = new_type
        self._new_description = new_description
        self._new_example_value = new_example_value

    def replace_documentation_for_matching_shape(
        self, event_name, section, **kwargs
    ):
        if self._shape_name == section.context.get('shape'):
            self._replace_documentation(event_name, section)
        for section_name in section.available_sections:
            sub_section = section.get_section(section_name)
            if self._shape_name == sub_section.context.get('shape'):
                self._replace_documentation(event_name, sub_section)
            else:
                self.replace_documentation_for_matching_shape(
                    event_name, sub_section
                )

    def _replace_documentation(self, event_name, section):
        if event_name.startswith(
            'docs.request-example'
        ) or event_name.startswith('docs.response-example'):
            section.remove_all_sections()
            section.clear_text()
            section.write(self._new_example_value)

        if event_name.startswith(
            'docs.request-params'
        ) or event_name.startswith('docs.response-params'):
            allowed_sections = (
                'param-name',
                'param-documentation',
                'end-structure',
                'param-type',
                'end-param',
            )
            for section_name in section.available_sections:
                # Delete any extra members as a new shape is being
                # used.
                if section_name not in allowed_sections:
                    section.delete_section(section_name)

            # Update the documentation
            description_section = section.get_section('param-documentation')
            description_section.clear_text()
            description_section.write(self._new_description)

            # Update the param type
            type_section = section.get_section('param-type')
            if type_section.getvalue().decode('utf-8').startswith(':type'):
                type_section.clear_text()
                type_section.write(f':type {section.name}: {self._new_type}')
            else:
                type_section.clear_text()
                type_section.style.italics(f'({self._new_type}) -- ')
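get_resource_ignore_params above walks each parameter's JMESPath target and keeps only the left-most field name. The snippet below exercises it with a stand-in parameter object; FakeParam is hypothetical and only provides the .target attribute the helper relies on.

```python
# Illustrative sketch only: FakeParam stands in for the request-parameter objects
# boto3 passes in; only the `target` attribute is used by the helper.
from collections import namedtuple
from boto3.docs.utils import get_resource_ignore_params

FakeParam = namedtuple("FakeParam", ["target"])

params = [
    FakeParam(target="InstanceIds[0]"),   # left-most field -> "InstanceIds"
    FakeParam(target="Filters[0].Name"),  # left-most field -> "Filters"
]
print(get_resource_ignore_params(params))  # ['InstanceIds', 'Filters']
```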
spaces/BigSalmon/Bart/README.md
DELETED
@@ -1,37 +0,0 @@
---
title: Bart
emoji: 🦀
colorFrom: yellow
colorTo: red
sdk: streamlit
app_file: app.py
pinned: false
---

# Configuration

`title`: _string_
Display title for the Space

`emoji`: _string_
Space emoji (emoji-only character allowed)

`colorFrom`: _string_
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

`colorTo`: _string_
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)

`sdk`: _string_
Can be either `gradio` or `streamlit`

`sdk_version`: _string_
Only applicable for `streamlit` SDK.
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.

`app_file`: _string_
Path to your main application file (which contains either `gradio` or `streamlit` Python code).
Path is relative to the root of the repository.

`pinned`: _boolean_
Whether the Space stays on top of your list.
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/equal.h
DELETED
@@ -1,48 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>
#include <thrust/system/detail/generic/tag.h>

namespace thrust
{
namespace system
{
namespace detail
{
namespace generic
{


template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2>
__host__ __device__
bool equal(thrust::execution_policy<DerivedPolicy> &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2);


template<typename DerivedPolicy, typename InputIterator1, typename InputIterator2, typename BinaryPredicate>
__host__ __device__
bool equal(thrust::execution_policy<DerivedPolicy> &exec, InputIterator1 first1, InputIterator1 last1, InputIterator2 first2, BinaryPredicate binary_pred);


} // end namespace generic
} // end namespace detail
} // end namespace system
} // end namespace thrust

#include <thrust/system/detail/generic/equal.inl>
spaces/CVPR/LIVE/within_distance.h
DELETED
@@ -1,446 +0,0 @@
#pragma once

#include "diffvg.h"
#include "edge_query.h"
#include "shape.h"
#include "vector.h"

DEVICE
inline
bool within_distance(const Circle &circle, const Vector2f &pt, float r) {
    auto dist_to_center = distance(circle.center, pt);
    if (fabs(dist_to_center - circle.radius) < r) {
        return true;
    }
    return false;
}

DEVICE
inline
bool within_distance(const Path &path, const BVHNode *bvh_nodes, const Vector2f &pt, float r) {
    auto num_segments = path.num_base_points;
    constexpr auto max_bvh_size = 128;
    int bvh_stack[max_bvh_size];
    auto stack_size = 0;
    bvh_stack[stack_size++] = 2 * num_segments - 2;
    while (stack_size > 0) {
        const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]];
        if (node.child1 < 0) {
            // leaf
            auto base_point_id = node.child0;
            auto point_id = - node.child1 - 1;
            assert(base_point_id < num_segments);
            assert(point_id < path.num_points);
            if (path.num_control_points[base_point_id] == 0) {
                // Straight line
                auto i0 = point_id;
                auto i1 = (point_id + 1) % path.num_points;
                auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
                auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
                // project pt to line
                auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0);
                auto r0 = r;
                auto r1 = r;
                // override radius if path has thickness
                if (path.thickness != nullptr) {
                    r0 = path.thickness[i0];
                    r1 = path.thickness[i1];
                }
                if (t < 0) {
                    if (distance_squared(p0, pt) < r0 * r0) {
                        return true;
                    }
                } else if (t > 1) {
                    if (distance_squared(p1, pt) < r1 * r1) {
                        return true;
                    }
                } else {
                    auto r = r0 + t * (r1 - r0);
                    if (distance_squared(p0 + t * (p1 - p0), pt) < r * r) {
                        return true;
                    }
                }
            } else if (path.num_control_points[base_point_id] == 1) {
                // Quadratic Bezier curve
                auto i0 = point_id;
                auto i1 = point_id + 1;
                auto i2 = (point_id + 2) % path.num_points;
                auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
                auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
                auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
                if (path.use_distance_approx) {
                    auto cp = quadratic_closest_pt_approx(p0, p1, p2, pt);
                    return distance_squared(cp, pt) < r * r;
                }
                auto eval = [&](float t) -> Vector2f {
                    auto tt = 1 - t;
                    return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2;
                };
                auto r0 = r;
                auto r1 = r;
                auto r2 = r;
                // override radius if path has thickness
                if (path.thickness != nullptr) {
                    r0 = path.thickness[i0];
                    r1 = path.thickness[i1];
                    r2 = path.thickness[i2];
                }
                if (distance_squared(eval(0), pt) < r0 * r0) {
                    return true;
                }
                if (distance_squared(eval(1), pt) < r2 * r2) {
                    return true;
                }

                // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2
                // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q
                // Want to solve (q - pt) dot q' = 0
                // q' = (p0-2p1+p2)t + (-p0+p1)
                // Expanding (p0-2p1+p2)^2 t^3 +
                //           3(p0-2p1+p2)(-p0+p1) t^2 +
                //           (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t +
                //           (-p0+p1)(p0-pt) = 0
                auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2));
                auto B = sum(3*(p0-2*p1+p2)*(-p0+p1));
                auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt));
                auto D = sum((-p0+p1)*(p0-pt));
                float t[3];
                int num_sol = solve_cubic(A, B, C, D, t);
                for (int j = 0; j < num_sol; j++) {
                    if (t[j] >= 0 && t[j] <= 1) {
                        auto tt = 1 - t[j];
                        auto r = (tt*tt)*r0 + (2*tt*t[j])*r1 + (t[j]*t[j])*r2;
                        auto p = eval(t[j]);
                        if (distance_squared(p, pt) < r*r) {
                            return true;
                        }
                    }
                }
            } else if (path.num_control_points[base_point_id] == 2) {
                // Cubic Bezier curve
                auto i0 = point_id;
                auto i1 = point_id + 1;
                auto i2 = point_id + 2;
                auto i3 = (point_id + 3) % path.num_points;
                auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]};
                auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]};
                auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]};
                auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]};
                auto eval = [&](float t) -> Vector2f {
                    auto tt = 1 - t;
                    return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3;
                };
                auto r0 = r;
                auto r1 = r;
                auto r2 = r;
                auto r3 = r;
                // override radius if path has thickness
                if (path.thickness != nullptr) {
                    r0 = path.thickness[i0];
                    r1 = path.thickness[i1];
                    r2 = path.thickness[i2];
                    r3 = path.thickness[i3];
                }
                if (distance_squared(eval(0), pt) < r0*r0) {
                    return true;
                }
                if (distance_squared(eval(1), pt) < r3*r3) {
                    return true;
                }
                // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3
                // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0
                // Want to solve (q - pt) dot q' = 0
                // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1)
                // Expanding
                // 3*(-p0+3p1-3p2+p3)^2 t^5
                // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4
                // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3
                // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2
                // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t
                // (p0-pt)(-3p0+3p1)
                double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3));
                double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2));
                double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2));
                double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt)));
                double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2));
                double F = sum((p0-pt)*(-3*p0+3*p1));
                // normalize the polynomial
                B /= A;
                C /= A;
                D /= A;
                E /= A;
                F /= A;
                // Isolator Polynomials:
                // https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2233&rep=rep1&type=pdf
                // x/5 + B/25
                // /-----------------------------------------------------
                // 5x^4 + 4B x^3 + 3C x^2 + 2D x + E / x^5 + B x^4 + C x^3 + D x^2 + E x + F
                //                                     x^5 + 4B/5 x^4 + 3C/5 x^3 + 2D/5 x^2 + E/5 x
                //                                     ----------------------------------------------------
                //                                     B/5 x^4 + 2C/5 x^3 + 3D/5 x^2 + 4E/5 x + F
                //                                     B/5 x^4 + 4B^2/25 x^3 + 3BC/25 x^2 + 2BD/25 x + BE/25
                //                                     ----------------------------------------------------
                //                                     (2C/5 - 4B^2/25)x^3 + (3D/5-3BC/25)x^2 + (4E/5-2BD/25) + (F-BE/25)
                auto p1A = ((2 / 5.f) * C - (4 / 25.f) * B * B);
                auto p1B = ((3 / 5.f) * D - (3 / 25.f) * B * C);
                auto p1C = ((4 / 5.f) * E - (2 / 25.f) * B * D);
                auto p1D = F - B * E / 25.f;
                // auto q1A = 1 / 5.f;
                // auto q1B = B / 25.f;
                // x/5 + B/25 = 0
                // x = -B/5
                auto q_root = -B/5.f;
                double p_roots[3];
                int num_sol = solve_cubic(p1A, p1B, p1C, p1D, p_roots);
                float intervals[4];
                if (q_root >= 0 && q_root <= 1) {
                    intervals[0] = q_root;
                }
                for (int j = 0; j < num_sol; j++) {
                    intervals[j + 1] = p_roots[j];
                }
                auto num_intervals = 1 + num_sol;
                // sort intervals
                for (int j = 1; j < num_intervals; j++) {
                    for (int k = j; k > 0 && intervals[k - 1] > intervals[k]; k--) {
                        auto tmp = intervals[k];
                        intervals[k] = intervals[k - 1];
                        intervals[k - 1] = tmp;
                    }
                }
                auto eval_polynomial = [&] (double t) {
                    return t*t*t*t*t+
                           B*t*t*t*t+
                           C*t*t*t+
                           D*t*t+
                           E*t+
                           F;
                };
                auto eval_polynomial_deriv = [&] (double t) {
                    return 5*t*t*t*t+
                           4*B*t*t*t+
                           3*C*t*t+
                           2*D*t+
                           E;
                };
                auto lower_bound = 0.f;
                for (int j = 0; j < num_intervals + 1; j++) {
                    if (j < num_intervals && intervals[j] < 0.f) {
                        continue;
                    }
                    auto upper_bound = j < num_intervals ?
                        min(intervals[j], 1.f) : 1.f;
                    auto lb = lower_bound;
                    auto ub = upper_bound;
                    auto lb_eval = eval_polynomial(lb);
                    auto ub_eval = eval_polynomial(ub);
                    if (lb_eval * ub_eval > 0) {
                        // Doesn't have root
                        continue;
                    }
                    if (lb_eval > ub_eval) {
                        swap_(lb, ub);
                    }
                    auto t = 0.5f * (lb + ub);
                    for (int it = 0; it < 20; it++) {
                        if (!(t >= lb && t <= ub)) {
                            t = 0.5f * (lb + ub);
                        }
                        auto value = eval_polynomial(t);
                        if (fabs(value) < 1e-5f || it == 19) {
                            break;
                        }
                        // The derivative may not be entirely accurate,
                        // but the bisection is going to handle this
                        if (value > 0.f) {
                            ub = t;
                        } else {
                            lb = t;
                        }
                        auto derivative = eval_polynomial_deriv(t);
                        t -= value / derivative;
                    }
                    auto tt = 1 - t;
                    auto r = (tt*tt*tt)*r0 + (3*tt*tt*t)*r1 + (3*tt*t*t)*r2 + (t*t*t)*r3;
                    if (distance_squared(eval(t), pt) < r * r) {
                        return true;
                    }
                    if (upper_bound >= 1.f) {
                        break;
                    }
                    lower_bound = upper_bound;
                }
            } else {
                assert(false);
            }
        } else {
            assert(node.child0 >= 0 && node.child1 >= 0);
            const AABB &b0 = bvh_nodes[node.child0].box;
            if (within_distance(b0, pt, bvh_nodes[node.child0].max_radius)) {
                bvh_stack[stack_size++] = node.child0;
            }
            const AABB &b1 = bvh_nodes[node.child1].box;
            if (within_distance(b1, pt, bvh_nodes[node.child1].max_radius)) {
                bvh_stack[stack_size++] = node.child1;
            }
            assert(stack_size <= max_bvh_size);
        }
    }
    return false;
}

DEVICE
inline
int within_distance(const Rect &rect, const Vector2f &pt, float r) {
    auto test = [&](const Vector2f &p0, const Vector2f &p1) {
        // project pt to line
        auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0);
        if (t < 0) {
            if (distance_squared(p0, pt) < r * r) {
                return true;
            }
        } else if (t > 1) {
            if (distance_squared(p1, pt) < r * r) {
                return true;
            }
        } else {
            if (distance_squared(p0 + t * (p1 - p0), pt) < r * r) {
                return true;
            }
        }
        return false;
    };
    auto left_top = rect.p_min;
    auto right_top = Vector2f{rect.p_max.x, rect.p_min.y};
    auto left_bottom = Vector2f{rect.p_min.x, rect.p_max.y};
    auto right_bottom = rect.p_max;
    // left
    if (test(left_top, left_bottom)) {
        return true;
    }
    // top
    if (test(left_top, right_top)) {
        return true;
    }
    // right
    if (test(right_top, right_bottom)) {
        return true;
    }
    // bottom
    if (test(left_bottom, right_bottom)) {
        return true;
    }
    return false;
}

DEVICE
inline
bool within_distance(const Shape &shape, const BVHNode *bvh_nodes, const Vector2f &pt, float r) {
    switch (shape.type) {
        case ShapeType::Circle:
            return within_distance(*(const Circle *)shape.ptr, pt, r);
        case ShapeType::Ellipse:
            // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf
            assert(false);
            return false;
        case ShapeType::Path:
            return within_distance(*(const Path *)shape.ptr, bvh_nodes, pt, r);
        case ShapeType::Rect:
            return within_distance(*(const Rect *)shape.ptr, pt, r);
    }
    assert(false);
    return false;
}

DEVICE
inline
bool within_distance(const SceneData &scene,
                     int shape_group_id,
                     const Vector2f &pt) {
    const ShapeGroup &shape_group = scene.shape_groups[shape_group_id];
    // pt is in canvas space, transform it to shape's local space
    auto local_pt = xform_pt(shape_group.canvas_to_shape, pt);

    constexpr auto max_bvh_stack_size = 64;
    int bvh_stack[max_bvh_stack_size];
    auto stack_size = 0;
    bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2;
    const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id];

    while (stack_size > 0) {
        const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]];
        if (node.child1 < 0) {
            // leaf
            auto shape_id = node.child0;
            const auto &shape = scene.shapes[shape_id];
            if (within_distance(shape, scene.path_bvhs[shape_id],
                                local_pt, shape.stroke_width)) {
                return true;
            }
        } else {
            assert(node.child0 >= 0 && node.child1 >= 0);
            const AABB &b0 = bvh_nodes[node.child0].box;
            if (inside(b0, local_pt, bvh_nodes[node.child0].max_radius)) {
                bvh_stack[stack_size++] = node.child0;
            }
            const AABB &b1 = bvh_nodes[node.child1].box;
            if (inside(b1, local_pt, bvh_nodes[node.child1].max_radius)) {
                bvh_stack[stack_size++] = node.child1;
            }
            assert(stack_size <= max_bvh_stack_size);
        }
    }

    return false;
}

DEVICE
inline
bool within_distance(const SceneData &scene,
                     int shape_group_id,
                     const Vector2f &pt,
                     EdgeQuery *edge_query) {
    if (edge_query == nullptr || shape_group_id != edge_query->shape_group_id) {
        // Specialized version
        return within_distance(scene, shape_group_id, pt);
    }
    const ShapeGroup &shape_group = scene.shape_groups[shape_group_id];
    // pt is in canvas space, transform it to shape's local space
    auto local_pt = xform_pt(shape_group.canvas_to_shape, pt);

    constexpr auto max_bvh_stack_size = 64;
    int bvh_stack[max_bvh_stack_size];
    auto stack_size = 0;
    bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2;
    const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id];

    auto ret = false;
    while (stack_size > 0) {
        const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]];
        if (node.child1 < 0) {
            // leaf
            auto shape_id = node.child0;
            const auto &shape = scene.shapes[shape_id];
            if (within_distance(shape, scene.path_bvhs[shape_id],
                                local_pt, shape.stroke_width)) {
                ret = true;
                if (shape_id == edge_query->shape_id) {
                    edge_query->hit = true;
                }
            }
        } else {
            assert(node.child0 >= 0 && node.child1 >= 0);
            const AABB &b0 = bvh_nodes[node.child0].box;
            if (inside(b0, local_pt, bvh_nodes[node.child0].max_radius)) {
                bvh_stack[stack_size++] = node.child0;
            }
            const AABB &b1 = bvh_nodes[node.child1].box;
            if (inside(b1, local_pt, bvh_nodes[node.child1].max_radius)) {
                bvh_stack[stack_size++] = node.child1;
            }
            assert(stack_size <= max_bvh_stack_size);
        }
    }

    return ret;
}
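For the quadratic segments, the header above reduces "is pt within distance r of the curve" to the roots of a cubic: with q(t) the Bézier point, it solves (q(t) - pt) · q'(t) = 0 and then tests the admissible roots plus the two endpoints. The NumPy sketch below reproduces that same test for a single quadratic segment, without the per-point thickness interpolation or the BVH traversal, purely as a readable reference.

```python
import numpy as np

def within_distance_quadratic(p0, p1, p2, pt, r):
    """Check whether pt lies within distance r of the quadratic Bezier (p0, p1, p2)."""
    p0, p1, p2, pt = map(np.asarray, (p0, p1, p2, pt))
    a = p0 - 2 * p1 + p2            # q(t)  = a t^2 + b t + p0
    b = 2 * (p1 - p0)               # q'(t) = 2 a t + b
    # (q(t) - pt) . q'(t) = 0 expands to a cubic in t.
    c3 = 2 * np.dot(a, a)
    c2 = 3 * np.dot(a, b)
    c1 = np.dot(b, b) + 2 * np.dot(a, p0 - pt)
    c0 = np.dot(b, p0 - pt)
    candidates = [0.0, 1.0]                       # always test the endpoints
    for t in np.roots([c3, c2, c1, c0]):
        if abs(t.imag) < 1e-9 and 0.0 <= t.real <= 1.0:
            candidates.append(float(t.real))
    point_at = lambda t: (1 - t) ** 2 * p0 + 2 * (1 - t) * t * p1 + t ** 2 * p2
    return any(np.linalg.norm(point_at(t) - pt) < r for t in candidates)

# The closest curve point to (1, 0.9) is near (1, 1), so this prints True.
print(within_distance_quadratic((0, 0), (1, 2), (2, 0), (1, 0.9), 0.2))
```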
spaces/CVPR/lama-example/saicinpainting/evaluation/masks/README.md
DELETED
@@ -1,27 +0,0 @@
-# Current algorithm
-
-## Choice of mask objects
-
-To identify objects that are suitable for mask generation, we use a panoptic segmentation model
-from [detectron2](https://github.com/facebookresearch/detectron2) trained on COCO. Categories of the detected instances
-belong either to the "stuff" or the "things" type. We require that object instances have a category belonging
-to "things". Besides, we set an upper bound on the area taken up by the object: too large an
-area indicates that the instance is either the background or a main object that should not be removed.
-
-## Choice of position for mask
-
-We assume the input image has size 2^n x 2^m. We downsample it using the
-[COUNTLESS](https://github.com/william-silversmith/countless) algorithm until the width equals
-64 = 2^6 = 2^{downsample_levels}.
-
-### Augmentation
-
-There are several parameters for augmentation:
-- Scaling factor. We limit scaling to the case when the mask, after scaling with the pivot point at its center, fits inside the
-image completely.
--
-
-### Shift
-
-
-## Select

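The deleted README above describes two steps that are easier to follow with a concrete sketch: keeping only panoptic "things" instances below an area bound, and COUNTLESS-style 2x2 downsampling used to choose mask positions. The snippet below is a minimal illustration under stated assumptions, not code from the lama-example space; the `isthing`/`area` keys follow detectron2's panoptic output convention, and `max_area_frac` is a hypothetical threshold.

```python
import numpy as np


def select_mask_instances(segments_info, image_area, max_area_frac=0.5):
    """Keep only 'thing' instances that do not cover too much of the image.

    Assumes detectron2-style panoptic output: each segment is a dict with
    'isthing' and 'area' keys. 'max_area_frac' is a hypothetical threshold.
    """
    return [
        seg for seg in segments_info
        if seg.get("isthing", False) and seg["area"] < max_area_frac * image_area
    ]


def countless_2x2(labels):
    """One 2x2 COUNTLESS step: keep the most frequent label in each 2x2 block.

    Simplified sketch; assumes a 2D non-negative integer label map with even
    height and width. Apply repeatedly until the target width is reached.
    """
    data = labels.astype(np.int64) + 1      # shift so 0 can mean "no agreement"
    a = data[0::2, 0::2]                    # top-left of each 2x2 block
    b = data[0::2, 1::2]                    # top-right
    c = data[1::2, 0::2]                    # bottom-left
    d = data[1::2, 1::2]                    # bottom-right
    ab_ac = a * ((a == b) | (a == c))       # a wins if it matches b or c
    ab_ac |= b * (b == c)                   # otherwise b wins if it matches c
    out = ab_ac + (ab_ac == 0) * d          # no pair agrees: fall back to d
    return out - 1
```

Under these assumptions, applying `countless_2x2` repeatedly (log2(width / 64) times) reduces a label map to the 64-pixel width the README refers to.
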
spaces/CikeyQI/Yunzai/Yunzai/lib/events/message.js
DELETED
@@ -1,14 +0,0 @@
-import EventListener from '../listener/listener.js'
-
-/**
- * Listen for group chat messages
- */
-export default class messageEvent extends EventListener {
-  constructor () {
-    super({ event: 'message' })
-  }
-
-  async execute (e) {
-    this.plugins.deal(e)
-  }
-}

spaces/CikeyQI/Yunzai/Yunzai/lib/tools/web.js
DELETED
@@ -1,74 +0,0 @@
-import express from 'express'
-import template from 'express-art-template'
-import fs from 'fs'
-import lodash from 'lodash'
-
-/*
-* After starting the Bot with npm run app web-debug,
-* browser debugging can additionally be opened with npm run web.
-* Visit http://localhost:8000/ to see the corresponding pages.
-* Resources inside a page must use {{_res_path}} as the root of the resources directory.
-* Templates and pages can be edited to preview the result.
-* todo: hot reload of the preview pages
-*
-* */
-
-let app = express()
-
-let _path = process.cwd()
-
-app.engine('html', template)
-app.set('views', _path + '/resources/')
-app.set('view engine', 'art')
-app.use(express.static(_path + '/resources'))
-app.use('/plugins', express.static('plugins'))
-
-app.get('/', function (req, res) {
-  let pluginList = fs.readdirSync(_path + '/temp/ViewData/') || []
-  let html = [
-    'After triggering a screenshot message in npm run web-dev mode, pick a page below to debug it',
-    'If resource paths inside a page are wrong, use {{_res_path}} as the root path (it replaces the previous ../../../../)',
-    'Edit the template html or css directly and refresh to see the result'
-  ]
-  let li = {}
-  for (let pIdx in pluginList) {
-    const plugin = pluginList[pIdx]
-    let fileList = fs.readdirSync(_path + `/temp/ViewData/${plugin}/`) || []
-    for (let idx in fileList) {
-      let ret = /(.+)\.json$/.exec(fileList[idx])
-      if (ret && ret[1]) {
-        let text = [plugin, ...ret[1].split('_')]
-        li[text.join('')] = (`<li style="font-size:18px; line-height:30px;"><a href="/${plugin}_${ret[1]}">${text.join(' / ')}</a></li>`)
-      }
-    }
-  }
-  res.send(html.join('</br>') + '<ul>' + lodash.values(li).join('') + '</ul>')
-})
-
-app.get('/:page', function (req, res) {
-  let [plugin, app, ...page] = req.params.page.split('_')
-  page = page.join('_')
-  if (plugin == 'favicon.ico') {
-    return res.send('')
-  }
-  let data = JSON.parse(fs.readFileSync(_path + `/temp/ViewData/${plugin}/${app}_${page}.json`, 'utf8'))
-  data = data || {}
-  data._res_path = ''
-  data._sys_res_path = data._res_path
-
-  if (data._plugin) {
-    data._res_path = `/plugins/${data._plugin}/resources/`
-    data.pluResPath = data._res_path
-  }
-  let htmlPath = ''
-  let tplPath = `${app}/${htmlPath}${page}/${page}.html`
-  if (data._plugin) {
-    tplPath = `../plugins/${data._plugin}/resources/${htmlPath}/${app}/${page.split('_').join('/')}.html`
-  } else if (data._no_type_path) {
-    tplPath = `${app}/${page}.html`
-  }
-  res.render(tplPath, data)
-})
-
-app.listen(8000)
-console.log('Page service started. After triggering a message image, visit http://localhost:8000/ to debug pages')
