Commit 9214814
Parent(s): dc4abcb
Update parquet files (step 79 of 249)
This view is limited to 50 files because it contains too many changes.
- spaces/0xSynapse/Segmagine/README.md +0 -13
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Comment utiliser Markzware PDF2DTP-torrent.rar pour importer des PDF dans InDesign.md +0 -134
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dcouvrez Gta Mumbai City Pc Game 18 le nouveau titre de la saga Grand Theft Auto.md +0 -109
- spaces/1gistliPinn/ChatGPT4/Examples/3DMGAME-OMSI.2.Cracked-3DM.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 8 Ball Pool with All Cues Unlocked - No Hack No Root.md +0 -116
- spaces/1phancelerku/anime-remove-background/Download Sigma Battle Royale APK for Android - Enjoy Creative and Stylized Survival Shooter Game.md +0 -96
- spaces/AI-Hobbyist/Hoyo-RVC/MDXNet.py +0 -274
- spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/Article.md +0 -51
- spaces/AICODER009/Food101_Detection/model.py +0 -36
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/utils.py +0 -361
- spaces/AIWaves/SOP_Generation-single/Memory/__init__.py +0 -1
- spaces/AchyuthGamer/ImMagician-Gradio/README.md +0 -12
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/PerplexityAi.py +0 -87
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Wewordle.py +0 -75
- spaces/AgentVerse/agentVerse/app.py +0 -599
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/dots/Factory.d.ts +0 -6
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/inputtext/Factory.js +0 -13
- spaces/Akhil-77/Toxicity_Detector/app.py +0 -15
- spaces/Akmyradov/chatbot_testing/README.md +0 -12
- spaces/AlekseyKorshuk/gai-project/modules/models.py +0 -73
- spaces/Alichuan/VITS-Umamusume-voice-synthesizer/utils.py +0 -226
- spaces/Amrrs/DragGan-Inversion/gui_utils/imgui_window.py +0 -110
- spaces/Amrrs/textsummarizer/README.md +0 -37
- spaces/An-619/FastSAM/utils/tools_gradio.py +0 -175
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/loading.md +0 -463
- spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r50_fpn_1.5x_coco.py +0 -3
- spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/point_rend.py +0 -29
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py +0 -11
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py +0 -11
- spaces/Anon4review/HIPTDemo/vision_transformer.py +0 -330
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py +0 -57
- spaces/Anonymous-sub/Rerender/gmflow_module/evaluate.py +0 -689
- spaces/ApathyINC/CustomGPT/utils.py +0 -54
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/msgpack/__init__.py +0 -57
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/register.py +0 -319
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/train_loop.py +0 -417
- spaces/Awiny/Image2Paragraph/models/region_semantic.py +0 -61
- spaces/AzinZ/vitscn/monotonic_align/__init__.py +0 -20
- spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/layers_537227KB.py +0 -126
- spaces/Belligerent/word-sense-disambiguation/app.py +0 -30
- spaces/Benson/text-generation/Examples/Blxckie Ronda Mp4 Download.md +0 -132
- spaces/Benson/text-generation/Examples/Camin Simulador ltima Piel Del Camin.md +0 -50
- spaces/Benson/text-generation/Examples/Carretes Descargar Instagram Mp3.md +0 -140
- spaces/Benson/text-generation/Examples/Carx Street Android Hack Apk.md +0 -60
- spaces/Benson/text-generation/Examples/Descargar Error Genshin Impacto.md +0 -82
- spaces/CForGETaass/vits-uma-genshin-honkai/attentions.py +0 -300
- spaces/CVPR/LIVE/thrust/dependencies/cub/test/half.h +0 -317
- spaces/CVPR/monoscene_lite/monoscene/unet2d.py +0 -198
- spaces/CVPR/regionclip-demo/detectron2/export/README.md +0 -13
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/apps/notice/notice.js +0 -184
spaces/0xSynapse/Segmagine/README.md
DELETED
@@ -1,13 +0,0 @@
----
-title: Segmagine
-emoji: 🚀
-colorFrom: gray
-colorTo: red
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
-license: lgpl-3.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Comment utiliser Markzware PDF2DTP-torrent.rar pour importer des PDF dans InDesign.md
DELETED
@@ -1,134 +0,0 @@
-<br />
-<h1>Markzware PDF2DTP-torrent.rar: What Is It and How to Use It?</h1>
-<p>If you are looking for a way to convert your PDF files to InDesign files, you might have come across a file named <strong>Markzware PDF2DTP-torrent.rar</strong>. But what is this file and how can you use it? In this article, we will explain what Markzware PDF2DTP is, what a torrent file is, how to download and use Markzware PDF2DTP-torrent.rar, how much it costs, and where to get it.</p>
-<h2>What is Markzware PDF2DTP?</h2>
-<p>Markzware PDF2DTP is a plugin for Adobe InDesign that allows you to convert any PDF file to an editable InDesign file with a single click. It is developed by Markzware, a leading provider of software solutions for the printing, publishing, and graphic design industries.</p>
-<h2>Markzware PDF2DTP-torrent.rar</h2><br /><p><b><b>Download</b> ❤❤❤ <a href="https://byltly.com/2uKvs0">https://byltly.com/2uKvs0</a></b></p><br /><br />
-<h3>What is a PDF file and why do you need to convert it to InDesign?</h3>
-<p>A PDF file (Portable Document Format) is a file format that preserves the layout, formatting, and quality of a document across different platforms and devices. It is widely used for viewing and printing documents, but not for editing them.</p>
-<p>If you work with Adobe InDesign, you might need to convert a PDF file to InDesign for various reasons, such as:</p>
-<ul>
-<li>You want to edit the content of a PDF file in InDesign without having to rebuild it from scratch.</li>
-<li>You want to save time and money by avoiding manual copying and pasting or hiring a professional designer.</li>
-<li>You want to preserve the original layout, formatting, and quality of the PDF file as much as possible.</li>
-<li>You want to access and modify the text, images, fonts, colors, styles, and other elements of the PDF file in InDesign.</li>
-<li>You want to create new InDesign documents from existing PDF files or merge multiple PDF files into one InDesign file.</li>
-</ul>
-<h3>What are the benefits and features of Markzware PDF2DTP?</h3>
-<p>Some of the benefits and features of Markzware PDF2DTP are:</p>
-<ul>
-<li>It is fast and easy to use. You can convert a PDF file to InDesign in minutes with just a few clicks.</li>
-<li>It is accurate and reliable. It preserves the original appearance and quality of the PDF file as much as possible.</li>
-<li>It is flexible and customizable. You can adjust the conversion settings and options according to your preferences and needs.</li>
-<li>It is affordable and cost-effective. It saves you time and money by eliminating the need for manual labor or professional services.</li>
-<li>It is compatible and up-to-date. It supports InDesign CC 2020 through InDesign CS6 on macOS.</li>
-<li>It converts all types of elements from the PDF file to InDesign,</li>
-<li>It allows you to set certain parameters and options for the conversion of PDF files,</li>
-<li>It lets you select and convert all pages in a PDF file or only specific pages or ranges of pages.</li>
-<li>It enables you to track the progress of the conversion process with indicators such as page number,</li>
-<li>It provides access to the version number,</li>
-</ul>
-<h3>How does Markzware PDF2DTP work?</h3>
-<p>Markzware PDF2DTP works by analyzing the structure and content of the PDF file and converting it into an equivalent InDesign file. It uses advanced algorithms and techniques to recreate or transfer all the elements of the PDF file into an editable format within InDesign.</p>
-<p>Markzware PDF2DTP can handle virtually any type of PDF file,</p>
-<h2>What is a torrent file and why do you need to download it?</h2>
-<h3>What is a torrent file and how does it work?</h3>
-<p>A torrent file (or .torrent) is a small file that contains information about a larger file that can be downloaded from other users on a peer-to-peer network. A peer-to-peer network is a system where users share files directly with each other without relying on a central server.</p>
-<p>A torrent file works by using a software program called a torrent client (such as BitTorrent or uTorrent) that connects you with other users who have the same or parts of the same file that you want. The torrent client then downloads small pieces of the file from different sources until it completes the whole file. This way, you can download large files faster and more efficiently than from a single source.</p>
-<h3>What are the advantages and disadvantages of using torrent files?</h3>
-<p>Some of the advantages of using torrent files are:</p>
-<p>Markzware PDF2DTP converter torrent download<br />
-How to use Markzware PDF2DTP to edit PDF files<br />
-Markzware PDF2DTP free trial rar file<br />
-Markzware PDF2DTP crack serial keygen<br />
-Markzware PDF2DTP for InDesign CC torrent<br />
-Markzware PDF2DTP review and tutorial<br />
-Markzware PDF2DTP alternative software<br />
-Markzware PDF2DTP license activation code<br />
-Markzware PDF2DTP system requirements and compatibility<br />
-Markzware PDF2DTP discount coupon code<br />
-Markzware PDF2DTP vs PDF2ID comparison<br />
-Markzware PDF2DTP for QuarkXPress torrent<br />
-Markzware PDF2DTP installation and troubleshooting guide<br />
-Markzware PDF2DTP features and benefits<br />
-Markzware PDF2DTP customer support and feedback<br />
-Markzware PDF2DTP for Mac OS X torrent<br />
-Markzware PDF2DTP for Windows torrent<br />
-Markzware PDF2DTP online demo and webinar<br />
-Markzware PDF2DTP testimonials and case studies<br />
-Markzware PDF2DTP update and upgrade information<br />
-Markzware PDF2DTP best practices and tips<br />
-Markzware PDF2DTP pros and cons analysis<br />
-Markzware PDF2DTP FAQ and help page<br />
-Markzware PDF2DTP video tutorial and walkthrough<br />
-Markzware PDF2DTP blog and news articles<br />
-Markzware PDF2DTP forum and community discussion<br />
-Markzware PDF2DTP affiliate program and commission rate<br />
-Markzware PDF2DTP refund policy and guarantee<br />
-Markzware PDF2DTP for Adobe Illustrator torrent<br />
-Markzware PDF2DTP for Microsoft Word torrent<br />
-Markzware PDF2DTP for Photoshop torrent<br />
-Markzware PDF2DTP for CorelDraw torrent<br />
-Markzware PDF2DTP for Publisher torrent<br />
-Markzware PDF2DTP for PowerPoint torrent<br />
-Markzware PDF2DTP for Excel torrent<br />
-Markzware PDF2DTP for HTML torrent<br />
-Markzware PDF2DTP for ePub torrent<br />
-Markzware PDF2DTP for Kindle torrent<br />
-Markzware PDF2DTP for XML torrent<br />
-Markzware PDF2DTP for RTF torrent<br />
-Markzware PDF2DTP for CSV torrent<br />
-Markzware PDF2DTP for TXT torrent<br />
-Markzware PDF2DTP for JPEG torrent<br />
-Markzware PDF2DTP for PNG torrent<br />
-Markzware PDF2DTP for GIF torrent<br />
-Markzware PDF2DTP for BMP torrent<br />
-Markzware PDF2DTP for TIFF torrent<br />
-Markzware PDF2DTP for PSD torrent<br />
-Markzware PDF2DTP for AI torrent</p>
-<ul>
-<li>You can download large files quickly and easily without depending on a single server or website.</li>
-<li>You can download multiple files at once or resume interrupted downloads without losing data.</li>
-<li>You can reduce bandwidth consumption by sharing files with other users instead of downloading them from a single source.</li>
-</ul>
-<p>Some of the disadvantages of using torrent files are:</p>
-<ul>
-<li>You might download malicious or illegal files that can harm your computer or violate copyright laws.</li>
-<li>You might expose your IP address or personal information to other users or third parties that can track your online activity or target you with ads or malware.</li>
-<li>You might face legal consequences or penalties if you download or share copyrighted or prohibited content without permission.</li>
-</ul>
-<h3>How to download a torrent file safely and legally?</h3>
-<p>To download a torrent file safely and legally,</p>
-- You should use a reputable torrent client that has security features such as encryption, - You should use a reliable VPN service that can hide your IP address, - You should scan your downloaded files with an antivirus program before opening them. - You should only download legal content that does not infringe on any copyrights or laws. <h2>How to use Markzware PDF2DTP-torrent.rar?</h2>
-<h3>How to install and activate Markzware PDF2DTP?</h3>
-<p>To install and activate Markzware PDF2DTP,</p>
-- You should first extract the .rar file using a software program such as WinRAR or 7-Zip. - You should then run the installer for Markzware PDF2DTP - You should then follow the instructions on the screen - You should then enter your license key - You should then restart your computer <h3>How to choose and convert a PDF file to InDesign using Markzware PDF2DTP?</h3>
-<p>To choose and convert a PDF file to InDesign using Markzware PDF2DTP,</p>
-- You should first launch Adobe InDesign - You should then choose the “Convert PDF…” menu item from the “Markzware” menu in Adobe InDesign - You should then navigate to and choose the PDF document that you would like to open in Adobe InDesign - You should then click the “Open” button <h3>How to edit and save the converted InDesign file using Markzware PDF2DTP?</h3>
-```html and save the converted InDesign file using Markzware PDF2DTP,</p>
-- You can edit the content of the InDesign file as you would normally do with any InDesign document - You can access and modify the text, images, fonts, colors, styles, and other elements of the PDF file in InDesign - You can create new InDesign documents from existing PDF files or merge multiple PDF files into one InDesign file - You can save the InDesign file in any format that InDesign supports <h2>How much does Markzware PDF2DTP cost and where can you get it?</h2>
-<h3>How much does Markzware PDF2DTP cost and what are the subscription plans?</h3>
-<p>The price of Markzware PDF2DTP depends on which subscription plan you choose. There are two subscription plans available:</p>
-- Annual Subscription Plan: This plan costs $199 per year. It gives you access to all updates and upgrades for one year. - Perpetual Subscription Plan: This plan costs $399. It gives you access to all updates and upgrades for life. <h3>Where can you get Markzware PDF2DTP and how to contact the support team?</h3>
-<p>You can get Markzware PDF2DTP from Markzware's website. Here is the download link:</p>
-<code><a href="https://markzware.com/products/pdf2dtp/">https://markzware.com/products/pdf2dtp/</a></code>
-<p>If you have any questions or issues regarding Markzware PDF2DTP,</p>
-- You can contact Markzware's customer support team by filling out an online form, sending an email to [email protected] or [email protected], or calling a phone number (+1 949 929 1710 for sales or +1 949 756 5100 for support). - You can also check out Markzware's product documentation, online store support, video tutorials, industry news, product articles and news links, press releases, mailing list, media kit, partners, resellers, affiliate program, etc. <h3>Conclusion</h3>
-<p>In conclusion,</p>
-- Markzware PDF2DTP-torrent.rar is a file that contains a plugin for Adobe InDesign that can convert any PDF file to an editable InDesign file with a single click. - A torrent file is a file that contains information about a larger file that can be downloaded from other users on a peer-to-peer network. - To use Markzware PDF2DTP-torrent.rar, you need to download and install the plugin, choose and convert a PDF file to InDesign, edit and save the converted InDesign file as needed. - Markzware PDF2DTP costs $199 per year or $399 for life, depending on the subscription plan you choose. You can get it from Markzware's website or contact their support team for any questions or issues. <h3>FAQs</h3>
-<p>Here are some frequently asked questions about Markzware PDF2DTP-torrent.rar:</p>
-<ol>
-<li>Q: Is Markzware PDF2DTP-torrent.rar safe and legal to download?<br>
-A: Yes, Markzware PDF2DTP-torrent.rar is safe and legal to download if you use a reputable torrent client, a reliable VPN service, an antivirus program, and only download legal content.</li>
-<li>Q: Does Markzware PDF2DTP work with Windows or other versions of InDesign?<br>
-A: No, Markzware PDF2DTP only works with macOS and InDesign CC 2020 through InDesign CS6. If you need to convert PDF files to other versions of InDesign or other formats, you can check out other products from Markzware such as OmniMarkz, PDFMarkz, QXPMarkz, or IDMarkz.</li>
-<li>Q: Does Markzware PDF2DTP preserve all the elements of the PDF file in InDesign?<br>
-A: Yes, Markzware PDF2DTP preserves all the elements of the PDF file in InDesign as much as possible. However, some elements may not be converted exactly due to differences between the formats or limitations of the software. For example, some fonts may not be available or some images may lose quality. You can always edit and adjust the converted InDesign file as needed.</li>
-<li>Q: How long does it take to convert a PDF file to InDesign using Markzware PDF2DTP?<br>
-A: The conversion time depends on several factors such as the size and complexity of the PDF file, the speed and performance of your computer, and the settings and options you choose for the conversion. Generally, it takes only a few minutes to convert a typical PDF file to InDesign using Markzware PDF2DTP.</li>
-<li>Q: Can I try Markzware PDF2DTP for free before buying it?<br>
-A: Yes, you can try Markzware PDF2DTP for free for 15 days by downloading the free trial version from Markzware's website. You can also get a full refund within 30 days of purchase if you are not satisfied with the product.</li>
-</ol>
-```</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Dcouvrez Gta Mumbai City Pc Game 18 le nouveau titre de la saga Grand Theft Auto.md
DELETED
@@ -1,109 +0,0 @@
-<br />
-<h1>GTA Mumbai City Pc Game 18: A Review</h1>
-<p>If you are a fan of Grand Theft Auto (GTA) series, you might have heard of GTA Mumbai City Pc Game 18, a popular game that is set in the city of Mumbai, India. This game is not an official release by Rockstar Games, but a mod created by fans who wanted to experience the thrill of playing GTA in a different setting. In this article, we will review GTA Mumbai City Pc Game 18 and see what it has to offer.</p>
-<h2>Gta Mumbai City Pc Game 18</h2><br /><p><b><b>Download Zip</b> ->->->-> <a href="https://byltly.com/2uKwHP">https://byltly.com/2uKwHP</a></b></p><br /><br />
-<h2>Gameplay</h2>
-<p>GTA Mumbai City Pc Game 18 follows the same gameplay mechanics as other GTA games. You can explore the open world of Mumbai, drive various vehicles, complete missions, fight enemies, and interact with other characters. The game also features some unique elements that reflect the culture and lifestyle of Mumbai, such as Bollywood music, local food, rickshaws, slums, and landmarks. You can also customize your character's appearance, clothes, weapons, and skills.</p>
-<h2>Graphics</h2>
-<p>GTA Mumbai City Pc Game 18 is based on GTA Vice City, which was released in 2002. Therefore, the graphics are not very impressive by today's standards. However, the game does a good job of recreating the atmosphere and scenery of Mumbai, with realistic textures, colors, and lighting. The game also runs smoothly on most PCs, as long as you have the minimum system requirements. You can also adjust the graphics settings to suit your preferences.</p>
-<h2>Sound</h2>
-<p>GTA Mumbai City Pc Game 18 has a great soundtrack that features songs from Bollywood movies and Indian pop artists. The songs match the mood and theme of the game, and add to the immersion. The game also has voice acting for some of the main characters, but not all of them. The voice actors have Indian accents and use some Hindi words, which adds to the authenticity. The sound effects are also decent, but not very realistic.</p>
-<h2>Story</h2>
-<p>GTA Mumbai City Pc Game 18 has a story that revolves around a young man named Raju, who comes to Mumbai from a small village to pursue his dreams. He gets involved in the criminal underworld of Mumbai, and works for various gangs and bosses. He also meets some friends and enemies along the way, who help or hinder his progress. The story is not very original or engaging, but it provides some motivation and context for the gameplay.</p>
-<h2>Pros and Cons</h2>
-<p>GTA Mumbai City Pc Game 18 has some pros and cons that you should consider before playing it. Here are some of them:</p>
-<table>
-<tr>
-<th>Pros</th>
-<th>Cons</th>
-</tr>
-<tr>
-<td>- A different and interesting setting for GTA fans.</td>
-<td>- Not an official game by Rockstar Games.</td>
-</tr>
-<tr>
-<td>- A lot of content and variety in gameplay.</td>
-<td>- Outdated graphics and sound quality.</td>
-</tr>
-<tr>
-<td>- A fun and catchy soundtrack.</td>
-<td>- A weak and cliched story.</td>
-</tr>
-<tr>
-<td>- A free download for PC users.</td>
-<td>- A potential risk of viruses or malware.</td>
-</tr>
-<tr>
-<td>- A creative and impressive mod by fans.</td>
-<td>- A possible violation of intellectual property rights.</td>
-</tr></table>
-<h2>Conclusion</h2>
-<p>GTA Mumbai City Pc Game 18 is a game that offers a new and exciting experience for GTA fans who want to explore a different city and culture. The game has a lot of content and features that make it enjoyable and entertaining. However, the game also has some drawbacks that might disappoint some players who expect high-quality graphics, sound, and story. The game is also not an official product by Rockstar Games, but a mod created by fans who might have infringed on some copyrights. Therefore, you should play this game at your own risk and discretion.</p>
-<p>Gta Mumbai City Pc Game 18 download<br />
-Gta Mumbai City Pc Game 18 free<br />
-Gta Mumbai City Pc Game 18 full version<br />
-Gta Mumbai City Pc Game 18 gameplay<br />
-Gta Mumbai City Pc Game 18 cheats<br />
-Gta Mumbai City Pc Game 18 mods<br />
-Gta Mumbai City Pc Game 18 system requirements<br />
-Gta Mumbai City Pc Game 18 review<br />
-Gta Mumbai City Pc Game 18 trailer<br />
-Gta Mumbai City Pc Game 18 release date<br />
-Gta Mumbai City Pc Game 18 online<br />
-Gta Mumbai City Pc Game 18 multiplayer<br />
-Gta Mumbai City Pc Game 18 crack<br />
-Gta Mumbai City Pc Game 18 patch<br />
-Gta Mumbai City Pc Game 18 torrent<br />
-Gta Mumbai City Pc Game 18 iso<br />
-Gta Mumbai City Pc Game 18 highly compressed<br />
-Gta Mumbai City Pc Game 18 rar<br />
-Gta Mumbai City Pc Game 18 zip<br />
-Gta Mumbai City Pc Game 18 setup<br />
-Gta Mumbai City Pc Game 18 exe<br />
-Gta Mumbai City Pc Game 18 cd key<br />
-Gta Mumbai City Pc Game 18 serial number<br />
-Gta Mumbai City Pc Game 18 activation code<br />
-Gta Mumbai City Pc Game 18 license key<br />
-Gta Mumbai City Pc Game 18 steam key<br />
-Gta Mumbai City Pc Game 18 epic games key<br />
-Gta Mumbai City Pc Game 18 rockstar games key<br />
-Gta Mumbai City Pc Game 18 origin key<br />
-Gta Mumbai City Pc Game 18 ubisoft key<br />
-Gta Mumbai City Pc Game 18 buy<br />
-Gta Mumbai City Pc Game 18 price<br />
-Gta Mumbai City Pc Game 18 amazon<br />
-Gta Mumbai City Pc Game 18 flipkart<br />
-Gta Mumbai City Pc Game 18 snapdeal<br />
-Gta Mumbai City Pc Game 18 ebay<br />
-Gta Mumbai City Pc Game 18 walmart<br />
-Gta Mumbai City Pc Game 18 best buy<br />
-Gta Mumbai City Pc Game 18 target<br />
-Gta Mumbai City Pc Game 18 gamestop<br />
-Gta Mumbai City Pc Game 18 steam store<br />
-Gta Mumbai City Pc Game 18 epic games store<br />
-Gta Mumbai City Pc Game 18 rockstar games store<br />
-Gta Mumbai City Pc Game 18 origin store<br />
-Gta Mumbai City Pc Game 18 ubisoft store<br />
-Gta Mumbai City Pc Game 18 official website<br />
-Gta Mumbai City Pc Game 18 wiki<br />
-Gta Mumbai City Pc Game 18 reddit<br />
-Gta Mumbai City Pc Game 18 youtube<br />
-Gta Mumbai City Pc Game 18 facebook</p>
-<h3>FAQs</h3>
-<p>Here are some frequently asked questions about GTA Mumbai City Pc Game 18:</p>
-<h4>Q1: Is GTA Mumbai City Pc Game 18 an official game or a mod?</h4>
-<p>A1: GTA Mumbai City Pc Game 18 is not an official game by Rockstar Games, but a mod created by fans who used GTA Vice City as a base.</p>
-<h4>Q2: Where can I download GTA Mumbai City Pc Game 18 for free?</h4>
-<p>A2: You can download GTA Mumbai City Pc Game 18 for free from various websites that host mods for GTA games. However, you should be careful about downloading files from unknown sources, as they might contain viruses or malware that can harm your PC.</p>
-<h4>Q3: How can I install GTA Mumbai City Pc Game 18 on my PC?</h4>
-<p>A3: To install GTA Mumbai City Pc Game 18 on your PC, you need to have GTA Vice City installed first. Then, you need to extract the files from the downloaded zip file into your GTA Vice City folder. After that, you can run the game from your desktop shortcut or from your start menu.</p>
-<h4>Q4: What are the minimum and recommended system requirements for GTA Mumbai City Pc Game 18?</h4>
-<p>A4: The minimum system requirements for GTA Mumbai City Pc Game 18 are:</p>
-<ul><li>OS: Windows XP/Vista/7/8/10</li><li>CPU: Intel Pentium III or AMD Athlon @800 MHz</li><li>RAM: 128 MB</li><li>Graphics Card: NVIDIA GeForce or ATI Radeon @32 MB</li><li>Sound Card: DirectX compatible</li><li>HDD Space: 1 GB</li></ul>
-<p>The recommended system requirements for GTA Mumbai City Pc Game 18 are:</p>
-<ul><li>OS: Windows XP/Vista/7/8/10</li><li>CPU: Intel Pentium IV or AMD Athlon XP @1.5 GHz</li><li>RAM: 256 MB</li><li>Graphics Card: NVIDIA GeForce FX or ATI Radeon @64 MB</li><li>Sound Card: DirectX compatible</li><li>HDD Space: 1 GB</li></ul>
-<h4>Q5: Is GTA Mumbai City Pc Game 18 suitable for children?</h4>
-<p>A5: No, GTA Mumbai City Pc Game 18 is not suitable for children under 18 years old. The game contains violence, blood, gore, profanity, drugs, alcohol, sex, nudity, gambling, crime, and other mature themes that are inappropriate for minors.</p>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1gistliPinn/ChatGPT4/Examples/3DMGAME-OMSI.2.Cracked-3DM.md
DELETED
@@ -1,6 +0,0 @@
-<h2>3DMGAME-OMSI.2.Cracked-3DM</h2><br /><p><b><b>Download Zip</b> 🗸 <a href="https://imgfil.com/2uxWU6">https://imgfil.com/2uxWU6</a></b></p><br /><br />
-
-RAGE 2 v1.0-20210219 [Trainer +20] FLiNG [Feb,27 2021] Kingdoms Reborn v0.7-v0.14 ... of my Heart [cheats] [Dec,27 2020] OMSI 2 [cheats] [Dec,27 2020] 1fdad05405<br />
-<br />
-<br />
-<p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download 8 Ball Pool with All Cues Unlocked - No Hack No Root.md
DELETED
@@ -1,116 +0,0 @@
-
-<h1>Download 8 Ball Pool Unlock All Cues: How to Get the Best Cues in the Game</h1>
-<p>If you are a fan of pool games, you have probably heard of or played 8 Ball Pool, one of the most popular and addictive online multiplayer games. In this game, you can challenge your friends or other players from around the world in different game rooms and tournaments. You can also customize your profile, your table, and most importantly, your cue.</p>
-<h2>download 8 ball pool unlock all cues</h2><br /><p><b><b>Download</b> ✦✦✦ <a href="https://urlin.us/2uSVRg">https://urlin.us/2uSVRg</a></b></p><br /><br />
-<p>But what is a cue and why do you need it? How can you get the best cues in the game and improve your performance? And how can you download 8 ball pool unlock all cues and enjoy unlimited access to all the cues available in the game? In this article, we will answer these questions and more. So, keep reading and learn how to master the game of 8 ball pool with the best cues.</p>
-<h2>What are Cues in 8 Ball Pool?</h2>
-<p>Cues are the tools that you use to hit the balls on the table. They are not just for decoration, they actually have a significant impact on your gameplay. Each cue has four stats that determine its quality and performance: force, aim, spin, and time.</p>
-<ul>
-<li>Force: This stat determines how hard you can hit the ball. A higher force means more power and speed, which can help you break clusters, pot long shots, or apply backspin.</li>
-<li>Aim: This stat determines how accurate you can aim at the ball. A higher aim means more precision and control, which can help you pot difficult shots, avoid scratches, or set up position.</li>
-<li>Spin: This stat determines how much spin you can apply to the ball. A higher spin means more effect and curve, which can help you avoid obstacles, change direction, or create angles.</li>
-<li>Time: This stat determines how much time you have to aim and shoot. A higher time means more seconds on the clock, which can help you plan your shots, adjust your aim, or avoid running out of time.</li>
-</ul>
-<p>As you can see, cues are very important for your gameplay. They can make the difference between winning and losing a match. That's why you should always choose a cue that suits your style and skill level.</p>
-<h3>What are the different types of cues and how to get them?</h3>
-<p>There are many types of cues in 8 ball pool, each with its own design, stats, and price. You can get cues in different ways:</p>
-<ul>
-<li>Standard Cues: These are the basic cues that you start with. They have low stats and no special features. You can buy them with coins or cash in the Pool Shop.</li>
-<li>Premium Cues: These are the upgraded cues that have higher stats and some special features. You can buy them with cash in the Pool Shop or win them in Spin & Win or Scratch & Win.</li>
-<li>Victory Cues: These are the exclusive cues that you can only get by winning matches in certain game rooms. They have high stats and unique designs. You can collect them by earning Victory Boxes.</li>
-<li>Legendary Cues: These are the rarest and most powerful cues in the game. They have the highest stats and some amazing features. You can get them by opening Legendary Boxes or by collecting pieces of cues in the Pool Pass.</li>
-</ul>
-<p>As you can see, there are many cues to choose from in 8 ball pool. But which ones are the best and why?</p>
-<p>How to get legendary cues in 8 ball pool<br />
-8 ball pool all table exclusive cues<br />
-8 ball pool golden shots unlock cues<br />
-8 ball pool legendary cues hack<br />
-8 ball pool allclash legendary cues guide<br />
-8 ball pool gaming with k cues<br />
-8 ball pool new cues 2023<br />
-8 ball pool best cues for beginners<br />
-8 ball pool free legendary boxes<br />
-8 ball pool cue stats and upgrades<br />
-8 ball pool cue collection rewards<br />
-8 ball pool cue pieces exchange<br />
-8 ball pool cue recharge trick<br />
-8 ball pool cue of the week<br />
-8 ball pool cue shop offers<br />
-8 ball pool cue spin and force<br />
-8 ball pool cue time and aim<br />
-8 ball pool cue level max<br />
-8 ball pool cue power bar<br />
-8 ball pool cue customization<br />
-8 ball pool cue codes and cheats<br />
-8 ball pool cue reviews and ratings<br />
-8 ball pool cue comparison and ranking<br />
-8 ball pool cue tips and tricks<br />
-8 ball pool cue challenges and achievements</p>
-<h3>What are the best cues in 8 ball pool and why?</h3>
-<p>The answer to this question depends on your personal preference and budget. However, some cues are generally considered to be the best in the game because of their stats, features, and popularity. Here are some of them:</p>
-<ul>
-<li>The Archangel Cue: This is one of the most expensive and powerful cues in the game. It has a perfect balance of force, aim, spin, and time. It also has a special feature that increases your chances of winning the game by 35%.</li>
-<li>The Atlantis Cue: This is one of the most beautiful and unique cues in the game. It has a high force, aim, and spin, and a decent time. It also has a special feature that allows you to recharge your cue for free once per game.</li>
-<li>The Inferno Cue: This is one of the most popular and cool cues in the game. It has a high force, spin, and time, and a good aim. It also has a special feature that gives you a fire effect on your cue ball when you hit it hard.</li>
-<li>The Galaxy Cue: This is one of the most rare and legendary cues in the game. It has the highest stats of all cues in the game. It also has a special feature that gives you a random bonus every time you pot a ball.</li>
-</ul>
-<p>These are just some examples of the best cues in 8 ball pool. There are many more to discover and try out. But how can you get them without spending a lot of money or time?</p>
-<h2>How to Download 8 Ball Pool Unlock All Cues?</h2>
-<p>If you want to get all the cues in the game without spending a dime or waiting for hours, you might be tempted to download 8 ball pool unlock all cues. This is a modded version of the game that claims to give you unlimited access to all the cues available in the game. Sounds too good to be true, right? Well, it is.</p>
-<h3>What are the benefits of downloading 8 ball pool unlock all cues?</h3>
-<p>The only benefit of downloading 8 ball pool unlock all cues is that you can use any cue you want in the game without paying or earning it. You can enjoy playing with different cues and see how they affect your gameplay. You can also impress your friends or opponents with your collection of cues.</p>
-<h3>What are the risks of downloading 8 ball pool unlock all cues?</h3>
-<p>The risks of downloading 8 ball pool unlock all cues are far greater than the benefits. Here are some of them:</p>
-<ul>
-<li>You might download a virus or malware that can harm your device or steal your personal information.</li>
-<li>You might get banned from the game or lose your account for violating the terms of service or cheating.</li>
-<li>You might ruin your gaming experience or lose interest in the game by having everything unlocked without any challenge or reward.</li>
-<li>You might miss out on the fun and excitement of earning and collecting cues legitimately by playing and winning matches.</li>
-</ul>
-<p>As you can see, downloading 8 ball pool unlock all cues is not worth it. It is risky, illegal, and unethical. So, how can you download 8 ball pool unlock all cues safely and legally?</p>
-<h3>How to download 8 ball pool unlock all cues safely and legally?</h3>
-<p>The answer is simple: you can't. There is no safe and legal way to download 8 ball pool unlock all cues. The only way to get all the cues in the game is to play fair and square, earn coins and cash, buy or win cues, and collect pieces of cues. This is how the game is meant to be played and enjoyed.</p>
-<h2>How to Use 8 Ball Pool Unlock All Cues?</h2>
-<p>If you have downloaded 8 ball pool unlock all cues, you might be wondering how to use it. Well, here are some tips on how to use 8 ball pool unlock all cues:</p>
-<h3>How to select and customize your cue in the game?</h3>
-<p>To select your cue in the game, you need to go to the Pool Shop and tap on Cues. There you will see all the cues that you have unlocked or bought. You can scroll through them and tap on the one that you want to use. You can also customize your cue by changing its color or adding stickers.</p>
-<h3>How to use your cue effectively in different game modes and situations?</h3>
-<p>To use your cue effectively in the game, you need to know how to adjust your aim, power, and spin according to the game mode and situation. Here are some tips on how to do that:</p>
-<ul>
-<li>In 1v1 mode, you need to use a cue that has a high aim and time, as these stats will help you pot more balls and avoid mistakes.</li>
-<li>In 9 Ball mode, you need to use a cue that has a high force and spin, as these stats will help you break the rack and control the cue ball.</li>
-<li>In Tournament mode, you need to use a cue that has a high force and time, as these stats will help you win more matches and earn more coins.</li>
-<li>In No Guidelines mode, you need to use a cue that has a high aim and spin, as these stats will help you compensate for the lack of guidelines and apply more effects.</li>
-<li>In Minigames mode, you need to use a cue that has a high force and aim, as these stats will help you win more prizes and rewards.</li>
-</ul>
-<h3>How to improve your skills and strategy with your cue?</h3>
-<p>To improve your skills and strategy with your cue, you need to practice a lot and learn from your mistakes. Here are some tips on how to do that:</p>
-<ul>
-<li>Watch replays of your matches and analyze your shots. See what you did right and what you did wrong. Try to improve your accuracy, timing, and positioning.</li>
-<li>Play against different opponents and learn from their styles. See how they use their cues and what strategies they employ. Try to adapt to their moves and counter them.</li>
-<li>Try different cues and see how they affect your gameplay. Experiment with different combinations of stats and features. Find the cue that suits your style and skill level.</li>
-</ul>
-<h2>Conclusion</h2>
-<p>8 Ball Pool is a fun and challenging game that requires skill, strategy, and luck. One of the most important aspects of the game is the cue, which can make or break your performance. There are many cues to choose from in the game, each with its own stats and features. Some of them are better than others, but none of them are free or easy to get.</p>
-<p>If you want to get all the cues in the game without spending money or time, you might be tempted to download 8 ball pool unlock all cues. This is a modded version of the game that claims to give you unlimited access to all the cues available in the game. However, this is not a safe or legal way to play the game. It can expose you to viruses, malware, bans, or account loss. It can also ruin your gaming experience or interest in the game by having everything unlocked without any challenge or reward.</p>
-<p>The best way to play the game is to play fair and square, earn coins and cash, buy or win cues, and collect pieces of cues. This is how the game is meant to be played and enjoyed. You can also improve your skills and strategy with your cue by practicing a lot and learning from your matches and opponents. This way, you can have fun and satisfaction with the game.</p>
-<p>We hope this article has helped you understand how to download 8 ball pool unlock all cues and how to use them in the game. If you have any questions or comments, feel free to leave them below. And if you liked this article, please share it with your friends or fellow players. Thank you for reading!</p>
-<h2>FAQs</h2>
-<h4>Q: How can I get free coins and cash in 8 ball pool?</h4>
-<p>A: There are several ways to get free coins and cash in 8 ball pool. You can:</p>
-<ul>
-<li>Watch videos or complete offers in the Free Coins section.</li>
-<li>Invite your friends or connect your Facebook account in the Earn Cash section.</li>
-<li>Play Spin & Win or Scratch & Win in the Minigames section.</li>
-<li>Complete missions or achievements in the Missions section.</li>
-<li>Participate in events or promotions in the Events section.</li>
-</ul>
-<h4>Q: How can I upgrade my cue in 8 ball pool?</h4>
-<p>A: You can upgrade your cue by using Pool Cash or Cue Pieces. Pool Cash is a premium currency that you can buy with real money or earn by playing the game. Cue Pieces are fragments of cues that you can collect by opening Victory Boxes or Legendary Boxes. To upgrade your cue, go to the Pool Shop, tap on Cues, select your cue, and tap on Upgrade.</p>
-<h4>Q: How can I change my cue in 8 ball pool?</h4> <p>A: You can change your cue by going to the Pool Shop, tapping on Cues, and selecting the cue that you want to use. You can also change your cue before or during a match by tapping on the cue icon on the bottom left corner of the screen.</p>
-<h4>Q: How can I get Legendary Cues in 8 ball pool?</h4>
-<p>A: You can get Legendary Cues by opening Legendary Boxes or by collecting pieces of cues in the Pool Pass. Legendary Boxes are special boxes that contain pieces of Legendary Cues. You can buy them with Pool Cash or win them in some events or promotions. Pool Pass is a seasonal feature that allows you to earn rewards by completing challenges and leveling up. Some of the rewards are pieces of Legendary Cues.</p>
-<h4>Q: How can I contact the support team of 8 ball pool?</h4>
-<p>A: You can contact the support team of 8 ball pool by going to the Settings, tapping on Help and Support, and choosing the option that suits your issue. You can also visit the official website or social media pages of 8 ball pool and send them a message or feedback.</p> 197e85843d<br />
-<br />
-<br />
DELETED
@@ -1,96 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Download Game Sigma APK: A Stylized Survival Shooter Game for Mobile Phones</h1>
|
3 |
-
<p>If you are looking for a new and exciting game to play on your mobile phone, you might want to check out Game Sigma APK. This is a stylized survival shooter game that offers two different modes: Classic Battle Royale and 4v4 Fight Out. In this article, we will tell you what Game Sigma APK is, what features it has, how to download and install it, and some tips and tricks for playing it.</p>
|
4 |
-
<h2>download game sigma apk</h2><br /><p><b><b>Download</b> ===== <a href="https://jinyurl.com/2uNMdw">https://jinyurl.com/2uNMdw</a></b></p><br /><br />
|
5 |
-
<h2>What is Game Sigma APK?</h2>
|
6 |
-
<p>Game Sigma APK is a game developed by Studio Arm Private Limited, a company based in India. It is a survival shooter game that combines elements of action, strategy, and creativity. The game is available on Android devices and can be downloaded from various websites, such as APKCombo. The game has been updated recently, with the latest version being 1.0.113 as of January 14, 2023.</p>
|
7 |
-
<h3>Features of Game Sigma APK</h3>
|
8 |
-
<p>Game Sigma APK has many features that make it stand out from other survival shooter games. Here are some of them:</p>
|
9 |
-
<h4>- Stylized graphics</h4>
|
10 |
-
<p>The game has a unique and creative art style that immerses you into a stylized survival world. The game uses vibrant colors, cartoon-like characters, and dynamic effects to create a visually appealing experience. The game also runs smoothly on most devices, thanks to its optimized performance.</p>
|
11 |
-
<p>download sigma battle royale apk<br />
|
12 |
-
download sigma game android apk<br />
|
13 |
-
download sigma apk latest version<br />
|
14 |
-
download sigma apk for free<br />
|
15 |
-
download sigma apk from apkcombo<br />
|
16 |
-
download sigma apk mod<br />
|
17 |
-
download sigma apk offline<br />
|
18 |
-
download sigma apk obb<br />
|
19 |
-
download sigma apk xapk<br />
|
20 |
-
download sigma apk full version<br />
|
21 |
-
download sigma game for android<br />
|
22 |
-
download sigma game free<br />
|
23 |
-
download sigma game mod apk<br />
|
24 |
-
download sigma game offline<br />
|
25 |
-
download sigma game online<br />
|
26 |
-
download sigma game update<br />
|
27 |
-
download sigma game hack<br />
|
28 |
-
download sigma game cheats<br />
|
29 |
-
download sigma game tips and tricks<br />
|
30 |
-
download sigma game guide<br />
|
31 |
-
how to download sigma apk<br />
|
32 |
-
how to download sigma game on android<br />
|
33 |
-
how to download sigma game for free<br />
|
34 |
-
how to download sigma game mod apk<br />
|
35 |
-
how to download sigma game offline<br />
|
36 |
-
how to download sigma game online<br />
|
37 |
-
how to download sigma game update<br />
|
38 |
-
how to download sigma game hack<br />
|
39 |
-
how to download sigma game cheats<br />
|
40 |
-
how to download sigma game tips and tricks<br />
|
41 |
-
where to download sigma apk<br />
|
42 |
-
where to download sigma game for android<br />
|
43 |
-
where to download sigma game free<br />
|
44 |
-
where to download sigma game mod apk<br />
|
45 |
-
where to download sigma game offline<br />
|
46 |
-
where to download sigma game online<br />
|
47 |
-
where to download sigma game update<br />
|
48 |
-
where to download sigma game hack<br />
|
49 |
-
where to download sigma game cheats<br />
|
50 |
-
where to download sigma game tips and tricks<br />
|
51 |
-
best site to download sigma apk<br />
|
52 |
-
best site to download sigma game for android<br />
|
53 |
-
best site to download sigma game free<br />
|
54 |
-
best site to download sigma game mod apk<br />
|
55 |
-
best site to download sigma game offline<br />
|
56 |
-
best site to download sigma game online<br />
|
57 |
-
best site to download sigma game update<br />
|
58 |
-
best site to download sigma game hack<br />
|
59 |
-
best site to download sigma game cheats</p>
|
60 |
-
<h4>- Unique survival shooter experience</h4>
|
61 |
-
<p>The game has easy-to-use controls that promise an unforgettable survival experience on mobile. You can move, aim, shoot, jump, crouch, and interact with the environment using simple gestures and buttons. You can also customize your controls and settings according to your preferences.</p>
|
62 |
-
<h4>- Classic Battle Royale mode</h4>
|
63 |
-
<p>In this mode, you will compete against 49 other players in a fast-paced and lite gameplay. You can choose your starting point with your parachute, and then explore the vast map to find weapons, items, and vehicles. You have to stay in the safe zone as long as possible, while avoiding or eliminating other players. The last one standing wins the match.</p>
|
64 |
-
<h4>- 4v4 Fight Out mode</h4>
|
65 |
-
<p>In this mode, you will team up with three other players to fight against another squad in a tense and strategic battle. You have to allocate resources, purchase weapons, and outlast your enemies in various creative maps. You have to fight for your faith and lead your team to victory.</p>
|
66 |
-
<h2>How to download and install Game Sigma APK?</h2>
|
67 |
-
<p>If you want to play Game Sigma APK on your Android device, you have to download and install it from a third-party source, such as APKCombo. Here are the steps to do so:</p>
|
68 |
-
<h3>Steps to download Game Sigma APK from APKCombo</h3>
|
69 |
-
<h4>- Visit the APKCombo website</h4>
|
70 |
-
<p>Go to <a href="(^1^)">https://apkcombo.com/sigma/com.studioarm.sigma/</a> using your browser. This is the official page of Game Sigma APK on APKCombo.</p>
|
71 |
-
<h4>- Search for Game Sigma APK</h4>
|
72 |
-
<p>Type "Game Sigma APK" in the search bar and hit enter <h4>- Choose the version and device compatibility</h4>
|
73 |
-
<p>On the APKCombo page, you will see different versions of Game Sigma APK, along with their file size, update date, and device compatibility. Choose the version that suits your device and click on the download button.</p>
|
74 |
-
<h4>- Download the APK file</h4>
|
75 |
-
<p>Wait for the download to finish. You will see a notification on your device when the APK file is downloaded. You can also check the download progress in your browser or in your file manager.</p>
|
76 |
-
<h4>- Enable unknown sources on your device</h4>
|
77 |
-
<p>Before you can install the APK file, you have to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources. Turn on the toggle to enable unknown sources.</p>
|
78 |
-
<h4>- Install the APK file</h4>
|
79 |
-
<p>Now you can install the APK file on your device. Locate the file in your file manager and tap on it. You will see a prompt asking you to confirm the installation. Tap on install and wait for the process to complete. You will see a notification when the app is installed.</p>
|
80 |
-
<h3>Tips and tricks for playing Game Sigma APK</h3>
|
81 |
-
<p>Now that you have downloaded and installed Game Sigma APK, you are ready to play it. Here are some tips and tricks to help you enjoy the game more:</p>
|
82 |
-
<h4>- Customize your controls and settings</h4>
|
83 |
-
<p>Before you start playing, you should customize your controls and settings according to your preferences. You can access the settings menu from the main screen of the game. Here you can adjust the sensitivity, sound, graphics, language, and other options. You can also customize your controls by dragging and resizing the buttons on the screen.</p>
|
84 |
-
<h4>- Choose your landing spot wisely</h4>
|
85 |
-
<p>In Classic Battle Royale mode, you have to choose your landing spot with your parachute. You should choose a spot that has good loot, but also has less enemies. You can use the map to see where other players are landing, and avoid crowded areas. You can also use the markers to communicate with your teammates and coordinate your landing.</p>
|
86 |
-
<h4>- Loot and equip the best weapons and items</h4>
|
87 |
-
<p>Once you land, you have to loot and equip the best weapons and items you can find. You can loot from buildings, crates, vehicles, and dead enemies. You can equip up to two primary weapons, one secondary weapon, and one melee weapon. You can also equip armor, helmets, backpacks, grenades, medkits, and other items. You should always look for better loot as you play.</p>
|
88 |
-
<h4>- Use cover and stealth to your advantage</h4>
|
89 |
-
<p>The game is not only about shooting, but also about survival. You have to use cover and stealth to your advantage. You can use buildings, trees, rocks, vehicles, and other objects as cover from enemy fire. You can also use crouch and prone positions to reduce your visibility and noise. You should always be aware of your surroundings and avoid exposing yourself too much.</p>
|
90 |
-
<h4>- Communicate and cooperate with your teammates</h4>
|
91 |
-
<p>The game is more fun and easier when you play with your teammates. You can communicate and cooperate with them using voice chat or text chat. You can also use gestures, markers, pings, and other tools to convey information. You should always stick with your teammates, share loot, revive them when they are downed, and support them in combat.</p>
|
92 |
-
<h2>Conclusion</h2>
|
93 |
-
<p>Game Sigma APK is a stylized survival shooter game that offers two different modes: Classic Battle Royale and 4v4 Fight Out. It has many features that make it stand out from other survival shooter games, such as stylized graphics, unique survival shooter experience, easy-to-use controls, and optimized performance. You can download and install Game Sigma APK from APKCombo, following the steps we have provided in this article. You can also use our tips and tricks to improve your gameplay and have more fun.</p>
|
94 |
-
FAQs - Q: Is Game Sigma APK safe to download? - A: Yes, Game Sigma APK is safe to download from APKCombo, as it is verified by VirusTotal and does not contain any malware or viruses. - Q: Is Game Sigma APK free to play? - A: Yes, Game Sigma APK is free to play, but it may contain ads and in-app purchases. - Q: What are the minimum requirements to play Game Sigma APK? - A: The minimum requirements to play Game Sigma APK are Android 5.0 or higher, 2 GB of RAM, 1 GB of storage space, and a stable internet connection. - Q: How can I update Game Sigma APK? - A: You can update Game Sigma APK by visiting the APKCombo website and downloading the latest version of the game. You can also check for updates from within the game settings. - Q: How can I contact the developers of Game Sigma APK? - A: You can contact the developers of Game Sigma APK by visiting their official website, Facebook page, or Instagram account. You can also send them an email at [email protected].</p> 401be4b1e0<br />
|
95 |
-
<br />
|
96 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AI-Hobbyist/Hoyo-RVC/MDXNet.py
DELETED
@@ -1,274 +0,0 @@
import soundfile as sf
import torch, pdb, time, argparse, os, warnings, sys, librosa
import numpy as np
import onnxruntime as ort
from scipy.io.wavfile import write
from tqdm import tqdm
import torch.nn as nn

dim_c = 4


class Conv_TDF_net_trim:
    def __init__(
        self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
    ):
        super(Conv_TDF_net_trim, self).__init__()

        self.dim_f = dim_f
        self.dim_t = 2**dim_t
        self.n_fft = n_fft
        self.hop = hop
        self.n_bins = self.n_fft // 2 + 1
        self.chunk_size = hop * (self.dim_t - 1)
        self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
            device
        )
        self.target_name = target_name
        self.blender = "blender" in model_name

        out_c = dim_c * 4 if target_name == "*" else dim_c
        self.freq_pad = torch.zeros(
            [1, out_c, self.n_bins - self.dim_f, self.dim_t]
        ).to(device)

        self.n = L // 2

    def stft(self, x):
        # Pack the complex STFT into real channels: (batch, dim_c, n_bins, dim_t)
        x = x.reshape([-1, self.chunk_size])
        x = torch.stft(
            x,
            n_fft=self.n_fft,
            hop_length=self.hop,
            window=self.window,
            center=True,
            return_complex=True,
        )
        x = torch.view_as_real(x)
        x = x.permute([0, 3, 1, 2])
        x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
            [-1, dim_c, self.n_bins, self.dim_t]
        )
        return x[:, :, : self.dim_f]

    def istft(self, x, freq_pad=None):
        freq_pad = (
            self.freq_pad.repeat([x.shape[0], 1, 1, 1])
            if freq_pad is None
            else freq_pad
        )
        x = torch.cat([x, freq_pad], -2)
        c = 4 * 2 if self.target_name == "*" else 2
        x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
            [-1, 2, self.n_bins, self.dim_t]
        )
        x = x.permute([0, 2, 3, 1])
        x = x.contiguous()
        x = torch.view_as_complex(x)
        x = torch.istft(
            x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
        )
        return x.reshape([-1, c, self.chunk_size])


def get_models(device, dim_f, dim_t, n_fft):
    return Conv_TDF_net_trim(
        device=device,
        model_name="Conv-TDF",
        target_name="vocals",
        L=11,
        dim_f=dim_f,
        dim_t=dim_t,
        n_fft=n_fft,
    )


warnings.filterwarnings("ignore")
cpu = torch.device("cpu")
if torch.cuda.is_available():
    device = torch.device("cuda:0")
elif torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")


class Predictor:
    def __init__(self, args):
        self.args = args
        self.model_ = get_models(
            device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
        )
        self.model = ort.InferenceSession(
            os.path.join(args.onnx, self.model_.target_name + ".onnx"),
            providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
        )
        print("onnx load done")

    def demix(self, mix):
        samples = mix.shape[-1]
        margin = self.args.margin
        chunk_size = self.args.chunks * 44100
        assert not margin == 0, "margin cannot be zero!"
        if margin > chunk_size:
            margin = chunk_size

        segmented_mix = {}

        if self.args.chunks == 0 or samples < chunk_size:
            chunk_size = samples

        counter = -1
        for skip in range(0, samples, chunk_size):
            counter += 1

            s_margin = 0 if counter == 0 else margin
            end = min(skip + chunk_size + margin, samples)

            start = skip - s_margin

            segmented_mix[skip] = mix[:, start:end].copy()
            if end == samples:
                break

        sources = self.demix_base(segmented_mix, margin_size=margin)
        """
        mix:(2,big_sample)
        segmented_mix:offset->(2,small_sample)
        sources:(1,2,big_sample)
        """
        return sources

    def demix_base(self, mixes, margin_size):
        chunked_sources = []
        progress_bar = tqdm(total=len(mixes))
        progress_bar.set_description("Processing")
        for mix in mixes:
            cmix = mixes[mix]
            sources = []
            n_sample = cmix.shape[1]
            model = self.model_
            trim = model.n_fft // 2
            gen_size = model.chunk_size - 2 * trim
            pad = gen_size - n_sample % gen_size
            mix_p = np.concatenate(
                (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
            )
            mix_waves = []
            i = 0
            while i < n_sample + pad:
                waves = np.array(mix_p[:, i : i + model.chunk_size])
                mix_waves.append(waves)
                i += gen_size
            mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
            with torch.no_grad():
                _ort = self.model
                spek = model.stft(mix_waves)
                if self.args.denoise:
                    # Run the ONNX model on the spectrogram and on its negation,
                    # then average the two passes to suppress noise.
                    spec_pred = (
                        -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
                        + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
                    )
                    tar_waves = model.istft(torch.tensor(spec_pred))
                else:
                    tar_waves = model.istft(
                        torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
                    )
                tar_signal = (
                    tar_waves[:, :, trim:-trim]
                    .transpose(0, 1)
                    .reshape(2, -1)
                    .numpy()[:, :-pad]
                )

                start = 0 if mix == 0 else margin_size
                end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
                if margin_size == 0:
                    end = None
                sources.append(tar_signal[:, start:end])

                progress_bar.update(1)

            chunked_sources.append(sources)
        _sources = np.concatenate(chunked_sources, axis=-1)
        # del self.model
        progress_bar.close()
        return _sources

    def prediction(self, m, vocal_root, others_root, format):
        os.makedirs(vocal_root, exist_ok=True)
        os.makedirs(others_root, exist_ok=True)
        basename = os.path.basename(m)
        mix, rate = librosa.load(m, mono=False, sr=44100)
        if mix.ndim == 1:
            mix = np.asfortranarray([mix, mix])
        mix = mix.T
        sources = self.demix(mix.T)
        opt = sources[0].T
        if format in ["wav", "flac"]:
            sf.write(
                "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
            )
            sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
        else:
            path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
            path_other = "%s/%s_others.wav" % (others_root, basename)
            sf.write(path_vocal, mix - opt, rate)
            sf.write(path_other, opt, rate)
            if os.path.exists(path_vocal):
                os.system(
                    "ffmpeg -i %s -vn %s -q:a 2 -y"
                    % (path_vocal, path_vocal[:-4] + ".%s" % format)
                )
            if os.path.exists(path_other):
                os.system(
                    "ffmpeg -i %s -vn %s -q:a 2 -y"
                    % (path_other, path_other[:-4] + ".%s" % format)
                )


class MDXNetDereverb:
    def __init__(self, chunks):
        self.onnx = "uvr5_weights/onnx_dereverb_By_FoxJoy"
        self.shifts = 10  # 'Predict with randomised equivariant stabilisation'
        self.mixing = "min_mag"  # ['default','min_mag','max_mag']
        self.chunks = chunks
        self.margin = 44100
        self.dim_t = 9
        self.dim_f = 3072
        self.n_fft = 6144
        self.denoise = True
        self.pred = Predictor(self)

    def _path_audio_(self, input, vocal_root, others_root, format):
        self.pred.prediction(input, vocal_root, others_root, format)


if __name__ == "__main__":
    dereverb = MDXNetDereverb(15)
    from time import time as ttime

    t0 = ttime()
    dereverb._path_audio_(
        "雪雪伴奏对消HP5.wav",
        "vocal",
        "others",
        "wav",  # the original call omitted the required `format` argument
    )
    t1 = ttime()
    print(t1 - t0)


"""
runtime\python.exe MDXNet.py

6G:
15/9:0.8G->6.8G
14:0.8G->6.5G
25: OOM (crashes)

half15:0.7G->6.6G,22.69s
fp32-15:0.7G->6.6G,20.85s
"""
spaces/AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL/Article.md
DELETED
@@ -1,51 +0,0 @@
# Image Generation for Art, Marketing, Ideation, Design, and Use in Business

A number of multi-step AI pipeline strategies have emerged on the open market that let you generate images from a combination of image prompts and word prompts. This brief analysis surveys the prompting capabilities and image-rendering techniques these strategies use to generate art from a human understanding of images and of the text used to describe a scene.

First, a top-five list of state-of-the-art generators, both free and paid, is worth consideration.

1) Midjourney - a Discord-server-based chatbot AI that accepts /imagine prompts and can generate multiple images at a time. It is best at parallel creation, with high accuracy even for photoreal creations.
2) Artbreeder - a multi-capability tool which now features a Collager that assists in starting an image composition. By far the most innovative approach, and it excels at combining the right partial elements in a scene.
3) Dreamstudio - a Huggingface-derived art program in beta which uses stable diffusion to create highly accurate art and images.
4) Nightcafe - a credit-based creation AI app that can generate video dives into an AI art piece, producing some of the best experiences in video.
5) RunwayML - a quintessential tool for processing morph audio and video tracks which rivals most high-end video editing tools.

These 5 tools make up some of the best cloud-based AI pipeline programs that allow anyone to begin easily building their portfolio of art.

The prompting capabilities often involve having a set of text-based prompts to get started. Most tools also accept a starter image, which could be an example of what you would like to create.

URL Links:
1) Collager: https://www.artbreeder.com/beta/collage
2) NightCafe: https://creator.nightcafe.studio/explore
3) Midjourney: https://www.midjourney.com/app/users/779773261440614430/
4) Dreamstudio: https://beta.dreamstudio.ai/dream
5) RunwayML: https://app.runwayml.com/

## Getting Started and Organizing Your AI Pipeline and Process

Any great strategy combines all the capabilities at your disposal through a number of steps, and it is useful to note how easily you can fit these together into a process that works for you.

The techniques worth noting are listed below. Considering how you will use them will make your pipeline easier and more automated, letting you spend the majority of your time curating what you have made and ideating what you want to create next.

1) Source materials: Since prompting requires text, and text examples can quickly help you compose good input, it is worth considering and documenting some effective prompts. Nightcafe, with its integration into email, sends you a copy of your creation plus the prompting text, so one option is to use your email account to keep a record of which prompts work for which outputs.
2) Source materials: Discord, since it is a public chat format, lets you easily see what others are using for prompts in bulk. There are a number of chat channels designed for people new to the platform, and you can often copy and paste when you see very effective prompts with the material you are looking for.
3) Source materials: Collager is unique in its ability to add additive parts and then dial in the percentage of AI you would like with them. This allows you to add a few image elements to help start your generation.
4) Source materials: Since images and prompts are going to be your mainstay inputs, it is worth adopting an open standard for storing and retrieving them from anywhere. Github is a good place, since markdown can hold text in table or list format and can reference uploaded images. It is also good for portability, since you can later fork and download your repository with a few clicks from anywhere.
5) Source materials: Google Drive is integrated into the Artbreeder Collager workflow, which lets you easily expand your work and even compose albums of the pieces you like into Google Photos albums. The portfolios you save on different sites differ in how easily your collections can be aggregated: Collager allows right-click saving of your creation, Dreamstudio features a history, and Midjourney provides a profile site where you can store and review creations, even triggering Upscales, which are important for getting the highest-resolution output.

## Social Media integration

Depending on the target "safe for work" exports of your work, it is sometimes important to know which of your accepted social media outlets you can integrate. Cloud-based interactions are the key to reaching audiences successfully if you want to scale and share your process with others.

The key social media outlets supported by these tools are listed below, sorted with public open source first:

1) Github - Github is open at most companies and allows creation of a free space to share your content.
2) LinkedIn - LinkedIn is acceptable use at nearly every company.
3) Twitter - Twitter is supported as a social media outlet at most companies, yet it can also be used with security restrictions which might limit posting but allow read access.
4) Facebook - Meta's Facebook is a good outlet since it allows creation of large folios of your images along with stories. This venue, however, is locked down at many organizations.
5) Instagram - Instagram is supported as an output channel by many tools, yet it has decreased in popularity due to the high frequency of ads and pay-for-likes models. While it can still be one of the best places for domain-specific arrangements of images, it is likely locked down in most secure organizations.
6) Youtube - For video uploads with automated captioning and long-term storage of short- and long-form video, this is essential for any creation you compose as video. It is also useful to review and compose playlists of videos that speed up your learning - spend some time at Youtube university and keep a record of your keyword searches there, sometimes along with your playlists, to accelerate learning.
7) Gmail - With the ability to move email in and out, it is useful to create and wrap up project details within email. Most email policies come with a content limitation (for example, no files larger than 25MB), so get used to creating project wrap-up archives with winzip or other compression software. With the convenience of keyword searching, you can usually use this as a base.
8) Last, a worthy mention is Huggingface.com. Like Github, as you become more sophisticated in your public open-source capabilities, HuggingFace lets you wrap up your work using one of three software development kits - Gradio, Streamlit, and HTML5 - each with unique AI and UI integration components and features. If you want to create your own AI pipelines, it also has the open-source code and models ready to go to help you on your journey.
spaces/AICODER009/Food101_Detection/model.py
DELETED
@@ -1,36 +0,0 @@
import torch
import torchvision

from torch import nn


def create_effnetb2_model(num_classes: int = 3, seed: int = 42):
    """Creates an EfficientNetB2 feature extractor model and transforms.

    Args:
        num_classes (int, optional): number of classes in the classifier head.
            Defaults to 3.
        seed (int, optional): random seed value. Defaults to 42.

    Returns:
        model (torch.nn.Module): EffNetB2 feature extractor model.
        transforms (torchvision.transforms): EffNetB2 image transforms.
    """
    # Create EffNetB2 pretrained weights, transforms and model
    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    transforms = weights.transforms()
    model = torchvision.models.efficientnet_b2(weights=weights)

    # Freeze all layers in base model
    for param in model.parameters():
        param.requires_grad = False

    # Change classifier head with random seed for reproducibility
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes),
    )

    return model, transforms
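For reference, a minimal sketch of how this helper would be driven for the 101 Food101 classes; the `model` import path is assumed from the file's location and is not confirmed by the repository:

import torch

from model import create_effnetb2_model  # assumed module name, from the file path above

effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=101)
effnetb2.eval()

# The default EffNetB2 transforms produce 288x288 crops, so a dummy batch:
x = torch.randn(1, 3, 288, 288)
with torch.inference_mode():
    probs = torch.softmax(effnetb2(x), dim=1)
print(probs.shape)  # torch.Size([1, 101])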
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/utils.py
DELETED
@@ -1,361 +0,0 @@
import numpy as np
import torch
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
import logging

# import h5py
from tqdm import tqdm
import random
import json
import os
import pathlib

# TODO: (yusong) this is not a good place to store this information and it does not scale. Needs to be fixed later.
dataset_split = {
    "audiocaps": ["train", "valid", "test"],
    "audioset": ["balanced_train", "unbalanced_train", "eval"],
    "BBCSoundEffects": ["train", "test"],
    "Clotho": ["train", "test", "valid"],
    "free_to_use_sounds": ["train", "test"],
    "paramount_motion": ["train", "test"],
    "sonniss_game_effects": ["train", "test"],
    "wesoundeffects": ["train", "test"],
    "MACS": ["train", "test"],
    "freesound": ["train", "test"],
    "FSD50K": ["train", "test", "valid"],
    "fsd50k_class_label": ["train", "test", "valid"],
    "esc50": ["train", "test"],
    "audiostock": ["train", "test"],
    "freesound_no_overlap_noesc50": ["train", "test"],
    "epidemic_sound_effects": ["train", "test"],
    "VGGSound": ["train", "test"],
    "urbansound8k_class_label": ["train", "test"],
    "audioset_t5": ["balanced_train", "unbalanced_train", "eval"],
    "epidemic_sound_effects_t5": ["train", "test"],
    "WavText5K": ["train", "test"],
    "esc50_no_overlap": ["train", "test"],
    "usd8k_no_overlap": ["train", "test"],
    "fsd50k_200_class_label": ["train", "test", "valid"],
}


def freeze_batch_norm_2d(module, module_match={}, name=""):
    """
    Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
    itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
    returned. Otherwise, the module is walked recursively and submodules are converted in place.

    Args:
        module (torch.nn.Module): Any PyTorch module.
        module_match (dict): Dictionary of full module names to freeze (all if empty)
        name (str): Full module name (prefix)

    Returns:
        torch.nn.Module: Resulting module

    Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
    """
    res = module
    is_match = True
    if module_match:
        is_match = name in module_match
    if is_match and isinstance(
        module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
    ):
        res = FrozenBatchNorm2d(module.num_features)
        res.num_features = module.num_features
        res.affine = module.affine
        if module.affine:
            res.weight.data = module.weight.data.clone().detach()
            res.bias.data = module.bias.data.clone().detach()
        res.running_mean.data = module.running_mean.data
        res.running_var.data = module.running_var.data
        res.eps = module.eps
    else:
        for child_name, child in module.named_children():
            full_child_name = ".".join([name, child_name]) if name else child_name
            new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
            if new_child is not child:
                res.add_module(child_name, new_child)
    return res


def exist(dataset_name, dataset_type):
    """
    Check if dataset exists
    """
    if dataset_type in dataset_split[dataset_name]:
        return True
    else:
        return False


def get_tar_path_from_dataset_name(
    dataset_names, dataset_types, islocal, dataset_path, proportion=1, full_dataset=None
):
    """
    Get tar path from dataset name and type
    """
    output = []
    for n in dataset_names:
        if full_dataset is not None and n in full_dataset:
            current_dataset_types = dataset_split[n]
        else:
            current_dataset_types = dataset_types
        for s in current_dataset_types:
            tmp = []
            if islocal:
                sizefilepath_ = f"{dataset_path}/{n}/{s}/sizes.json"
                if not os.path.exists(sizefilepath_):
                    sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
            else:
                sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
            if not os.path.exists(sizefilepath_):
                continue
            sizes = json.load(open(sizefilepath_, "r"))
            for k in sizes.keys():
                if islocal:
                    tmp.append(f"{dataset_path}/{n}/{s}/{k}")
                else:
                    tmp.append(
                        f"pipe:aws s3 --cli-connect-timeout 0 cp s3://s-laion-audio/webdataset_tar/{n}/{s}/{k} -"
                    )
            if proportion != 1:
                tmp = random.sample(tmp, int(proportion * len(tmp)))
            output.append(tmp)
    return sum(output, [])


def get_tar_path_from_txts(txt_path, islocal, proportion=1):
    """
    Get tar path from txt path
    """
    if isinstance(txt_path, (list, tuple)):
        return sum(
            [
                get_tar_path_from_txts(
                    txt_path[i], islocal=islocal, proportion=proportion
                )
                for i in range(len(txt_path))
            ],
            [],
        )
    if isinstance(txt_path, str):
        with open(txt_path) as f:
            lines = f.readlines()
        if islocal:
            lines = [
                lines[i]
                .split("\n")[0]
                .replace("pipe:aws s3 cp s3://s-laion-audio/", "/mnt/audio_clip/")
                for i in range(len(lines))
            ]
        else:
            lines = [
                lines[i].split("\n")[0].replace(".tar", ".tar -")
                for i in range(len(lines))
            ]
        if proportion != 1:
            print("Sampling tars with proportion of {}".format(proportion))
            lines = random.sample(lines, int(proportion * len(lines)))
        return lines


def get_mix_lambda(mixup_alpha, batch_size):
    # One Beta(alpha, alpha) mixing coefficient per example in the batch.
    mixup_lambdas = [
        np.random.beta(mixup_alpha, mixup_alpha, 1)[0] for _ in range(batch_size)
    ]
    return np.array(mixup_lambdas).astype(np.float32)


def do_mixup(x, mixup_lambda):
    """
    Args:
        x: (batch_size , ...)
        mixup_lambda: (batch_size,)
    Returns:
        out: (batch_size, ...)
    """
    out = (
        x.transpose(0, -1) * mixup_lambda
        + torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
    ).transpose(0, -1)
    return out


def interpolate(x, ratio):
    """Interpolate data in time domain. This is used to compensate the
    resolution reduction in downsampling of a CNN.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, ratio to interpolate
    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    (batch_size, time_steps, classes_num) = x.shape
    upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
    return upsampled


def pad_framewise_output(framewise_output, frames_num):
    """Pad framewise_output to the same length as input frames. The pad value
    is the same as the value of the last frame.
    Args:
        framewise_output: (batch_size, frames_num, classes_num)
        frames_num: int, number of frames to pad
    Outputs:
        output: (batch_size, frames_num, classes_num)
    """
    pad = framewise_output[:, -1:, :].repeat(
        1, frames_num - framewise_output.shape[1], 1
    )
    """tensor for padding"""

    output = torch.cat((framewise_output, pad), dim=1)
    """(batch_size, frames_num, classes_num)"""
    return output  # the original file was missing this return statement


# def process_ipc(index_path, classes_num, filename):
#     # load data
#     logging.info("Load Data...............")
#     ipc = [[] for _ in range(classes_num)]
#     with h5py.File(index_path, "r") as f:
#         for i in tqdm(range(len(f["target"]))):
#             t_class = np.where(f["target"][i])[0]
#             for t in t_class:
#                 ipc[t].append(i)
#     print(ipc)
#     np.save(filename, ipc)
#     logging.info("Load Data Succeed...............")


def save_to_dict(s, o_={}):
    sp = s.split(": ")
    o_.update({sp[0]: float(sp[1])})
    return o_


def get_data_from_log(txt_path):
    """
    Output dictionary from out.txt log file
    """
    with open(txt_path) as f:
        lines = f.readlines()
    val_data = {}
    train_data = {}
    train_losses = []
    train_losses_epoch = []
    for i in range(len(lines)):
        if "| INFO |" in lines[i]:
            if "Eval Epoch" in lines[i]:
                if "val_loss" in lines[i]:
                    # float(regex.sub("", lines[310].split(" ")[-1]).replace(" ", ""))
                    line = lines[i].split("Eval Epoch: ")[-1]
                    num_epoch = int(line.split(" ")[0].split(" ")[0])
                    d = {
                        line.split(" ")[0]
                        .split(" ")[1]
                        .replace(":", ""): float(line.split(" ")[0].split(" ")[-1])
                    }
                    for i in range(1, len(line.split(" "))):
                        d = save_to_dict(line.split(" ")[i], d)
                    val_data[num_epoch] = d
            elif "Train Epoch" in lines[i]:
                num_epoch = int(lines[i].split("Train Epoch: ")[1][0])
                loss = float(lines[i].split("Loss: ")[-1].split(" (")[0])
                train_losses.append(loss)
                train_losses_epoch.append(num_epoch)
    for i in range(len(train_losses)):
        train_data[i] = {
            "num_epoch": train_losses_epoch[i],
            "train_loss": train_losses[i],
        }
    return train_data, val_data


def save_p(obj, filename):
    import pickle

    try:
        from deepdiff import DeepDiff
    except:
        os.system("pip install deepdiff")
        from deepdiff import DeepDiff
    with open(filename, "wb") as file:
        pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL)  # highest protocol
    with open(filename, "rb") as file:
        z = pickle.load(file)
    assert (
        DeepDiff(obj, z, ignore_string_case=True) == {}
    ), "there is something wrong with the saving process"
    return


def load_p(filename):
    import pickle

    with open(filename, "rb") as file:
        z = pickle.load(file)
    return z


def save_json(data, name="data.json"):
    import json

    with open(name, "w") as fp:
        json.dump(data, fp)
    return


def load_json(name):
    import json

    with open(name, "r") as fp:
        data = json.load(fp)
    return data


from multiprocessing import Process, Manager
from multiprocessing import Process, Value, Array
from ctypes import c_wchar


def load_class_label(path):
    # https://stackoverflow.com/questions/48004243/how-to-share-large-read-only-dictionary-list-across-processes-in-multiprocessing
    # https://stackoverflow.com/questions/45693949/storing-strings-in-a-multiprocessing-sharedctypes-array
    out = None
    if path is not None:
        if pathlib.Path(path).suffix in [".pkl", ".pickle"]:
            out = load_p(path)
        elif pathlib.Path(path).suffix in [".json", ".txt"]:
            out = load_json(path)
        elif pathlib.Path(path).suffix in [".npy", ".npz"]:
            out = np.load(path)
        elif pathlib.Path(path).suffix in [".csv"]:
            import pandas as pd

            out = pd.read_csv(path)
    return out
    # if out is None:
    #     return None
    # else:
    #     key = Array(c_wchar, '\n'.join(list(out.keys())), lock=False)
    #     val = Array('i', out.values(), lock=False)
    #     return (key, val)


from torch import optim


def get_optimizer(params, lr, betas, eps, momentum, optimizer_name):
    if optimizer_name.lower() == "adamw":
        optimizer = optim.AdamW(params, lr=lr, betas=betas, eps=eps)
    elif optimizer_name.lower() == "sgd":
        optimizer = optim.SGD(params, lr=lr, momentum=momentum)
    elif optimizer_name.lower() == "adam":
        optimizer = optim.Adam(params, lr=lr, betas=betas, eps=eps)
    else:
        raise ValueError("optimizer name is not correct")
    return optimizer
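As a quick illustration of the mixup helpers above (a sketch that assumes this file is importable as `utils`): `get_mix_lambda` draws one Beta-distributed coefficient per example, and `do_mixup` blends each row with the batch-reversed row.

import torch

from utils import do_mixup, get_mix_lambda  # assumed import path

x = torch.arange(8.0).reshape(4, 2)             # toy (batch, features) tensor
lam = torch.from_numpy(get_mix_lambda(0.5, 4))  # one lambda per example
mixed = do_mixup(x, lam)                        # row i blended with row 3 - i
print(mixed.shape)                              # torch.Size([4, 2])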
spaces/AIWaves/SOP_Generation-single/Memory/__init__.py
DELETED
@@ -1 +0,0 @@
from .base_Memory import Memory
spaces/AchyuthGamer/ImMagician-Gradio/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: ImMagician Gradio
emoji: 🪄
colorFrom: yellow
colorTo: purple
sdk: gradio
sdk_version: 3.44.4
app_file: app.py
pinned: true
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AchyuthGamer/OpenGPT/g4f/Provider/PerplexityAi.py
DELETED
@@ -1,87 +0,0 @@
from __future__ import annotations

import json
import time
import base64
from curl_cffi.requests import AsyncSession

from .base_provider import AsyncProvider, format_prompt


class PerplexityAi(AsyncProvider):
    url = "https://www.perplexity.ai"
    working = True
    supports_gpt_35_turbo = True
    _sources = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        **kwargs
    ) -> str:
        url = cls.url + "/socket.io/?EIO=4&transport=polling"
        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session:
            url_session = "https://www.perplexity.ai/api/auth/session"
            response = await session.get(url_session)

            response = await session.get(url, params={"t": timestamp()})
            response.raise_for_status()
            sid = json.loads(response.text[1:])["sid"]

            data = '40{"jwt":"anonymous-ask-user"}'
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()

            data = "424" + json.dumps([
                "perplexity_ask",
                format_prompt(messages),
                {
                    "version": "2.1",
                    "source": "default",
                    "language": "en",
                    "timezone": time.tzname[0],
                    "search_focus": "internet",
                    "mode": "concise"
                }
            ])
            response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
            response.raise_for_status()

            while True:
                response = await session.get(url, params={"t": timestamp(), "sid": sid})
                response.raise_for_status()
                for line in response.text.splitlines():
                    if line.startswith("434"):
                        result = json.loads(json.loads(line[3:])[0]["text"])

                        cls._sources = [{
                            "title": source["name"],
                            "url": source["url"],
                            "snippet": source["snippet"]
                        } for source in result["web_results"]]

                        return result["answer"]

    @classmethod
    def get_sources(cls):
        return cls._sources

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"


def timestamp() -> str:
    return base64.urlsafe_b64encode(int(time.time() - 1407782612).to_bytes(4, 'big')).decode()
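The `timestamp` helper packs an offset Unix time into four big-endian bytes and URL-safe base64; a quick round-trip check of that encoding (a sketch reusing the function above):

import base64
import time

t = timestamp()  # an 8-character URL-safe base64 string
seconds = int.from_bytes(base64.urlsafe_b64decode(t), "big") + 1407782612
print(abs(seconds - int(time.time())) <= 1)  # True: the encoding round-trips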
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Wewordle.py
DELETED
@@ -1,75 +0,0 @@
import os
import requests
import json
import random
import time
import string
from ...typing import sha256, Dict, get_type_hints

url = "https://wewordle.org/gptapi/v1/android/turbo"
model = ['gpt-3.5-turbo']
supports_stream = False
needs_auth = False


def _create_completion(model: str, messages: list, stream: bool, **kwargs):
    # Flatten the chat history into a single plain-text prompt.
    base = ''
    for message in messages:
        base += '%s: %s\n' % (message['role'], message['content'])
    base += 'assistant:'
    # randomize user id and app id
    _user_id = ''.join(random.choices(
        f'{string.ascii_lowercase}{string.digits}', k=16))
    _app_id = ''.join(random.choices(
        f'{string.ascii_lowercase}{string.digits}', k=31))
    # make current date with format utc
    _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
    headers = {
        'accept': '*/*',
        'pragma': 'no-cache',
        'Content-Type': 'application/json',
        'Connection': 'keep-alive'
    }
    data = {
        "user": _user_id,
        "messages": [
            {"role": "user", "content": base}
        ],
        "subscriber": {
            "originalPurchaseDate": None,
            "originalApplicationVersion": None,
            "allPurchaseDatesMillis": {},
            "entitlements": {
                "active": {},
                "all": {}
            },
            "allPurchaseDates": {},
            "allExpirationDatesMillis": {},
            "allExpirationDates": {},
            "originalAppUserId": f"$RCAnonymousID:{_app_id}",
            "latestExpirationDate": None,
            "requestDate": _request_date,
            "latestExpirationDateMillis": None,
            "nonSubscriptionTransactions": [],
            "originalPurchaseDateMillis": None,
            "managementURL": None,
            "allPurchasedProductIdentifiers": [],
            "firstSeen": _request_date,
            "activeSubscriptions": []
        }
    }
    response = requests.post(url, headers=headers, data=json.dumps(data))
    if response.status_code == 200:
        _json = response.json()
        if 'message' in _json:
            message_content = _json['message']['content']
            message_content = message_content.replace('**assistant:** ', '')
            yield message_content
    else:
        print(f"Error Occurred::{response.status_code}")
        return None


params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
    '(%s)' % ', '.join(
        [f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
spaces/AgentVerse/agentVerse/app.py
DELETED
@@ -1,599 +0,0 @@
import base64
import openai
import itertools
import json
from typing import Dict, List, Tuple

import cv2
import gradio as gr

from agentverse import TaskSolving
from agentverse.simulation import Simulation
from agentverse.message import Message


def cover_img(background, img, place: Tuple[int, int]):
    """
    Overlays the specified image to the specified position of the background image.
    :param background: background image
    :param img: the specified image
    :param place: the top-left coordinate of the target location
    """
    back_h, back_w, _ = background.shape
    height, width, _ = img.shape
    for i, j in itertools.product(range(height), range(width)):
        if img[i, j, 3]:
            background[place[0] + i, place[1] + j] = img[i, j, :3]


class GUI:
    """
    the UI of frontend
    """

    def __init__(
        self,
        task: str = "simulation/nlp_classroom_9players",
        tasks_dir: str = "agentverse/tasks",
    ):
        """
        init a UI.
        default number of students is 0
        """
        self.messages = []
        self.task = task
        self.tasks_dir = tasks_dir
        if task == "pipeline_brainstorming":
            self.backend = TaskSolving.from_task(task, tasks_dir)
        else:
            self.backend = Simulation.from_task(task, tasks_dir)
        self.turns_remain = 0
        self.agent_id = {
            self.backend.agents[idx].name: idx
            for idx in range(len(self.backend.agents))
        }
        self.stu_num = len(self.agent_id) - 1
        self.autoplay = False
        self.image_now = None
        self.text_now = None
        self.tot_solutions = 5
        self.solution_status = [False] * self.tot_solutions

    def get_avatar(self, idx):
        if idx == -1:
            img = cv2.imread("./imgs/db_diag/-1.png")
        elif self.task == "simulation/prisoner_dilemma":
            img = cv2.imread(f"./imgs/prison/{idx}.png")
        else:
            img = cv2.imread(f"./imgs/{idx}.png")
        base64_str = cv2.imencode(".png", img)[1].tostring()
        return "data:image/png;base64," + base64.b64encode(base64_str).decode("utf-8")

    def stop_autoplay(self):
        self.autoplay = False
        return (
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=False),
        )

    def start_autoplay(self):
        self.autoplay = True
        yield (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=True),
            gr.Button.update(interactive=False),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

        while self.autoplay and self.turns_remain > 0:
            outputs = self.gen_output()
            self.image_now, self.text_now = outputs

            yield (
                *outputs,
                gr.Button.update(
                    interactive=not self.autoplay and self.turns_remain > 0
                ),
                gr.Button.update(interactive=self.autoplay and self.turns_remain > 0),
                gr.Button.update(
                    interactive=not self.autoplay and self.turns_remain > 0
                ),
                *[gr.Button.update(visible=statu) for statu in self.solution_status],
                gr.Box.update(visible=any(self.solution_status)),
            )

    def delay_gen_output(
        self,
    ):
        yield (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=False),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

        outputs = self.gen_output()
        self.image_now, self.text_now = outputs

        yield (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=self.turns_remain > 0),
            gr.Button.update(interactive=self.turns_remain > 0),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

    def delay_reset(self, task_dropdown, api_key_text, organization_text):
        self.autoplay = False
        self.image_now, self.text_now = self.reset(
            task_dropdown, api_key_text, organization_text
        )
        return (
            self.image_now,
            self.text_now,
            gr.Button.update(interactive=True),
            gr.Button.update(interactive=False),
            gr.Button.update(interactive=True),
            *[gr.Button.update(visible=statu) for statu in self.solution_status],
            gr.Box.update(visible=any(self.solution_status)),
        )

    def reset(
        self,
        task_dropdown="simulation/nlp_classroom_9players",
        api_key_text="",
        organization_text="",
    ):
        openai.api_key = api_key_text
        openai.organization = organization_text
        """
        tell backend the new number of students and generate new empty image
        :param stu_num:
        :return: [empty image, empty message]
        """
        # if not 0 <= stu_num <= 30:
        #     raise gr.Error("the number of students must be between 0 and 30.")

        """
        # [To-Do] Need to add a function to assign agent numbers into the backend.
        """
        # self.backend.reset(stu_num)
        # self.stu_num = stu_num

        """
        # [To-Do] Pass the parameters to reset
        """
        if task_dropdown == "pipeline_brainstorming":
            self.backend = TaskSolving.from_task(task_dropdown, self.tasks_dir)
        else:
            self.backend = Simulation.from_task(task_dropdown, self.tasks_dir)
        self.agent_id = {
            self.backend.agents[idx].name: idx
            for idx in range(len(self.backend.agents))
        }

        self.task = task_dropdown
        self.stu_num = len(self.agent_id) - 1
        self.backend.reset()
        self.turns_remain = self.backend.environment.max_turns

        if task_dropdown == "simulation/prisoner_dilemma":
            background = cv2.imread("./imgs/prison/case_1.png")
        elif task_dropdown == "simulation/db_diag":
            background = cv2.imread("./imgs/db_diag/background.png")
        elif "sde" in task_dropdown:
            background = cv2.imread("./imgs/sde/background.png")
        else:
            background = cv2.imread("./imgs/background.png")
        back_h, back_w, _ = background.shape
        stu_cnt = 0
        for h_begin, w_begin in itertools.product(
            range(800, back_h, 300), range(135, back_w - 200, 200)
        ):
            stu_cnt += 1
            img = cv2.imread(
                f"./imgs/{(stu_cnt - 1) % 11 + 1 if stu_cnt <= self.stu_num else 'empty'}.png",
                cv2.IMREAD_UNCHANGED,
            )
            cover_img(
                background,
                img,
                (h_begin - 30 if img.shape[0] > 190 else h_begin, w_begin),
            )
        self.messages = []
        self.solution_status = [False] * self.tot_solutions
        return [cv2.cvtColor(background, cv2.COLOR_BGR2RGB), ""]

    def gen_img(self, data: List[Dict]):
        """
        generate new image with sender rank
        :param data:
        :return: the new image
        """
        # The following code need to be more general. This one is too task-specific.
        # if len(data) != self.stu_num:
        if len(data) != self.stu_num + 1:
            raise gr.Error("data length is not equal to the total number of students.")
        if self.task == "simulation/prisoner_dilemma":
            img = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)
            if (
                len(self.messages) < 2
                or self.messages[-1][0] == 1
                or self.messages[-2][0] == 2
            ):
                background = cv2.imread("./imgs/prison/case_1.png")
                if data[0]["message"] != "":
                    cover_img(background, img, (400, 480))
            else:
                background = cv2.imread("./imgs/prison/case_2.png")
                if data[0]["message"] != "":
                    cover_img(background, img, (400, 880))
            if data[1]["message"] != "":
                cover_img(background, img, (550, 480))
            if data[2]["message"] != "":
                cover_img(background, img, (550, 880))
        elif self.task == "db_diag":
            background = cv2.imread("./imgs/db_diag/background.png")
            img = cv2.imread("./imgs/db_diag/speaking.png", cv2.IMREAD_UNCHANGED)
            if data[0]["message"] != "":
                cover_img(background, img, (750, 80))
            if data[1]["message"] != "":
                cover_img(background, img, (310, 220))
            if data[2]["message"] != "":
                cover_img(background, img, (522, 11))
        elif "sde" in self.task:
            background = cv2.imread("./imgs/sde/background.png")
            img = cv2.imread("./imgs/sde/speaking.png", cv2.IMREAD_UNCHANGED)
            if data[0]["message"] != "":
                cover_img(background, img, (692, 330))
            if data[1]["message"] != "":
                cover_img(background, img, (692, 660))
            if data[2]["message"] != "":
                cover_img(background, img, (692, 990))
        else:
            background = cv2.imread("./imgs/background.png")
            back_h, back_w, _ = background.shape
            stu_cnt = 0
            if data[stu_cnt]["message"] not in ["", "[RaiseHand]"]:
                img = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)
                cover_img(background, img, (370, 1250))
            for h_begin, w_begin in itertools.product(
                range(800, back_h, 300), range(135, back_w - 200, 200)
            ):
                stu_cnt += 1
                if stu_cnt <= self.stu_num:
                    img = cv2.imread(
                        f"./imgs/{(stu_cnt - 1) % 11 + 1}.png", cv2.IMREAD_UNCHANGED
                    )
                    cover_img(
                        background,
                        img,
                        (h_begin - 30 if img.shape[0] > 190 else h_begin, w_begin),
                    )
                    if "[RaiseHand]" in data[stu_cnt]["message"]:
                        # elif data[stu_cnt]["message"] == "[RaiseHand]":
                        img = cv2.imread("./imgs/hand.png", cv2.IMREAD_UNCHANGED)
                        cover_img(background, img, (h_begin - 90, w_begin + 10))
                    elif data[stu_cnt]["message"] not in ["", "[RaiseHand]"]:
                        img = cv2.imread("./imgs/speaking.png", cv2.IMREAD_UNCHANGED)
                        cover_img(background, img, (h_begin - 90, w_begin + 10))

                else:
                    img = cv2.imread("./imgs/empty.png", cv2.IMREAD_UNCHANGED)
                    cover_img(background, img, (h_begin, w_begin))
        return cv2.cvtColor(background, cv2.COLOR_BGR2RGB)

    def return_format(self, messages: List[Message]):
        _format = [{"message": "", "sender": idx} for idx in range(len(self.agent_id))]

        for message in messages:
            if self.task == "db_diag":
                content_json: dict = message.content
                content_json[
                    "diagnose"
                ] = f"[{message.sender}]: {content_json['diagnose']}"
                _format[self.agent_id[message.sender]]["message"] = json.dumps(
                    content_json
                )
            elif "sde" in self.task:
                if message.sender == "code_tester":
                    pre_message, message_ = message.content.split("\n")
                    message_ = "{}\n{}".format(
                        pre_message, json.loads(message_)["feedback"]
                    )
                    _format[self.agent_id[message.sender]][
                        "message"
                    ] = "[{}]: {}".format(message.sender, message_)
                else:
                    _format[self.agent_id[message.sender]][
                        "message"
                    ] = "[{}]: {}".format(message.sender, message.content)

            else:
                _format[self.agent_id[message.sender]]["message"] = "[{}]: {}".format(
                    message.sender, message.content
                )

        return _format

    def gen_output(self):
        """
        generate new image and message of next step
        :return: [new image, new message]
        """

        # data = self.backend.next_data()

        return_message = self.backend.next()

        data = self.return_format(return_message)

        # data.sort(key=lambda item: item["sender"])
        """
        # [To-Do]; Check the message from the backend: only 1 person can speak
        """

        for item in data:
            if item["message"] not in ["", "[RaiseHand]"]:
                self.messages.append((item["sender"], item["message"]))

        message = self.gen_message()
        self.turns_remain -= 1
        return [self.gen_img(data), message]

    def gen_message(self):
        # If the backend cannot handle this error, use the following code.
        message = ""
        """
        for item in data:
            if item["message"] not in ["", "[RaiseHand]"]:
                message = item["message"]
                break
        """
        for sender, msg in self.messages:
            if sender == 0:
                avatar = self.get_avatar(0)
            elif sender == -1:
                avatar = self.get_avatar(-1)
            else:
                avatar = self.get_avatar((sender - 1) % 11 + 1)
            if self.task == "db_diag":
                msg_json = json.loads(msg)
                self.solution_status = [False] * self.tot_solutions
                msg = msg_json["diagnose"]
                if msg_json["solution"] != "":
                    solution: List[str] = msg_json["solution"]
                    for solu in solution:
                        if "query" in solu or "queries" in solu:
                            self.solution_status[0] = True
                            solu = solu.replace(
                                "query", '<span style="color:yellow;">query</span>'
                            )
                            solu = solu.replace(
                                "queries", '<span style="color:yellow;">queries</span>'
                            )
                        if "join" in solu:
                            self.solution_status[1] = True
                            solu = solu.replace(
                                "join", '<span style="color:yellow;">join</span>'
                            )
                        if "index" in solu:
                            self.solution_status[2] = True
                            solu = solu.replace(
                                "index", '<span style="color:yellow;">index</span>'
                            )
                        if "system configuration" in solu:
                            self.solution_status[3] = True
                            solu = solu.replace(
                                "system configuration",
                                '<span style="color:yellow;">system configuration</span>',
                            )
                        if (
                            "monitor" in solu
                            or "Monitor" in solu
                            or "Investigate" in solu
                        ):
                            self.solution_status[4] = True
                            solu = solu.replace(
                                "monitor", '<span style="color:yellow;">monitor</span>'
                            )
                            solu = solu.replace(
                                "Monitor", '<span style="color:yellow;">Monitor</span>'
                            )
                            solu = solu.replace(
                                "Investigate",
                                '<span style="color:yellow;">Investigate</span>',
                            )
                        msg = f"{msg}<br>{solu}"
                if msg_json["knowledge"] != "":
                    msg = f'{msg}<hr style="margin: 5px 0"><span style="font-style: italic">{msg_json["knowledge"]}<span>'
            else:
                # Escape HTML special characters before embedding the message in markup.
                msg = msg.replace("<", "&lt;")
                msg = msg.replace(">", "&gt;")
            message = (
                f'<div style="display: flex; align-items: center; margin-bottom: 10px;overflow:auto;">'
                f'<img src="{avatar}" style="width: 5%; height: 5%; border-radius: 25px; margin-right: 10px;">'
                f'<div style="background-color: gray; color: white; padding: 10px; border-radius: 10px;'
                f'max-width: 70%; white-space: pre-wrap">'
                f"{msg}"
                f"</div></div>" + message
            )
        message = (
            '<div id="divDetail" style="height:600px;overflow:auto;">'
            + message
            + "</div>"
        )
        return message

    def submit(self, message: str):
        """
        submit message to backend
        :param message: message
        :return: [new image, new message]
        """
        self.backend.submit(message)
        self.messages.append((-1, f"[User]: {message}"))
        return self.gen_img([{"message": ""}] * len(self.agent_id)), self.gen_message()

    def launch(self, single_agent=False, discussion_mode=False):
        if self.task == "pipeline_brainstorming":
            with gr.Blocks() as demo:
                chatbot = gr.Chatbot(height=800, show_label=False)
                msg = gr.Textbox(label="Input")

                def respond(message, chat_history):
                    chat_history.append((message, None))
                    yield "", chat_history
                    for response in self.backend.iter_run(
                        single_agent=single_agent, discussion_mode=discussion_mode
                    ):
                        print(response)
                        chat_history.append((None, response))
                        yield "", chat_history

                msg.submit(respond, [msg, chatbot], [msg, chatbot])
        else:
            with gr.Blocks() as demo:
                with gr.Row():
                    task_dropdown = gr.Dropdown(
                        choices=[
                            "simulation/nlp_classroom_9players",
                            "simulation/prisoner_dilemma",
                        ],
                        value="simulation/nlp_classroom_9players",
                        label="Task",
                    )
                    api_key_text = gr.Textbox(label="OPENAI API KEY")
                    organization_text = gr.Textbox(label="Organization")
                with gr.Row():
                    with gr.Column():
                        image_output = gr.Image()
                        with gr.Row():
                            reset_btn = gr.Button("Build/Reset")
                            # next_btn = gr.Button("Next", variant="primary")
                            next_btn = gr.Button("Next", interactive=False)
                            stop_autoplay_btn = gr.Button(
                                "Stop Autoplay", interactive=False
                            )
                            start_autoplay_btn = gr.Button(
                                "Start Autoplay", interactive=False
                            )
                    with gr.Box(visible=False) as solutions:
                        with gr.Column():
                            gr.HTML("Optimization Solutions:")
                            with gr.Row():
|
492 |
-
rewrite_slow_query_btn = gr.Button(
|
493 |
-
"Rewrite Slow Query", visible=False
|
494 |
-
)
|
495 |
-
add_query_hints_btn = gr.Button(
|
496 |
-
"Add Query Hints", visible=False
|
497 |
-
)
|
498 |
-
update_indexes_btn = gr.Button(
|
499 |
-
"Update Indexes", visible=False
|
500 |
-
)
|
501 |
-
tune_parameters_btn = gr.Button(
|
502 |
-
"Tune Parameters", visible=False
|
503 |
-
)
|
504 |
-
gather_more_info_btn = gr.Button(
|
505 |
-
"Gather More Info", visible=False
|
506 |
-
)
|
507 |
-
# text_output = gr.Textbox()
|
508 |
-
text_output = gr.HTML(self.reset()[1])
|
509 |
-
|
510 |
-
# Given a botton to provide student numbers and their inf.
|
511 |
-
# stu_num = gr.Number(label="Student Number", precision=0)
|
512 |
-
# stu_num = self.stu_num
|
513 |
-
|
514 |
-
if self.task == "db_diag":
|
515 |
-
user_msg = gr.Textbox()
|
516 |
-
submit_btn = gr.Button("Submit", variant="primary")
|
517 |
-
|
518 |
-
submit_btn.click(
|
519 |
-
fn=self.submit,
|
520 |
-
inputs=user_msg,
|
521 |
-
outputs=[image_output, text_output],
|
522 |
-
show_progress=False,
|
523 |
-
)
|
524 |
-
else:
|
525 |
-
pass
|
526 |
-
|
527 |
-
# next_btn.click(fn=self.gen_output, inputs=None, outputs=[image_output, text_output],
|
528 |
-
# show_progress=False)
|
529 |
-
next_btn.click(
|
530 |
-
fn=self.delay_gen_output,
|
531 |
-
inputs=None,
|
532 |
-
outputs=[
|
533 |
-
image_output,
|
534 |
-
text_output,
|
535 |
-
next_btn,
|
536 |
-
start_autoplay_btn,
|
537 |
-
rewrite_slow_query_btn,
|
538 |
-
add_query_hints_btn,
|
539 |
-
update_indexes_btn,
|
540 |
-
tune_parameters_btn,
|
541 |
-
gather_more_info_btn,
|
542 |
-
solutions,
|
543 |
-
],
|
544 |
-
show_progress=False,
|
545 |
-
)
|
546 |
-
|
547 |
-
# [To-Do] Add botton: re-start (load different people and env)
|
548 |
-
# reset_btn.click(fn=self.reset, inputs=stu_num, outputs=[image_output, text_output],
|
549 |
-
# show_progress=False)
|
550 |
-
# reset_btn.click(fn=self.reset, inputs=None, outputs=[image_output, text_output], show_progress=False)
|
551 |
-
reset_btn.click(
|
552 |
-
fn=self.delay_reset,
|
553 |
-
inputs=[task_dropdown, api_key_text, organization_text],
|
554 |
-
outputs=[
|
555 |
-
image_output,
|
556 |
-
text_output,
|
557 |
-
next_btn,
|
558 |
-
stop_autoplay_btn,
|
559 |
-
start_autoplay_btn,
|
560 |
-
rewrite_slow_query_btn,
|
561 |
-
add_query_hints_btn,
|
562 |
-
update_indexes_btn,
|
563 |
-
tune_parameters_btn,
|
564 |
-
gather_more_info_btn,
|
565 |
-
solutions,
|
566 |
-
],
|
567 |
-
show_progress=False,
|
568 |
-
)
|
569 |
-
|
570 |
-
stop_autoplay_btn.click(
|
571 |
-
fn=self.stop_autoplay,
|
572 |
-
inputs=None,
|
573 |
-
outputs=[next_btn, stop_autoplay_btn, start_autoplay_btn],
|
574 |
-
show_progress=False,
|
575 |
-
)
|
576 |
-
start_autoplay_btn.click(
|
577 |
-
fn=self.start_autoplay,
|
578 |
-
inputs=None,
|
579 |
-
outputs=[
|
580 |
-
image_output,
|
581 |
-
text_output,
|
582 |
-
next_btn,
|
583 |
-
stop_autoplay_btn,
|
584 |
-
start_autoplay_btn,
|
585 |
-
rewrite_slow_query_btn,
|
586 |
-
add_query_hints_btn,
|
587 |
-
update_indexes_btn,
|
588 |
-
tune_parameters_btn,
|
589 |
-
gather_more_info_btn,
|
590 |
-
solutions,
|
591 |
-
],
|
592 |
-
show_progress=False,
|
593 |
-
)
|
594 |
-
|
595 |
-
demo.queue(concurrency_count=5, max_size=20).launch()
|
596 |
-
# demo.launch()
|
597 |
-
|
598 |
-
|
599 |
-
GUI().launch()
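For reference, the `pipeline_brainstorming` branch of `launch()` above streams backend output by yielding successive chat histories from a generator callback. A minimal, self-contained sketch of that pattern, with a stub generator standing in for `self.backend.iter_run` (the stub and its messages are illustrative, not from the deleted file):

```python
import gradio as gr

def fake_backend_iter_run():
    # Stand-in for the real backend; yields responses one at a time.
    yield "Thinking about the pipeline..."
    yield "Here is a draft plan."

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=400, show_label=False)
    msg = gr.Textbox(label="Input")

    def respond(message, chat_history):
        # Echo the user message immediately, then stream backend replies.
        chat_history.append((message, None))
        yield "", chat_history
        for response in fake_backend_iter_run():
            chat_history.append((None, response))
            yield "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.queue().launch()
```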
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/dots/Factory.d.ts
DELETED
@@ -1,6 +0,0 @@
-import Dots from './Dots';
-import Base from '../base/Base';
-
-export default function Factory(
-    config?: Base.IConfig
-): Dots;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/inputtext/Factory.js
DELETED
@@ -1,13 +0,0 @@
-import InputText from './InputText.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('inputText', function (config) {
-    var gameObject = new InputText(this.scene, config);
-    this.scene.add.existing(gameObject);
-    return gameObject;
-});
-
-SetValue(window, 'RexPlugins.UI.InputText', InputText);
-
-export default InputText;
spaces/Akhil-77/Toxicity_Detector/app.py
DELETED
@@ -1,15 +0,0 @@
-import gradio as gr
-from transformers import pipeline
-
-sentiment = pipeline('sentiment-analysis')
-
-def get_sentiment(input_text):
-    return sentiment(input_text)
-
-front_end = gr.Interface(fn=get_sentiment,
-                         inputs="text",
-                         outputs=["text"],
-                         title="Toxicity Detector",
-                         description="A simple web app to check whether a text is toxic or not")
-
-front_end.launch(inline=False)
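Note that the deleted app labels itself a toxicity detector but wires up the generic `sentiment-analysis` pipeline default. A sketch of the same interface backed by an actual toxicity classifier (assuming the publicly available `unitary/toxic-bert` checkpoint; any text-classification model would work):

```python
import gradio as gr
from transformers import pipeline

# Assumption: "unitary/toxic-bert" is used here purely as an example of a
# toxicity-specific checkpoint, not the model the original app used.
toxicity = pipeline("text-classification", model="unitary/toxic-bert")

def get_toxicity(input_text):
    return toxicity(input_text)

gr.Interface(
    fn=get_toxicity,
    inputs="text",
    outputs="text",
    title="Toxicity Detector",
    description="Classifies input text with a toxicity-specific model",
).launch()
```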
spaces/Akmyradov/chatbot_testing/README.md
DELETED
@@ -1,12 +0,0 @@
----
-title: Chatbot Testing
-emoji: 🔥
-colorFrom: pink
-colorTo: indigo
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AlekseyKorshuk/gai-project/modules/models.py
DELETED
@@ -1,73 +0,0 @@
-import requests
-import gradio as gr
-
-from config import GUANACO_DEVELOPER_KEY, MODELS
-
-
-class ModelConfig():
-    def __init__(self, config):
-        self.name = config['name']
-        self.endpoint = config['endpoint']
-        self.generation_params = config.get('params', {})
-        self.author_id = config.get('author-id')
-
-
-class ChaiBot():
-    def __init__(self, bot_config):
-        self.messages = []
-        self.config = bot_config
-        self.bot_label = bot_config.get("botLabel", "Character")
-        self.user_label = bot_config.get("userLabel", "User")
-        self.add_bot_message(bot_config.get("firstMessage"))
-
-    def add_user_message(self, message):
-        self.messages.append((self.user_label, message.strip()))
-
-    def add_bot_message(self, message):
-        self.messages.append((self.bot_label, message.strip()))
-
-    def get_conversation(self):
-        conversation = []
-        for label, value in self.messages:
-            role_type = "user" if label == self.user_label else "bot"
-            message = {
-                "from": label,
-                "value": value,
-                "role_type": role_type
-            }
-            conversation.append(message)
-        return conversation
-
-
-class BaseModel:
-    def __init__(self, model_config):
-        self.config = model_config
-
-    def generate_response(self, chaibot):
-        raise NotImplementedError
-
-
-class GuanacoModel(BaseModel):
-    def generate_response(self, chaibot):
-        model_inputs = self._get_model_input(chaibot)
-        return self._get_response(model_inputs)
-
-    def _get_model_input(self, chaibot):
-        model_inputs = {
-            "bot_name": chaibot.bot_label,
-            "memory": chaibot.config.get('memory', ""),
-            "prompt": chaibot.config.get('prompt', ""),
-            "chat_history": [{"sender": sender, "message": message} for sender, message in chaibot.messages],
-            "user_name": "You"
-        }
-        return model_inputs
-
-    def _get_response(self, inputs):
-        headers = {"Authorization": f"Bearer {GUANACO_DEVELOPER_KEY}"}
-        model_id = MODELS[self.config]
-        url = f'https://guanaco-submitter.chai-research.com/models/{model_id}/chat'
-        try:
-            response = requests.post(url=url, json=inputs, headers=headers, timeout=20)
-        except requests.ReadTimeout:
-            raise gr.Error("Generating response took too long, please try again in a new conversation.")
-        return response.json()["model_output"]
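As a usage sketch of the classes above: judging from `_get_response`, the `model_config` passed to `GuanacoModel` is expected to be a key into the `MODELS` mapping. Every config value below is a placeholder, and a valid `GUANACO_DEVELOPER_KEY` plus a real `MODELS` entry are needed for the HTTP call to succeed:

```python
# Illustrative wiring only; values are hypothetical placeholders.
bot = ChaiBot({
    "botLabel": "Assistant",
    "userLabel": "User",
    "firstMessage": "Hello! How can I help?",
    "memory": "",
    "prompt": "",
})
bot.add_user_message("Tell me a fun fact about octopuses.")

model = GuanacoModel(model_config="example-model")  # assumed key into MODELS
reply = model.generate_response(bot)                # POSTs to the Chai endpoint
bot.add_bot_message(reply)
print(bot.get_conversation())
```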
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/utils.py
DELETED
@@ -1,226 +0,0 @@
-import os
-import glob
-import sys
-import argparse
-import logging
-import json
-import subprocess
-import numpy as np
-from scipy.io.wavfile import read
-import torch
-
-MATPLOTLIB_FLAG = False
-
-logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
-logger = logging
-
-
-def load_checkpoint(checkpoint_path, model, optimizer=None):
-    assert os.path.isfile(checkpoint_path)
-    checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    learning_rate = checkpoint_dict['learning_rate']
-    if optimizer is not None:
-        optimizer.load_state_dict(checkpoint_dict['optimizer'])
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except:
-            logger.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logger.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
-    return model, optimizer, learning_rate, iteration
-
-
-def plot_spectrogram_to_numpy(spectrogram):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(10, 2))
-    im = ax.imshow(spectrogram, aspect="auto", origin="lower",
-                   interpolation='none')
-    plt.colorbar(im, ax=ax)
-    plt.xlabel("Frames")
-    plt.ylabel("Channels")
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def plot_alignment_to_numpy(alignment, info=None):
-    global MATPLOTLIB_FLAG
-    if not MATPLOTLIB_FLAG:
-        import matplotlib
-        matplotlib.use("Agg")
-        MATPLOTLIB_FLAG = True
-        mpl_logger = logging.getLogger('matplotlib')
-        mpl_logger.setLevel(logging.WARNING)
-    import matplotlib.pylab as plt
-    import numpy as np
-
-    fig, ax = plt.subplots(figsize=(6, 4))
-    im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower',
-                   interpolation='none')
-    fig.colorbar(im, ax=ax)
-    xlabel = 'Decoder timestep'
-    if info is not None:
-        xlabel += '\n\n' + info
-    plt.xlabel(xlabel)
-    plt.ylabel('Encoder timestep')
-    plt.tight_layout()
-
-    fig.canvas.draw()
-    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
-    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-    plt.close()
-    return data
-
-
-def load_wav_to_torch(full_path):
-    sampling_rate, data = read(full_path)
-    return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
-def load_filepaths_and_text(filename, split="|"):
-    with open(filename, encoding='utf-8') as f:
-        filepaths_and_text = [line.strip().split(split) for line in f]
-    return filepaths_and_text
-
-
-def get_hparams(init=True):
-    parser = argparse.ArgumentParser()
-    parser.add_argument('-c', '--config', type=str, default="./configs/base.json",
-                        help='JSON file for configuration')
-    parser.add_argument('-m', '--model', type=str, required=True,
-                        help='Model name')
-
-    args = parser.parse_args()
-    model_dir = os.path.join("./logs", args.model)
-
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-
-    config_path = args.config
-    config_save_path = os.path.join(model_dir, "config.json")
-    if init:
-        with open(config_path, "r") as f:
-            data = f.read()
-        with open(config_save_path, "w") as f:
-            f.write(data)
-    else:
-        with open(config_save_path, "r") as f:
-            data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_dir(model_dir):
-    config_save_path = os.path.join(model_dir, "config.json")
-    with open(config_save_path, "r") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    hparams.model_dir = model_dir
-    return hparams
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r", encoding="utf-8") as f:
-        data = f.read()
-    config = json.loads(data)
-
-    hparams = HParams(**config)
-    return hparams
-
-
-def check_git_hash(model_dir):
-    source_dir = os.path.dirname(os.path.realpath(__file__))
-    if not os.path.exists(os.path.join(source_dir, ".git")):
-        logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format(
-            source_dir
-        ))
-        return
-
-    cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-    path = os.path.join(model_dir, "githash")
-    if os.path.exists(path):
-        saved_hash = open(path).read()
-        if saved_hash != cur_hash:
-            logger.warn("git hash values are different. {}(saved) != {}(current)".format(
-                saved_hash[:8], cur_hash[:8]))
-    else:
-        open(path, "w").write(cur_hash)
-
-
-def get_logger(model_dir, filename="train.log"):
-    global logger
-    logger = logging.getLogger(os.path.basename(model_dir))
-    logger.setLevel(logging.DEBUG)
-
-    formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-    if not os.path.exists(model_dir):
-        os.makedirs(model_dir)
-    h = logging.FileHandler(os.path.join(model_dir, filename))
-    h.setLevel(logging.DEBUG)
-    h.setFormatter(formatter)
-    logger.addHandler(h)
-    return logger
-
-
-class HParams():
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if type(v) == dict:
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()
spaces/Amrrs/DragGan-Inversion/gui_utils/imgui_window.py
DELETED
@@ -1,110 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import os
-import imgui
-import imgui.integrations.glfw
-
-from . import glfw_window
-from . import imgui_utils
-from . import text_utils
-
-# ----------------------------------------------------------------------------
-
-
-class ImguiWindow(glfw_window.GlfwWindow):
-    def __init__(self, *, title='ImguiWindow', font=None, font_sizes=range(14, 24), **glfw_kwargs):
-        if font is None:
-            font = text_utils.get_default_font()
-        font_sizes = {int(size) for size in font_sizes}
-        super().__init__(title=title, **glfw_kwargs)
-
-        # Init fields.
-        self._imgui_context = None
-        self._imgui_renderer = None
-        self._imgui_fonts = None
-        self._cur_font_size = max(font_sizes)
-
-        # Delete leftover imgui.ini to avoid unexpected behavior.
-        if os.path.isfile('imgui.ini'):
-            os.remove('imgui.ini')
-
-        # Init ImGui.
-        self._imgui_context = imgui.create_context()
-        self._imgui_renderer = _GlfwRenderer(self._glfw_window)
-        self._attach_glfw_callbacks()
-        # Disable creating imgui.ini at runtime.
-        imgui.get_io().ini_saving_rate = 0
-        # Improve behavior with imgui_utils.drag_custom().
-        imgui.get_io().mouse_drag_threshold = 0
-        self._imgui_fonts = {size: imgui.get_io().fonts.add_font_from_file_ttf(
-            font, size) for size in font_sizes}
-        self._imgui_renderer.refresh_font_texture()
-
-    def close(self):
-        self.make_context_current()
-        self._imgui_fonts = None
-        if self._imgui_renderer is not None:
-            self._imgui_renderer.shutdown()
-            self._imgui_renderer = None
-        if self._imgui_context is not None:
-            # imgui.destroy_context(self._imgui_context) # Commented out to avoid creating imgui.ini at the end.
-            self._imgui_context = None
-        super().close()
-
-    def _glfw_key_callback(self, *args):
-        super()._glfw_key_callback(*args)
-        self._imgui_renderer.keyboard_callback(*args)
-
-    @property
-    def font_size(self):
-        return self._cur_font_size
-
-    @property
-    def spacing(self):
-        return round(self._cur_font_size * 0.4)
-
-    def set_font_size(self, target):  # Applied on next frame.
-        self._cur_font_size = min((abs(key - target), key)
-                                  for key in self._imgui_fonts.keys())[1]
-
-    def begin_frame(self):
-        # Begin glfw frame.
-        super().begin_frame()
-
-        # Process imgui events.
-        self._imgui_renderer.mouse_wheel_multiplier = self._cur_font_size / 10
-        if self.content_width > 0 and self.content_height > 0:
-            self._imgui_renderer.process_inputs()
-
-        # Begin imgui frame.
-        imgui.new_frame()
-        imgui.push_font(self._imgui_fonts[self._cur_font_size])
-        imgui_utils.set_default_style(
-            spacing=self.spacing, indent=self.font_size, scrollbar=self.font_size+4)
-
-    def end_frame(self):
-        imgui.pop_font()
-        imgui.render()
-        imgui.end_frame()
-        self._imgui_renderer.render(imgui.get_draw_data())
-        super().end_frame()
-
-# ----------------------------------------------------------------------------
-# Wrapper class for GlfwRenderer to fix a mouse wheel bug on Linux.
-
-
-class _GlfwRenderer(imgui.integrations.glfw.GlfwRenderer):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.mouse_wheel_multiplier = 1
-
-    def scroll_callback(self, window, x_offset, y_offset):
-        self.io.mouse_wheel += y_offset * self.mouse_wheel_multiplier
-
-# ----------------------------------------------------------------------------
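One detail worth noting in `set_font_size` above: it picks the pre-rasterized font whose size is nearest the target by minimizing `(abs(key - target), key)` tuples, so ties on distance resolve to the smaller size. A standalone illustration of that trick:

```python
# Nearest-size lookup as used in set_font_size(); tuples compare element-wise.
font_sizes = [14, 16, 18, 20, 22]
target = 17
closest = min((abs(size - target), size) for size in font_sizes)[1]
print(closest)  # 16 -- both 16 and 18 are at distance 1; (1, 16) < (1, 18)
```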
spaces/Amrrs/textsummarizer/README.md
DELETED
@@ -1,37 +0,0 @@
----
-title: Textsummarizer
-emoji: ⚡
-colorFrom: green
-colorTo: purple
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version`: _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
spaces/An-619/FastSAM/utils/tools_gradio.py
DELETED
@@ -1,175 +0,0 @@
-import numpy as np
-from PIL import Image
-import matplotlib.pyplot as plt
-import cv2
-import torch
-
-
-def fast_process(
-    annotations,
-    image,
-    device,
-    scale,
-    better_quality=False,
-    mask_random_color=True,
-    bbox=None,
-    use_retina=True,
-    withContours=True,
-):
-    if isinstance(annotations[0], dict):
-        annotations = [annotation['segmentation'] for annotation in annotations]
-
-    original_h = image.height
-    original_w = image.width
-    if better_quality:
-        if isinstance(annotations[0], torch.Tensor):
-            annotations = np.array(annotations.cpu())
-        for i, mask in enumerate(annotations):
-            mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
-            annotations[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))
-    if device == 'cpu':
-        annotations = np.array(annotations)
-        inner_mask = fast_show_mask(
-            annotations,
-            plt.gca(),
-            random_color=mask_random_color,
-            bbox=bbox,
-            retinamask=use_retina,
-            target_height=original_h,
-            target_width=original_w,
-        )
-    else:
-        if isinstance(annotations[0], np.ndarray):
-            annotations = torch.from_numpy(annotations)
-        inner_mask = fast_show_mask_gpu(
-            annotations,
-            plt.gca(),
-            random_color=mask_random_color,
-            bbox=bbox,
-            retinamask=use_retina,
-            target_height=original_h,
-            target_width=original_w,
-        )
-    if isinstance(annotations, torch.Tensor):
-        annotations = annotations.cpu().numpy()
-
-    if withContours:
-        contour_all = []
-        temp = np.zeros((original_h, original_w, 1))
-        for i, mask in enumerate(annotations):
-            if type(mask) == dict:
-                mask = mask['segmentation']
-            annotation = mask.astype(np.uint8)
-            if use_retina == False:
-                annotation = cv2.resize(
-                    annotation,
-                    (original_w, original_h),
-                    interpolation=cv2.INTER_NEAREST,
-                )
-            contours, _ = cv2.findContours(annotation, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
-            for contour in contours:
-                contour_all.append(contour)
-        cv2.drawContours(temp, contour_all, -1, (255, 255, 255), 2 // scale)
-        color = np.array([0 / 255, 0 / 255, 255 / 255, 0.9])
-        contour_mask = temp / 255 * color.reshape(1, 1, -1)
-
-    image = image.convert('RGBA')
-    overlay_inner = Image.fromarray((inner_mask * 255).astype(np.uint8), 'RGBA')
-    image.paste(overlay_inner, (0, 0), overlay_inner)
-
-    if withContours:
-        overlay_contour = Image.fromarray((contour_mask * 255).astype(np.uint8), 'RGBA')
-        image.paste(overlay_contour, (0, 0), overlay_contour)
-
-    return image
-
-
-# CPU post process
-def fast_show_mask(
-    annotation,
-    ax,
-    random_color=False,
-    bbox=None,
-    retinamask=True,
-    target_height=960,
-    target_width=960,
-):
-    mask_sum = annotation.shape[0]
-    height = annotation.shape[1]
-    weight = annotation.shape[2]
-    # Sort the annotations by mask area.
-    areas = np.sum(annotation, axis=(1, 2))
-    sorted_indices = np.argsort(areas)[::1]
-    annotation = annotation[sorted_indices]
-
-    index = (annotation != 0).argmax(axis=0)
-    if random_color:
-        color = np.random.random((mask_sum, 1, 1, 3))
-    else:
-        color = np.ones((mask_sum, 1, 1, 3)) * np.array([30 / 255, 144 / 255, 255 / 255])
-    transparency = np.ones((mask_sum, 1, 1, 1)) * 0.6
-    visual = np.concatenate([color, transparency], axis=-1)
-    mask_image = np.expand_dims(annotation, -1) * visual
-
-    mask = np.zeros((height, weight, 4))
-
-    h_indices, w_indices = np.meshgrid(np.arange(height), np.arange(weight), indexing='ij')
-    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
-
-    mask[h_indices, w_indices, :] = mask_image[indices]
-    if bbox is not None:
-        x1, y1, x2, y2 = bbox
-        ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1))
-
-    if not retinamask:
-        mask = cv2.resize(mask, (target_width, target_height), interpolation=cv2.INTER_NEAREST)
-
-    return mask
-
-
-def fast_show_mask_gpu(
-    annotation,
-    ax,
-    random_color=False,
-    bbox=None,
-    retinamask=True,
-    target_height=960,
-    target_width=960,
-):
-    device = annotation.device
-    mask_sum = annotation.shape[0]
-    height = annotation.shape[1]
-    weight = annotation.shape[2]
-    areas = torch.sum(annotation, dim=(1, 2))
-    sorted_indices = torch.argsort(areas, descending=False)
-    annotation = annotation[sorted_indices]
-    # Find the index of the first non-zero mask at each pixel position.
-    index = (annotation != 0).to(torch.long).argmax(dim=0)
-    if random_color:
-        color = torch.rand((mask_sum, 1, 1, 3)).to(device)
-    else:
-        color = torch.ones((mask_sum, 1, 1, 3)).to(device) * torch.tensor(
-            [30 / 255, 144 / 255, 255 / 255]
-        ).to(device)
-    transparency = torch.ones((mask_sum, 1, 1, 1)).to(device) * 0.6
-    visual = torch.cat([color, transparency], dim=-1)
-    mask_image = torch.unsqueeze(annotation, -1) * visual
-    # Gather by index: for each pixel, pick which mask's value to use,
-    # flattening mask_image into a single composited image.
-    mask = torch.zeros((height, weight, 4)).to(device)
-    h_indices, w_indices = torch.meshgrid(torch.arange(height), torch.arange(weight))
-    indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))
-    # Update the composite with vectorized indexing.
-    mask[h_indices, w_indices, :] = mask_image[indices]
-    mask_cpu = mask.cpu().numpy()
-    if bbox is not None:
-        x1, y1, x2, y2 = bbox
-        ax.add_patch(
-            plt.Rectangle(
-                (x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1
-            )
-        )
-    if not retinamask:
-        mask_cpu = cv2.resize(
-            mask_cpu, (target_width, target_height), interpolation=cv2.INTER_NEAREST
-        )
-    return mask_cpu
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/using-diffusers/loading.md
DELETED
@@ -1,463 +0,0 @@
|
|
1 |
-
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
|
3 |
-
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
4 |
-
the License. You may obtain a copy of the License at
|
5 |
-
|
6 |
-
http://www.apache.org/licenses/LICENSE-2.0
|
7 |
-
|
8 |
-
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
9 |
-
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
10 |
-
specific language governing permissions and limitations under the License.
|
11 |
-
-->
|
12 |
-
|
13 |
-
# Load pipelines, models, and schedulers
|
14 |
-
|
15 |
-
[[open-in-colab]]
|
16 |
-
|
17 |
-
Having an easy way to use a diffusion system for inference is essential to 🧨 Diffusers. Diffusion systems often consist of multiple components like parameterized models, tokenizers, and schedulers that interact in complex ways. That is why we designed the [`DiffusionPipeline`] to wrap the complexity of the entire diffusion system into an easy-to-use API, while remaining flexible enough to be adapted for other use cases, such as loading each component individually as building blocks to assemble your own diffusion system.
|
18 |
-
|
19 |
-
Everything you need for inference or training is accessible with the `from_pretrained()` method.
|
20 |
-
|
21 |
-
This guide will show you how to load:
|
22 |
-
|
23 |
-
- pipelines from the Hub and locally
|
24 |
-
- different components into a pipeline
|
25 |
-
- checkpoint variants such as different floating point types or non-exponential mean averaged (EMA) weights
|
26 |
-
- models and schedulers
|
27 |
-
|
28 |
-
## Diffusion Pipeline
|
29 |
-
|
30 |
-
<Tip>
|
31 |
-
|
32 |
-
💡 Skip to the [DiffusionPipeline explained](#diffusionpipeline-explained) section if you interested in learning in more detail about how the [`DiffusionPipeline`] class works.
|
33 |
-
|
34 |
-
</Tip>
|
35 |
-
|
36 |
-
The [`DiffusionPipeline`] class is the simplest and most generic way to load any diffusion model from the [Hub](https://huggingface.co/models?library=diffusers). The [`DiffusionPipeline.from_pretrained`] method automatically detects the correct pipeline class from the checkpoint, downloads and caches all the required configuration and weight files, and returns a pipeline instance ready for inference.
|
37 |
-
|
38 |
-
```python
|
39 |
-
from diffusers import DiffusionPipeline
|
40 |
-
|
41 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
42 |
-
pipe = DiffusionPipeline.from_pretrained(repo_id)
|
43 |
-
```
|
44 |
-
|
45 |
-
You can also load a checkpoint with it's specific pipeline class. The example above loaded a Stable Diffusion model; to get the same result, use the [`StableDiffusionPipeline`] class:
|
46 |
-
|
47 |
-
```python
|
48 |
-
from diffusers import StableDiffusionPipeline
|
49 |
-
|
50 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
51 |
-
pipe = StableDiffusionPipeline.from_pretrained(repo_id)
|
52 |
-
```
|
53 |
-
|
54 |
-
A checkpoint (such as [`CompVis/stable-diffusion-v1-4`](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)) may also be used for more than one task, like text-to-image or image-to-image. To differentiate what task you want to use the checkpoint for, you have to load it directly with it's corresponding task-specific pipeline class:
|
55 |
-
|
56 |
-
```python
|
57 |
-
from diffusers import StableDiffusionImg2ImgPipeline
|
58 |
-
|
59 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
60 |
-
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(repo_id)
|
61 |
-
```
|
62 |
-
|
63 |
-
### Local pipeline
|
64 |
-
|
65 |
-
To load a diffusion pipeline locally, use [`git-lfs`](https://git-lfs.github.com/) to manually download the checkpoint (in this case, [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)) to your local disk. This creates a local folder, `./stable-diffusion-v1-5`, on your disk:
|
66 |
-
|
67 |
-
```bash
|
68 |
-
git lfs install
|
69 |
-
git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
|
70 |
-
```
|
71 |
-
|
72 |
-
Then pass the local path to [`~DiffusionPipeline.from_pretrained`]:
|
73 |
-
|
74 |
-
```python
|
75 |
-
from diffusers import DiffusionPipeline
|
76 |
-
|
77 |
-
repo_id = "./stable-diffusion-v1-5"
|
78 |
-
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id)
|
79 |
-
```
|
80 |
-
|
81 |
-
The [`~DiffusionPipeline.from_pretrained`] method won't download any files from the Hub when it detects a local path, but this also means it won't download and cache the latest changes to a checkpoint.
|
82 |
-
|
83 |
-
### Swap components in a pipeline
|
84 |
-
|
85 |
-
You can customize the default components of any pipeline with another compatible component. Customization is important because:
|
86 |
-
|
87 |
-
- Changing the scheduler is important for exploring the trade-off between generation speed and quality.
|
88 |
-
- Different components of a model are typically trained independently and you can swap out a component with a better-performing one.
|
89 |
-
- During finetuning, usually only some components - like the UNet or text encoder - are trained.
|
90 |
-
|
91 |
-
To find out which schedulers are compatible for customization, you can use the `compatibles` method:
|
92 |
-
|
93 |
-
```py
|
94 |
-
from diffusers import DiffusionPipeline
|
95 |
-
|
96 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
97 |
-
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id)
|
98 |
-
stable_diffusion.scheduler.compatibles
|
99 |
-
```
|
100 |
-
|
101 |
-
Let's use the [`SchedulerMixin.from_pretrained`] method to replace the default [`PNDMScheduler`] with a more performant scheduler, [`EulerDiscreteScheduler`]. The `subfolder="scheduler"` argument is required to load the scheduler configuration from the correct [subfolder](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/scheduler) of the pipeline repository.
|
102 |
-
|
103 |
-
Then you can pass the new [`EulerDiscreteScheduler`] instance to the `scheduler` argument in [`DiffusionPipeline`]:
|
104 |
-
|
105 |
-
```python
|
106 |
-
from diffusers import DiffusionPipeline, EulerDiscreteScheduler, DPMSolverMultistepScheduler
|
107 |
-
|
108 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
109 |
-
|
110 |
-
scheduler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
111 |
-
|
112 |
-
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, scheduler=scheduler)
|
113 |
-
```
|
114 |
-
|
115 |
-
### Safety checker
|
116 |
-
|
117 |
-
Diffusion models like Stable Diffusion can generate harmful content, which is why 🧨 Diffusers has a [safety checker](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to check generated outputs against known hardcoded NSFW content. If you'd like to disable the safety checker for whatever reason, pass `None` to the `safety_checker` argument:
|
118 |
-
|
119 |
-
```python
|
120 |
-
from diffusers import DiffusionPipeline
|
121 |
-
|
122 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
123 |
-
stable_diffusion = DiffusionPipeline.from_pretrained(repo_id, safety_checker=None)
|
124 |
-
```
|
125 |
-
|
126 |
-
### Reuse components across pipelines
|
127 |
-
|
128 |
-
You can also reuse the same components in multiple pipelines to avoid loading the weights into RAM twice. Use the [`~DiffusionPipeline.components`] method to save the components:
|
129 |
-
|
130 |
-
```python
|
131 |
-
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
|
132 |
-
|
133 |
-
model_id = "runwayml/stable-diffusion-v1-5"
|
134 |
-
stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id)
|
135 |
-
|
136 |
-
components = stable_diffusion_txt2img.components
|
137 |
-
```
|
138 |
-
|
139 |
-
Then you can pass the `components` to another pipeline without reloading the weights into RAM:
|
140 |
-
|
141 |
-
```py
|
142 |
-
stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(**components)
|
143 |
-
```
|
144 |
-
|
145 |
-
You can also pass the components individually to the pipeline if you want more flexibility over which components to reuse or disable. For example, to reuse the same components in the text-to-image pipeline, except for the safety checker and feature extractor, in the image-to-image pipeline:
|
146 |
-
|
147 |
-
```py
|
148 |
-
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
|
149 |
-
|
150 |
-
model_id = "runwayml/stable-diffusion-v1-5"
|
151 |
-
stable_diffusion_txt2img = StableDiffusionPipeline.from_pretrained(model_id)
|
152 |
-
stable_diffusion_img2img = StableDiffusionImg2ImgPipeline(
|
153 |
-
vae=stable_diffusion_txt2img.vae,
|
154 |
-
text_encoder=stable_diffusion_txt2img.text_encoder,
|
155 |
-
tokenizer=stable_diffusion_txt2img.tokenizer,
|
156 |
-
unet=stable_diffusion_txt2img.unet,
|
157 |
-
scheduler=stable_diffusion_txt2img.scheduler,
|
158 |
-
safety_checker=None,
|
159 |
-
feature_extractor=None,
|
160 |
-
requires_safety_checker=False,
|
161 |
-
)
|
162 |
-
```
|
163 |
-
|
164 |
-
## Checkpoint variants
|
165 |
-
|
166 |
-
A checkpoint variant is usually a checkpoint where it's weights are:
|
167 |
-
|
168 |
-
- Stored in a different floating point type for lower precision and lower storage, such as [`torch.float16`](https://pytorch.org/docs/stable/tensors.html#data-types), because it only requires half the bandwidth and storage to download. You can't use this variant if you're continuing training or using a CPU.
|
169 |
-
- Non-exponential mean averaged (EMA) weights which shouldn't be used for inference. You should use these to continue finetuning a model.
|
170 |
-
|
171 |
-
<Tip>
|
172 |
-
|
173 |
-
💡 When the checkpoints have identical model structures, but they were trained on different datasets and with a different training setup, they should be stored in separate repositories instead of variations (for example, [`stable-diffusion-v1-4`] and [`stable-diffusion-v1-5`]).
|
174 |
-
|
175 |
-
</Tip>
|
176 |
-
|
177 |
-
Otherwise, a variant is **identical** to the original checkpoint. They have exactly the same serialization format (like [Safetensors](./using_safetensors)), model structure, and weights have identical tensor shapes.
|
178 |
-
|
179 |
-
| **checkpoint type** | **weight name** | **argument for loading weights** |
|
180 |
-
|---------------------|-------------------------------------|----------------------------------|
|
181 |
-
| original | diffusion_pytorch_model.bin | |
|
182 |
-
| floating point | diffusion_pytorch_model.fp16.bin | `variant`, `torch_dtype` |
|
183 |
-
| non-EMA | diffusion_pytorch_model.non_ema.bin | `variant` |
|
184 |
-
|
185 |
-
There are two important arguments to know for loading variants:
|
186 |
-
|
187 |
-
- `torch_dtype` defines the floating point precision of the loaded checkpoints. For example, if you want to save bandwidth by loading a `fp16` variant, you should specify `torch_dtype=torch.float16` to *convert the weights* to `fp16`. Otherwise, the `fp16` weights are converted to the default `fp32` precision. You can also load the original checkpoint without defining the `variant` argument, and convert it to `fp16` with `torch_dtype=torch.float16`. In this case, the default `fp32` weights are downloaded first, and then they're converted to `fp16` after loading.
|
188 |
-
|
189 |
-
- `variant` defines which files should be loaded from the repository. For example, if you want to load a `non_ema` variant from the [`diffusers/stable-diffusion-variants`](https://huggingface.co/diffusers/stable-diffusion-variants/tree/main/unet) repository, you should specify `variant="non_ema"` to download the `non_ema` files.
|
190 |
-
|
191 |
-
```python
|
192 |
-
from diffusers import DiffusionPipeline
|
193 |
-
import torch
|
194 |
-
|
195 |
-
# load fp16 variant
|
196 |
-
stable_diffusion = DiffusionPipeline.from_pretrained(
|
197 |
-
"runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16
|
198 |
-
)
|
199 |
-
# load non_ema variant
|
200 |
-
stable_diffusion = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema")
|
201 |
-
```
|
202 |
-
|
203 |
-
To save a checkpoint stored in a different floating point type or as a non-EMA variant, use the [`DiffusionPipeline.save_pretrained`] method and specify the `variant` argument. You should try and save a variant to the same folder as the original checkpoint, so you can load both from the same folder:
|
204 |
-
|
205 |
-
```python
|
206 |
-
from diffusers import DiffusionPipeline
|
207 |
-
|
208 |
-
# save as fp16 variant
|
209 |
-
stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16")
|
210 |
-
# save as non-ema variant
|
211 |
-
stable_diffusion.save_pretrained("runwayml/stable-diffusion-v1-5", variant="non_ema")
|
212 |
-
```
|
213 |
-
|
214 |
-
If you don't save the variant to an existing folder, you must specify the `variant` argument otherwise it'll throw an `Exception` because it can't find the original checkpoint:
|
215 |
-
|
216 |
-
```python
|
217 |
-
# 👎 this won't work
|
218 |
-
stable_diffusion = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", torch_dtype=torch.float16)
|
219 |
-
# 👍 this works
|
220 |
-
stable_diffusion = DiffusionPipeline.from_pretrained(
|
221 |
-
"./stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16
|
222 |
-
)
|
223 |
-
```
|
224 |
-
|
225 |
-
<!--
|
226 |
-
TODO(Patrick) - Make sure to uncomment this part as soon as things are deprecated.
|
227 |
-
|
228 |
-
#### Using `revision` to load pipeline variants is deprecated
|
229 |
-
|
230 |
-
Previously the `revision` argument of [`DiffusionPipeline.from_pretrained`] was heavily used to
|
231 |
-
load model variants, e.g.:
|
232 |
-
|
233 |
-
```python
|
234 |
-
from diffusers import DiffusionPipeline
|
235 |
-
|
236 |
-
pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16")
|
237 |
-
```
|
238 |
-
|
239 |
-
However, this behavior is now deprecated since the "revision" argument should (just as it's done in GitHub) better be used to load model checkpoints from a specific commit or branch in development.
|
240 |
-
|
241 |
-
The above example is therefore deprecated and won't be supported anymore for `diffusers >= 1.0.0`.
|
242 |
-
|
243 |
-
<Tip warning={true}>
|
244 |
-
|
245 |
-
If you load diffusers pipelines or models with `revision="fp16"` or `revision="non_ema"`,
|
246 |
-
please make sure to update to code and use `variant="fp16"` or `variation="non_ema"` respectively
|
247 |
-
instead.
|
248 |
-
|
249 |
-
</Tip>
|
250 |
-
-->
|
251 |
-
|
252 |
-
## Models
|
253 |
-
|
254 |
-
Models are loaded from the [`ModelMixin.from_pretrained`] method, which downloads and caches the latest version of the model weights and configurations. If the latest files are available in the local cache, [`~ModelMixin.from_pretrained`] reuses files in the cache instead of redownloading them.
|
255 |
-
|
256 |
-
Models can be loaded from a subfolder with the `subfolder` argument. For example, the model weights for `runwayml/stable-diffusion-v1-5` are stored in the [`unet`](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main/unet) subfolder:
|
257 |
-
|
258 |
-
```python
|
259 |
-
from diffusers import UNet2DConditionModel
|
260 |
-
|
261 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
262 |
-
model = UNet2DConditionModel.from_pretrained(repo_id, subfolder="unet")
|
263 |
-
```
|
264 |
-
|
265 |
-
Or directly from a repository's [directory](https://huggingface.co/google/ddpm-cifar10-32/tree/main):
|
266 |
-
|
267 |
-
```python
|
268 |
-
from diffusers import UNet2DModel
|
269 |
-
|
270 |
-
repo_id = "google/ddpm-cifar10-32"
|
271 |
-
model = UNet2DModel.from_pretrained(repo_id)
|
272 |
-
```
|
273 |
-
|
274 |
-
You can also load and save model variants by specifying the `variant` argument in [`ModelMixin.from_pretrained`] and [`ModelMixin.save_pretrained`]:
|
275 |
-
|
276 |
-
```python
|
277 |
-
from diffusers import UNet2DConditionModel
|
278 |
-
|
279 |
-
model = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", variant="non-ema")
|
280 |
-
model.save_pretrained("./local-unet", variant="non-ema")
|
281 |
-
```
|
282 |
-
|
283 |
-
## Schedulers
|
284 |
-
|
285 |
-
Schedulers are loaded from the [`SchedulerMixin.from_pretrained`] method, and unlike models, schedulers are **not parameterized** or **trained**; they are defined by a configuration file.
|
286 |
-
|
287 |
-
Loading schedulers does not consume any significant amount of memory and the same configuration file can be used for a variety of different schedulers.
|
288 |
-
For example, the following schedulers are compatible with [`StableDiffusionPipeline`] which means you can load the same scheduler configuration file in any of these classes:
|
289 |
-
|
290 |
-
```python
|
291 |
-
from diffusers import StableDiffusionPipeline
|
292 |
-
from diffusers import (
|
293 |
-
DDPMScheduler,
|
294 |
-
DDIMScheduler,
|
295 |
-
PNDMScheduler,
|
296 |
-
LMSDiscreteScheduler,
|
297 |
-
EulerDiscreteScheduler,
|
298 |
-
EulerAncestralDiscreteScheduler,
|
299 |
-
DPMSolverMultistepScheduler,
|
300 |
-
)
|
301 |
-
|
302 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
303 |
-
|
304 |
-
ddpm = DDPMScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
305 |
-
ddim = DDIMScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
306 |
-
pndm = PNDMScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
307 |
-
lms = LMSDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
308 |
-
euler_anc = EulerAncestralDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
309 |
-
euler = EulerDiscreteScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
310 |
-
dpm = DPMSolverMultistepScheduler.from_pretrained(repo_id, subfolder="scheduler")
|
311 |
-
|
312 |
-
# replace `dpm` with any of `ddpm`, `ddim`, `pndm`, `lms`, `euler_anc`, `euler`
|
313 |
-
pipeline = StableDiffusionPipeline.from_pretrained(repo_id, scheduler=dpm)
|
314 |
-
```
|
315 |
-
|
316 |
-
## DiffusionPipeline explained
|
317 |
-
|
318 |
-
As a class method, [`DiffusionPipeline.from_pretrained`] is responsible for two things:
|
319 |
-
|
320 |
-
- Download the latest version of the folder structure required for inference and cache it. If the latest folder structure is available in the local cache, [`DiffusionPipeline.from_pretrained`] reuses the cache and won't redownload the files.
|
321 |
-
- Load the cached weights into the correct pipeline [class](./api/pipelines/overview#diffusers-summary) - retrieved from the `model_index.json` file - and return an instance of it.
|
322 |
-
|
323 |
-
The pipelines underlying folder structure corresponds directly with their class instances. For example, the [`StableDiffusionPipeline`] corresponds to the folder structure in [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5).
|
324 |
-
|
325 |
-
```python
|
326 |
-
from diffusers import DiffusionPipeline
|
327 |
-
|
328 |
-
repo_id = "runwayml/stable-diffusion-v1-5"
|
329 |
-
pipeline = DiffusionPipeline.from_pretrained(repo_id)
|
330 |
-
print(pipeline)
|
331 |
-
```

You'll see pipeline is an instance of [`StableDiffusionPipeline`], which consists of seven components:

- `"feature_extractor"`: a [`~transformers.CLIPFeatureExtractor`] from 🤗 Transformers.
- `"safety_checker"`: a [component](https://github.com/huggingface/diffusers/blob/e55687e1e15407f60f32242027b7bb8170e58266/src/diffusers/pipelines/stable_diffusion/safety_checker.py#L32) for screening against harmful content.
- `"scheduler"`: an instance of [`PNDMScheduler`].
- `"text_encoder"`: a [`~transformers.CLIPTextModel`] from 🤗 Transformers.
- `"tokenizer"`: a [`~transformers.CLIPTokenizer`] from 🤗 Transformers.
- `"unet"`: an instance of [`UNet2DConditionModel`].
- `"vae"`: an instance of [`AutoencoderKL`].

```json
StableDiffusionPipeline {
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
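
Because every entry is just a named component, any of them can be overridden at load time. A sketch (both keyword arguments below are accepted by `from_pretrained`; passing `None` skips loading that component):

```python
from diffusers import DiffusionPipeline, EulerDiscreteScheduler

scheduler = EulerDiscreteScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
)
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    scheduler=scheduler,   # replaces the default PNDMScheduler
    safety_checker=None,   # don't load this component at all
)
```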

Compare the components of the pipeline instance to the [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) folder structure, and you'll see there is a separate folder for each of the components in the repository:

```
.
├── feature_extractor
│   └── preprocessor_config.json
├── model_index.json
├── safety_checker
│   ├── config.json
│   └── pytorch_model.bin
├── scheduler
│   └── scheduler_config.json
├── text_encoder
│   ├── config.json
│   └── pytorch_model.bin
├── tokenizer
│   ├── merges.txt
│   ├── special_tokens_map.json
│   ├── tokenizer_config.json
│   └── vocab.json
├── unet
│   ├── config.json
│   └── diffusion_pytorch_model.bin
└── vae
    ├── config.json
    └── diffusion_pytorch_model.bin
```

You can access each of the components of the pipeline as an attribute to view its configuration:

```py
pipeline.tokenizer
CLIPTokenizer(
    name_or_path="/root/.cache/huggingface/hub/models--runwayml--stable-diffusion-v1-5/snapshots/39593d5650112b4cc580433f6b0435385882d819/tokenizer",
    vocab_size=49408,
    model_max_length=77,
    is_fast=False,
    padding_side="right",
    truncation_side="right",
    special_tokens={
        "bos_token": AddedToken("<|startoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "eos_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "unk_token": AddedToken("<|endoftext|>", rstrip=False, lstrip=False, single_word=False, normalized=True),
        "pad_token": "<|endoftext|>",
    },
)
```
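
A related convenience: the pipeline exposes all of these objects at once through its `components` property, which makes it cheap to assemble a sibling pipeline without reloading any weights. A short sketch:

```python
from diffusers import DiffusionPipeline, StableDiffusionImg2ImgPipeline

pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# `components` is a dict holding the same seven objects as the attributes above.
print(pipeline.components.keys())

# Reuse them to build an img2img pipeline that shares the loaded weights.
img2img = StableDiffusionImg2ImgPipeline(**pipeline.components)
```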

Every pipeline expects a `model_index.json` file that tells the [`DiffusionPipeline`]:

- which pipeline class to load from `_class_name`
- which version of 🧨 Diffusers was used to create the model in `_diffusers_version`
- what components from which library are stored in the subfolders (`name` corresponds to the component and subfolder name, `library` corresponds to the name of the library to load the class from, and `class` corresponds to the class name)

```json
{
  "_class_name": "StableDiffusionPipeline",
  "_diffusers_version": "0.6.0",
  "feature_extractor": [
    "transformers",
    "CLIPImageProcessor"
  ],
  "safety_checker": [
    "stable_diffusion",
    "StableDiffusionSafetyChecker"
  ],
  "scheduler": [
    "diffusers",
    "PNDMScheduler"
  ],
  "text_encoder": [
    "transformers",
    "CLIPTextModel"
  ],
  "tokenizer": [
    "transformers",
    "CLIPTokenizer"
  ],
  "unet": [
    "diffusers",
    "UNet2DConditionModel"
  ],
  "vae": [
    "diffusers",
    "AutoencoderKL"
  ]
}
```
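
To inspect this mapping without loading any weights, a minimal sketch that fetches just the index file with `huggingface_hub`:

```python
import json
from huggingface_hub import hf_hub_download

index_path = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="model_index.json")
with open(index_path) as f:
    index = json.load(f)

print(index["_class_name"])  # e.g. "StableDiffusionPipeline"
for name, value in index.items():
    if isinstance(value, list):  # component entries are [library, class] pairs
        library, cls = value
        print(f"{name}: {cls} (from {library})")
```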
spaces/Andy1621/uniformer_image_detection/configs/paa/paa_r50_fpn_1.5x_coco.py
DELETED
@@ -1,3 +0,0 @@
-_base_ = './paa_r50_fpn_1x_coco.py'
-lr_config = dict(step=[12, 16])
-runner = dict(type='EpochBasedRunner', max_epochs=18)
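
This config is a thin overlay on a `_base_` file: everything comes from `paa_r50_fpn_1x_coco.py` except the LR steps and epoch count. A hedged sketch of how MMDetection-style configs resolve that inheritance (assuming `mmcv` is installed and the config tree sits at the hypothetical path below):

```python
from mmcv import Config

# Config.fromfile follows the `_base_` chain and merges the overrides on top.
cfg = Config.fromfile("configs/paa/paa_r50_fpn_1.5x_coco.py")  # hypothetical local path
print(cfg.lr_config.step)      # [12, 16]
print(cfg.runner.max_epochs)   # 18
```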
spaces/Andy1621/uniformer_image_detection/mmdet/models/detectors/point_rend.py
DELETED
@@ -1,29 +0,0 @@
-from ..builder import DETECTORS
-from .two_stage import TwoStageDetector
-
-
-@DETECTORS.register_module()
-class PointRend(TwoStageDetector):
-    """PointRend: Image Segmentation as Rendering
-
-    This detector is the implementation of
-    `PointRend <https://arxiv.org/abs/1912.08193>`_.
-
-    """
-
-    def __init__(self,
-                 backbone,
-                 rpn_head,
-                 roi_head,
-                 train_cfg,
-                 test_cfg,
-                 neck=None,
-                 pretrained=None):
-        super(PointRend, self).__init__(
-            backbone=backbone,
-            neck=neck,
-            rpn_head=rpn_head,
-            roi_head=roi_head,
-            train_cfg=train_cfg,
-            test_cfg=test_cfg,
-            pretrained=pretrained)
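
The `@DETECTORS.register_module()` decorator is what lets a config refer to this class by its string name. A sketch of the registry round-trip, assuming an MMDetection 2.x install and a config whose `model.type` is `'PointRend'` (the path below is hypothetical):

```python
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile("configs/point_rend/point_rend_r50_fpn_1x_coco.py")  # hypothetical path
model = build_detector(cfg.model, train_cfg=cfg.get("train_cfg"), test_cfg=cfg.get("test_cfg"))
print(type(model).__name__)  # PointRend
```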
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d16-mg124_512x1024_80k_cityscapes.py
DELETED
@@ -1,11 +0,0 @@
-_base_ = './deeplabv3_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://resnet101_v1c',
-    backbone=dict(
-        depth=101,
-        dilations=(1, 1, 1, 2),
-        strides=(1, 2, 2, 1),
-        multi_grid=(1, 2, 4)),
-    decode_head=dict(
-        dilations=(1, 6, 12, 18),
-        sampler=dict(type='OHEMPixelSampler', min_kept=100000)))
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d16-mg124_512x1024_80k_cityscapes.py
DELETED
@@ -1,11 +0,0 @@
-_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://resnet101_v1c',
-    backbone=dict(
-        depth=101,
-        dilations=(1, 1, 1, 2),
-        strides=(1, 2, 2, 1),
-        multi_grid=(1, 2, 4)),
-    decode_head=dict(
-        dilations=(1, 6, 12, 18),
-        sampler=dict(type='OHEMPixelSampler', min_kept=100000)))
spaces/Anon4review/HIPTDemo/vision_transformer.py
DELETED
@@ -1,330 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Mostly copy-paste from timm library.
-https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
-"""
-import math
-from functools import partial
-
-import torch
-import torch.nn as nn
-
-
-def _no_grad_trunc_normal_(tensor, mean, std, a, b):
-    # Cut & paste from PyTorch official master until it's in a few official releases - RW
-    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
-    def norm_cdf(x):
-        # Computes standard normal cumulative distribution function
-        return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
-    if (mean < a - 2 * std) or (mean > b + 2 * std):
-        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
-                      "The distribution of values may be incorrect.",
-                      stacklevel=2)
-
-    with torch.no_grad():
-        # Values are generated by using a truncated uniform distribution and
-        # then using the inverse CDF for the normal distribution.
-        # Get upper and lower cdf values
-        l = norm_cdf((a - mean) / std)
-        u = norm_cdf((b - mean) / std)
-
-        # Uniformly fill tensor with values from [l, u], then translate to
-        # [2l-1, 2u-1].
-        tensor.uniform_(2 * l - 1, 2 * u - 1)
-
-        # Use inverse cdf transform for normal distribution to get truncated
-        # standard normal
-        tensor.erfinv_()
-
-        # Transform to proper mean, std
-        tensor.mul_(std * math.sqrt(2.))
-        tensor.add_(mean)
-
-        # Clamp to ensure it's in the proper range
-        tensor.clamp_(min=a, max=b)
-        return tensor
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
-    # type: (Tensor, float, float, float, float) -> Tensor
-    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
-
-
-def drop_path(x, drop_prob: float = 0., training: bool = False):
-    if drop_prob == 0. or not training:
-        return x
-    keep_prob = 1 - drop_prob
-    shape = (x.shape[0],) + (1,) * (x.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
-    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
-    random_tensor.floor_()  # binarize
-    output = x.div(keep_prob) * random_tensor
-    return output
-
-
-class DropPath(nn.Module):
-    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
-    """
-    def __init__(self, drop_prob=None):
-        super(DropPath, self).__init__()
-        self.drop_prob = drop_prob
-
-    def forward(self, x):
-        return drop_path(x, self.drop_prob, self.training)
-
-
-class Mlp(nn.Module):
-    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
-        super().__init__()
-        out_features = out_features or in_features
-        hidden_features = hidden_features or in_features
-        self.fc1 = nn.Linear(in_features, hidden_features)
-        self.act = act_layer()
-        self.fc2 = nn.Linear(hidden_features, out_features)
-        self.drop = nn.Dropout(drop)
-
-    def forward(self, x):
-        x = self.fc1(x)
-        x = self.act(x)
-        x = self.drop(x)
-        x = self.fc2(x)
-        x = self.drop(x)
-        return x
-
-
-class Attention(nn.Module):
-    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
-        super().__init__()
-        self.num_heads = num_heads
-        head_dim = dim // num_heads
-        self.scale = qk_scale or head_dim ** -0.5
-
-        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
-        self.attn_drop = nn.Dropout(attn_drop)
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-
-    def forward(self, x):
-        B, N, C = x.shape
-        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
-        q, k, v = qkv[0], qkv[1], qkv[2]
-
-        attn = (q @ k.transpose(-2, -1)) * self.scale
-        attn = attn.softmax(dim=-1)
-        attn = self.attn_drop(attn)
-
-        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
-        x = self.proj(x)
-        x = self.proj_drop(x)
-        return x, attn
-
-
-class Block(nn.Module):
-    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
-                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
-        super().__init__()
-        self.norm1 = norm_layer(dim)
-        self.attn = Attention(
-            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
-        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-        self.norm2 = norm_layer(dim)
-        mlp_hidden_dim = int(dim * mlp_ratio)
-        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
-    def forward(self, x, return_attention=False):
-        y, attn = self.attn(self.norm1(x))
-        if return_attention:
-            return attn
-        x = x + self.drop_path(y)
-        x = x + self.drop_path(self.mlp(self.norm2(x)))
-        return x
-
-
-class PatchEmbed(nn.Module):
-    """ Image to Patch Embedding
-    """
-    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
-        super().__init__()
-        num_patches = (img_size // patch_size) * (img_size // patch_size)
-        self.img_size = img_size
-        self.patch_size = patch_size
-        self.num_patches = num_patches
-
-        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-
-    def forward(self, x):
-        B, C, H, W = x.shape
-        x = self.proj(x).flatten(2).transpose(1, 2)
-        return x
-
-
-class VisionTransformer(nn.Module):
-    """ Vision Transformer """
-    def __init__(self, img_size=[224], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
-                 num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
-                 drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
-        super().__init__()
-        self.num_features = self.embed_dim = embed_dim
-
-        self.patch_embed = PatchEmbed(
-            img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
-        num_patches = self.patch_embed.num_patches
-
-        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
-        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
-        self.pos_drop = nn.Dropout(p=drop_rate)
-
-        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
-        self.blocks = nn.ModuleList([
-            Block(
-                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
-                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
-            for i in range(depth)])
-        self.norm = norm_layer(embed_dim)
-
-        # Classifier head
-        self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
-
-        trunc_normal_(self.pos_embed, std=.02)
-        trunc_normal_(self.cls_token, std=.02)
-        self.apply(self._init_weights)
-
-    def _init_weights(self, m):
-        if isinstance(m, nn.Linear):
-            trunc_normal_(m.weight, std=.02)
-            if isinstance(m, nn.Linear) and m.bias is not None:
-                nn.init.constant_(m.bias, 0)
-        elif isinstance(m, nn.LayerNorm):
-            nn.init.constant_(m.bias, 0)
-            nn.init.constant_(m.weight, 1.0)
-
-    def interpolate_pos_encoding(self, x, w, h):
-        npatch = x.shape[1] - 1
-        N = self.pos_embed.shape[1] - 1
-        if npatch == N and w == h:
-            return self.pos_embed
-        class_pos_embed = self.pos_embed[:, 0]
-        patch_pos_embed = self.pos_embed[:, 1:]
-        dim = x.shape[-1]
-        w0 = w // self.patch_embed.patch_size
-        h0 = h // self.patch_embed.patch_size
-        # we add a small number to avoid floating point error in the interpolation
-        # see discussion at https://github.com/facebookresearch/dino/issues/8
-        w0, h0 = w0 + 0.1, h0 + 0.1
-        patch_pos_embed = nn.functional.interpolate(
-            patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
-            scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
-            mode='bicubic',
-        )
-        assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
-        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
-        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
-
-    def prepare_tokens(self, x):
-        B, nc, w, h = x.shape
-        x = self.patch_embed(x)  # patch linear embedding
-
-        # add the [CLS] token to the embed patch tokens
-        cls_tokens = self.cls_token.expand(B, -1, -1)
-        x = torch.cat((cls_tokens, x), dim=1)
-
-        # add positional encoding to each token
-        x = x + self.interpolate_pos_encoding(x, w, h)
-
-        return self.pos_drop(x)
-
-    def forward(self, x):
-        x = self.prepare_tokens(x)
-        for blk in self.blocks:
-            x = blk(x)
-        x = self.norm(x)
-        return x[:, 0]
-
-    def get_last_selfattention(self, x):
-        x = self.prepare_tokens(x)
-        for i, blk in enumerate(self.blocks):
-            if i < len(self.blocks) - 1:
-                x = blk(x)
-            else:
-                # return attention of the last block
-                return blk(x, return_attention=True)
-
-    def get_intermediate_layers(self, x, n=1):
-        x = self.prepare_tokens(x)
-        # we return the output tokens from the `n` last blocks
-        output = []
-        for i, blk in enumerate(self.blocks):
-            x = blk(x)
-            if len(self.blocks) - i <= n:
-                output.append(self.norm(x))
-        return output
-
-
-def vit_tiny(patch_size=16, **kwargs):
-    model = VisionTransformer(
-        patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
-        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
-    return model
-
-
-def vit_small(patch_size=16, **kwargs):
-    model = VisionTransformer(
-        patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
-        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
-    return model
-
-
-def vit_base(patch_size=16, **kwargs):
-    model = VisionTransformer(
-        patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
-        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
-    return model
-
-
-class DINOHead(nn.Module):
-    def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
-        super().__init__()
-        nlayers = max(nlayers, 1)
-        if nlayers == 1:
-            self.mlp = nn.Linear(in_dim, bottleneck_dim)
-        else:
-            layers = [nn.Linear(in_dim, hidden_dim)]
-            if use_bn:
-                layers.append(nn.BatchNorm1d(hidden_dim))
-            layers.append(nn.GELU())
-            for _ in range(nlayers - 2):
-                layers.append(nn.Linear(hidden_dim, hidden_dim))
-                if use_bn:
-                    layers.append(nn.BatchNorm1d(hidden_dim))
-                layers.append(nn.GELU())
-            layers.append(nn.Linear(hidden_dim, bottleneck_dim))
-            self.mlp = nn.Sequential(*layers)
-        self.apply(self._init_weights)
-        self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
-        self.last_layer.weight_g.data.fill_(1)
-        if norm_last_layer:
-            self.last_layer.weight_g.requires_grad = False
-
-    def _init_weights(self, m):
-        if isinstance(m, nn.Linear):
-            trunc_normal_(m.weight, std=.02)
-            if isinstance(m, nn.Linear) and m.bias is not None:
-                nn.init.constant_(m.bias, 0)
-
-    def forward(self, x):
-        x = self.mlp(x)
-        x = nn.functional.normalize(x, dim=-1, p=2)
-        x = self.last_layer(x)
-        return x
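
Since everything above is self-contained PyTorch, a minimal forward-pass sketch (assuming the file is importable as `vision_transformer`; with `num_classes=0` the head is an identity, so `forward` returns the CLS embedding):

```python
import torch
import vision_transformer as vits  # hypothetical import of the module above

model = vits.vit_small(patch_size=16)  # embed_dim=384, depth=12, num_heads=6
model.eval()

x = torch.randn(1, 3, 224, 224)  # 14x14 = 196 patches + 1 CLS token = 197 tokens
with torch.no_grad():
    cls_embedding = model(x)                # [1, 384]
    attn = model.get_last_selfattention(x)  # [1, 6, 197, 197]
print(cls_embedding.shape, attn.shape)

# Note: the module calls warnings.warn in _no_grad_trunc_normal_ without importing
# warnings; that branch is never hit for the default trunc_normal_ arguments.
```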
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py
DELETED
@@ -1,57 +0,0 @@
-# dataset settings
-dataset_type = 'PascalVOCDataset'
-data_root = 'data/VOCdevkit/VOC2012'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-crop_size = (512, 512)
-train_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(type='LoadAnnotations'),
-    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
-    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
-    dict(type='RandomFlip', prob=0.5),
-    dict(type='PhotoMetricDistortion'),
-    dict(type='Normalize', **img_norm_cfg),
-    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
-]
-test_pipeline = [
-    dict(type='LoadImageFromFile'),
-    dict(
-        type='MultiScaleFlipAug',
-        img_scale=(2048, 512),
-        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
-        flip=False,
-        transforms=[
-            dict(type='Resize', keep_ratio=True),
-            dict(type='RandomFlip'),
-            dict(type='Normalize', **img_norm_cfg),
-            dict(type='ImageToTensor', keys=['img']),
-            dict(type='Collect', keys=['img']),
-        ])
-]
-data = dict(
-    samples_per_gpu=4,
-    workers_per_gpu=4,
-    train=dict(
-        type=dataset_type,
-        data_root=data_root,
-        img_dir='JPEGImages',
-        ann_dir='SegmentationClass',
-        split='ImageSets/Segmentation/train.txt',
-        pipeline=train_pipeline),
-    val=dict(
-        type=dataset_type,
-        data_root=data_root,
-        img_dir='JPEGImages',
-        ann_dir='SegmentationClass',
-        split='ImageSets/Segmentation/val.txt',
-        pipeline=test_pipeline),
-    test=dict(
-        type=dataset_type,
-        data_root=data_root,
-        img_dir='JPEGImages',
-        ann_dir='SegmentationClass',
-        split='ImageSets/Segmentation/val.txt',
-        pipeline=test_pipeline))
spaces/Anonymous-sub/Rerender/gmflow_module/evaluate.py
DELETED
@@ -1,689 +0,0 @@
-from PIL import Image
-import os
-import time
-import numpy as np
-import torch
-import torch.nn.functional as F
-
-import data
-from utils import frame_utils
-from utils.flow_viz import save_vis_flow_tofile
-
-from utils.utils import InputPadder, compute_out_of_boundary_mask
-from glob import glob
-from gmflow.geometry import forward_backward_consistency_check
-
-
-@torch.no_grad()
-def create_sintel_submission(model,
-                             output_path='sintel_submission',
-                             padding_factor=8,
-                             save_vis_flow=False,
-                             no_save_flo=False,
-                             attn_splits_list=None,
-                             corr_radius_list=None,
-                             prop_radius_list=None,
-                             ):
-    """ Create submission for the Sintel leaderboard """
-    model.eval()
-    for dstype in ['clean', 'final']:
-        test_dataset = data.MpiSintel(split='test', aug_params=None, dstype=dstype)
-
-        flow_prev, sequence_prev = None, None
-        for test_id in range(len(test_dataset)):
-            image1, image2, (sequence, frame) = test_dataset[test_id]
-            if sequence != sequence_prev:
-                flow_prev = None
-
-            padder = InputPadder(image1.shape, padding_factor=padding_factor)
-            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
-
-            results_dict = model(image1, image2,
-                                 attn_splits_list=attn_splits_list,
-                                 corr_radius_list=corr_radius_list,
-                                 prop_radius_list=prop_radius_list,
-                                 )
-
-            flow_pr = results_dict['flow_preds'][-1]  # [B, 2, H, W]
-
-            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
-
-            output_dir = os.path.join(output_path, dstype, sequence)
-            output_file = os.path.join(output_dir, 'frame%04d.flo' % (frame + 1))
-
-            if not os.path.exists(output_dir):
-                os.makedirs(output_dir)
-
-            if not no_save_flo:
-                frame_utils.writeFlow(output_file, flow)
-            sequence_prev = sequence
-
-            # Save vis flow
-            if save_vis_flow:
-                vis_flow_file = output_file.replace('.flo', '.png')
-                save_vis_flow_tofile(flow, vis_flow_file)
-
-
-@torch.no_grad()
-def create_kitti_submission(model,
-                            output_path='kitti_submission',
-                            padding_factor=8,
-                            save_vis_flow=False,
-                            attn_splits_list=None,
-                            corr_radius_list=None,
-                            prop_radius_list=None,
-                            ):
-    """ Create submission for the KITTI leaderboard """
-    model.eval()
-    test_dataset = data.KITTI(split='testing', aug_params=None)
-
-    if not os.path.exists(output_path):
-        os.makedirs(output_path)
-
-    for test_id in range(len(test_dataset)):
-        image1, image2, (frame_id,) = test_dataset[test_id]
-        padder = InputPadder(image1.shape, mode='kitti', padding_factor=padding_factor)
-        image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
-
-        results_dict = model(image1, image2,
-                             attn_splits_list=attn_splits_list,
-                             corr_radius_list=corr_radius_list,
-                             prop_radius_list=prop_radius_list,
-                             )
-
-        flow_pr = results_dict['flow_preds'][-1]
-
-        flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
-
-        output_filename = os.path.join(output_path, frame_id)
-
-        if save_vis_flow:
-            vis_flow_file = output_filename
-            save_vis_flow_tofile(flow, vis_flow_file)
-        else:
-            frame_utils.writeFlowKITTI(output_filename, flow)
-
-
-@torch.no_grad()
-def validate_chairs(model,
-                    with_speed_metric=False,
-                    attn_splits_list=False,
-                    corr_radius_list=False,
-                    prop_radius_list=False,
-                    ):
-    """ Perform evaluation on the FlyingChairs (test) split """
-    model.eval()
-    epe_list = []
-    results = {}
-
-    if with_speed_metric:
-        s0_10_list = []
-        s10_40_list = []
-        s40plus_list = []
-
-    val_dataset = data.FlyingChairs(split='validation')
-
-    print('Number of validation image pairs: %d' % len(val_dataset))
-
-    for val_id in range(len(val_dataset)):
-        image1, image2, flow_gt, _ = val_dataset[val_id]
-
-        image1 = image1[None].cuda()
-        image2 = image2[None].cuda()
-
-        results_dict = model(image1, image2,
-                             attn_splits_list=attn_splits_list,
-                             corr_radius_list=corr_radius_list,
-                             prop_radius_list=prop_radius_list,
-                             )
-
-        flow_pr = results_dict['flow_preds'][-1]  # [B, 2, H, W]
-
-        assert flow_pr.size()[-2:] == flow_gt.size()[-2:]
-
-        epe = torch.sum((flow_pr[0].cpu() - flow_gt) ** 2, dim=0).sqrt()
-        epe_list.append(epe.view(-1).numpy())
-
-        if with_speed_metric:
-            flow_gt_speed = torch.sum(flow_gt ** 2, dim=0).sqrt()
-            valid_mask = (flow_gt_speed < 10)
-            if valid_mask.max() > 0:
-                s0_10_list.append(epe[valid_mask].cpu().numpy())
-
-            valid_mask = (flow_gt_speed >= 10) * (flow_gt_speed <= 40)
-            if valid_mask.max() > 0:
-                s10_40_list.append(epe[valid_mask].cpu().numpy())
-
-            valid_mask = (flow_gt_speed > 40)
-            if valid_mask.max() > 0:
-                s40plus_list.append(epe[valid_mask].cpu().numpy())
-
-    epe_all = np.concatenate(epe_list)
-    epe = np.mean(epe_all)
-    px1 = np.mean(epe_all > 1)
-    px3 = np.mean(epe_all > 3)
-    px5 = np.mean(epe_all > 5)
-    print("Validation Chairs EPE: %.3f, 1px: %.3f, 3px: %.3f, 5px: %.3f" % (epe, px1, px3, px5))
-    results['chairs_epe'] = epe
-    results['chairs_1px'] = px1
-    results['chairs_3px'] = px3
-    results['chairs_5px'] = px5
-
-    if with_speed_metric:
-        s0_10 = np.mean(np.concatenate(s0_10_list))
-        s10_40 = np.mean(np.concatenate(s10_40_list))
-        s40plus = np.mean(np.concatenate(s40plus_list))
-
-        print("Validation Chairs s0_10: %.3f, s10_40: %.3f, s40+: %.3f" % (
-            s0_10,
-            s10_40,
-            s40plus))
-
-        results['chairs_s0_10'] = s0_10
-        results['chairs_s10_40'] = s10_40
-        results['chairs_s40+'] = s40plus
-
-    return results
-
-
-@torch.no_grad()
-def validate_things(model,
-                    padding_factor=8,
-                    with_speed_metric=False,
-                    max_val_flow=400,
-                    val_things_clean_only=True,
-                    attn_splits_list=False,
-                    corr_radius_list=False,
-                    prop_radius_list=False,
-                    ):
-    """ Perform validation using the Things (test) split """
-    model.eval()
-    results = {}
-
-    for dstype in ['frames_cleanpass', 'frames_finalpass']:
-        if val_things_clean_only:
-            if dstype == 'frames_finalpass':
-                continue
-
-        val_dataset = data.FlyingThings3D(dstype=dstype, test_set=True, validate_subset=True,
-                                          )
-        print('Number of validation image pairs: %d' % len(val_dataset))
-        epe_list = []
-
-        if with_speed_metric:
-            s0_10_list = []
-            s10_40_list = []
-            s40plus_list = []
-
-        for val_id in range(len(val_dataset)):
-            image1, image2, flow_gt, valid_gt = val_dataset[val_id]
-            image1 = image1[None].cuda()
-            image2 = image2[None].cuda()
-
-            padder = InputPadder(image1.shape, padding_factor=padding_factor)
-            image1, image2 = padder.pad(image1, image2)
-
-            results_dict = model(image1, image2,
-                                 attn_splits_list=attn_splits_list,
-                                 corr_radius_list=corr_radius_list,
-                                 prop_radius_list=prop_radius_list,
-                                 )
-            flow_pr = results_dict['flow_preds'][-1]
-
-            flow = padder.unpad(flow_pr[0]).cpu()
-
-            # Evaluation on flow <= max_val_flow
-            flow_gt_speed = torch.sum(flow_gt ** 2, dim=0).sqrt()
-            valid_gt = valid_gt * (flow_gt_speed < max_val_flow)
-            valid_gt = valid_gt.contiguous()
-
-            epe = torch.sum((flow - flow_gt) ** 2, dim=0).sqrt()
-            val = valid_gt >= 0.5
-            epe_list.append(epe[val].cpu().numpy())
-
-            if with_speed_metric:
-                valid_mask = (flow_gt_speed < 10) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s0_10_list.append(epe[valid_mask].cpu().numpy())
-
-                valid_mask = (flow_gt_speed >= 10) * (flow_gt_speed <= 40) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s10_40_list.append(epe[valid_mask].cpu().numpy())
-
-                valid_mask = (flow_gt_speed > 40) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s40plus_list.append(epe[valid_mask].cpu().numpy())
-
-        epe_list = np.mean(np.concatenate(epe_list))
-
-        epe = np.mean(epe_list)
-
-        if dstype == 'frames_cleanpass':
-            dstype = 'things_clean'
-        if dstype == 'frames_finalpass':
-            dstype = 'things_final'
-
-        print("Validation Things test set (%s) EPE: %.3f" % (dstype, epe))
-        results[dstype + '_epe'] = epe
-
-        if with_speed_metric:
-            s0_10 = np.mean(np.concatenate(s0_10_list))
-            s10_40 = np.mean(np.concatenate(s10_40_list))
-            s40plus = np.mean(np.concatenate(s40plus_list))
-
-            print("Validation Things test (%s) s0_10: %.3f, s10_40: %.3f, s40+: %.3f" % (
-                dstype, s0_10,
-                s10_40,
-                s40plus))
-
-            results[dstype + '_s0_10'] = s0_10
-            results[dstype + '_s10_40'] = s10_40
-            results[dstype + '_s40+'] = s40plus
-
-    return results
-
-
-@torch.no_grad()
-def validate_sintel(model,
-                    count_time=False,
-                    padding_factor=8,
-                    with_speed_metric=False,
-                    evaluate_matched_unmatched=False,
-                    attn_splits_list=False,
-                    corr_radius_list=False,
-                    prop_radius_list=False,
-                    ):
-    """ Perform validation using the Sintel (train) split """
-    model.eval()
-    results = {}
-
-    if count_time:
-        total_time = 0
-        num_runs = 100
-
-    for dstype in ['clean', 'final']:
-        val_dataset = data.MpiSintel(split='training', dstype=dstype,
-                                     load_occlusion=evaluate_matched_unmatched,
-                                     )
-
-        print('Number of validation image pairs: %d' % len(val_dataset))
-        epe_list = []
-
-        if evaluate_matched_unmatched:
-            matched_epe_list = []
-            unmatched_epe_list = []
-
-        if with_speed_metric:
-            s0_10_list = []
-            s10_40_list = []
-            s40plus_list = []
-
-        for val_id in range(len(val_dataset)):
-            if evaluate_matched_unmatched:
-                image1, image2, flow_gt, valid, noc_valid = val_dataset[val_id]
-
-                # compute in-image-plane valid mask
-                in_image_valid = compute_out_of_boundary_mask(flow_gt.unsqueeze(0)).squeeze(0)  # [H, W]
-
-            else:
-                image1, image2, flow_gt, _ = val_dataset[val_id]
-
-            image1 = image1[None].cuda()
-            image2 = image2[None].cuda()
-
-            padder = InputPadder(image1.shape, padding_factor=padding_factor)
-            image1, image2 = padder.pad(image1, image2)
-
-            if count_time and val_id >= 5:  # 5 warmup
-                torch.cuda.synchronize()
-                time_start = time.perf_counter()
-
-            results_dict = model(image1, image2,
-                                 attn_splits_list=attn_splits_list,
-                                 corr_radius_list=corr_radius_list,
-                                 prop_radius_list=prop_radius_list,
-                                 )
-
-            # useful when using parallel branches
-            flow_pr = results_dict['flow_preds'][-1]
-
-            if count_time and val_id >= 5:
-                torch.cuda.synchronize()
-                total_time += time.perf_counter() - time_start
-
-                if val_id >= num_runs + 4:
-                    break
-
-            flow = padder.unpad(flow_pr[0]).cpu()
-
-            epe = torch.sum((flow - flow_gt) ** 2, dim=0).sqrt()
-            epe_list.append(epe.view(-1).numpy())
-
-            if evaluate_matched_unmatched:
-                matched_valid_mask = (noc_valid > 0.5) & (in_image_valid > 0.5)
-
-                if matched_valid_mask.max() > 0:
-                    matched_epe_list.append(epe[matched_valid_mask].cpu().numpy())
-                    unmatched_epe_list.append(epe[~matched_valid_mask].cpu().numpy())
-
-            if with_speed_metric:
-                flow_gt_speed = torch.sum(flow_gt ** 2, dim=0).sqrt()
-                valid_mask = (flow_gt_speed < 10)
-                if valid_mask.max() > 0:
-                    s0_10_list.append(epe[valid_mask].cpu().numpy())
-
-                valid_mask = (flow_gt_speed >= 10) * (flow_gt_speed <= 40)
-                if valid_mask.max() > 0:
-                    s10_40_list.append(epe[valid_mask].cpu().numpy())
-
-                valid_mask = (flow_gt_speed > 40)
-                if valid_mask.max() > 0:
-                    s40plus_list.append(epe[valid_mask].cpu().numpy())
-
-        epe_all = np.concatenate(epe_list)
-        epe = np.mean(epe_all)
-        px1 = np.mean(epe_all > 1)
-        px3 = np.mean(epe_all > 3)
-        px5 = np.mean(epe_all > 5)
-
-        dstype_ori = dstype
-
-        print("Validation Sintel (%s) EPE: %.3f, 1px: %.3f, 3px: %.3f, 5px: %.3f" % (dstype_ori, epe, px1, px3, px5))
-
-        dstype = 'sintel_' + dstype
-
-        results[dstype + '_epe'] = np.mean(epe_list)
-        results[dstype + '_1px'] = px1
-        results[dstype + '_3px'] = px3
-        results[dstype + '_5px'] = px5
-
-        if with_speed_metric:
-            s0_10 = np.mean(np.concatenate(s0_10_list))
-            s10_40 = np.mean(np.concatenate(s10_40_list))
-            s40plus = np.mean(np.concatenate(s40plus_list))
-
-            print("Validation Sintel (%s) s0_10: %.3f, s10_40: %.3f, s40+: %.3f" % (
-                dstype_ori, s0_10,
-                s10_40,
-                s40plus))
-
-            results[dstype + '_s0_10'] = s0_10
-            results[dstype + '_s10_40'] = s10_40
-            results[dstype + '_s40+'] = s40plus
-
-        if count_time:
-            print('Time: %.6fs' % (total_time / num_runs))
-            break  # only the clean pass when counting time
-
-        if evaluate_matched_unmatched:
-            matched_epe = np.mean(np.concatenate(matched_epe_list))
-            unmatched_epe = np.mean(np.concatenate(unmatched_epe_list))
-
-            print('Validation Sintel (%s) matched epe: %.3f, unmatched epe: %.3f' % (
-                dstype_ori, matched_epe, unmatched_epe))
-
-            results[dstype + '_matched'] = matched_epe
-            results[dstype + '_unmatched'] = unmatched_epe
-
-    return results
-
-
-@torch.no_grad()
-def validate_kitti(model,
-                   padding_factor=8,
-                   with_speed_metric=False,
-                   average_over_pixels=True,
-                   attn_splits_list=False,
-                   corr_radius_list=False,
-                   prop_radius_list=False,
-                   ):
-    """ Perform validation using the KITTI-2015 (train) split """
-    model.eval()
-
-    val_dataset = data.KITTI(split='training')
-    print('Number of validation image pairs: %d' % len(val_dataset))
-
-    out_list, epe_list = [], []
-    results = {}
-
-    if with_speed_metric:
-        if average_over_pixels:
-            s0_10_list = []
-            s10_40_list = []
-            s40plus_list = []
-        else:
-            s0_10_epe_sum = 0
-            s0_10_valid_samples = 0
-            s10_40_epe_sum = 0
-            s10_40_valid_samples = 0
-            s40plus_epe_sum = 0
-            s40plus_valid_samples = 0
-
-    for val_id in range(len(val_dataset)):
-        image1, image2, flow_gt, valid_gt = val_dataset[val_id]
-        image1 = image1[None].cuda()
-        image2 = image2[None].cuda()
-
-        padder = InputPadder(image1.shape, mode='kitti', padding_factor=padding_factor)
-        image1, image2 = padder.pad(image1, image2)
-
-        results_dict = model(image1, image2,
-                             attn_splits_list=attn_splits_list,
-                             corr_radius_list=corr_radius_list,
-                             prop_radius_list=prop_radius_list,
-                             )
-
-        # useful when using parallel branches
-        flow_pr = results_dict['flow_preds'][-1]
-
-        flow = padder.unpad(flow_pr[0]).cpu()
-
-        epe = torch.sum((flow - flow_gt) ** 2, dim=0).sqrt()
-        mag = torch.sum(flow_gt ** 2, dim=0).sqrt()
-
-        if with_speed_metric:
-            # flow_gt_speed = torch.sum(flow_gt ** 2, dim=0).sqrt()
-            flow_gt_speed = mag
-
-            if average_over_pixels:
-                valid_mask = (flow_gt_speed < 10) * (valid_gt >= 0.5)  # note KITTI GT is sparse
-                if valid_mask.max() > 0:
-                    s0_10_list.append(epe[valid_mask].cpu().numpy())
-
-                valid_mask = (flow_gt_speed >= 10) * (flow_gt_speed <= 40) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s10_40_list.append(epe[valid_mask].cpu().numpy())
-
-                valid_mask = (flow_gt_speed > 40) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s40plus_list.append(epe[valid_mask].cpu().numpy())
-
-            else:
-                valid_mask = (flow_gt_speed < 10) * (valid_gt >= 0.5)  # note KITTI GT is sparse
-                if valid_mask.max() > 0:
-                    s0_10_epe_sum += (epe * valid_mask).sum() / valid_mask.sum()
-                    s0_10_valid_samples += 1
-
-                valid_mask = (flow_gt_speed >= 10) * (flow_gt_speed <= 40) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s10_40_epe_sum += (epe * valid_mask).sum() / valid_mask.sum()
-                    s10_40_valid_samples += 1
-
-                valid_mask = (flow_gt_speed > 40) * (valid_gt >= 0.5)
-                if valid_mask.max() > 0:
-                    s40plus_epe_sum += (epe * valid_mask).sum() / valid_mask.sum()
-                    s40plus_valid_samples += 1
-
-        epe = epe.view(-1)
-        mag = mag.view(-1)
-        val = valid_gt.view(-1) >= 0.5
-
-        out = ((epe > 3.0) & ((epe / mag) > 0.05)).float()
-
-        if average_over_pixels:
-            epe_list.append(epe[val].cpu().numpy())
-        else:
-            epe_list.append(epe[val].mean().item())
-
-        out_list.append(out[val].cpu().numpy())
-
-    if average_over_pixels:
-        epe_list = np.concatenate(epe_list)
-    else:
-        epe_list = np.array(epe_list)
-    out_list = np.concatenate(out_list)
-
-    epe = np.mean(epe_list)
-    f1 = 100 * np.mean(out_list)
-
-    print("Validation KITTI EPE: %.3f, F1-all: %.3f" % (epe, f1))
-    results['kitti_epe'] = epe
-    results['kitti_f1'] = f1
-
-    if with_speed_metric:
-        if average_over_pixels:
-            s0_10 = np.mean(np.concatenate(s0_10_list))
-            s10_40 = np.mean(np.concatenate(s10_40_list))
-            s40plus = np.mean(np.concatenate(s40plus_list))
-        else:
-            s0_10 = s0_10_epe_sum / s0_10_valid_samples
-            s10_40 = s10_40_epe_sum / s10_40_valid_samples
-            s40plus = s40plus_epe_sum / s40plus_valid_samples
-
-        print("Validation KITTI s0_10: %.3f, s10_40: %.3f, s40+: %.3f" % (
-            s0_10,
-            s10_40,
-            s40plus))
-
-        results['kitti_s0_10'] = s0_10
-        results['kitti_s10_40'] = s10_40
-        results['kitti_s40+'] = s40plus
-
-    return results
-
-
-@torch.no_grad()
-def inference_on_dir(model,
-                     inference_dir,
-                     output_path='output',
-                     padding_factor=8,
-                     inference_size=None,
-                     paired_data=False,  # dir of paired test data instead of a sequence
-                     save_flo_flow=False,  # save as .flo for quantitative evaluation
-                     attn_splits_list=None,
-                     corr_radius_list=None,
-                     prop_radius_list=None,
-                     pred_bidir_flow=False,
-                     fwd_bwd_consistency_check=False,
-                     ):
-    """ Inference on a directory """
-    model.eval()
-
-    if fwd_bwd_consistency_check:
-        assert pred_bidir_flow
-
-    if not os.path.exists(output_path):
-        os.makedirs(output_path)
-
-    filenames = sorted(glob(inference_dir + '/*'))
-    print('%d images found' % len(filenames))
-
-    stride = 2 if paired_data else 1
-
-    if paired_data:
-        assert len(filenames) % 2 == 0
-
-    for test_id in range(0, len(filenames) - 1, stride):
-
-        image1 = frame_utils.read_gen(filenames[test_id])
-        image2 = frame_utils.read_gen(filenames[test_id + 1])
-
-        image1 = np.array(image1).astype(np.uint8)
-        image2 = np.array(image2).astype(np.uint8)
-
-        if len(image1.shape) == 2:  # gray image, for example, HD1K
-            image1 = np.tile(image1[..., None], (1, 1, 3))
-            image2 = np.tile(image2[..., None], (1, 1, 3))
-        else:
-            image1 = image1[..., :3]
-            image2 = image2[..., :3]
-
-        image1 = torch.from_numpy(image1).permute(2, 0, 1).float()
-        image2 = torch.from_numpy(image2).permute(2, 0, 1).float()
-
-        if inference_size is None:
-            padder = InputPadder(image1.shape, padding_factor=padding_factor)
-            image1, image2 = padder.pad(image1[None].cuda(), image2[None].cuda())
-        else:
-            image1, image2 = image1[None].cuda(), image2[None].cuda()
-
-        # resize before inference
-        if inference_size is not None:
-            assert isinstance(inference_size, list) or isinstance(inference_size, tuple)
-            ori_size = image1.shape[-2:]
-            image1 = F.interpolate(image1, size=inference_size, mode='bilinear',
-                                   align_corners=True)
-            image2 = F.interpolate(image2, size=inference_size, mode='bilinear',
-                                   align_corners=True)
-
-        results_dict = model(image1, image2,
-                             attn_splits_list=attn_splits_list,
-                             corr_radius_list=corr_radius_list,
-                             prop_radius_list=prop_radius_list,
-                             pred_bidir_flow=pred_bidir_flow,
-                             )
-
-        flow_pr = results_dict['flow_preds'][-1]  # [B, 2, H, W]
-
-        # resize back
-        if inference_size is not None:
-            flow_pr = F.interpolate(flow_pr, size=ori_size, mode='bilinear',
-                                    align_corners=True)
-            flow_pr[:, 0] = flow_pr[:, 0] * ori_size[-1] / inference_size[-1]
-            flow_pr[:, 1] = flow_pr[:, 1] * ori_size[-2] / inference_size[-2]
-
-        if inference_size is None:
-            flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()  # [H, W, 2]
-        else:
-            flow = flow_pr[0].permute(1, 2, 0).cpu().numpy()  # [H, W, 2]
-
-        output_file = os.path.join(output_path, os.path.basename(filenames[test_id])[:-4] + '_flow.png')
-
-        # save vis flow
-        save_vis_flow_tofile(flow, output_file)
-
-        # also predict backward flow
-        if pred_bidir_flow:
-            assert flow_pr.size(0) == 2  # [2, H, W, 2]
-
-            if inference_size is None:
-                flow_bwd = padder.unpad(flow_pr[1]).permute(1, 2, 0).cpu().numpy()  # [H, W, 2]
-            else:
-                flow_bwd = flow_pr[1].permute(1, 2, 0).cpu().numpy()  # [H, W, 2]
-
-            output_file = os.path.join(output_path, os.path.basename(filenames[test_id])[:-4] + '_flow_bwd.png')
-
-            # save vis flow
-            save_vis_flow_tofile(flow_bwd, output_file)
-
-            # forward-backward consistency check
-            # occlusion is 1
-            if fwd_bwd_consistency_check:
-                if inference_size is None:
-                    fwd_flow = padder.unpad(flow_pr[0]).unsqueeze(0)  # [1, 2, H, W]
-                    bwd_flow = padder.unpad(flow_pr[1]).unsqueeze(0)  # [1, 2, H, W]
-                else:
-                    fwd_flow = flow_pr[0].unsqueeze(0)
-                    bwd_flow = flow_pr[1].unsqueeze(0)
-
-                fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow)  # [1, H, W] float
-
-                fwd_occ_file = os.path.join(output_path, os.path.basename(filenames[test_id])[:-4] + '_occ.png')
-                bwd_occ_file = os.path.join(output_path, os.path.basename(filenames[test_id])[:-4] + '_occ_bwd.png')
-
-                Image.fromarray((fwd_occ[0].cpu().numpy() * 255.).astype(np.uint8)).save(fwd_occ_file)
-                Image.fromarray((bwd_occ[0].cpu().numpy() * 255.).astype(np.uint8)).save(bwd_occ_file)
-
-        if save_flo_flow:
-            output_file = os.path.join(output_path, os.path.basename(filenames[test_id])[:-4] + '_pred.flo')
-            frame_utils.writeFlow(output_file, flow)
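
The metrics computed above reduce to a few lines of NumPy. A sketch mirroring the formulas in `validate_kitti`: end-point error (EPE) is the per-pixel L2 distance between predicted and ground-truth flow, and a KITTI "F1-all" outlier is a pixel with EPE > 3 px *and* EPE > 5% of the ground-truth flow magnitude (the epsilon guard below is an addition to avoid division by zero):

```python
import numpy as np

def flow_metrics(flow_pred, flow_gt, valid=None):
    """flow_pred, flow_gt: [H, W, 2] arrays; valid: optional [H, W] boolean mask."""
    epe = np.sqrt(((flow_pred - flow_gt) ** 2).sum(axis=-1))
    mag = np.sqrt((flow_gt ** 2).sum(axis=-1))
    if valid is None:
        valid = np.ones_like(epe, dtype=bool)  # KITTI ground truth is sparse
    outlier = (epe > 3.0) & (epe / np.maximum(mag, 1e-6) > 0.05)
    return epe[valid].mean(), 100.0 * outlier[valid].mean()
```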
spaces/ApathyINC/CustomGPT/utils.py
DELETED
@@ -1,54 +0,0 @@
-import json, os
-from tencentcloud.common import credential
-from tencentcloud.common.profile.client_profile import ClientProfile
-from tencentcloud.common.profile.http_profile import HttpProfile
-from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
-from tencentcloud.tmt.v20180321 import tmt_client, models
-
-def get_tmt_client():
-    try:
-        # Instantiate a credential object with the Tencent Cloud account SecretId and SecretKey; keep the key pair confidential.
-        # Leaked code can expose the SecretId and SecretKey and threaten all resources under the account. The sample below is for reference only; safer key handling is described at https://cloud.tencent.com/document/product/1278/85305
-        # Keys can be obtained from the console at https://console.cloud.tencent.com/cam/capi
-        SecretId = os.environ.get("TENCENTCLOUD_SECRET_ID")
-        SecretKey = os.environ.get("TENCENTCLOUD_SECRET_KEY")
-        cred = credential.Credential(SecretId, SecretKey)
-        # Instantiate an HTTP profile; optional, can be skipped if there are no special requirements.
-        httpProfile = HttpProfile()
-        httpProfile.endpoint = "tmt.tencentcloudapi.com"
-
-        # Instantiate a client profile; optional, can be skipped if there are no special requirements.
-        clientProfile = ClientProfile()
-        clientProfile.httpProfile = httpProfile
-        # Instantiate a client object for the target product; clientProfile is optional.
-        client = tmt_client.TmtClient(cred, "ap-shanghai", clientProfile)
-        print(f'client_{client}')
-        return client
-    except TencentCloudSDKException as err:
-        print(f'client_err_{err}')
-        return None
-
-def getTextTrans_tmt(tmt_client, text, source='zh', target='en'):
-    def is_chinese(string):
-        for ch in string:
-            if u'\u4e00' <= ch <= u'\u9fff':
-                return True
-        return False
-
-    if tmt_client is None:
-        return text
-    if not is_chinese(text) and target == 'en':
-        return text
-    try:
-        req = models.TextTranslateRequest()
-        params = {
-            "SourceText": text,
-            "Source": source,
-            "Target": target,
-            "ProjectId": 0
-        }
-        req.from_json_string(json.dumps(params))
-        resp = tmt_client.TextTranslate(req)
-        return resp.TargetText
-    except Exception as e:
-        return text
|
|
|
|
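For context, a minimal usage sketch of the removed helpers (a sketch only: it assumes the `tencentcloud-sdk-python` package is installed and the two environment variables above are set; the sample string is illustrative):

```python
from utils import get_tmt_client, getTextTrans_tmt

client = get_tmt_client()  # returns None when credentials are missing or invalid
# Translates Chinese input to English; on any failure the input is returned unchanged.
print(getTextTrans_tmt(client, "你好,世界", source="zh", target="en"))
```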
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/msgpack/__init__.py
DELETED
@@ -1,57 +0,0 @@
-# coding: utf-8
-from .exceptions import *
-from .ext import ExtType, Timestamp
-
-import os
-import sys
-
-
-version = (1, 0, 5)
-__version__ = "1.0.5"
-
-
-if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2:
-    from .fallback import Packer, unpackb, Unpacker
-else:
-    try:
-        from ._cmsgpack import Packer, unpackb, Unpacker
-    except ImportError:
-        from .fallback import Packer, unpackb, Unpacker
-
-
-def pack(o, stream, **kwargs):
-    """
-    Pack object `o` and write it to `stream`
-
-    See :class:`Packer` for options.
-    """
-    packer = Packer(**kwargs)
-    stream.write(packer.pack(o))
-
-
-def packb(o, **kwargs):
-    """
-    Pack object `o` and return packed bytes
-
-    See :class:`Packer` for options.
-    """
-    return Packer(**kwargs).pack(o)
-
-
-def unpack(stream, **kwargs):
-    """
-    Unpack an object from `stream`.
-
-    Raises `ExtraData` when `stream` contains extra bytes.
-    See :class:`Unpacker` for options.
-    """
-    data = stream.read()
-    return unpackb(data, **kwargs)
-
-
-# alias for compatibility to simplejson/marshal/pickle.
-load = unpack
-loads = unpackb
-
-dump = pack
-dumps = packb
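For reference, a minimal round-trip through this module's public API (a sketch assuming a standalone `msgpack` 1.0.x install; pip's vendored copy is normally imported as `pip._vendor.msgpack` instead):

```python
import msgpack

payload = {"id": 7, "tags": ["a", "b"]}
blob = msgpack.packb(payload)      # serialize to bytes
restored = msgpack.unpackb(blob)   # deserialize back to Python objects
assert restored == payload
```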
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/register.py
DELETED
@@ -1,319 +0,0 @@
-"""distutils.command.register
-
-Implements the Distutils 'register' command (register with the repository).
-"""
-
-# created 2002/10/21, Richard Jones
-
-import getpass
-import io
-import urllib.parse
-import urllib.request
-from warnings import warn
-
-from distutils.core import PyPIRCCommand
-from distutils import log
-
-
-class register(PyPIRCCommand):
-
-    description = "register the distribution with the Python package index"
-    user_options = PyPIRCCommand.user_options + [
-        ('list-classifiers', None, 'list the valid Trove classifiers'),
-        (
-            'strict',
-            None,
-            'Will stop the registering if the meta-data are not fully compliant',
-        ),
-    ]
-    boolean_options = PyPIRCCommand.boolean_options + [
-        'verify',
-        'list-classifiers',
-        'strict',
-    ]
-
-    sub_commands = [('check', lambda self: True)]
-
-    def initialize_options(self):
-        PyPIRCCommand.initialize_options(self)
-        self.list_classifiers = 0
-        self.strict = 0
-
-    def finalize_options(self):
-        PyPIRCCommand.finalize_options(self)
-        # setting options for the `check` subcommand
-        check_options = {
-            'strict': ('register', self.strict),
-            'restructuredtext': ('register', 1),
-        }
-        self.distribution.command_options['check'] = check_options
-
-    def run(self):
-        self.finalize_options()
-        self._set_config()
-
-        # Run sub commands
-        for cmd_name in self.get_sub_commands():
-            self.run_command(cmd_name)
-
-        if self.dry_run:
-            self.verify_metadata()
-        elif self.list_classifiers:
-            self.classifiers()
-        else:
-            self.send_metadata()
-
-    def check_metadata(self):
-        """Deprecated API."""
-        warn(
-            "distutils.command.register.check_metadata is deprecated; "
-            "use the check command instead",
-            DeprecationWarning,
-        )
-        check = self.distribution.get_command_obj('check')
-        check.ensure_finalized()
-        check.strict = self.strict
-        check.restructuredtext = 1
-        check.run()
-
-    def _set_config(self):
-        '''Reads the configuration file and set attributes.'''
-        config = self._read_pypirc()
-        if config != {}:
-            self.username = config['username']
-            self.password = config['password']
-            self.repository = config['repository']
-            self.realm = config['realm']
-            self.has_config = True
-        else:
-            if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
-                raise ValueError('%s not found in .pypirc' % self.repository)
-            if self.repository == 'pypi':
-                self.repository = self.DEFAULT_REPOSITORY
-            self.has_config = False
-
-    def classifiers(self):
-        '''Fetch the list of classifiers from the server.'''
-        url = self.repository + '?:action=list_classifiers'
-        response = urllib.request.urlopen(url)
-        log.info(self._read_pypi_response(response))
-
-    def verify_metadata(self):
-        '''Send the metadata to the package index server to be checked.'''
-        # send the info to the server and report the result
-        (code, result) = self.post_to_server(self.build_post_data('verify'))
-        log.info('Server response (%s): %s', code, result)
-
-    def send_metadata(self):  # noqa: C901
-        '''Send the metadata to the package index server.
-
-        Well, do the following:
-        1. figure who the user is, and then
-        2. send the data as a Basic auth'ed POST.
-
-        First we try to read the username/password from $HOME/.pypirc,
-        which is a ConfigParser-formatted file with a section
-        [distutils] containing username and password entries (both
-        in clear text). Eg:
-
-            [distutils]
-            index-servers =
-                pypi
-
-            [pypi]
-            username: fred
-            password: sekrit
-
-        Otherwise, to figure who the user is, we offer the user three
-        choices:
-
-         1. use existing login,
-         2. register as a new user, or
-         3. set the password to a random string and email the user.
-
-        '''
-        # see if we can short-cut and get the username/password from the
-        # config
-        if self.has_config:
-            choice = '1'
-            username = self.username
-            password = self.password
-        else:
-            choice = 'x'
-            username = password = ''
-
-        # get the user's login info
-        choices = '1 2 3 4'.split()
-        while choice not in choices:
-            self.announce(
-                '''\
-We need to know who you are, so please choose either:
- 1. use your existing login,
- 2. register as a new user,
- 3. have the server generate a new password for you (and email it to you), or
- 4. quit
-Your selection [default 1]: ''',
-                log.INFO,
-            )
-            choice = input()
-            if not choice:
-                choice = '1'
-            elif choice not in choices:
-                print('Please choose one of the four options!')
-
-        if choice == '1':
-            # get the username and password
-            while not username:
-                username = input('Username: ')
-            while not password:
-                password = getpass.getpass('Password: ')
-
-            # set up the authentication
-            auth = urllib.request.HTTPPasswordMgr()
-            host = urllib.parse.urlparse(self.repository)[1]
-            auth.add_password(self.realm, host, username, password)
-            # send the info to the server and report the result
-            code, result = self.post_to_server(self.build_post_data('submit'), auth)
-            self.announce('Server response ({}): {}'.format(code, result), log.INFO)
-
-            # possibly save the login
-            if code == 200:
-                if self.has_config:
-                    # sharing the password in the distribution instance
-                    # so the upload command can reuse it
-                    self.distribution.password = password
-                else:
-                    self.announce(
-                        (
-                            'I can store your PyPI login so future '
-                            'submissions will be faster.'
-                        ),
-                        log.INFO,
-                    )
-                    self.announce(
-                        '(the login will be stored in %s)' % self._get_rc_file(),
-                        log.INFO,
-                    )
-                    choice = 'X'
-                    while choice.lower() not in 'yn':
-                        choice = input('Save your login (y/N)?')
-                        if not choice:
-                            choice = 'n'
-                    if choice.lower() == 'y':
-                        self._store_pypirc(username, password)
-
-        elif choice == '2':
-            data = {':action': 'user'}
-            data['name'] = data['password'] = data['email'] = ''
-            data['confirm'] = None
-            while not data['name']:
-                data['name'] = input('Username: ')
-            while data['password'] != data['confirm']:
-                while not data['password']:
-                    data['password'] = getpass.getpass('Password: ')
-                while not data['confirm']:
-                    data['confirm'] = getpass.getpass(' Confirm: ')
-                if data['password'] != data['confirm']:
-                    data['password'] = ''
-                    data['confirm'] = None
-                    print("Password and confirm don't match!")
-            while not data['email']:
-                data['email'] = input('   EMail: ')
-            code, result = self.post_to_server(data)
-            if code != 200:
-                log.info('Server response (%s): %s', code, result)
-            else:
-                log.info('You will receive an email shortly.')
-                log.info('Follow the instructions in it to ' 'complete registration.')
-        elif choice == '3':
-            data = {':action': 'password_reset'}
-            data['email'] = ''
-            while not data['email']:
-                data['email'] = input('Your email address: ')
-            code, result = self.post_to_server(data)
-            log.info('Server response (%s): %s', code, result)
-
-    def build_post_data(self, action):
-        # figure the data to send - the metadata plus some additional
-        # information used by the package server
-        meta = self.distribution.metadata
-        data = {
-            ':action': action,
-            'metadata_version': '1.0',
-            'name': meta.get_name(),
-            'version': meta.get_version(),
-            'summary': meta.get_description(),
-            'home_page': meta.get_url(),
-            'author': meta.get_contact(),
-            'author_email': meta.get_contact_email(),
-            'license': meta.get_licence(),
-            'description': meta.get_long_description(),
-            'keywords': meta.get_keywords(),
-            'platform': meta.get_platforms(),
-            'classifiers': meta.get_classifiers(),
-            'download_url': meta.get_download_url(),
-            # PEP 314
-            'provides': meta.get_provides(),
-            'requires': meta.get_requires(),
-            'obsoletes': meta.get_obsoletes(),
-        }
-        if data['provides'] or data['requires'] or data['obsoletes']:
-            data['metadata_version'] = '1.1'
-        return data
-
-    def post_to_server(self, data, auth=None):  # noqa: C901
-        '''Post a query to the server, and return a string response.'''
-        if 'name' in data:
-            self.announce(
-                'Registering {} to {}'.format(data['name'], self.repository), log.INFO
-            )
-        # Build up the MIME payload for the urllib2 POST data
-        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
-        sep_boundary = '\n--' + boundary
-        end_boundary = sep_boundary + '--'
-        body = io.StringIO()
-        for key, value in data.items():
-            # handle multiple entries for the same name
-            if type(value) not in (type([]), type(())):
-                value = [value]
-            for value in value:
-                value = str(value)
-                body.write(sep_boundary)
-                body.write('\nContent-Disposition: form-data; name="%s"' % key)
-                body.write("\n\n")
-                body.write(value)
-                if value and value[-1] == '\r':
-                    body.write('\n')  # write an extra newline (lurve Macs)
-        body.write(end_boundary)
-        body.write("\n")
-        body = body.getvalue().encode("utf-8")
-
-        # build the Request
-        headers = {
-            'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'
-            % boundary,
-            'Content-length': str(len(body)),
-        }
-        req = urllib.request.Request(self.repository, body, headers)
-
-        # handle HTTP and include the Basic Auth handler
-        opener = urllib.request.build_opener(
-            urllib.request.HTTPBasicAuthHandler(password_mgr=auth)
-        )
-        data = ''
-        try:
-            result = opener.open(req)
-        except urllib.error.HTTPError as e:
-            if self.show_response:
-                data = e.fp.read()
-            result = e.code, e.msg
-        except urllib.error.URLError as e:
-            result = 500, str(e)
-        else:
-            if self.show_response:
-                data = self._read_pypi_response(result)
-            result = 200, 'OK'
-        if self.show_response:
-            msg = '\n'.join(('-' * 75, data, '-' * 75))
-            self.announce(msg, log.INFO)
-        return result
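As a side note, the multipart encoding in `post_to_server` above can be exercised on its own; a minimal sketch (the field values are illustrative, and the `register` command itself has long been deprecated):

```python
import io

boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = '\n--' + boundary
body = io.StringIO()
for key, value in {':action': 'verify', 'name': 'example-pkg'}.items():
    body.write(sep_boundary)
    body.write('\nContent-Disposition: form-data; name="%s"' % key)
    body.write('\n\n')
    body.write(str(value))
body.write(sep_boundary + '--\n')
payload = body.getvalue().encode('utf-8')
print(len(payload), 'bytes')  # ready to POST with a multipart/form-data header
```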
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/engine/train_loop.py
DELETED
@@ -1,417 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import logging
-import numpy as np
-import time
-import weakref
-from typing import List, Mapping, Optional
-import torch
-from torch.nn.parallel import DataParallel, DistributedDataParallel
-
-import detectron2.utils.comm as comm
-from detectron2.utils.events import EventStorage, get_event_storage
-from detectron2.utils.logger import _log_api_usage
-
-__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
-
-
-class HookBase:
-    """
-    Base class for hooks that can be registered with :class:`TrainerBase`.
-
-    Each hook can implement 4 methods. The way they are called is demonstrated
-    in the following snippet:
-    ::
-        hook.before_train()
-        for iter in range(start_iter, max_iter):
-            hook.before_step()
-            trainer.run_step()
-            hook.after_step()
-        iter += 1
-        hook.after_train()
-
-    Notes:
-        1. In the hook method, users can access ``self.trainer`` to access more
-           properties about the context (e.g., model, current iteration, or config
-           if using :class:`DefaultTrainer`).
-
-        2. A hook that does something in :meth:`before_step` can often be
-           implemented equivalently in :meth:`after_step`.
-           If the hook takes non-trivial time, it is strongly recommended to
-           implement the hook in :meth:`after_step` instead of :meth:`before_step`.
-           The convention is that :meth:`before_step` should only take negligible time.
-
-           Following this convention will allow hooks that do care about the difference
-           between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
-           function properly.
-
-    """
-
-    trainer: "TrainerBase" = None
-    """
-    A weak reference to the trainer object. Set by the trainer when the hook is registered.
-    """
-
-    def before_train(self):
-        """
-        Called before the first iteration.
-        """
-        pass
-
-    def after_train(self):
-        """
-        Called after the last iteration.
-        """
-        pass
-
-    def before_step(self):
-        """
-        Called before each iteration.
-        """
-        pass
-
-    def after_step(self):
-        """
-        Called after each iteration.
-        """
-        pass
-
-    def state_dict(self):
-        """
-        Hooks are stateless by default, but can be made checkpointable by
-        implementing `state_dict` and `load_state_dict`.
-        """
-        return {}
-
-
-class TrainerBase:
-    """
-    Base class for iterative trainer with hooks.
-
-    The only assumption we made here is: the training runs in a loop.
-    A subclass can implement what the loop is.
-    We made no assumptions about the existence of dataloader, optimizer, model, etc.
-
-    Attributes:
-        iter(int): the current iteration.
-
-        start_iter(int): The iteration to start with.
-            By convention the minimum possible value is 0.
-
-        max_iter(int): The iteration to end training.
-
-        storage(EventStorage): An EventStorage that's opened during the course of training.
-    """
-
-    def __init__(self) -> None:
-        self._hooks: List[HookBase] = []
-        self.iter: int = 0
-        self.start_iter: int = 0
-        self.max_iter: int
-        self.storage: EventStorage
-        _log_api_usage("trainer." + self.__class__.__name__)
-
-    def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
-        """
-        Register hooks to the trainer. The hooks are executed in the order
-        they are registered.
-
-        Args:
-            hooks (list[Optional[HookBase]]): list of hooks
-        """
-        hooks = [h for h in hooks if h is not None]
-        for h in hooks:
-            assert isinstance(h, HookBase)
-            # To avoid circular reference, hooks and trainer cannot own each other.
-            # This normally does not matter, but will cause memory leak if the
-            # involved objects contain __del__:
-            # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
-            h.trainer = weakref.proxy(self)
-        self._hooks.extend(hooks)
-
-    def train(self, start_iter: int, max_iter: int):
-        """
-        Args:
-            start_iter, max_iter (int): See docs above
-        """
-        logger = logging.getLogger(__name__)
-        logger.info("Starting training from iteration {}".format(start_iter))
-
-        self.iter = self.start_iter = start_iter
-        self.max_iter = max_iter
-
-        with EventStorage(start_iter) as self.storage:
-            try:
-                self.before_train()
-                for self.iter in range(start_iter, max_iter):
-                    self.before_step()
-                    self.run_step()
-                    self.after_step()
-                # self.iter == max_iter can be used by `after_train` to
-                # tell whether the training successfully finished or failed
-                # due to exceptions.
-                self.iter += 1
-            except Exception:
-                logger.exception("Exception during training:")
-                raise
-            finally:
-                self.after_train()
-
-    def before_train(self):
-        for h in self._hooks:
-            h.before_train()
-
-    def after_train(self):
-        self.storage.iter = self.iter
-        for h in self._hooks:
-            h.after_train()
-
-    def before_step(self):
-        # Maintain the invariant that storage.iter == trainer.iter
-        # for the entire execution of each step
-        self.storage.iter = self.iter
-
-        for h in self._hooks:
-            h.before_step()
-
-    def after_step(self):
-        for h in self._hooks:
-            h.after_step()
-
-    def run_step(self):
-        raise NotImplementedError
-
-    def state_dict(self):
-        ret = {"iteration": self.iter}
-        hooks_state = {}
-        for h in self._hooks:
-            sd = h.state_dict()
-            if sd:
-                name = type(h).__qualname__
-                if name in hooks_state:
-                    # TODO handle repetitive stateful hooks
-                    continue
-                hooks_state[name] = sd
-        if hooks_state:
-            ret["hooks"] = hooks_state
-        return ret
-
-    def load_state_dict(self, state_dict):
-        logger = logging.getLogger(__name__)
-        self.iter = state_dict["iteration"]
-        for key, value in state_dict.get("hooks", {}).items():
-            for h in self._hooks:
-                try:
-                    name = type(h).__qualname__
-                except AttributeError:
-                    continue
-                if name == key:
-                    h.load_state_dict(value)
-                    break
-            else:
-                logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
-
-
-class SimpleTrainer(TrainerBase):
-    """
-    A simple trainer for the most common type of task:
-    single-cost single-optimizer single-data-source iterative optimization,
-    optionally using data-parallelism.
-    It assumes that every step, you:
-
-    1. Compute the loss with a data from the data_loader.
-    2. Compute the gradients with the above loss.
-    3. Update the model with the optimizer.
-
-    All other tasks during training (checkpointing, logging, evaluation, LR schedule)
-    are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
-
-    If you want to do anything fancier than this,
-    either subclass TrainerBase and implement your own `run_step`,
-    or write your own training loop.
-    """
-
-    def __init__(self, model, data_loader, optimizer):
-        """
-        Args:
-            model: a torch Module. Takes a data from data_loader and returns a
-                dict of losses.
-            data_loader: an iterable. Contains data to be used to call model.
-            optimizer: a torch optimizer.
-        """
-        super().__init__()
-
-        """
-        We set the model to training mode in the trainer.
-        However it's valid to train a model that's in eval mode.
-        If you want your model (or a submodule of it) to behave
-        like evaluation during training, you can overwrite its train() method.
-        """
-        model.train()
-
-        self.model = model
-        self.data_loader = data_loader
-        self._data_loader_iter = iter(data_loader)
-        self.optimizer = optimizer
-
-    def run_step(self):
-        """
-        Implement the standard training logic described above.
-        """
-        assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
-        start = time.perf_counter()
-        """
-        If you want to do something with the data, you can wrap the dataloader.
-        """
-        data = next(self._data_loader_iter)
-        data_time = time.perf_counter() - start
-
-        """
-        If you want to do something with the losses, you can wrap the model.
-        """
-        loss_dict = self.model(data)
-        if isinstance(loss_dict, torch.Tensor):
-            losses = loss_dict
-            loss_dict = {"total_loss": loss_dict}
-        else:
-            losses = sum(loss_dict.values())
-
-        """
-        If you need to accumulate gradients or do something similar, you can
-        wrap the optimizer with your custom `zero_grad()` method.
-        """
-        self.optimizer.zero_grad()
-        losses.backward()
-
-        self._write_metrics(loss_dict, data_time)
-
-        """
-        If you need gradient clipping/scaling or other processing, you can
-        wrap the optimizer with your custom `step()` method. But it is
-        suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
-        """
-        self.optimizer.step()
-
-    def _write_metrics(
-        self,
-        loss_dict: Mapping[str, torch.Tensor],
-        data_time: float,
-        prefix: str = "",
-    ) -> None:
-        SimpleTrainer.write_metrics(loss_dict, data_time, prefix)
-
-    @staticmethod
-    def write_metrics(
-        loss_dict: Mapping[str, torch.Tensor],
-        data_time: float,
-        prefix: str = "",
-    ) -> None:
-        """
-        Args:
-            loss_dict (dict): dict of scalar losses
-            data_time (float): time taken by the dataloader iteration
-            prefix (str): prefix for logging keys
-        """
-        metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
-        metrics_dict["data_time"] = data_time
-
-        # Gather metrics among all workers for logging
-        # This assumes we do DDP-style training, which is currently the only
-        # supported method in detectron2.
-        all_metrics_dict = comm.gather(metrics_dict)
-
-        if comm.is_main_process():
-            storage = get_event_storage()
-
-            # data_time among workers can have high variance. The actual latency
-            # caused by data_time is the maximum among workers.
-            data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
-            storage.put_scalar("data_time", data_time)
-
-            # average the rest metrics
-            metrics_dict = {
-                k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
-            }
-            total_losses_reduced = sum(metrics_dict.values())
-            if not np.isfinite(total_losses_reduced):
-                raise FloatingPointError(
-                    f"Loss became infinite or NaN at iteration={storage.iter}!\n"
-                    f"loss_dict = {metrics_dict}"
-                )
-
-            storage.put_scalar("{}total_loss".format(prefix), total_losses_reduced)
-            if len(metrics_dict) > 1:
-                storage.put_scalars(**metrics_dict)
-
-    def state_dict(self):
-        ret = super().state_dict()
-        ret["optimizer"] = self.optimizer.state_dict()
-        return ret
-
-    def load_state_dict(self, state_dict):
-        super().load_state_dict(state_dict)
-        self.optimizer.load_state_dict(state_dict["optimizer"])
-
-
-class AMPTrainer(SimpleTrainer):
-    """
-    Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
-    in the training loop.
-    """
-
-    def __init__(self, model, data_loader, optimizer, grad_scaler=None):
-        """
-        Args:
-            model, data_loader, optimizer: same as in :class:`SimpleTrainer`.
-            grad_scaler: torch GradScaler to automatically scale gradients.
-        """
-        unsupported = "AMPTrainer does not support single-process multi-device training!"
-        if isinstance(model, DistributedDataParallel):
-            assert not (model.device_ids and len(model.device_ids) > 1), unsupported
-        assert not isinstance(model, DataParallel), unsupported
-
-        super().__init__(model, data_loader, optimizer)
-
-        if grad_scaler is None:
-            from torch.cuda.amp import GradScaler
-
-            grad_scaler = GradScaler()
-        self.grad_scaler = grad_scaler
-
-    def run_step(self):
-        """
-        Implement the AMP training logic.
-        """
-        assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
-        assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
-        from torch.cuda.amp import autocast
-
-        start = time.perf_counter()
-        data = next(self._data_loader_iter)
-        data_time = time.perf_counter() - start
-
-        with autocast():
-            loss_dict = self.model(data)
-            if isinstance(loss_dict, torch.Tensor):
-                losses = loss_dict
-                loss_dict = {"total_loss": loss_dict}
-            else:
-                losses = sum(loss_dict.values())
-
-        self.optimizer.zero_grad()
-        self.grad_scaler.scale(losses).backward()
-
-        self._write_metrics(loss_dict, data_time)
-
-        self.grad_scaler.step(self.optimizer)
-        self.grad_scaler.update()
-
-    def state_dict(self):
-        ret = super().state_dict()
-        ret["grad_scaler"] = self.grad_scaler.state_dict()
-        return ret
-
-    def load_state_dict(self, state_dict):
-        super().load_state_dict(state_dict)
-        self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
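For orientation, a minimal sketch of driving the `SimpleTrainer` above with toy components (assumes detectron2 is installed; the model and data here are stand-ins, not part of the original file):

```python
import torch
from torch import nn
from detectron2.engine.train_loop import SimpleTrainer

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 1)

    def forward(self, batch):
        # SimpleTrainer expects the model to return a dict of losses.
        x, y = batch
        return {"loss_mse": nn.functional.mse_loss(self.fc(x), y)}

data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(100)]
model = ToyModel()
trainer = SimpleTrainer(model, data, torch.optim.SGD(model.parameters(), lr=0.1))
trainer.train(0, 20)  # 20 iterations through the hook/EventStorage machinery above
```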
spaces/Awiny/Image2Paragraph/models/region_semantic.py
DELETED
@@ -1,61 +0,0 @@
-from models.segment_models.semgent_anything_model import SegmentAnything
-from models.segment_models.semantic_segment_anything_model import SemanticSegment
-from models.segment_models.edit_anything_model import EditAnything
-
-
-class RegionSemantic():
-    def __init__(self, device, image_caption_model, region_classify_model='edit_anything', sam_arch='vit_b'):
-        self.device = device
-        self.sam_arch = sam_arch
-        self.image_caption_model = image_caption_model
-        self.region_classify_model = region_classify_model
-        self.init_models()
-
-    def init_models(self):
-        self.segment_model = SegmentAnything(self.device, arch=self.sam_arch)
-        if self.region_classify_model == 'ssa':
-            self.semantic_segment_model = SemanticSegment(self.device)
-        elif self.region_classify_model == 'edit_anything':
-            self.edit_anything_model = EditAnything(self.image_caption_model)
-            print('initialize edit anything model')
-        else:
-            raise ValueError("semantic_class_model must be 'ssa' or 'edit_anything'")
-
-    def semantic_prompt_gen(self, anns, topk=5):
-        """
-        filter out objects that are too small or have a low stability score
-        anns: [{'class_name': 'person', 'bbox': [0.0, 0.0, 0.0, 0.0], 'size': [0, 0], 'stability_score': 0.0}, ...]
-        semantic_prompt: "person: [0.0, 0.0, 0.0, 0.0]; ..."
-        """
-        # Sort annotations by area in descending order
-        sorted_annotations = sorted(anns, key=lambda x: x['area'], reverse=True)
-        anns_len = len(sorted_annotations)
-        # Select the topk largest regions
-        top_10_largest_regions = sorted_annotations[:min(anns_len, topk)]
-        semantic_prompt = ""
-        for region in top_10_largest_regions:
-            semantic_prompt += region['class_name'] + ': ' + str(region['bbox']) + "; "
-        print(semantic_prompt)
-        print('\033[1;35m' + '*' * 100 + '\033[0m')
-        return semantic_prompt
-
-    def region_semantic(self, img_src, region_classify_model='edit_anything'):
-        print('\033[1;35m' + '*' * 100 + '\033[0m')
-        print("\nStep3, Semantic Prompt:")
-        print('extract region segmentation with SAM model....\n')
-        anns = self.segment_model.generate_mask(img_src)
-        print('finished...\n')
-        if region_classify_model == 'ssa':
-            print('generate region supervision with blip2 model....\n')
-            anns_w_class = self.semantic_segment_model.semantic_class_w_mask(img_src, anns)
-            print('finished...\n')
-        elif region_classify_model == 'edit_anything':
-            print('generate region supervision with edit anything model....\n')
-            anns_w_class = self.edit_anything_model.semantic_class_w_mask(img_src, anns)
-            print('finished...\n')
-        else:
-            raise ValueError("semantic_class_model must be 'ssa' or 'edit_anything'")
-        return self.semantic_prompt_gen(anns_w_class)
-
-    def region_semantic_debug(self, img_src):
-        return "region_semantic_debug"
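To illustrate `semantic_prompt_gen` above in isolation, a standalone sketch with made-up annotations (the dict fields mirror what the segmentation models emit; the values are invented):

```python
anns = [
    {"class_name": "person", "bbox": [10, 20, 110, 220], "area": 9000},
    {"class_name": "dog", "bbox": [200, 50, 260, 120], "area": 2500},
    {"class_name": "ball", "bbox": [300, 300, 320, 320], "area": 400},
]
# Same logic as the method: keep the topk largest regions, largest first.
top = sorted(anns, key=lambda x: x["area"], reverse=True)[:2]
prompt = "".join(f"{r['class_name']}: {r['bbox']}; " for r in top)
print(prompt)  # person: [10, 20, 110, 220]; dog: [200, 50, 260, 120];
```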
spaces/AzinZ/vitscn/monotonic_align/__init__.py
DELETED
@@ -1,20 +0,0 @@
-from numpy import zeros, int32, float32
-from torch import from_numpy
-
-from .core import maximum_path_jit
-
-
-def maximum_path(neg_cent, mask):
-    """ numba optimized version.
-    neg_cent: [b, t_t, t_s]
-    mask: [b, t_t, t_s]
-    """
-    device = neg_cent.device
-    dtype = neg_cent.dtype
-    neg_cent = neg_cent.data.cpu().numpy().astype(float32)
-    path = zeros(neg_cent.shape, dtype=int32)
-
-    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32)
-    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32)
-    maximum_path_jit(path, neg_cent, t_t_max, t_s_max)
-    return from_numpy(path).to(device=device, dtype=dtype)
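A quick usage sketch for the wrapper above (assumes the numba-compiled `core` module it imports is available; shapes follow the docstring):

```python
import torch
from monotonic_align import maximum_path

b, t_t, t_s = 2, 6, 10
neg_cent = torch.randn(b, t_t, t_s)  # alignment score grid
mask = torch.ones(b, t_t, t_s)       # all positions valid
path = maximum_path(neg_cent, mask)  # binary monotonic alignment, same shape
print(path.shape)                    # torch.Size([2, 6, 10])
```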
spaces/Bart92/RVC_HF/lib/uvr5_pack/lib_v5/layers_537227KB.py
DELETED
@@ -1,126 +0,0 @@
-import torch
-from torch import nn
-import torch.nn.functional as F
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-        super(Conv2DBNActiv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                nin,
-                nout,
-                kernel_size=ksize,
-                stride=stride,
-                padding=pad,
-                dilation=dilation,
-                bias=False,
-            ),
-            nn.BatchNorm2d(nout),
-            activ(),
-        )
-
-    def __call__(self, x):
-        return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-        super(SeperableConv2DBNActiv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                nin,
-                nin,
-                kernel_size=ksize,
-                stride=stride,
-                padding=pad,
-                dilation=dilation,
-                groups=nin,
-                bias=False,
-            ),
-            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
-            nn.BatchNorm2d(nout),
-            activ(),
-        )
-
-    def __call__(self, x):
-        return self.conv(x)
-
-
-class Encoder(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
-        super(Encoder, self).__init__()
-        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
-    def __call__(self, x):
-        skip = self.conv1(x)
-        h = self.conv2(skip)
-
-        return h, skip
-
-
-class Decoder(nn.Module):
-    def __init__(
-        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
-    ):
-        super(Decoder, self).__init__()
-        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-        self.dropout = nn.Dropout2d(0.1) if dropout else None
-
-    def __call__(self, x, skip=None):
-        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
-        if skip is not None:
-            skip = spec_utils.crop_center(skip, x)
-            x = torch.cat([x, skip], dim=1)
-        h = self.conv(x)
-
-        if self.dropout is not None:
-            h = self.dropout(h)
-
-        return h
-
-
-class ASPPModule(nn.Module):
-    def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
-        super(ASPPModule, self).__init__()
-        self.conv1 = nn.Sequential(
-            nn.AdaptiveAvgPool2d((1, None)),
-            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
-        )
-        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
-        self.conv3 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
-        )
-        self.conv4 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
-        )
-        self.conv5 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.conv6 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.conv7 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.bottleneck = nn.Sequential(
-            Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
-        )
-
-    def forward(self, x):
-        _, _, h, w = x.size()
-        feat1 = F.interpolate(
-            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
-        )
-        feat2 = self.conv2(x)
-        feat3 = self.conv3(x)
-        feat4 = self.conv4(x)
-        feat5 = self.conv5(x)
-        feat6 = self.conv6(x)
-        feat7 = self.conv7(x)
-        out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
-        bottle = self.bottleneck(out)
-        return bottle
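For shape intuition, a minimal forward pass through the removed blocks (a sketch: it assumes the file is importable as part of its package so that the sibling `spec_utils` module resolves):

```python
import torch
from lib.uvr5_pack.lib_v5.layers_537227KB import Encoder, Decoder, ASPPModule

x = torch.randn(1, 16, 64, 64)
enc = Encoder(16, 32, ksize=3, stride=2, pad=1)
h, skip = enc(x)                   # h: (1, 32, 32, 32); skip: (1, 32, 64, 64)
h = ASPPModule(32, 32)(h)          # multi-dilation context, spatial size unchanged
y = Decoder(32 + 32, 16)(h, skip)  # upsample x2, concat the skip, then conv
print(y.shape)                     # torch.Size([1, 16, 64, 64])
```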
spaces/Belligerent/word-sense-disambiguation/app.py
DELETED
@@ -1,30 +0,0 @@
-import gradio as gr
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, Text2TextGenerationPipeline
-
-pipe = Text2TextGenerationPipeline(model = AutoModelForSeq2SeqLM.from_pretrained("jpelhaw/t5-word-sense-disambiguation"),
-                                   tokenizer = AutoTokenizer.from_pretrained("jpelhaw/t5-word-sense-disambiguation"))
-
-def wsd_gen(word, context, d1, d2, d3):
-    question = 'question: question: which description describes the word' + ' " ' + word + ' " '
-    descriptions_context = 'best in the following context? \descriptions:[ " ' + d1 + '" , " ' + d2 + ' " , or " '+ d3 + ' " ] context: ' + context + "'"
-    raw_input = question + descriptions_context
-    output = pipe(raw_input)[0]['generated_text']
-    return output
-
-examples = [["beat", 'The underdog team "beat" the reigning champion.', " A main accent or rhythmic unit in music or poetry. ", " To strike repeatedly and violently so as to hurt or injure.", " To defeat (someone) in a game or other competitive situation. "], ["shell", 'The first "shell" exploded in mid air taking out an enemy plane.', "The hard protective outer case of a mollusk or crustacean.", "An explosive artillery projectile or bomb.", "Something resembling or likened to a shell because of its shape or its function as an outer case."]]
-
-word_mask = gr.inputs.Textbox(lines=1, placeholder= "Enter word to disambiguate", default="", label = "Based on the context, which description best matches this word: ")
-input_context = gr.inputs.Textbox(lines=1, placeholder="Enter context", default="", label = "context: ")
-input_desc1 = gr.inputs.Textbox(lines=1, placeholder="Enter description", default="", label = "description 1: ")
-input_desc2 = gr.inputs.Textbox(lines=1, placeholder="Enter description", default="", label = "description 2: ")
-input_desc3 = gr.inputs.Textbox(lines=1, placeholder="Enter description", default="", label = "description 3: ")
-
-gr.Interface(wsd_gen,
-             inputs = [word_mask, input_context, input_desc1, input_desc2, input_desc3],
-             outputs= "textbox",
-             examples = examples,
-             title = "T5-Word Sense Disambiguation",
-             description = "Determines which 'sense' (meaning) of a word is activated by the use of the word in a particular context given three different descriptions.",
-             theme = "seafoam",
-             article = "This is an implementation of Google's T5-large model applied to Word Sense Disambiguation (WSD) and trained on the SemCor dataset. the SemCor dataset is a corpus made up of 352 documents for a total of 226,040 manually sense-annotated annotations used specifically used to train supervised WSD systems. The model used in this spaces was uploaded by Jan Philip Wahle (jpelhaw) in huggingface.",
-             allow_flagging="never").launch(inbrowser=True)
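The same pipeline can be exercised without the Gradio UI; a minimal sketch (downloads the jpelhaw/t5-word-sense-disambiguation checkpoint on first use; the prompt below is adapted from `wsd_gen` above and lightly cleaned up):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, Text2TextGenerationPipeline

name = "jpelhaw/t5-word-sense-disambiguation"
pipe = Text2TextGenerationPipeline(
    model=AutoModelForSeq2SeqLM.from_pretrained(name),
    tokenizer=AutoTokenizer.from_pretrained(name),
)
prompt = ('question: which description describes the word " beat " best in the '
          'following context? descriptions: [ " a rhythmic unit in music " , '
          '" to strike repeatedly " , " to defeat in a game " ] '
          'context: The underdog team beat the reigning champion.')
print(pipe(prompt)[0]["generated_text"])
```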
spaces/Benson/text-generation/Examples/Blxckie Ronda Mp4 Download.md
DELETED
@@ -1,132 +0,0 @@
-
-<h1>Blxckie Ronda MP4 Download: Everything You Need to Know</h1>
-<p>If you are a fan of South African hip hop, you may have heard of the blxckie ronda mp4 download. It is a popular video download option for the song Ronda by Blxckie, one of the most promising new-era SA Hip Hop rappers from Durban. In this article, we will tell you everything you need to know about the blxckie ronda mp4 download, including who Blxckie is, what Ronda means, why the MP4 format is ideal for videos, and how to download MP4 videos from any website for free.</p>
-<h2>Who is Blxckie and what is his background?</h2>
-<p>Blxckie, whose real name is Sihle Sithole, was born on November 24, 1999 in Sydenham Heights, Durban. He began making music at the age of 8 with his friends and enrolled at the University of KwaZulu-Natal for a degree in Psychology. However, he dropped out due to the COVID-19 pandemic and focused on his music career.</p>
-<h2>blxckie ronda mp4 download</h2><br /><p><b><b>DOWNLOAD</b> ⚙ <a href="https://bltlly.com/2v6MMo">https://bltlly.com/2v6MMo</a></b></p><br /><br />
-<p>Blxckie rose to fame in 2020 when he released several songs on SoundCloud and collaborated with other artists such as Nasty C, LucasRaps, FLVME, Rowlene and LeoDaleo. He also became the first South African artist to be named Up Next by Apple Music, in March 2021.</p>
-<p>His debut album B4Now was released on May 21, 2021 and was certified gold in South Africa. It features his hit singles David and Ye 4, which were certified gold and double platinum respectively.</p>
-<h2>What is Ronda and what does it mean?</h2>
-<p>Ronda is one of the songs on Blxckie's album B4Now. It was released as a single on April 30, 2021 along with an official music video.</p>
-<p>The song is about Blxckie's confidence and ambition as a rapper. He uses the word Ronda, which means round or circle in Spanish, to refer to his success and dominance in the music industry. He also compares himself to Ronda Rousey, a famous American mixed martial artist and former UFC champion.</p>
-<p>The chorus of the song goes like this:</p>
-<blockquote>
-
-I'm going round like Ronda<br>
-I'm going round like Ronda<br>
-I'm going round like Ronda<br>
-I'm going round like Ronda<br>
-I'm going round like Ronda<br>
-I'm going round like Ronda<br>
-I'm going round like Ronda</p>
-</blockquote>
-<h2>What are the advantages of the MP4 format for videos?</h2>
-<p>MP4 is one of the most common media formats for streaming and downloading video from the Internet. It has many advantages over other formats such as AVI or MKV. Some of them are:</p>
-<p></p>
-<ul>
-<li>It can be used across multiple platforms, which makes it easy to use and distribute.</li>
-<li>It has a high degree of compression, which results in smaller file sizes and faster loading times.</li>
-<li>It can store data types other than video and audio, such as subtitles, images, metadata and interactive features.</li>
-<li>It has high-quality output that can support resolutions of up to 4K.</li>
-</ul>
-<h2>How to download MP4 videos from any website for free?</h2>
-<p>If you want to download blxckie ronda mp4 or any other MP4 video from any website for free, you can use one of the following methods:</p>
-<ol>
-<li>Use an online video download tool. There are many websites that offer this service, such as Y2Mate, SaveFrom and OnlineVideoConverter. All you need to do is copy and paste the URL of the video you want to download, choose the MP4 format, and click the download button.</li>
-<li>Use a browser extension or add-on. Some browsers, such as Chrome and Firefox, have extensions or add-ons that can help you download MP4 videos from any website. For example, Video DownloadHelper, Video Downloader Professional and Flash Video Downloader. You can install them from the browser's web store and use them to download videos with a single click.</li>
-
-</ol>
-<h2>Conclusion</h2>
-<p>The blxckie ronda mp4 download is a great way to enjoy the song Ronda by Blxckie, one of South Africa's hottest hip hop artists right now. In this article you can learn more about Blxckie's background, the meaning of Ronda, the benefits of the MP4 format, and how to download MP4 videos from any website for free. We hope you find it useful and informative.</p>
-<p>If you liked this article, please share it with your friends and family who are also fans of Blxckie and South African hip hop. You can also leave a comment below and let us know what you think of the blxckie ronda mp4 download. Thanks for reading!</p>
-<h2>Frequently asked questions</h2>
-<h3>What is the best website to download blxckie ronda mp4?</h3>
-<p>There is no definitive answer to this question, since different websites may have different features and qualities. However, some of the factors you can consider when choosing a website to download blxckie ronda mp4 are:</p>
-<ul>
-<li>The speed and reliability of the download process.</li>
-<li>The quality and resolution of the video.</li>
-<li>The safety and privacy of the website.</li>
-<li>The availability and compatibility of the website.</li>
-</ul>
-<p>You can try different websites and see which one works best for you.</p>
-<h3>How can I convert blxckie ronda mp4 to mp3?</h3>
-<p>If you want to convert blxckie ronda mp4 to mp3, which is an audio format, you can use one of the following methods:</p>
-<ol>
-<li>Use an online video conversion tool. There are many websites that offer this service, such as OnlineVideoConverter, Convert2MP3 and CloudConvert. All you need to do is upload the blxckie ronda mp4 file or paste its URL, choose the mp3 format, and click the convert button.</li>
-
-</ol>
-<h3>How can I watch blxckie ronda mp4 on my TV?</h3>
-<p>If you want to watch blxckie ronda mp4 on your TV, you can use one of the following methods:</p>
-<ol>
-<li>Use an HDMI cable. You can connect the computer or mobile device that has the blxckie ronda mp4 file, or that can access its URL, to your TV with an HDMI cable. Then you can select the HDMI input on your TV and play the video on your device.</li>
-<li>Use a streaming device. You can use a device that streams online video from your computer or mobile device to your TV over Wi-Fi or Bluetooth, for example Chromecast, Roku, Apple TV or Fire TV Stick. You can set the device up according to its instructions and use it to cast or mirror the video to your TV.</li>
-</ol>
-<h3>Is blxckie ronda mp4 legal?</h3>
-<p>The legality of blxckie ronda mp4 depends on several factors, such as:</p>
-<ul>
-<li>The source and ownership of the video. If the video is uploaded by Blxckie or his official channel, or if he has given other channels or websites permission to share his video, then it is legal to download and watch it. However, if the video is uploaded by someone who does not hold the rights to it, or if it violates Blxckie's intellectual property rights, then it is illegal to download and watch it.</li>
-<li>The purpose and use of the video. If you download and watch the video for personal, non-commercial use, such as entertainment or education, it is usually legal to do so. However, if you download and watch the video for commercial or malicious use, such as making money or damaging Blxckie's reputation, then it is illegal to do so.</li>
-
-</ul>
-<p>Therefore, blxckie ronda mp4 can be legal or illegal depending on these factors. You should exercise care and discretion when downloading and watching blxckie ronda mp4.</p>
-<h3>What are some other Blxckie songs I can download?</h3>
-<p>If you like blxckie ronda mp4, you may also like other Blxckie songs that you can download. Here are some of his most popular songs, which you can find on various websites and platforms:</p>
-<table>
-<tr>
-<th>Song</th>
-<th>Album</th>
-<th>Release date</th>
-</tr>
-<tr>
-<td>David</td>
-<td>B4Now</td>
-<td>May 21, 2021</td>
-</tr>
-<tr>
-<td>Ye 4</td>
-<td>B4Now</td>
-<td>May 21, 2021</td>
-</tr>
-<tr>
-<td>Gran Sh'lappa</td>
-<td>B4Now</td>
-<td>May 21, 2021</td>
-</tr>
-<tr>
-<td>Rayas</td>
-<td>B4Now</td>
-<td>May 21, 2021</td>
-</tr>
-<tr>
-<td>Mantener</td>
-<td>B4Now</td>
-<td>May 21, 2021</td>
-</tr>
-<tr>
-<td>Ladrido Hond</td>
-<td>Ladrido Hond - Single</td>
-<td>June 11, 2021</td>
-</tr>
-<tr>
-<td>Salsa</td>
-<td>Salsa - Single</td>
-<td>June 18, 2021</td>
-</tr> <tr>
-<td>Gas</td>
-<td>Gas - Single</td>
-<td>June 25, 2021</td>
-</tr>
-<tr>
-<td>Steppin</td>
-<td>Steppin - Single</td>
-<td>July 2, 2021</td>
-</tr>
-<tr>
-<td>Uppity</td>
-<td>Uppity - Single</td>
-<td>July 9, 2021</td>
-</tr>
-</table>
-<p>You can also check Blxckie's official website, YouTube channel, Instagram, Twitter and Facebook for more updates and information about his music and career.</p> 64aa2da5cf<br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Camin Simulador ltima Piel Del Camin.md
DELETED
@@ -1,50 +0,0 @@
<h1>Truck Simulator Ultimate Truck Skin Download: How to Change Your Truck's Appearance and Have More Fun</h1>
<p>If you are a fan of simulation games, you may have heard of Truck Simulator Ultimate, a game that lets you drive various trucks across different countries and cities. The game is developed by Zuuks Games, the same company that produced Bus Simulator Ultimate, which has more than 300 million players worldwide. Truck Simulator Ultimate combines simulation and tycoon elements, letting you not only drive your truck but also manage your own business, hire employees, expand your fleet, and take part in auctions and races.</p>
<h2>truck simulator ultimate truck skin</h2><br /><p><b><b>Download File</b> --->>> <a href="https://bltlly.com/2v6Jim">https://bltlly.com/2v6Jim</a></b></p><br /><br />
<p>One of the most enjoyable features of Truck Simulator Ultimate is that you can customize your trucks with different skins, which are essentially different designs and colors for your truck's exterior. Skins can make your truck look more realistic, stylish, or unique, depending on your preference. You can choose from officially licensed Mercedes-Benz trucks or other brands such as BMW, Ford, DAF, MAN, Volvo, and more. You can also find skins inspired by famous companies, countries, gas stations, or even movies and cartoons.</p>
<p>In this article, we will show you how to install truck skins in Truck Simulator Ultimate, and which truck skins are among the best for this game. By following these simple steps, you can change your truck's appearance and have more fun driving it.</p>
<h2>How to Install Truck Skins in Truck Simulator Ultimate</h2>
<p>There are two ways to install truck skins in Truck Simulator Ultimate: download them from the app store or the web, or copy their URL and paste it into the game settings. Here is how to do both:</p>
<h3>Download skins from the app store or the web</h3>
<p>If you are using an iOS device, or if you want to find more skins online, you can visit websites that offer mods for Truck Simulator Ultimate. Mods are modifications that add new features or content to the game. One of the most popular websites for mods is TSU Mods, which has more than 30 mods for different trucks, cars, police vehicles, ambulances, trailers, and more. You can also find other websites by searching for "truck simulator ultimate mod" on the web.</p>
<h3>Copy the skin URL and paste it into the game settings</h3>
<p>Once you have downloaded a skin app or a mod file, you need to copy its URL (the web address that starts with http:// or https://) and paste it into the game settings. To do this, follow these steps:</p>
<ol>
<li>Open Truck Simulator Ultimate and tap the menu icon in the top-left corner.</li>
<li>Tap Settings, then DLC Mods.</li>
<li>Tap Add Mod URL and paste the URL of the skin you want to use.</li>
<li>Tap Save Mod URL, then Apply Mods.</li>
<li>Go back to the main menu and tap Garage.</li>
<li>Select your truck and tap Customize.</li>
<li>Tap Skins and choose the skin you installed.</li>
<li>Tap Apply and enjoy your truck's new skin.</li>
</ol>
<p>That's it! You have successfully installed a truck skin in Truck Simulator Ultimate. You can repeat these steps for any other skin you want to use.</p>
<h2>The Best Truck Skins for Truck Simulator Ultimate</h2>
<p>Now that you know how to install truck skins in Truck Simulator Ultimate, you may be wondering which truck skins are among the best for this game. Of course, this depends on your personal taste and preference, but here are some of our recommendations:</p>
<h3>Licensed Mercedes-Benz trucks with realistic details</h3>
<h3>BMW F90 M5 2020 with sporty design and performance</h3>
<p>If you are looking for speed and style, you may want to try the BMW F90 M5 2020 skin, a mod that replaces the game's original BMW car with a more powerful and sleek version. The BMW F90 M5 2020 is a high-performance sedan with a sporty design and a twin-turbocharged V8 engine that can reach up to 305 km/h. The skin also has realistic features such as headlights, taillights, exhausts, spoilers, and rims. You can find this skin on TSU Mods.</p>
<h3>TOFAŞ Şahin with classic Turkish style and nostalgia</h3>
<p>If you are looking for nostalgia and fun, you may want to try the TOFAŞ Şahin skin, a mod that replaces the game's original Fiat car with a classic Turkish car that was popular in the 1980s and 1990s. The TOFAŞ Şahin is a compact sedan with a simple but charming design and a loyal fan base in Turkey. The skin also has realistic features such as license plates, stickers, bumpers, and horns. You can find this skin on TSU Mods.</p>
<h2>Conclusion</h2>
<p>Truck Simulator Ultimate is a game that offers plenty of fun and excitement for simulation lovers. One way to enhance your gaming experience is to use truck skins, which are different designs and colors for your truck's exterior. Truck skins can make your truck look more realistic, stylish, or unique, depending on your preference.</p>
<p>In this article, we showed you how to install truck skins in Truck Simulator Ultimate by downloading them from the app store or the web, or by copying their URL and pasting it into the game settings. We also gave you some examples of the best truck skins for Truck Simulator Ultimate, such as licensed Mercedes-Benz trucks, the BMW F90 M5 2020, and the TOFAŞ Şahin.</p>
<h2>FAQs</h2>
<p>Here are some of the most frequently asked questions about Truck Simulator Ultimate:</p>
<h4>What are the system requirements for Truck Simulator Ultimate?</h4>
<p>The minimum system requirements for Truck Simulator Ultimate are: Android: Android 7.0 or higher, 3 GB of RAM, 1 GB of free space; iOS: iOS 11 or higher, iPhone 6S or better, iPad Air 2 or better, iPad Mini 4 or better, iPod Touch (7th generation) or better, 1 GB of free space. The recommended system requirements are: Android: Android 9.0 or higher, 4 GB of RAM, 2 GB of free space; iOS: iOS 13 or higher, iPhone X or better, iPad Pro (2017) or better, iPad Air (2019) or better, iPad Mini (2019) or better, iPod Touch (7th generation) or better, 2 GB of free space.</p>
<h4>How do I join multiplayer mode and races?</h4>
<p>To take part in multiplayer mode and races in Truck Simulator Ultimate, you need an internet connection and a Zuuks account. You can create a Zuuks account by tapping the menu icon in the top-left corner, then tapping Profile, then Register. You can also sign in with your Facebook or Google account. Once you have a Zuuks account, you can join or create multiplayer rooms and races by tapping the menu icon, then Multiplayer, and then choosing the option you want. You can also invite your friends to play with you by tapping the Invite Friends button.</p>
<h4>How do I manage my own business and fleet in the game?</h4>
<h4>How do I customize my trucks with other accessories and modifications?</h4>
<p>To customize your trucks with other accessories and modifications in Truck Simulator Ultimate, you need enough money and reputation. You can earn money and reputation by completing deliveries, taking part in auctions and races, and fulfilling contracts. You can also spend real money to buy coins or diamonds, the game's premium currencies. Once you have enough money and reputation, you can customize your trucks by tapping the menu icon, then Garage, then selecting your truck and tapping Customize. You can change various aspects of your truck, such as the engine, transmission, suspension, brakes, tires, rims, lights, horns, mirrors, spoilers, exhausts, paint, stickers, and more.</p>
<h4>How do I contact the developers with suggestions and complaints?</h4>
<p>To contact the developers of Truck Simulator Ultimate with suggestions and complaints, you can use one of the following channels: Email: [email protected] - Facebook: https://www.facebook.com/zuuks.games - Instagram: https://www.instagram.com/zuuksgames - Twitter: https://twitter.com/ZuuksGames - YouTube: https://www.youtube.com/channel/UCSZ5daJft7LuWzSyjdp_8HA. The developers are always open to feedback and suggestions from their players. They also update the game regularly with new features and improvements.</p>
spaces/Benson/text-generation/Examples/Carretes Descargar Instagram Mp3.md
DELETED
@@ -1,140 +0,0 @@
<h1>How to Download Instagram Reel Audio as MP3</h1>
<p>Instagram Reels are short, fun, and engaging videos that you can create and share in the app. They are a great way to show off your creativity, personality, and talent. But sometimes you come across a Reel with an amazing audio clip that you want to download and use for your own videos or other purposes. How do you do that?</p>
<p>In this article, we will show you how to download Instagram Reel audio as MP3 using different methods and tools. We will also explain how to save Reel audio clips for later use in the app. Whether you want to download a catchy song, a funny sound effect, or a trending voiceover, we have you covered.</p>
<h2>reels download instagram mp3</h2><br /><p><b><b>Download</b> ✶✶✶ <a href="https://bltlly.com/2v6Lrr">https://bltlly.com/2v6Lrr</a></b></p><br /><br />
<h2>Can You Download Audio from Instagram Reels?</h2>
<p>The short answer is yes, but not directly from the app. Instagram does not have a built-in feature that lets you download or save a Reel's audio. However, there are some unofficial ways to do it using third-party tools or apps.</p>
<p>These methods involve copying the Reel's link and pasting it into a website or an app that can extract the audio file from the video. Alternatively, you can also save a Reel's audio to your Instagram account and use it later for your own videos.</p>
<p>However, before you download or save any Reel audio, make sure to respect the original creator's rights and permissions. Do not use their audio without crediting them or asking for their consent. Also, do not violate any copyright laws or Instagram's terms of service.</p>
<h2>How to Download Instagram Reel Audio Using Third-Party Tools</h2>
<p>One way to download Instagram Reel audio as MP3 is to use a third-party website that can convert the video link into an audio file. There are many such websites available online, but we will show you four of them that are free and easy to use. (If you would rather script the conversion yourself, a Python sketch follows.)</p>
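<p>For readers comfortable with Python, the same link-to-MP3 conversion can be scripted. The sketch below is a generic example and not part of any of the websites above: it assumes the open-source yt-dlp package (<code>pip install yt-dlp</code>) and FFmpeg are installed, the Reel URL is a placeholder, and some posts may require login cookies or stop working if Instagram changes its site.</p>
<pre><code>
# Sketch: convert a copied Reel link to an MP3 file with yt-dlp.
# Assumes `pip install yt-dlp` and FFmpeg on the PATH; the URL is a placeholder.
import yt_dlp

REEL_URL = "https://www.instagram.com/reel/EXAMPLE_ID/"  # hypothetical link

options = {
    "format": "bestaudio/best",       # pick the best available audio stream
    "outtmpl": "%(id)s.%(ext)s",      # name the output after the post id
    "postprocessors": [{
        "key": "FFmpegExtractAudio",  # re-encode the downloaded stream to MP3
        "preferredcodec": "mp3",
        "preferredquality": "192",
    }],
}

with yt_dlp.YoutubeDL(options) as ydl:
    ydl.download([REEL_URL])
</code></pre>
<p>As with the websites, only use this on audio you have permission to reuse.</p>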
<h3>ReelSave.App</h3>
<h4>Steps to follow:</h4>
<ol>
<li>Choose the Reel audio you want to download and tap the share icon on the right side. It looks like a paper plane.</li>
<li>Tap the Copy Link option at the bottom of the pop-up screen.</li>
<li>Go to ReelSave.App in your browser and paste the link into the box.</li>
<li>Tap Download and wait for the website to process your request.</li>
<li>Tap Download MP3 and save the file to your device.</li>
</ol>
<h3>ReelsDownloader.io</h3>
<p>This is another website that can help you download Instagram Reel audio as MP3 with ease. It works much like ReelSave.App, but it has some extra features you may find useful.</p>
<h4>Steps to follow:</h4>
<ol>
<li>Choose the Reel audio you want to download and tap the share icon on the right side. It looks like a paper plane.</li>
<li>Tap the Copy Link option at the bottom of the pop-up screen.</li>
<li>Go to ReelsDownloader.io in your browser and paste the link into the box.</li>
<li>Tap Download and wait for the website to fetch the Reel.</li>
<li>Tap Download MP3 and save the file to your device.</li>
</ol>
<p>Some of this website's extra features are:</p>
<ul>
<li>You can preview the Reel video before downloading it.</li>
<li>You can also download the Reel video as MP4 if you want.</li>
<li>You can see the name and profile picture of the Reel's creator.</li>
</ul>
<h3>Insta.SaveTube.Me</h3>
<p>This is another website that can help you download Instagram Reel audio as MP3 in a few clicks. It is similar to ReelsDownloader.io, but it has a different interface and layout.</p>
<h4>Steps to follow:</h4>
<ol>
<li>Choose the Reel audio you want to download and tap the share icon on the right side. It looks like a paper plane.</li>
<li>Tap the Copy Link option at the bottom of the pop-up screen.</li>
<li>Go to Insta.SaveTube.Me in your browser and paste the link into the box.</li>
<li>Tap Download and wait for the website to load the Reel.</li>
<li>Tap Download MP3 and save the file to your device.</li>
</ol>
<h3>Reel Saver</h3>
<p>This is a Chrome extension that can help you download Instagram Reel audio as MP3 directly from your browser. It is convenient and easy to use, but you need to install it before using it.</p>
<h4>Steps to follow:</h4>
<ol>
<li>Go to Reel Saver in the Chrome Web Store and click Add to Chrome.</li>
<li>Confirm the installation by clicking Add Extension.</li>
<li>Go to Instagram.com in your browser and log in to your account.</li>
<li>Choose the Reel audio you want to download and click it to open it in full screen.</li>
<li>Click the Reel Saver icon in the top-right corner of your browser. It looks like a blue circle with a white arrow inside.</li>
<li>Select Download MP3 and save the file to your device.</li>
</ol>
<h2>How to Download Instagram Reel Audio Using Apps</h2>
<p>If you prefer to use apps instead of websites or extensions, there are some options for you too. Here are two apps that can help you download Instagram Reel audio as MP3 on your mobile device. Both are free and available for Android and iOS users.</p>
<h3>InShot Video Editor</h3>
<p>This is a popular video editing app that can also help you download Instagram Reel audio as MP3. It has many features and tools you can use to create great videos, but here we will focus on how to use it to download Reel audio clips.</p>
<h4>Steps to follow:</h4>
<ol>
<li>Choose the Reel audio you want to download and tap the share icon on the right side. It looks like a paper plane.</li>
<li>Tap Copy Link at the bottom of the pop-up screen.</li>
<li>Open InShot Video Editor on your device and tap Video in the bottom-left corner.</li>
<li>Tap New in the top-right corner and select Instagram from the list of sources.</li>
<li>Paste the Reel link into the box and tap OK.</li>
<li>The app will save the video file to your device. Tap Done in the bottom-right corner and go back to the app's main screen.</li>
<li>Tap Music in the bottom-left corner and select My Music from the list of options.</li>
<li>Find and select the video file you just saved and tap Use.</li>
<li>The app will extract the audio from the video and add it to your editor. Tap Save in the top-right corner and select Export MP3.</li>
<li>The app will save the audio file to your device. Tap Done in the bottom-right corner and exit the app.</li>
</ol>
<h3>Video to MP3 Converter</h3>
<p>This is a simple, straightforward app that can help you download Instagram Reel audio as MP3. It has no extra features or tools, but it does its job well and quickly.</p>
<h4>Steps to follow:</h4>
<ol>
<li>Choose the Reel audio you want to download and tap the share icon on the right side. It looks like a paper plane.</li>
<li>Tap Copy Link at the bottom of the pop-up screen.</li>
<li>Open Video to MP3 Converter on your device and tap Paste URL at the top of the screen.</li>
<li>Paste the Reel link into the box and tap Convert.</li>
<li>The app will download and convert the Reel video into an audio file. Tap Download at the bottom of the screen and save the file to your device.</li>
</ol>
<h2>How to Save Instagram Reels Audio for Later Use in the App</h2>
<p>If you do not want to download Instagram Reel audio as MP3 but want to use it later for your own videos in the app, there is a way to do that. Instagram has a feature that lets you save Reel audio clips to your account and access them whenever you want.</p>
<h4>Steps to follow:</h4>
<ol>
<li>Choose the Reel audio you want to save and tap it to open it in full screen.</li>
<li>Tap the audio name at the bottom of the screen. It looks like a music note with some text next to it.</li>
<li>The app will save the audio clip to your account. You can find it in the Saved section under Audio.</li>
<li>To use it for your own videos, tap Create Reel at the bottom of the screen. It looks like a camera icon with a plus sign.</li>
<li>Tap Audio in the top-left corner of the screen and select Saved from the list of options.</li>
<li>Find and select the audio clip you saved and start recording your video with it.</li>
</ol>
<h2>Conclusion</h2>
<p>In this article, we showed you how to download Instagram Reel audio as MP3 using different methods and tools. We also explained how to save Reel audio clips for later use in the app. We hope you found this article useful and informative. If you have any questions or comments, let us know in the comments below.</p>
<h2>FAQs</h2>
<h3>How do I find the original audio of an Instagram Reel?</h3>
<p>If you want to know where an Instagram Reel's audio comes from, you can tap the audio name at the bottom of the screen. It will take you to a page where you can see all the videos that use that audio clip. You can also see who created or uploaded the original audio by tapping their profile picture or name.</p>
<h3>How do I create my own audio for Instagram Reels?</h3>
<p>If you want to create your own audio for Instagram Reels, you can use any sound recording app or device that can produce an MP3 file. You can also use any music or sound effects you have on your device or online. Once your audio file is ready, you can upload it to Instagram by following these steps:</p>
<ol>
<li>Tap Create Reel at the bottom of the screen. It looks like a camera icon with a plus sign.</li>
<li>Tap Audio in the top-left corner of the screen and select Browse from the list of options.</li>
<li>Tap the Upload icon in the top-right corner of the screen. It looks like a square with an arrow pointing up.</li>
<li>Select the audio file you want to upload from your device.</li>
<li>Wait for the app to process and upload your audio file.</li>
<li>Start recording your video with your own audio.</li>
</ol>
<h3>How do I edit the audio of an Instagram Reel?</h3>
<p>If you want to edit the audio of an Instagram Reel, you can use the app's built-in tools or any external app that can edit audio files. Here are some of the things you can do with the built-in tools:</p>
<ul>
<li>You can trim or cut the audio clip to fit your video length by dragging the slider at the bottom of the screen.</li>
<li>You can adjust the volume of the audio clip by tapping Volume in the top-right corner of the screen and moving the slider up or down.</li>
<li>You can mix the audio clip with your original sound by tapping Mix Audio in the top-right corner of the screen and moving the slider left or right.</li>
</ul>
<h3>How do I share an Instagram Reel with a specific audio?</h3>
<p>If you want to share an Instagram Reel with a specific audio, you can use the Share Audio option in the app. This lets you send a direct message to anyone on Instagram with a link to your Reel and its audio. Here are the steps to follow:</p>
<ol>
<li>Choose the Reel you want to share and tap it to open it in full screen.</li>
<li>Tap the share icon on the right side. It looks like a paper plane.</li>
<li>Tap Share Audio at the bottom of the pop-up screen.</li>
<li>Select who you want to send it to from your contacts, or search for someone on Instagram.</li>
<li>Add a message if you want and tap Send.</li>
</ol>
<h3>How do I mute the audio of an Instagram Reel?</h3>
<p>If you want to mute the audio of an Instagram Reel, you can use the mute button in the app. This lets you watch the video without any sound. Here are the steps to follow:</p>
<ol>
<li>Choose the Reel you want to mute and tap it to open it in full screen.</li>
<li>Tap the sound (speaker) icon to mute the audio.</li>
<li>The app will mute the audio of that Reel and any other Reels you watch after that.</li>
<li>To unmute, tap the mute button again. It will look like a speaker with sound waves coming out of it.</li>
</ol>
spaces/Benson/text-generation/Examples/Carx Street Android Hack Apk.md
DELETED
@@ -1,60 +0,0 @@
<h1>CarX Street Android Hack APK: How to Get Unlimited Money and Unlock All Cars</h1>
<p>If you are a fan of realistic racing games, you may have heard of CarX Street, a simulation game that offers stunning graphics, physics, and customization. CarX Street lets you explore a large open world with different types of maps, from busy city streets to winding mountain roads and coastal highways. You can also choose from a variety of cars, from classic muscle cars to modern supercars, and tune them to your liking. You can race against other players in real network races, or join clubs and challenge bosses.</p>
<p>However, as fun as CarX Street is, it can also be frustrating if you do not have enough money to buy new cars or parts, or if you want to unlock all the cars and modes in the game. That is why some players look for a hack APK for CarX Street, a modified version of the game that gives you unlimited money, unlocks all cars and modes, and lets you customize the game settings. With a hack APK, you can enjoy CarX Street without limits or restrictions.</p>
<h2>carx street android hack apk</h2><br /><p><b><b>DOWNLOAD</b> ►►►►► <a href="https://bltlly.com/2v6Myp">https://bltlly.com/2v6Myp</a></b></p><br /><br />
<p>But before downloading and installing a hack APK for CarX Street, you should be aware of the benefits and risks of using one. In this article, we will show you how to find, install, and use a hack APK for CarX Street, along with some features and tips for the game. Read on to learn more.</p>
<h2>How to Download and Install the CarX Street Hack APK</h2>
<p>The first step in using a hack APK for CarX Street is finding a reliable source for it. There are many websites that claim to offer hack APKs for various games, but not all of them are trustworthy. Some may contain viruses, malware, or spyware that can harm your device or steal your personal information. Some may also provide fake or outdated versions of the hack APK that do not work or cause problems with the game.</p>
<p>Once you have found a reliable source for the CarX Street hack APK, you need to enable Unknown Sources on your Android device, because Android devices normally do not let you install apps from sources other than the Google Play Store. To enable unknown sources, go to Settings > Security > Unknown Sources and turn it on. You may also need to grant some permissions to the hack APK while installing it.</p>
<p>After enabling unknown sources, you can download and install the CarX Street hack APK by following these steps:</p>
<ol>
<li>Download the hack APK file from the source you have chosen.</li>
<li>Locate the file in your device's storage and tap it.</li>
<li>Follow the on-screen instructions to install the hack APK.</li>
<li>Launch the game and enjoy.</li>
</ol>
<p>You can verify that the hack APK is working by checking whether you have unlimited money and all cars and modes unlocked in the game. You can also access the mod menu by tapping the icon in the top-left corner of the screen. The mod menu lets you customize the game settings, such as speed, acceleration, handling, gravity, damage, and more. You can also enable or disable some features, such as nitro, drift, traffic, and police.</p>
<h2>How to Use the CarX Street Hack APK</h2>
<p>Now that you have installed the hack APK for CarX Street, you can use it to enjoy the game without limits or restrictions. Here are some of the things you can do with it:</p>
<ul>
<li>Get unlimited money and buy any car or part you want. You can access the shop from the main menu and browse the different categories of cars and parts. You can buy any car or part you like without worrying about the price. You can also upgrade your cars and parts to improve their performance and appearance.</li>
<li>Customize the game settings to your preference. You can access the mod menu from the top-left corner of the screen and adjust the game settings to your liking. You can change your car's speed, acceleration, handling, gravity, damage, and more. You can also enable or disable some features, such as nitro, drift, traffic, and police. You can experiment with different settings and see how they affect your gameplay.</li>
</ul>
<h2>CarX Street Game Features and Tips</h2>
<p>CarX Street is a simulation game that offers realistic graphics, physics, and customization. It is one of the most popular racing games on Android devices. Here are some of CarX Street's main features:</p>
<ul>
<li>Stunning graphics and sound effects. CarX Street uses advanced graphics technology to create realistic visual and sound effects. You can see the details of your car, the environment, and the weather. You can also hear the engine sound, the tire screech, and the impact of a collision.</li>
<li>Realistic physics and driving mechanics. CarX Street uses a realistic physics engine to simulate your car's behavior on different surfaces and in different conditions. You can feel your car's weight, inertia, traction, and suspension. You can also control your car with different driving techniques, such as steering, braking, accelerating, drifting, and nitro.</li>
<li>Large open world with different types of maps. CarX Street lets you explore a large open world with different types of maps, such as city streets, winding mountain roads, coastal highways, and more. Each map type has its own characteristics and challenges. You can discover new places and secrets on every map.</li>
<li>Multiplayer and online club features. CarX Street lets you compete against other players in real network races. You can join or create a club and challenge other clubs or bosses. You can also chat with other players and share your achievements and tips.</li>
</ul>
<p>CarX Street is a game that takes skill and strategy to master. Here are some tips and tricks to improve your racing skills and performance:</p>
<ul>
<li>Choose the right car and parts for each map and mode. Different cars and parts have different advantages and disadvantages on different maps and in different modes. For example, a car with high speed and acceleration may be good for highway races but not for city races. A car with strong handling and braking may be good for mountain roads but not for coastal highways. You should experiment with different combinations and find the best one for each situation.</li>
<li>Use the driving techniques wisely. CarX Street offers different driving techniques, such as steering, braking, accelerating, drifting, and nitro. Use them wisely to control your car and gain an edge over your opponents. For example, you can steer to avoid obstacles and curves, brake to slow down and prepare for turns, accelerate to speed up and overtake, drift to keep your momentum and earn points, and use nitro to boost your speed and performance.</li>
<li>Watch out for traffic and police. CarX Street features traffic and police on some maps and in some modes. Be careful around them and avoid crashing into them. Traffic can slow you down and damage your car. The police can chase you and give you fines or arrest you. You can use the map in the top-right corner of the screen to see where traffic and police are located.</li>
</ul>
<h2>Conclusion</h2>
<p>In this article, we showed you how to find, install, and use a hack APK for CarX Street, along with some features and tips for the game. We hope you found this article useful and informative. However, we also want to remind you that using a hack APK for CarX Street is neither legal nor ethical, and it can cause problems with the game or your device. Use it at your own risk and discretion.</p>
<p>If you have any comments or questions about this article or the CarX Street game, feel free to leave a comment below. We would love to hear from you.</p>
<h3>FAQs</h3>
<ul>
<li>Q: Is CarX Street free to play?</li>
<li>A: Yes, CarX Street is free to download and play on Android devices. However, it also contains in-app purchases that require real money.</li>
<li>Q: Is CarX Street compatible with my device?</li>
<li>A: CarX Street requires Android 6.0 or higher and at least 2 GB of RAM to run smoothly. You can check your device's compatibility on the Google Play Store.</li>
<li>Q: How do I update CarX Street?</li>
<li>A: You can update CarX Street from the Google Play Store or from the game's official website. However, if you are using a hack APK for CarX Street, you may not be able to update it or access the game's latest features.</li>
<li>Q: How do I contact the developers of CarX Street?</li>
<li>A: You can contact the developers of CarX Street by sending an email to [email protected] or visiting their Facebook page.</li>
<li>Q: How do I report a bug or an issue with CarX Street?</li>
<li>A: You can report a bug or an issue with CarX Street by sending an email to [email protected] or using the feedback option in the game settings.</li>
</ul>
spaces/Benson/text-generation/Examples/Descargar Error Genshin Impacto.md
DELETED
@@ -1,82 +0,0 @@
<h1>How to Fix the Genshin Impact Download Error on Windows PC</h1>
<p>Genshin Impact is a popular action role-playing game that is free to play but offers in-game purchases for additional items and characters. The game was released in 2020 by miHoYo, a video game development company based in Shanghai, China. Genshin Impact has received positive reviews from critics and players alike for its stunning graphics, engaging gameplay, and rich story.</p>
<h2>download error genshin impact</h2><br /><p><b><b>Download File</b> ☆☆☆ <a href="https://bltlly.com/2v6JR4">https://bltlly.com/2v6JR4</a></b></p><br /><br />
<p>However, some Windows PC users have reported running into a download error when trying to install or update the game. The error message reads "Game file download error. Please check your network settings and try again." This error can keep you from enjoying the game and can be frustrating to deal with.</p>
<p>In this article, we will explain what causes this error and how you can fix it using five simple methods. We will also answer some frequently asked questions about the game and its download problems.</p>
<h2>What Is the Genshin Impact Download Error?</h2>
<p>The Genshin Impact download error occurs when you try to download or update the Genshin Impact game files on your Windows PC. The error can stop the download process and corrupt the game files, leaving them unusable.</p>
<h3>Causes of the Download Error</h3>
<p>There are several possible causes for this error, such as:</p>
<ul>
<li>An unstable or slow internet connection</li>
<li>Antivirus or firewall software blocking the download</li>
<li>Corrupted or incomplete game files</li>
<li>Incorrect DNS settings</li>
<li>Server issues or maintenance</li>
</ul>
<h3>Symptoms of the Genshin Impact Download Error</h3>
<p>Some common symptoms of this error are:</p>
<ul>
<li>The download stops at a certain percentage or file size</li>
<li>The download speed is very slow or fluctuates</li>
<li>The error message appears repeatedly</li>
<li>The game launcher crashes or freezes</li>
<li>The game does not run properly</li>
</ul>
<h2>How to Fix the Genshin Impact Download Error</h2>
<p>Fortunately, there are some easy and effective ways to fix this error and resume your download. Here are five methods you can try:</p>
<h3>Method 1: Restart Your Router and Check Your Internet Speed</h3>
<p>The first thing you should do is check your internet connection and make sure it is stable and fast enough to download the game files. You can use an online speed test tool to measure your internet speed and compare it with the recommended speed for downloading Genshin Impact.</p>
<p>The recommended speed for downloading Genshin Impact is at least 5 Mbps for both upload and download. If your speed is lower than that, you may experience slow or interrupted downloads. (If you prefer to check this from a script, see the sketch below.)</p>
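<p>Here is a minimal sketch for measuring your speed from Python rather than a website. This uses the third-party speedtest-cli package, which is our own assumption and not an official Genshin Impact or Windows tool; install it first with <code>pip install speedtest-cli</code>.</p>
<pre><code>
# Minimal sketch: check whether the connection clears the ~5 Mbps guideline.
# Requires the third-party package: pip install speedtest-cli
import speedtest

st = speedtest.Speedtest()
st.get_best_server()                     # pick the nearest test server
down_mbps = st.download() / 1_000_000    # results come back in bits per second
up_mbps = st.upload() / 1_000_000

print(f"Download: {down_mbps:.1f} Mbps, Upload: {up_mbps:.1f} Mbps")
if min(down_mbps, up_mbps) < 5:
    print("Below the ~5 Mbps guideline - expect slow or interrupted downloads.")
</code></pre>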
<p>To improve your internet speed, you can try the following steps:</p>
<ol>
<li>Restart your router by unplugging it from the power source for a few seconds and plugging it back in.</li>
<li>Move your router closer to your PC, or use a wired connection instead of Wi-Fi.</li>
<li>Avoid using other bandwidth-heavy devices or applications while downloading the game.</li>
<li>Contact your internet service provider if the problem persists.</li>
</ol>
<h3>Method 2: Disable or Whitelist Your Antivirus Software</h3>
<p>Antivirus or firewall software can block the download. Temporarily disable it, or add the game folder and the launcher to its whitelist, and then try the download again.</p>
<h3>Method 3: Uninstall and Reinstall the Game and the Launcher</h3>
<p>Corrupted or incomplete game files can also cause the error. Uninstalling and reinstalling the game and the launcher gives you a clean set of files to download from scratch.</p>
<h3>Method 4: Use a VPN to Connect to the Download Server</h3>
<p>A VPN can route your connection through a server that is closer to the game's download server. Pick a VPN that suits your needs and preferences, but make sure to read reviews and ratings before choosing one. Some popular VPN services are ExpressVPN, NordVPN, Surfshark, and CyberGhost.</p>
<p>Here are the steps to use a VPN service to fix the download error:</p>
<ol>
<li>Download and install a VPN service of your choice on your PC.</li>
<li>Launch the VPN service and sign in with your account.</li>
<li>Select a server location that is close to the download server. For example, if you are downloading from the Asia server, you can choose a server in Japan, Korea, or Singapore.</li>
<li>Connect to the server and wait for the connection to be established.</li>
<li>Try downloading the game again and see whether the error is resolved.</li>
</ol>
<h3>Method 5: Manually Download the Game Files</h3>
<p>The last method you can try is to manually download the game files from a third-party source and copy them into your game folder. This can bypass the download error and save you time and bandwidth. However, this method is not recommended by the official game developers and can pose risks such as malware infection, data loss, or an account ban. Use it only at your own risk and discretion.</p>
<p>Here are the steps to manually download the game files (a checksum-verification sketch follows the list):</p>
<ol>
<li>Go to a reputable website that offers the latest version of the Genshin Impact game files. You can search online for these websites or ask other players for recommendations. Some websites that offer this service are https://genshinimpact.fandom.com/wiki/Downloads, https://www.gensh.in/download-links, and https://www.reddit.com/r/Genshin_Impact/comments/j1s3ng/genshin_impact_installationfiles/</li>
<li>Select the server that matches your region and download the game files as a zip or rar archive.</li>
<li>Extract the game files using a file extractor such as WinRAR or 7-Zip.</li>
<li>Copy and paste the game files into your game folder. The default location of the game folder is C:\Program Files\Genshin Impact\Genshin Impact Game.</li>
<li>Run the launcher and verify the game files. The launcher will check for any missing or outdated files and download them if necessary.</li>
<li>Start the game and enjoy playing without errors.</li>
</ol>
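<p>Because third-party archives carry real risk, it is worth verifying a download before extracting it. The sketch below is a generic Python example, not an official miHoYo tool; the file name and expected checksum are placeholders you would replace with values published by the site you downloaded from, if it provides them.</p>
<pre><code>
# Sketch: compare a downloaded archive against a published SHA-256 checksum.
# ARCHIVE and EXPECTED are placeholders, not real values.
import hashlib

ARCHIVE = "GenshinImpact_game_files.zip"   # hypothetical file name
EXPECTED = "0123abcd..."                   # checksum published by the source

digest = hashlib.sha256()
with open(ARCHIVE, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

if digest.hexdigest() == EXPECTED:
    print("Checksum matches - the archive looks intact.")
else:
    print("Checksum mismatch - re-download the archive.")
</code></pre>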
<h2>Conclusion</h2>
<p>Genshin Impact is a fun and immersive game that you can play for free on your Windows PC. However, you may run into download errors that keep you from installing or updating the game. These errors can be caused by various factors, such as your internet connection, antivirus software, corrupted game files, DNS settings, or server issues.</p>
<p>In this article, we covered five methods you can use to fix the error:</p>
<ul>
<li>Restart your router and check your internet speed</li>
<li>Disable or whitelist your antivirus software</li>
<li>Uninstall and reinstall the game and the launcher</li>
<li>Use a VPN to connect to the download server</li>
<li>Manually download the game files</li>
</ul>
<p>We hope these methods help you fix the Genshin Impact download error on Windows PC and enjoy playing the game without problems. If you have any questions or comments, feel free to leave a comment below.</p>
<h2>FAQs</h2>
<h3>Why does Genshin keep failing to download?</h3>
<p>Genshin may keep failing to download for various reasons, such as an unstable or slow internet connection, antivirus or firewall software blocking the download, corrupted or incomplete game files, incorrect DNS settings, or server issues or maintenance. You can try one of the methods discussed in this article to fix the download error and resume your download.</p>
<h3>How long does it take to download Genshin Impact?</h3>
<p>The download time for Genshin Impact depends on your internet speed and the size of the game files. The game files total about 20 GB, but this can vary depending on the server and updates. The average download time for Genshin Impact is 1 to 2 hours, but it can take longer if your internet speed is slow or if you run into download errors.</p>
<h3>How do I update Genshin Impact on PC?</h3>
<p>To update Genshin Impact on PC, run the launcher and click the Update button. The launcher will automatically download and install the latest version of the game. You can also check Genshin Impact's official website or social media accounts for any news or announcements about updates.</p>
<h3>How do I verify the game files in Genshin Impact?</h3>
<p>To verify the game files, run the launcher; it will check for any missing or outdated files and download them if necessary (see Method 5 above).</p>
<h3>How do I change the download server in Genshin Impact?</h3>
<p>To change the download server in Genshin Impact, run the launcher and click the settings icon in the top-right corner. Then click the Game Server tab and select the server that matches your region. You can choose between Asia, Europe, America, or TW, HK, MO. After selecting the server, click Save and restart the launcher.</p>
spaces/CForGETaass/vits-uma-genshin-honkai/attentions.py
DELETED
@@ -1,300 +0,0 @@
import math
import torch
from torch import nn
from torch.nn import functional as F

import commons
from modules import LayerNorm


class Encoder(nn.Module):
  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs):
    super().__init__()
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.window_size = window_size

    self.drop = nn.Dropout(p_dropout)
    self.attn_layers = nn.ModuleList()
    self.norm_layers_1 = nn.ModuleList()
    self.ffn_layers = nn.ModuleList()
    self.norm_layers_2 = nn.ModuleList()
    for i in range(self.n_layers):
      self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size))
      self.norm_layers_1.append(LayerNorm(hidden_channels))
      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
      self.norm_layers_2.append(LayerNorm(hidden_channels))

  def forward(self, x, x_mask):
    attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
    x = x * x_mask
    for i in range(self.n_layers):
      y = self.attn_layers[i](x, x, attn_mask)
      y = self.drop(y)
      x = self.norm_layers_1[i](x + y)

      y = self.ffn_layers[i](x, x_mask)
      y = self.drop(y)
      x = self.norm_layers_2[i](x + y)
    x = x * x_mask
    return x


class Decoder(nn.Module):
  def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs):
    super().__init__()
    self.hidden_channels = hidden_channels
    self.filter_channels = filter_channels
    self.n_heads = n_heads
    self.n_layers = n_layers
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.proximal_bias = proximal_bias
    self.proximal_init = proximal_init

    self.drop = nn.Dropout(p_dropout)
    self.self_attn_layers = nn.ModuleList()
    self.norm_layers_0 = nn.ModuleList()
    self.encdec_attn_layers = nn.ModuleList()
    self.norm_layers_1 = nn.ModuleList()
    self.ffn_layers = nn.ModuleList()
    self.norm_layers_2 = nn.ModuleList()
    for i in range(self.n_layers):
      self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init))
      self.norm_layers_0.append(LayerNorm(hidden_channels))
      self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout))
      self.norm_layers_1.append(LayerNorm(hidden_channels))
      self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True))
      self.norm_layers_2.append(LayerNorm(hidden_channels))

  def forward(self, x, x_mask, h, h_mask):
    """
    x: decoder input
    h: encoder output
    """
    self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
    encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
    x = x * x_mask
    for i in range(self.n_layers):
      y = self.self_attn_layers[i](x, x, self_attn_mask)
      y = self.drop(y)
      x = self.norm_layers_0[i](x + y)

      y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
      y = self.drop(y)
      x = self.norm_layers_1[i](x + y)

      y = self.ffn_layers[i](x, x_mask)
      y = self.drop(y)
      x = self.norm_layers_2[i](x + y)
    x = x * x_mask
    return x


class MultiHeadAttention(nn.Module):
  def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
    super().__init__()
    assert channels % n_heads == 0

    self.channels = channels
    self.out_channels = out_channels
    self.n_heads = n_heads
    self.p_dropout = p_dropout
    self.window_size = window_size
    self.heads_share = heads_share
    self.block_length = block_length
    self.proximal_bias = proximal_bias
    self.proximal_init = proximal_init
    self.attn = None

    self.k_channels = channels // n_heads
    self.conv_q = nn.Conv1d(channels, channels, 1)
    self.conv_k = nn.Conv1d(channels, channels, 1)
    self.conv_v = nn.Conv1d(channels, channels, 1)
    self.conv_o = nn.Conv1d(channels, out_channels, 1)
    self.drop = nn.Dropout(p_dropout)

    if window_size is not None:
      n_heads_rel = 1 if heads_share else n_heads
      rel_stddev = self.k_channels**-0.5
      self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
      self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)

    nn.init.xavier_uniform_(self.conv_q.weight)
    nn.init.xavier_uniform_(self.conv_k.weight)
    nn.init.xavier_uniform_(self.conv_v.weight)
    if proximal_init:
      with torch.no_grad():
        self.conv_k.weight.copy_(self.conv_q.weight)
        self.conv_k.bias.copy_(self.conv_q.bias)

  def forward(self, x, c, attn_mask=None):
    q = self.conv_q(x)
    k = self.conv_k(c)
    v = self.conv_v(c)

    x, self.attn = self.attention(q, k, v, mask=attn_mask)

    x = self.conv_o(x)
    return x

  def attention(self, query, key, value, mask=None):
    # reshape [b, d, t] -> [b, n_h, t, d_k]
    b, d, t_s, t_t = (*key.size(), query.size(2))
    query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
    key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
    value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)

    scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
    if self.window_size is not None:
      assert t_s == t_t, "Relative attention is only available for self-attention."
      key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
      rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
      scores_local = self._relative_position_to_absolute_position(rel_logits)
      scores = scores + scores_local
    if self.proximal_bias:
      assert t_s == t_t, "Proximal bias is only available for self-attention."
      scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
    if mask is not None:
      scores = scores.masked_fill(mask == 0, -1e4)
      if self.block_length is not None:
        assert t_s == t_t, "Local attention is only available for self-attention."
        block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
        scores = scores.masked_fill(block_mask == 0, -1e4)
    p_attn = F.softmax(scores, dim=-1)  # [b, n_h, t_t, t_s]
    p_attn = self.drop(p_attn)
    output = torch.matmul(p_attn, value)
    if self.window_size is not None:
      relative_weights = self._absolute_position_to_relative_position(p_attn)
      value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
      output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
    output = output.transpose(2, 3).contiguous().view(b, d, t_t)  # [b, n_h, t_t, d_k] -> [b, d, t_t]
    return output, p_attn

  def _matmul_with_relative_values(self, x, y):
    """
    x: [b, h, l, m]
    y: [h or 1, m, d]
    ret: [b, h, l, d]
    """
    ret = torch.matmul(x, y.unsqueeze(0))
    return ret

  def _matmul_with_relative_keys(self, x, y):
    """
    x: [b, h, l, d]
    y: [h or 1, m, d]
    ret: [b, h, l, m]
    """
    ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
    return ret

  def _get_relative_embeddings(self, relative_embeddings, length):
    max_relative_position = 2 * self.window_size + 1
    # Pad first before slice to avoid using cond ops.
    pad_length = max(length - (self.window_size + 1), 0)
    slice_start_position = max((self.window_size + 1) - length, 0)
    slice_end_position = slice_start_position + 2 * length - 1
    if pad_length > 0:
      padded_relative_embeddings = F.pad(
          relative_embeddings,
          commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
    else:
      padded_relative_embeddings = relative_embeddings
    used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
    return used_relative_embeddings

  def _relative_position_to_absolute_position(self, x):
    """
    x: [b, h, l, 2*l-1]
    ret: [b, h, l, l]
    """
    batch, heads, length, _ = x.size()
    # Concat columns of pad to shift from relative to absolute indexing.
    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))

    # Concat extra elements so to add up to shape (len+1, 2*len-1).
    x_flat = x.view([batch, heads, length * 2 * length])
    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))

    # Reshape and slice out the padded elements.
    x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
    return x_final

  def _absolute_position_to_relative_position(self, x):
    """
    x: [b, h, l, l]
    ret: [b, h, l, 2*l-1]
    """
    batch, heads, length, _ = x.size()
    # pad along column
    x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
    x_flat = x.view([batch, heads, length**2 + length * (length - 1)])
    # add 0's in the beginning that will skew the elements after reshape
    x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
    x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
    return x_final

  def _attention_bias_proximal(self, length):
    """Bias for self-attention to encourage attention to close positions.
    Args:
      length: an integer scalar.
    Returns:
      a Tensor with shape [1, 1, length, length]
    """
    r = torch.arange(length, dtype=torch.float32)
    diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
    return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)


class FFN(nn.Module):
  def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.filter_channels = filter_channels
    self.kernel_size = kernel_size
    self.p_dropout = p_dropout
    self.activation = activation
    self.causal = causal

    if causal:
      self.padding = self._causal_padding
    else:
      self.padding = self._same_padding

    self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size)
    self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size)
    self.drop = nn.Dropout(p_dropout)

  def forward(self, x, x_mask):
    x = self.conv_1(self.padding(x * x_mask))
    if self.activation == "gelu":
      x = x * torch.sigmoid(1.702 * x)
    else:
      x = torch.relu(x)
    x = self.drop(x)
    x = self.conv_2(self.padding(x * x_mask))
    return x * x_mask

  def _causal_padding(self, x):
    if self.kernel_size == 1:
      return x
    pad_l = self.kernel_size - 1
    pad_r = 0
    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
    x = F.pad(x, commons.convert_pad_shape(padding))
    return x

  def _same_padding(self, x):
    if self.kernel_size == 1:
      return x
    pad_l = (self.kernel_size - 1) // 2
    pad_r = self.kernel_size // 2
    padding = [[0, 0], [0, 0], [pad_l, pad_r]]
    x = F.pad(x, commons.convert_pad_shape(padding))
    return x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
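
The pad-and-reshape trick in `_relative_position_to_absolute_position` above is easiest to follow on a tiny tensor. A minimal standalone sketch (PyTorch assumed; `convert_pad_shape` is inlined here since the `commons` module is not shown in this diff, so this inline version is an assumption about its behavior):

import torch
import torch.nn.functional as F

def convert_pad_shape(pad_shape):
    # Assumed equivalent of commons.convert_pad_shape: reverse dims, flatten for F.pad.
    return [item for sublist in pad_shape[::-1] for item in sublist]

b, h, l = 1, 1, 3
x = torch.arange(b * h * l * (2 * l - 1), dtype=torch.float32).view(b, h, l, 2 * l - 1)

# Pad one column, flatten, pad l-1 more, reshape to (l+1, 2l-1), then slice.
x_pad = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
x_flat = x_pad.view(b, h, l * 2 * l)
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, l - 1]]))
out = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(out.shape)  # torch.Size([1, 1, 3, 3])

Each row of relative scores gets shifted by one position during the reshape, which is exactly what realigns relative offsets to absolute key indices without any gather or conditional ops.
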
spaces/CVPR/LIVE/thrust/dependencies/cub/test/half.h
DELETED
@@ -1,317 +0,0 @@
/******************************************************************************
 * Copyright (c) 2011, Duane Merrill.  All rights reserved.
 * Copyright (c) 2011-2019, NVIDIA CORPORATION.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

#pragma once

/**
 * \file
 * Utilities for interacting with the opaque CUDA __half type
 */

#include <stdint.h>
#include <cuda_fp16.h>
#include <iosfwd>

#include <cub/util_type.cuh>

#ifdef __GNUC__
// There's a ton of type-punning going on in this file.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#endif


/******************************************************************************
 * half_t
 ******************************************************************************/

/**
 * Host-based fp16 data type compatible and convertible with __half
 */
struct half_t
{
    uint16_t __x;

    /// Constructor from __half
    __host__ __device__ __forceinline__
    half_t(const __half &other)
    {
        __x = reinterpret_cast<const uint16_t&>(other);
    }

    /// Constructor from integer
    __host__ __device__ __forceinline__
    half_t(int a)
    {
        *this = half_t(float(a));
    }

    /// Default constructor
    __host__ __device__ __forceinline__
    half_t() : __x(0)
    {}

    /// Constructor from float
    __host__ __device__ __forceinline__
    half_t(float a)
    {
        // Stolen from Norbert Juffa
        uint32_t ia = *reinterpret_cast<uint32_t*>(&a);
        uint16_t ir;

        ir = (ia >> 16) & 0x8000;

        if ((ia & 0x7f800000) == 0x7f800000)
        {
            if ((ia & 0x7fffffff) == 0x7f800000)
            {
                ir |= 0x7c00; /* infinity */
            }
            else
            {
                ir = 0x7fff; /* canonical NaN */
            }
        }
        else if ((ia & 0x7f800000) >= 0x33000000)
        {
            int32_t shift = (int32_t) ((ia >> 23) & 0xff) - 127;
            if (shift > 15)
            {
                ir |= 0x7c00; /* infinity */
            }
            else
            {
                ia = (ia & 0x007fffff) | 0x00800000; /* extract mantissa */
                if (shift < -14)
                { /* denormal */
                    ir |= ia >> (-1 - shift);
                    ia = ia << (32 - (-1 - shift));
                }
                else
                { /* normal */
                    ir |= ia >> (24 - 11);
                    ia = ia << (32 - (24 - 11));
                    ir = ir + ((14 + shift) << 10);
                }
                /* IEEE-754 round to nearest even */
                if ((ia > 0x80000000) || ((ia == 0x80000000) && (ir & 1)))
                {
                    ir++;
                }
            }
        }

        this->__x = ir;
    }

    /// Cast to __half
    __host__ __device__ __forceinline__
    operator __half() const
    {
        return reinterpret_cast<const __half&>(__x);
    }

    /// Cast to float
    __host__ __device__ __forceinline__
    operator float() const
    {
        // Stolen from Andrew Kerr

        int sign     = ((this->__x >> 15) & 1);
        int exp      = ((this->__x >> 10) & 0x1f);
        int mantissa = (this->__x & 0x3ff);
        uint32_t f   = 0;

        if (exp > 0 && exp < 31)
        {
            // normal
            exp += 112;
            f = (sign << 31) | (exp << 23) | (mantissa << 13);
        }
        else if (exp == 0)
        {
            if (mantissa)
            {
                // subnormal
                exp += 113;
                while ((mantissa & (1 << 10)) == 0)
                {
                    mantissa <<= 1;
                    exp--;
                }
                mantissa &= 0x3ff;
                f = (sign << 31) | (exp << 23) | (mantissa << 13);
            }
            else if (sign)
            {
                f = 0x80000000; // negative zero
            }
            else
            {
                f = 0x0; // zero
            }
        }
        else if (exp == 31)
        {
            if (mantissa)
            {
                f = 0x7fffffff; // not a number
            }
            else
            {
                f = (0xff << 23) | (sign << 31); // inf
            }
        }
        return *reinterpret_cast<float const *>(&f);
    }


    /// Get raw storage
    __host__ __device__ __forceinline__
    uint16_t raw()
    {
        return this->__x;
    }

    /// Equality
    __host__ __device__ __forceinline__
    bool operator ==(const half_t &other)
    {
        return (this->__x == other.__x);
    }

    /// Inequality
    __host__ __device__ __forceinline__
    bool operator !=(const half_t &other)
    {
        return (this->__x != other.__x);
    }

    /// Assignment by sum
    __host__ __device__ __forceinline__
    half_t& operator +=(const half_t &rhs)
    {
        *this = half_t(float(*this) + float(rhs));
        return *this;
    }

    /// Multiply
    __host__ __device__ __forceinline__
    half_t operator*(const half_t &other)
    {
        return half_t(float(*this) * float(other));
    }

    /// Add
    __host__ __device__ __forceinline__
    half_t operator+(const half_t &other)
    {
        return half_t(float(*this) + float(other));
    }

    /// Less-than
    __host__ __device__ __forceinline__
    bool operator<(const half_t &other) const
    {
        return float(*this) < float(other);
    }

    /// Less-than-equal
    __host__ __device__ __forceinline__
    bool operator<=(const half_t &other) const
    {
        return float(*this) <= float(other);
    }

    /// Greater-than
    __host__ __device__ __forceinline__
    bool operator>(const half_t &other) const
    {
        return float(*this) > float(other);
    }

    /// Greater-than-equal
    __host__ __device__ __forceinline__
    bool operator>=(const half_t &other) const
    {
        return float(*this) >= float(other);
    }

    /// numeric_traits<half_t>::max
    __host__ __device__ __forceinline__
    static half_t max() {
        uint16_t max_word = 0x7BFF;
        return reinterpret_cast<half_t&>(max_word);
    }

    /// numeric_traits<half_t>::lowest
    __host__ __device__ __forceinline__
    static half_t lowest() {
        uint16_t lowest_word = 0xFBFF;
        return reinterpret_cast<half_t&>(lowest_word);
    }
};


/******************************************************************************
 * I/O stream overloads
 ******************************************************************************/

/// Insert formatted \p half_t into the output stream
std::ostream& operator<<(std::ostream &out, const half_t &x)
{
    out << (float)x;
    return out;
}


/// Insert formatted \p __half into the output stream
std::ostream& operator<<(std::ostream &out, const __half &x)
{
    return out << half_t(x);
}


/******************************************************************************
 * Traits overloads
 ******************************************************************************/

template <>
struct cub::FpLimits<half_t>
{
    static __host__ __device__ __forceinline__ half_t Max() { return half_t::max(); }

    static __host__ __device__ __forceinline__ half_t Lowest() { return half_t::lowest(); }
};

template <> struct cub::NumericTraits<half_t> : cub::BaseTraits<FLOATING_POINT, true, false, unsigned short, half_t> {};


#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
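
The float-to-half constructor above implements IEEE binary16 round-to-nearest-even by hand. One way to sanity-check the bit patterns it produces is to compare against NumPy's float16, which follows the same rounding rule; a small sketch (NumPy assumed available):

import numpy as np

def float_to_half_bits(f):
    # Raw 16-bit pattern of f rounded to IEEE binary16 (round-to-nearest-even).
    return int(np.float16(f).view(np.uint16))

def half_bits_to_float(bits):
    # Decode a raw 16-bit pattern back to a Python float.
    return float(np.uint16(bits).view(np.float16))

# 0x7BFF is the largest finite half (65504.0), matching half_t::max() above.
assert half_bits_to_float(0x7BFF) == 65504.0
# 0xFBFF is the most negative finite half, matching half_t::lowest().
assert half_bits_to_float(0xFBFF) == -65504.0
# Values past the representable range round to infinity, mirroring the
# branch above that sets ir |= 0x7c00.
assert np.isinf(np.float16(70000.0))
print(hex(float_to_half_bits(1.0)))  # 0x3c00
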
spaces/CVPR/monoscene_lite/monoscene/unet2d.py
DELETED
@@ -1,198 +0,0 @@
"""
Code adapted from https://github.com/shariqfarooq123/AdaBins/blob/main/models/unet_adaptive_bins.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import os


class UpSampleBN(nn.Module):
    def __init__(self, skip_input, output_features):
        super(UpSampleBN, self).__init__()
        self._net = nn.Sequential(
            nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(output_features),
            nn.LeakyReLU(),
            nn.Conv2d(
                output_features, output_features, kernel_size=3, stride=1, padding=1
            ),
            nn.BatchNorm2d(output_features),
            nn.LeakyReLU(),
        )

    def forward(self, x, concat_with):
        up_x = F.interpolate(
            x,
            size=(concat_with.shape[2], concat_with.shape[3]),
            mode="bilinear",
            align_corners=True,
        )
        f = torch.cat([up_x, concat_with], dim=1)
        return self._net(f)


class DecoderBN(nn.Module):
    def __init__(
        self, num_features, bottleneck_features, out_feature, use_decoder=True
    ):
        super(DecoderBN, self).__init__()
        features = int(num_features)
        self.use_decoder = use_decoder

        self.conv2 = nn.Conv2d(
            bottleneck_features, features, kernel_size=1, stride=1, padding=1
        )

        self.out_feature_1_1 = out_feature
        self.out_feature_1_2 = out_feature
        self.out_feature_1_4 = out_feature
        self.out_feature_1_8 = out_feature
        self.out_feature_1_16 = out_feature
        self.feature_1_16 = features // 2
        self.feature_1_8 = features // 4
        self.feature_1_4 = features // 8
        self.feature_1_2 = features // 16
        self.feature_1_1 = features // 32

        if self.use_decoder:
            self.resize_output_1_1 = nn.Conv2d(
                self.feature_1_1, self.out_feature_1_1, kernel_size=1
            )
            self.resize_output_1_2 = nn.Conv2d(
                self.feature_1_2, self.out_feature_1_2, kernel_size=1
            )
            self.resize_output_1_4 = nn.Conv2d(
                self.feature_1_4, self.out_feature_1_4, kernel_size=1
            )
            self.resize_output_1_8 = nn.Conv2d(
                self.feature_1_8, self.out_feature_1_8, kernel_size=1
            )
            self.resize_output_1_16 = nn.Conv2d(
                self.feature_1_16, self.out_feature_1_16, kernel_size=1
            )

            self.up16 = UpSampleBN(
                skip_input=features + 224, output_features=self.feature_1_16
            )
            self.up8 = UpSampleBN(
                skip_input=self.feature_1_16 + 80, output_features=self.feature_1_8
            )
            self.up4 = UpSampleBN(
                skip_input=self.feature_1_8 + 48, output_features=self.feature_1_4
            )
            self.up2 = UpSampleBN(
                skip_input=self.feature_1_4 + 32, output_features=self.feature_1_2
            )
            self.up1 = UpSampleBN(
                skip_input=self.feature_1_2 + 3, output_features=self.feature_1_1
            )
        else:
            self.resize_output_1_1 = nn.Conv2d(3, out_feature, kernel_size=1)
            self.resize_output_1_2 = nn.Conv2d(32, out_feature * 2, kernel_size=1)
            self.resize_output_1_4 = nn.Conv2d(48, out_feature * 4, kernel_size=1)

    def forward(self, features):
        x_block0, x_block1, x_block2, x_block3, x_block4 = (
            features[4],
            features[5],
            features[6],
            features[8],
            features[11],
        )
        bs = x_block0.shape[0]
        x_d0 = self.conv2(x_block4)

        if self.use_decoder:
            x_1_16 = self.up16(x_d0, x_block3)
            x_1_8 = self.up8(x_1_16, x_block2)
            x_1_4 = self.up4(x_1_8, x_block1)
            x_1_2 = self.up2(x_1_4, x_block0)
            x_1_1 = self.up1(x_1_2, features[0])
            return {
                "1_1": self.resize_output_1_1(x_1_1),
                "1_2": self.resize_output_1_2(x_1_2),
                "1_4": self.resize_output_1_4(x_1_4),
                "1_8": self.resize_output_1_8(x_1_8),
                "1_16": self.resize_output_1_16(x_1_16),
            }
        else:
            x_1_1 = features[0]
            x_1_2, x_1_4, x_1_8, x_1_16 = (
                features[4],
                features[5],
                features[6],
                features[8],
            )
            x_global = features[-1].reshape(bs, 2560, -1).mean(2)
            return {
                "1_1": self.resize_output_1_1(x_1_1),
                "1_2": self.resize_output_1_2(x_1_2),
                "1_4": self.resize_output_1_4(x_1_4),
                "global": x_global,
            }


class Encoder(nn.Module):
    def __init__(self, backend):
        super(Encoder, self).__init__()
        self.original_model = backend

    def forward(self, x):
        features = [x]
        for k, v in self.original_model._modules.items():
            if k == "blocks":
                for ki, vi in v._modules.items():
                    features.append(vi(features[-1]))
            else:
                features.append(v(features[-1]))
        return features


class UNet2D(nn.Module):
    def __init__(self, backend, num_features, out_feature, use_decoder=True):
        super(UNet2D, self).__init__()
        self.use_decoder = use_decoder
        self.encoder = Encoder(backend)
        self.decoder = DecoderBN(
            out_feature=out_feature,
            use_decoder=use_decoder,
            bottleneck_features=num_features,
            num_features=num_features,
        )

    def forward(self, x, **kwargs):
        encoded_feats = self.encoder(x)
        unet_out = self.decoder(encoded_feats, **kwargs)
        return unet_out

    def get_encoder_params(self):  # lr/10 learning rate
        return self.encoder.parameters()

    def get_decoder_params(self):  # lr learning rate
        return self.decoder.parameters()

    @classmethod
    def build(cls, **kwargs):
        basemodel_name = "tf_efficientnet_b7_ns"
        num_features = 2560

        print("Loading base model ({})...".format(basemodel_name), end="")
        basemodel = torch.hub.load(
            "rwightman/gen-efficientnet-pytorch", basemodel_name, pretrained=True
        )
        print("Done.")

        # Remove last layer
        print("Removing last two layers (global_pool & classifier).")
        basemodel.global_pool = nn.Identity()
        basemodel.classifier = nn.Identity()

        # Building Encoder-Decoder model
        print("Building Encoder-Decoder model..", end="")
        m = cls(basemodel, num_features=num_features, **kwargs)
        print("Done.")
        return m


if __name__ == '__main__':
    model = UNet2D.build(out_feature=256, use_decoder=True)
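
As a rough smoke test of the module above, extending its own `__main__` block with a forward pass (a sketch only: the `unet2d` import path is an assumption, and `build()` downloads the EfficientNet-B7 backbone via torch.hub, so it needs network access on first run):

import torch
from unet2d import UNet2D  # hypothetical import path for the file above

model = UNet2D.build(out_feature=256, use_decoder=True).eval()
x = torch.randn(1, 3, 480, 640)  # one RGB image; H and W just need to suit the backbone strides
with torch.no_grad():
    out = model(x)
# With use_decoder=True the decoder returns feature maps at five scales.
print({k: tuple(v.shape) for k, v in out.items()})  # keys "1_1", "1_2", "1_4", "1_8", "1_16"
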
spaces/CVPR/regionclip-demo/detectron2/export/README.md
DELETED
@@ -1,13 +0,0 @@

This directory contains code to prepare a detectron2 model for deployment.
Currently it supports exporting a detectron2 model to Caffe2 format through ONNX.

Please see the [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage.


### Acknowledgements

Thanks to the Mobile Vision team at Facebook for developing the Caffe2 conversion tools.

Thanks to the Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who
helped export Detectron2 models to TorchScript.
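
For context on the workflow this README points to, a hedged sketch of the Caffe2/ONNX export path (the API names follow the detectron2 deployment tutorial of roughly this era; `Caffe2Tracer` and its methods are assumptions about that version and were removed from later releases):

import torch
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.export import Caffe2Tracer  # assumed available in this detectron2 version
from detectron2.modeling import build_model

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml"))
model = build_model(cfg)
model.eval()

# One sample input in detectron2's standard dict format.
inputs = [{"image": torch.randn(3, 480, 640)}]
tracer = Caffe2Tracer(cfg, model, inputs)
onnx_model = tracer.export_onnx()  # ONNX graph; export_caffe2() would give a Caffe2Model
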
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/apps/notice/notice.js
DELETED
@@ -1,184 +0,0 @@
import { sendSocketList, Config, Version } from '../../components/index.js'
import { setMsgMap } from '../../model/index.js'

Bot.on('notice', async e => {
  if (e.self_id == '88888') {
    if (e.group?.bot?.uin) {
      e.self_id = e.group.bot.uin
    } else if (e.friend?.bot?.uin) {
      e.self_id = e.friend.bot.uin
    }
    e.bot = Bot[e.self_id]
  }
  if (Config.muteStop && (e.group?.mute_left > 0 || e.group?.all_muted)) return false
  if (sendSocketList.length == 0) return false
  if (e.group_id) {
    // Check the Yunzai group whitelist.
    const whiteGroup = Config.whiteGroup
    if (Array.isArray(whiteGroup) && whiteGroup.length > 0) {
      if (!whiteGroup.some(i => i == e.group_id)) return false
    }
    // Check the plugin group whitelist.
    const yesGroup = Config.yesGroup
    if (Array.isArray(yesGroup) && yesGroup.length > 0) {
      if (!yesGroup.some(i => i == e.group_id)) return false
    }
    // Check the Yunzai group blacklist.
    const blackGroup = Config.blackGroup
    if (Array.isArray(blackGroup) && blackGroup.length > 0) {
      if (blackGroup.some(i => i == e.group_id)) return false
    }
    // Check the plugin group blacklist.
    const noGroup = Config.noGroup
    if (Array.isArray(noGroup) && noGroup.length > 0) {
      if (noGroup.some(i => i == e.group_id)) return false
    }
  }
  e.reply = reply(e)
  let other = {}
  if (e.notice_type == 'group') {
    other.group_id = e.group_id
    other.user_id = e.user_id
    other.operator_id = e.operator_id
    switch (e.sub_type) {
      // Member joined the group.
      case 'increase':
        if (!Config.groupIncrease) return false
        other.notice_type = 'group_increase'
        other.sub_type = 'approve'
        other.operator_id = e.user_id
        break
      // Member left the group.
      case 'decrease':
        if (!Config.groupDecrease) return false
        other.notice_type = 'group_decrease'
        other.sub_type = e.operator_id == e.user_id ? 'leave' : 'kick'
        if (e.user_id == Bot.uin) other.sub_type = 'kick_me'
        break
      // Poke.
      case 'poke':
        if (!Config.groupPoke) return false
        other.notice_type = 'notify'
        other.sub_type = 'poke'
        other.user_id = e.operator_id
        other.target_id = e.target_id
        break
      // Group admin set or unset.
      case 'admin':
        if (!Config.groupAdmin) return false
        other.notice_type = 'group_admin'
        other.sub_type = e.set ? 'set' : 'unset'
        break
      // Mute / unmute.
      case 'ban':
        if (!Config.groupBan) return false
        other.notice_type = 'group_ban'
        other.sub_type = e.duration == 0 ? 'lift_ban' : 'ban'
        other.duration = e.duration
        break
      // Group message recalled.
      case 'recall':
        if (!Config.groupRecall) return false
        other.notice_type = 'group_recall'
        other.message_id = e.rand
        break
      default:
        return false
    }
  } else if (e.notice_type == 'friend') {
    other.user_id = e.user_id
    switch (e.sub_type) {
      // Friend added.
      case 'increase':
        if (!Config.friendIncrease) return false
        other.notice_type = 'friend_add'
        break
      // Friend message recalled.
      case 'recall':
        if (!Config.friendRecall) return false
        other.notice_type = 'friend_recall'
        other.message_id = e.rand
        break
      default:
        return false
    }
  } else {
    return false
  }
  let msg = {
    time: Date.parse(new Date()) / 1000,
    self_id: e.self_id,
    post_type: 'notice',
    ...other
  }
  msg = JSON.stringify(msg)
  for (const i of sendSocketList) {
    if (i.status == 1) {
      switch (Number(i.type)) {
        case 1:
        case 2:
          if (Version.isTrss) {
            if (i.uin != e.self_id) continue
            if (!Version.protocol.some(i => i == e.bot?.version?.name)) continue
          }
          i.ws.send(msg)
          break
        default:
          break
      }
    }
  }
})

function reply(e) {
  if (!Version.isTrss) {
    const replyNew = e.reply
    return async function (message, quote = false, data = {}) {
      const ret = await replyNew(message, quote, data)
      if (ret) {
        setMsgMap({
          message_id: ret.message_id,
          time: ret.time,
          seq: ret.seq,
          rand: ret.rand,
          user_id: e.user_id,
          group_id: e.group_id,
          onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
        })
      }
      return ret
    }
  } else {
    if (e.bot?.version?.name == 'ICQQ') {
      return async function (message, quote = false) {
        let ret
        if (e.isGroup) {
          if (e.group?.sendMsg) {
            ret = await e.group.sendMsg(message, quote)
          } else {
            ret = await e.bot.pickGroup(e.group_id).sendMsg(message, quote)
          }
        } else {
          if (e.friend?.sendMsg) {
            ret = await e.friend.sendMsg(message, quote)
          } else {
            ret = await e.bot.pickFriend(e.user_id).sendMsg(message, quote)
          }
        }
        if (ret) {
          setMsgMap({
            message_id: ret.message_id,
            time: ret.time,
            seq: ret.seq,
            rand: ret.rand,
            user_id: e.user_id,
            group_id: e.group_id,
            onebot_id: Math.floor(Math.random() * Math.pow(2, 32)) | 0,
          })
        }
        return ret
      }
    }
    return e.reply
  }
}