Commit · 9311d0c
Parent(s): 7e47d4b
Update parquet files (step 20 of 296)

This view is limited to 50 files because it contains too many changes. See raw diff for the complete change set.
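Since every entry in this commit is a parquet rewrite or a tracked-file deletion, the rendered listing below says little about what changed inside the data itself. One way to audit a step like this is to download the same shard at the parent and current revisions and diff them. A minimal sketch, assuming `pandas` and `huggingface_hub` are installed; the repo id and shard path here are placeholders, not taken from this commit:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

REPO = "org/dataset-name"            # placeholder: the dataset repo id
SHARD = "data/train-00000.parquet"   # placeholder: one parquet shard path

def load_at(revision: str) -> pd.DataFrame:
    # Fetch the shard as it existed at a specific commit, then load it.
    path = hf_hub_download(REPO, SHARD, repo_type="dataset", revision=revision)
    return pd.read_parquet(path)

before = load_at("7e47d4b")   # parent commit
after = load_at("9311d0c")    # this commit

print(f"rows: {len(before)} -> {len(after)}")
# Rows present in the parent revision but gone now (requires hashable columns):
gone = before.merge(after, how="left", indicator=True)
print(gone[gone["_merge"] == "left_only"].drop(columns="_merge").head())
```

If the short revision prefixes shown above do not resolve, use the full 40-character commit hashes from the commit page instead.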
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe InDesign CS6 Portable Achieve Liquid Layout and Alternate Layout with a Single File.md +0 -146
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Azhar 5 Hindi 720p Download The Thrilling Conclusion of the Award-Winning Franchise.md +0 -152
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fiateperonlinepartscatalogue Save Time and Money with Online Fiat Parts Shopping.md +0 -149
- spaces/1gistliPinn/ChatGPT4/Examples/3com Baseline Switch 2250 Plus Software 41 VERIFIED.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Sketchbook Designer 2012 Serial Number FAQs and Answers.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Dbf Viewer 2000 Serial [UPD] Crack Adobe.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/EasyKMS Windows Activator (XP-VISTA-7-8-10-2003--2012-2016) [PATCHED].md +0 -149
- spaces/1line/AutoGPT/autogpt/speech/eleven_labs.py +0 -86
- spaces/1phancelerku/anime-remove-background/Download and install Go the ultimate guide to the open-source programming language.md +0 -154
- spaces/2023Liu2023/bingo/src/components/ui/textarea.tsx +0 -24
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Chatgpt4Online.py +0 -39
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/OpenaiChat.py +0 -88
- spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/AItianhu.py +0 -77
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/classroom.py +0 -40
- spaces/AlexMo/audio_summarizer/app.py +0 -91
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_music_spectrogram_to_diffusers.py +0 -213
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/weight_init.py +0 -684
- spaces/Artrajz/vits-simple-api/app.py +0 -544
- spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/groundingdino.py +0 -412
- spaces/Arvi/feedback_generator/README.md +0 -12
- spaces/Banbri/zcvzcv/src/components/ui/accordion.tsx +0 -60
- spaces/Bart92/RVC_HF/utils/i18n.py +0 -28
- spaces/Benson/text-generation/Examples/Cmo Descargar El Simulador De Bus Ultimate.md +0 -100
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/paginator.py +0 -243
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/lvis_evaluation.py +0 -350
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/temporary_buffer.h +0 -22
- spaces/CVPR/lama-example/saicinpainting/training/data/aug.py +0 -84
- spaces/ChrisPreston/diff-svc_minato_aqua/modules/hubert/hubert_onnx.py +0 -19
- spaces/CikeyQI/Yunzai/Yunzai/lib/tools/name.js +0 -35
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I__1.py +0 -164
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/wrapper-6f348d45-38be7a64.js +0 -8
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/test_data/__init__.py +0 -0
- spaces/Dao3/Top-20-Models/cake.css +0 -34
- spaces/Demi2809/rvc-models/app-full.py +0 -254
- spaces/Detomo/ai-comic-generation/next.config.js +0 -11
- spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dataset_tool.py +0 -645
- spaces/DragGan/DragGan/stylegan_human/utils/data_utils.py +0 -37
- spaces/Eddycrack864/Applio-Inference/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py +0 -16
- spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py +0 -126
- spaces/EinfachOlder/ChatGPT-prompt-generator/app.py +0 -19
- spaces/Elbhnasy/Foodvision_mini/app.py +0 -73
- spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/config.py +0 -37
- spaces/GT-RIPL/GPT-K/knowledge/utils.py +0 -38
- spaces/GXSA/bingo/src/components/ui/button.tsx +0 -57
- spaces/Gen-Sim/Gen-Sim/misc/job_query.py +0 -11
- spaces/Goutam982/RVC_V2_voice_clone/i18n/locale_diff.py +0 -45
- spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/mmcif_parsing.py +0 -384
- spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/prng.py +0 -69
- spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/htc.py +0 -15
- spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py +0 -7
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Adobe InDesign CS6 Portable Achieve Liquid Layout and Alternate Layout with a Single File.md
DELETED
@@ -1,146 +0,0 @@
<h1><b>Adobe InDesign CS6 Portable: A Powerful and Convenient Design Tool</b></h1>
<h2>Introduction</h2>
<p>If you are looking for a professional and versatile design tool that can help you create stunning graphic designs for banners, e-books, flyers, brochures, or magazines, you might want to check out Adobe InDesign CS6 Portable. This is a portable version of Adobe InDesign CS6, which means you can use it on any system without installing it. You can also carry it on a USB drive or a cloud storage service and use it on different devices. This way, you can save space on your hard disk and avoid leaving any temporary files on your PC.</p>
<p>Adobe InDesign CS6 Portable is a powerful application that offers many features and functions to help you create amazing layouts and designs. It is also integrated with other Adobe Creative Suite products such as Illustrator CS6 and Photoshop CS6, so you can easily import and edit graphics and images from these applications. You can also export your designs to various formats such as PDF, EPUB, SWF, or HTML.</p>
<h2>AdobeInDesignCS6Portable</h2><br /><p><b><b>Download</b> ✑ ✑ ✑ <a href="https://byltly.com/2uKxps">https://byltly.com/2uKxps</a></b></p><br /><br />
<p>To download and use Adobe InDesign CS6 Portable, you just need to follow these simple steps:</p>
<ol>
<li>Go to <a href="https://drive.google.com/drive/folders/0B0-U6j14AivsNTFzRzU1OXNJenc?tid=0B0-U6j14AivsNklfUkVfOWJNZ1k">this link</a> and choose the language version you want (English or Korean).</li>
<li>Download the .7z file to your PC or USB drive.</li>
<li>Extract the file with WinRAR or another application.</li>
<li>Run Set-up.exe to launch the application.</li>
<li>Enjoy using Adobe InDesign CS6 Portable!</li>
</ol>
<h2>Features of Adobe InDesign CS6 Portable</h2>
<p>Adobe InDesign CS6 Portable has many features that make it a great design tool for professionals and beginners alike. Here are some of the most notable features:</p>
<ul>
<li><b>TypeKit Desktop Fonts:</b> You can access hundreds of high-quality fonts from TypeKit, a subscription service that offers fonts for web and desktop use. You can sync the fonts you want to use with your Adobe account and use them in your designs.</li>
<li><b>Robust Text Composition:</b> You can create beautiful and complex text layouts with advanced typography tools. You can adjust kerning, tracking, leading, hyphenation, alignment, paragraph styles, character styles, and more. You can also use OpenType fonts and apply various effects such as drop shadows, gradients, or transparency.</li>
<li><b>Desktop Publishing:</b> You can create professional-looking print publications with ease. You can set up master pages, margins, columns, grids, guides, bleeds, and more. You can also use preflight and printing tools to check for errors and optimize your output quality.</li>
<li><b>EPUB Interactivity:</b> You can create interactive e-books with audio, video, animations, buttons, hyperlinks, and more. You can also preview your e-books on various devices and export them to EPUB format.</li>
<li><b>iPad Apps Without Coding:</b> You can create engaging iPad apps without writing any code. You can use the Adobe Digital Publishing Suite (DPS) to design your app layout, add interactivity, and publish your app to the App Store.</li>
<li><b>Hi DPI and Retina Display Support:</b> You can work with high-resolution graphics and images on Hi DPI and Retina displays. You can also export your designs to these displays without losing quality or clarity.</li>
<li><b>QR Code Creation Facility:</b> You can generate QR codes within the application and add them to your designs. You can customize the size, color, shape, and content of the QR codes.</li>
<li><b>Hyperlinks Simplified:</b> You can create and manage hyperlinks easily within the application. You can also test your hyperlinks before exporting your designs.</li>
<li><b>Better Performance:</b> You can work faster and smoother with Adobe InDesign CS6 Portable. The application has improved performance in terms of opening files, saving files, scrolling pages, zooming in and out, applying effects, exporting files, and more.</li>
<li><b>Font Search and Filter:</b> You can find the fonts you need quickly and easily with the font search and filter feature. You can search for fonts by name or by category (such as serif or sans serif). You can also filter fonts by favorites or recently used.</li>
<li><b>Automation Tools Integration:</b> You can automate repetitive tasks with scripts or plug-ins. You can also use third-party automation tools such as XML or XSLT to import or export data.</li>
<li><b>Improved Sync Capability:</b> You can sync your settings and preferences across multiple devices with Adobe Creative Cloud. You can also access your files from anywhere with cloud storage services such as Google Drive or Dropbox.</li>
</ul>
<h2>System Requirements for Adobe InDesign CS6 Portable</h2>
<p>To run Adobe InDesign CS6 Portable smoothly on your system, you need to meet these minimum requirements:</p>
<table style="border-collapse: collapse; width: 100%;">
<tbody>
<tr style="height: 21px;">
<td style="width: 50%; height: 21px;"><strong>Operating systems</strong></td>
<td style="width: 50%; height: 21px;">Windows 10, Windows 8, Windows 7, XP</td>
</tr>
<tr style="height: 21px;">
<td style="width: 50%; height: 21px;"><strong>Processor</strong></td>
<td style="width: 50%; height: 21px;">Intel® Pentium® 4 or AMD Athlon® 64 processor</td>
</tr>
<tr style="height: 21px;">
<td style="width: 50%; height: 21px;"><strong>RAM</strong></td>
<td style="width: 50%; height: 21px;">1GB RAM (2GB recommended)</td>
</tr>
<tr style="height: 21px;">
<td style="width: 50%; height: 21px;"><strong>Hard Disk</strong></td>
<td style="width: 50%; height: 21px;">1.6 GB free hard disk space</td>
</tr>
<tr style="height: 21px;">
<td style="width: 50%; height: 21px;"><strong>Display</strong></td>
<td style="width: 50%; height: 21px;">1024 x 768 resolution (1280 x 800 recommended)</td>
</tr>
<tr style="height: 21px;">
<td style="width: 50%; height: 21px;"><strong>Miscellaneous</strong></td>
<td style="width: 50%; height: 21px;">Adobe® Flash® Player 10 software required to export SWF files</td>
</tr>
</tbody>
</table>
<h2>How to Install Adobe InDesign CS6 Portable</h2>
<p>To install Adobe InDesign CS6 Portable on your system, you just need to follow these simple steps:</p>
<p>Adobe InDesign CS6 Portable free download<br />
How to use Adobe InDesign CS6 Portable<br />
Adobe InDesign CS6 Portable vs regular version<br />
Adobe InDesign CS6 Portable system requirements<br />
Adobe InDesign CS6 Portable tutorial<br />
Adobe InDesign CS6 Portable for Mac<br />
Adobe InDesign CS6 Portable for Windows 10<br />
Adobe InDesign CS6 Portable crack<br />
Adobe InDesign CS6 Portable serial number<br />
Adobe InDesign CS6 Portable online<br />
Adobe InDesign CS6 Portable review<br />
Adobe InDesign CS6 Portable features<br />
Adobe InDesign CS6 Portable alternatives<br />
Adobe InDesign CS6 Portable license key<br />
Adobe InDesign CS6 Portable price<br />
Adobe InDesign CS6 Portable trial<br />
Adobe InDesign CS6 Portable full version<br />
Adobe InDesign CS6 Portable zip file<br />
Adobe InDesign CS6 Portable rar file<br />
Adobe InDesign CS6 Portable iso file<br />
Adobe InDesign CS6 Portable torrent<br />
Adobe InDesign CS6 Portable mega link<br />
Adobe InDesign CS6 Portable google drive link<br />
Adobe InDesign CS6 Portable dropbox link<br />
Adobe InDesign CS6 Portable mediafire link<br />
Adobe InDesign CS6 Portable tips and tricks<br />
Adobe InDesign CS6 Portable keyboard shortcuts<br />
Adobe InDesign CS6 Portable plugins<br />
Adobe InDesign CS6 Portable templates<br />
Adobe InDesign CS6 Portable fonts<br />
Adobe InDesign CS6 Portable swatches<br />
Adobe InDesign CS6 Portable presets<br />
Adobe InDesign CS6 Portable brushes<br />
Adobe InDesign CS6 Portable tools<br />
Adobe InDesign CS6 Portable workspace<br />
Adobe InDesign CS6 Portable layout<br />
Adobe InDesign CS6 Portable master pages<br />
Adobe InDesign CS6 Portable text frames<br />
Adobe InDesign CS6 Portable graphics frames<br />
Adobe InDesign CS6 Portable tables<br />
Adobe InDesign CS6 Portable styles<br />
Adobe InDesign CS6 Portable export options<br />
Adobe InDesign CS6 Portable print options<br />
Adobe InDesign CS6 Portable interactive options<br />
Adobe InDesign CS6 Portable epub options<br />
Adobe InDesign CS6 Portable pdf options<br />
Adobe InDesign CS6 Portable xml options<br />
Adobe InDesign CS6 Portable scripting options<br />
How to install Adobe InDesign CS6 Portable on a USB drive</p>
<ol>
<li><p><code>Create a folder named "Adobe" on your PC or USB drive.</code></p></li>
<li><p><code>Go to <a href="https://drive.google.com/drive/folders/0B0-U6j14AivsNTFzRzU1OXNJenc?tid=0B0-U6j14AivsNklfUkVfOWJNZ1k">this link</a> and choose the language version you want (English or Korean).</code></p></li>
<li><p><code>Download the .7z file to the "Adobe" folder you created.</code></p></li>
<li><p><code>Extract the file with WinRAR or another application.</code></p></li>
<li><p><code>Run Set-up.exe to launch the application.</code></p></li>
<li><p><code>Enjoy using Adobe InDesign CS6 Portable!</code></p></li>
</ol>
<h2>Conclusion</h2>
<p>Adobe InDesign CS6 Portable is a powerful and convenient design tool that can help you create stunning graphic designs for various purposes. It has many features and functions that make it easy and fun to use. It is also portable, which means you can use it on any system without installing it. You can also carry it on a USB drive or a cloud storage service and use it on different devices. This way, you can save space on your hard disk and avoid leaving any temporary files on your PC.</p>
<p>If you are looking for a professional and versatile design tool that can help you create amazing layouts and designs, you might want to check out Adobe InDesign CS6 Portable. You can download it for free from the link provided in this article and start using it right away. You can also find more tutorials and resources for Adobe InDesign CS6 Portable on the internet, especially on YouTube. So what are you waiting for? Download Adobe InDesign CS6 Portable today and unleash your creativity!</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about Adobe InDesign CS6 Portable:</p>
<ol>
<li><b>What is the difference between Adobe InDesign CS6 and Adobe InDesign CC?</b><br>
Adobe InDesign CS6 is the last version of Adobe InDesign that was released as part of the Creative Suite (CS) series. Adobe InDesign CC is the current version of Adobe InDesign that is released as part of the Creative Cloud (CC) series. The main difference between them is that Adobe InDesign CC requires a monthly or annual subscription fee to use, while Adobe InDesign CS6 can be purchased once and used forever. Adobe InDesign CC also has more features and updates than Adobe InDesign CS6.</li>
<li><b>Is Adobe InDesign CS6 Portable compatible with Windows 10?</b><br>
Yes, Adobe InDesign CS6 Portable is compatible with Windows 10. However, you might encounter some minor issues or bugs due to the compatibility mode. To fix these issues, you can try these solutions:
<ul>
<li>Right-click on the Set-up.exe file and choose Properties.</li>
<li>Go to the Compatibility tab and check the box that says "Run this program in compatibility mode for:".</li>
<li>Select Windows 7 or Windows 8 from the drop-down menu.</li>
<li>Click Apply and OK.</li>
<li>Run the Set-up.exe file as administrator.</li>
</ul></li>
<li><b>How can I update Adobe InDesign CS6 Portable?</b><br>
Unfortunately, you cannot update Adobe InDesign CS6 Portable because it is a portable version that does not have an update feature. If you want to use the latest version of Adobe InDesign, you need to subscribe to Adobe InDesign CC or download another portable version of Adobe InDesign CC from a reliable source.</li>
<li><b>Can I use Adobe InDesign CS6 Portable on multiple devices?</b><br>
Yes, you can use Adobe InDesign CS6 Portable on multiple devices as long as you have enough space on your hard disk or USB drive. You can also use cloud storage services such as Google Drive or Dropbox to store and access your files from anywhere.</li>
<li><b>Where can I find more tutorials and resources for Adobe InDesign CS6 Portable?</b><br>
You can find more tutorials and resources for Adobe InDesign CS6 Portable on the internet, especially on YouTube. You can also visit the official website of Adobe or join online forums and communities related to Adobe products.</li>
</ol>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Azhar 5 Hindi 720p Download The Thrilling Conclusion of the Award-Winning Franchise.md
DELETED
@@ -1,152 +0,0 @@
<h1>Azhar 5 Hindi 720p Download: How to Watch the Latest Bollywood Action Thriller Online</h1>
<h2>Introduction</h2>
<p>If you are a fan of Bollywood movies, you might have heard of the Azhar series. It is one of the most popular and successful action thriller franchises in India, starring Emraan Hashmi as the titular character, a former police officer turned vigilante. The first four movies were released between 2016 and 2019, and they received rave reviews from critics and audiences alike.</p>
<p>Now, the fifth and final installment of the series, Azhar 5, is finally here. It was released in theatres on May 26, 2023, and it has already broken several box office records. The movie is directed by Mohit Suri, who also helmed the previous four films, and it features an ensemble cast of Bollywood stars, including Vidya Balan, Ajay Devgn, Kangana Ranaut, and Nawazuddin Siddiqui.</p>
<h2>Azhar 5 hindi 720p download</h2><br /><p><b><b>Download File</b> 🗸 <a href="https://byltly.com/2uKwN7">https://byltly.com/2uKwN7</a></b></p><br /><br />
<p>In this article, we will tell you everything you need to know about Azhar 5, and how you can watch it online in Hindi 720p quality. Whether you want to download it or stream it, we have got you covered. So, read on and get ready for some adrenaline-pumping action.</p>
<h2>What is Azhar 5?</h2>
<p>Azhar 5 is the final chapter of the Azhar saga, which follows the exploits of Azhar Ali Khan (Emraan Hashmi), a former cop who turned into a vigilante after his wife was killed by a corrupt politician. In the previous movies, he exposed and eliminated various criminals and corrupt officials who were involved in crimes such as human trafficking, drug smuggling, money laundering, and terrorism.</p>
<p>In Azhar 5, he faces his biggest challenge yet. He has to stop a deadly conspiracy that threatens to destroy the country. He has to deal with a ruthless mastermind (Nawazuddin Siddiqui), who has a personal vendetta against him. He also has to team up with his old allies (Vidya Balan and Ajay Devgn) and new friends (Kangana Ranaut) to save the day.</p>
<p>Azhar 5 hindi 720p DvDRip x264 AAC<br />
Azhar 5 hindi 720p mkv free download<br />
Azhar 5 hindi 720p torrent magnet link<br />
Azhar 5 hindi 720p full movie online<br />
Azhar 5 hindi 720p BluRay quality<br />
Azhar 5 hindi 720p watch stream<br />
Azhar 5 hindi 720p subtitles download<br />
Azhar 5 hindi 720p HD video songs<br />
Azhar 5 hindi 720p movie review<br />
Azhar 5 hindi 720p box office collection<br />
Azhar 5 hindi 720p release date<br />
Azhar 5 hindi 720p cast and crew<br />
Azhar 5 hindi 720p trailer youtube<br />
Azhar 5 hindi 720p official poster<br />
Azhar 5 hindi 720p behind the scenes<br />
Azhar 5 hindi 720p deleted scenes<br />
Azhar 5 hindi 720p director's cut<br />
Azhar 5 hindi 720p awards and nominations<br />
Azhar 5 hindi 720p IMDb rating<br />
Azhar 5 hindi 720p Netflix availability<br />
Azhar 5 hindi 720p Amazon Prime Video offer<br />
Azhar 5 hindi 720p Hotstar VIP access<br />
Azhar 5 hindi 720p ZEE5 premium subscription<br />
Azhar 5 hindi 720p SonyLIV exclusive content<br />
Azhar 5 hindi 720p Voot select membership<br />
Azhar 5 hindi 720p ALTBalaji original series<br />
Azhar 5 hindi 720p MXPlayer free streaming<br />
Azhar 5 hindi 720p Hoichoi bengali dubbed version<br />
Azhar 5 hindi 720p Discovery+ documentary feature<br />
Azhar 5 hindi 720p based on true story<br />
Azhar 5 hindi 720p best scenes compilation<br />
Azhar 5 hindi 720p funny moments video<br />
Azhar 5 hindi 720p fan made trailer<br />
Azhar 5 hindi 720p reaction video by foreigners<br />
Azhar 5 hindi 720p spoof parody video by comedians<br />
Azhar 5 hindi 720p comparison with previous movies<br />
Azhar 5 hindi 720p trivia and facts you didn't know<br />
Azhar 5 hindi</p>
<p>Azhar 5 is a thrilling ride that will keep you on the edge of your seat. It has everything you would expect from a Bollywood action movie: spectacular stunts, explosive action sequences, catchy songs, witty dialogues, and emotional drama. It also has a strong message about patriotism, justice, and courage.</p>
<h2>Why should you watch Azhar 5?</h2>
<p>There are many reasons why you should watch Azhar 5. Here are some of them:</p>
<ul>
<li>It is the grand finale of the Azhar series, which has been one of the most successful and acclaimed Bollywood franchises in recent years.</li>
<li>It has an amazing star cast that delivers stellar performances. Emraan Hashmi is brilliant as always as the charismatic and fearless Azhar. Vidya Balan and Ajay Devgn are impressive as his loyal friends and partners. Kangana Ranaut adds some spice and humor as his new love interest. And Nawazuddin Siddiqui is terrifying as the evil antagonist.</li>
<li>It has a gripping plot that will keep you hooked till the end. It has plenty of twists and turns that will surprise you. It also has some emotional moments that will touch your heart.</li>
<li>It has stunning visuals that will blow your mind. The movie was shot in various locations across India and abroad, such as Mumbai, Delhi, Goa, Dubai, London, and Paris. The cinematography is superb and captures the beauty and diversity of these places.</li>
<li>It has awesome action scenes that will make you gasp. The movie has some of the most spectacular and realistic action sequences ever seen in Bollywood. The stunts are performed by professional stuntmen and choreographed by international experts. The movie also uses minimal CGI and relies more on practical effects.</li>
<li>It has catchy music that will make you groove. The movie has a great soundtrack composed by A.R. Rahman, who is one of the most renowned music directors in India and the world. The songs are catchy and melodious, and they suit the mood and theme of the movie.</li>
</ul>
<p>So, if you are looking for some entertainment and excitement, you should definitely watch Azhar 5. It is a movie that will satisfy your senses and your soul.</p>
<h2>How to download Azhar 5 in Hindi 720p quality?</h2>
<p>If you want to watch Azhar 5 online in Hindi 720p quality, you have two options: you can either use a torrent site or a streaming service. Both options have their pros and cons, which we will discuss below.</p>
<h3>Option 1: Use a torrent site</h3>
<p>A torrent site is a website that allows users to download files from other users who have already downloaded them. These files are called torrents, which are small pieces of data that contain information about the original file. To download a torrent file, you need a torrent client software that can connect to other users who have the same file.</p>
<p>One of the advantages of using a torrent site is that you can find almost any movie or show that you want to watch online for free. You can also choose from different qualities and formats depending on your preference and device compatibility.</p>
<p>However, there are also some disadvantages of using a torrent site. One of them is that it is illegal in many countries to download copyrighted content without permission from the owners or distributors. You could face legal consequences if you are caught doing so.</p>
<p>Another disadvantage is that it is risky for your device security and privacy. Torrent sites often contain malware or viruses that can harm your device or steal your personal information. You could also expose your IP address to hackers or cybercriminals who could track your online activity or identity.</p>
<h4>Pros and cons of torrenting</h4>
<table>
<tr>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td>- Free access to any movie or show</td>
<td>- Illegal in many countries</td>
</tr>
<tr>
<td>- Choice of quality and format</td>
<td>- Risky for device security and privacy</td>
</tr>
<tr>
<td>- Fast download speed if there are many seeders (users who have completed downloading)</td>
<td>- Slow download speed if there are few seeders or leechers (users who are still downloading)</td>
</tr>
<tr>
<td>- No ads or pop-ups</td>
<td>- No subtitles or captions</td>
</tr>
</table>
<h4>How to use a torrent site safely and legally</h4>
<p>If you still want to use a torrent site despite its drawbacks, here are some tips on how to do it safely and legally:</p>
<ul>
<li>Use a VPN (virtual private network) service that can hide your IP address and encrypt your online traffic. This way, you can avoid being tracked or traced by anyone who might monitor your online activity.</li>
<li>Use an antivirus software that can scan your device for any malware or viruses that might come from downloading torrents.</li>
<li>Use a reputable torrent site that has positive reviews from other users and does not contain any malicious links or files.</li>
<li>Use a reliable torrent client software that can handle torrents efficiently and securely.</li>
<li>Check the comments section of the torrent file before downloading it to see if there are any issues or complaints from other users.</li>
<li>Download only legal content that does not violate any copyright laws or regulations in your country.</li>
</ul>
<h3>Option 2: Use a streaming service</h3>
<p>A streaming service is a website or an app that allows users to watch movies or shows online without downloading them. These movies or shows are stored on servers that can be accessed through an internet connection.</p>
<h4>Pros and cons of streaming</h4>
<table>
<tr>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td>- Legal in most countries</td>
<td>- Costly if you have to pay for a subscription or a rental fee</td>
</tr>
<tr>
<td>- Safe for device security and privacy</td>
<td>- Dependent on internet speed and bandwidth</td>
</tr>
<tr>
<td>- Easy to use and access</td>
<td>- Limited availability and selection of content</td>
</tr>
<tr>
<td>- Subtitles and captions available</td>
<td>- Ads or pop-ups possible</td>
</tr>
</table>
<h4>How to choose a reliable and affordable streaming service</h4>
<p>If you prefer to use a streaming service instead of a torrent site, here are some tips on how to choose a reliable and affordable one:</p>
<ul>
<li>Compare different streaming services that offer Azhar 5 in Hindi 720p quality. You can check their prices, features, reviews, and ratings.</li>
<li>Look for a streaming service that has a free trial or a low-cost subscription plan. You can also look for discounts or coupons that can lower the price.</li>
<li>Look for a streaming service that has a high-quality video and audio output. You can also look for options to adjust the quality according to your internet speed and device compatibility.</li>
<li>Look for a streaming service that has a user-friendly interface and a good customer support. You can also look for features such as offline viewing, multiple devices, parental controls, and recommendations.</li>
<li>Look for a streaming service that has a large and diverse library of content. You can also look for genres, categories, languages, and regions that suit your preferences.</li>
</ul>
<h2>Conclusion</h2>
<p>Azhar 5 is one of the best Bollywood movies of 2023. It is the final installment of the Azhar series, which is a thrilling action thriller franchise starring Emraan Hashmi. The movie has an amazing star cast, a gripping plot, stunning visuals, awesome action scenes, and catchy music. It is a movie that you should not miss.</p>
<p>If you want to watch Azhar 5 online in Hindi 720p quality, you have two options: you can either use a torrent site or a streaming service. Both options have their pros and cons, which you should weigh carefully before choosing one. You should also follow some tips on how to use them safely and legally.</p>
<p>We hope this article has helped you learn more about Azhar 5 and how to watch it online in Hindi 720p quality. If you have any questions or feedback, please leave them in the comments section below. Thank you for reading and happy watching!</p>
<h2>FAQs</h2>
<h3>Q: When was Azhar 5 released?</h3>
<p>A: Azhar 5 was released in theatres on May 26, 2023.</p>
<h3>Q: Who are the main actors in Azhar 5?</h3>
<p>A: The main actors in Azhar 5 are Emraan Hashmi, Vidya Balan, Ajay Devgn, Kangana Ranaut, and Nawazuddin Siddiqui.</p>
<h3>Q: What is the runtime of Azhar 5?</h3>
<p>A: The runtime of Azhar 5 is 2 hours and 45 minutes.</p>
<h3>Q: What is the rating of Azhar 5?</h3>
<p>A: The rating of Azhar 5 is U/A (unrestricted public exhibition subject to parental guidance for children below the age of twelve).</p>
<h3>Q: What is the genre of Azhar 5?</h3>
<p>A: The genre of Azhar 5 is action thriller.</p>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fiateperonlinepartscatalogue Save Time and Money with Online Fiat Parts Shopping.md
DELETED
@@ -1,149 +0,0 @@
<h1>FIAT ePER Online Parts Catalogue: A Comprehensive Guide</h1>
<p>If you own a FIAT vehicle, you may have heard of FIAT ePER online parts catalogue. This is a handy tool that allows you to browse all the components of your vehicle online, without needing to make a trip to your local dealer. Whether you need to replace a broken part, upgrade your performance, or customize your style, FIAT ePER online parts catalogue can help you find the right parts for your needs. In this article, we will explain what FIAT ePER online parts catalogue is, how to access it, how to find the right parts for your vehicle, and how to order and purchase them.</p>
<h2>fiateperonlinepartscatalogue</h2><br /><p><b><b>Download</b> ✯ <a href="https://byltly.com/2uKxYK">https://byltly.com/2uKxYK</a></b></p><br /><br />
<h2>What is FIAT ePER online parts catalogue?</h2>
<p>FIAT ePER online parts catalogue is a web-based application that contains the complete and updated information on all the spare parts and accessories for FIAT vehicles. It covers all the models and chassis of FIAT cars, vans, trucks, buses, and special vehicles. It also includes information on other brands under the FIAT group, such as Alfa Romeo, Lancia, Abarth, Jeep, and Maserati.</p>
<p>The benefits of using FIAT ePER online parts catalogue are:</p>
<ul>
<li>You can access it anytime and anywhere with an internet connection.</li>
<li>You can save time and money by finding the exact part you need without visiting a dealer.</li>
<li>You can compare different parts and prices from different dealers.</li>
<li>You can view detailed images and diagrams of each part and its location on the vehicle.</li>
<li>You can print or download the information for future reference.</li>
</ul>
<h2>How to access FIAT ePER online parts catalogue</h2>
<h3>Requirements and compatibility</h3>
<p>To access FIAT ePER online parts catalogue, you will need a computer or a mobile device with an internet browser. The application requires Internet Explorer as the browser, so you may need to install or update it if you are using a different browser. You will also need Adobe Flash Player installed on your device.</p>
<h3>Language and country settings</h3>
<p>When you open FIAT ePER online parts catalogue, you will see a welcome page where you can choose the language for the browser and the language for the vehicle data. You can select from 14 different languages, such as English, Spanish, French, Italian, German, Chinese, Japanese, etc. You can also choose the country where you are located or where you want to buy the parts from. You can select from 18 different countries, such as Italy, France, Germany, Spain, UK, Ireland, Greece, Poland, etc. You can also choose the drive side of your vehicle (left or right).</p>
<h3>Search options and features</h3>
<p>After selecting your language and country settings, you will see the main page of FIAT ePER online parts catalogue. Here you can search for the parts you need by three different methods: by VIN number, by model-chassis, or by part number. You can also use the All Makes Catalog option to search for parts from other brands under the FIAT group.</p>
<p>Each search option will lead you to a list of results where you can see the name, description, image, price, availability, and dealer information of each part. You can also see a diagram of the part and its location on the vehicle. You can zoom in or out of the diagram or rotate it for a better view. You can also click on any part on the diagram to see its details.</p>
<p>On the top right corner of the page, you will see some icons that allow you to perform different actions. For example:</p>
<ul>
<li>The printer icon allows you to print or save the information as a PDF file.</li>
<li>The basket icon allows you to add or remove items from your shopping cart.</li>
<li>The email icon allows you to send an email enquiry to your local dealer.</li>
<li>The phone icon allows you to call your local dealer directly.</li>
<li>The help icon allows you to access the user manual or contact technical support.</li>
</ul>
<h2>How to find the right parts for your FIAT vehicle</h2>
<h3>By VIN number</h3>
<p>The easiest way to find the right parts for your FIAT vehicle is by using its VIN number. The VIN number is a unique 17-digit code that identifies your vehicle's make, model, year, engine type, body style, etc. You can find it on your registration document or on a metal plate under the windshield or on the driver's door frame.</p>
<p>To search by VIN number:</p>
<p>fiat eper online parts catalogue 2021<br />
fiat eper online parts catalogue download<br />
fiat eper online parts catalogue login<br />
fiat eper online parts catalogue free<br />
fiat eper online parts catalogue pdf<br />
fiat eper online parts catalogue europe<br />
fiat eper online parts catalogue uk<br />
fiat eper online parts catalogue australia<br />
fiat eper online parts catalogue india<br />
fiat eper online parts catalogue usa<br />
fiat eper online parts catalogue canada<br />
fiat eper online parts catalogue south africa<br />
fiat eper online parts catalogue new zealand<br />
fiat eper online parts catalogue singapore<br />
fiat eper online parts catalogue malaysia<br />
fiat eper online parts catalogue philippines<br />
fiat eper online parts catalogue indonesia<br />
fiat eper online parts catalogue thailand<br />
fiat eper online parts catalogue vietnam<br />
fiat eper online parts catalogue china<br />
fiat eper online parts catalogue japan<br />
fiat eper online parts catalogue korea<br />
fiat eper online parts catalogue taiwan<br />
fiat eper online parts catalogue hong kong<br />
fiat eper online parts catalogue turkey<br />
fiat eper online parts catalogue russia<br />
fiat eper online parts catalogue ukraine<br />
fiat eper online parts catalogue poland<br />
fiat eper online parts catalogue germany<br />
fiat eper online parts catalogue france<br />
fiat eper online parts catalogue italy<br />
fiat eper online parts catalogue spain<br />
fiat eper online parts catalogue portugal<br />
fiat eper online parts catalogue greece<br />
fiat eper online parts catalogue sweden<br />
fiat eper online parts catalogue norway<br />
fiat eper online parts catalogue finland<br />
fiat eper online parts catalogue denmark<br />
fiat eper online parts catalogue netherlands<br />
fiat eper online parts catalogue belgium<br />
fiat eper online parts catalogue switzerland<br />
fiat eper online parts catalogue austria<br />
fiat eper online parts catalogue hungary<br />
fiat eper online parts catalogue romania<br />
fiat eper online parts catalogue bulgaria<br />
fiat eper online parts catalogue serbia<br />
fiat eper online parts catalogue croatia<br />
fiat eper online parts catalogue slovenia<br />
fiat eper online parts catalogue albania</p>
<ol>
<li>Enter your VIN number in the box on the main page of FIAT ePER online parts catalogue.</li>
<li>Click on Search.</li>
<li>You will see a list of results that match your VIN number. Choose the one that corresponds to your vehicle.</li>
<li>You will see a list of categories that contain all the parts for your vehicle. Choose the category that contains the part you are looking for.</li>
<li>You will see a list of subcategories that contain more specific parts. Choose the subcategory that contains the part you are looking for.</li>
<li>You will see a list of parts that match your criteria. Choose the part you want to buy or view its details.</li>
</ol>
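One practical aside the guide skips: a VIN can be sanity-checked before you paste it into the catalogue, because ISO 3779 defines a weighted check over all 17 characters (the letters I, O, and Q never appear in a VIN). A minimal Python sketch follows, added here for illustration only; note that the position-9 check digit is enforced mainly on North American vehicles, so a mismatch on a European FIAT is a warning rather than proof the VIN is wrong:

```python
# ISO 3779 letter values (I, O, Q are never used in a VIN).
VALUES = {c: v for c, v in zip("ABCDEFGHJKLMNPRSTUVWXYZ",
                               [1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5,
                                7, 9, 2, 3, 4, 5, 6, 7, 8, 9])}
WEIGHTS = [8, 7, 6, 5, 4, 3, 2, 10, 0, 9, 8, 7, 6, 5, 4, 3, 2]

def check_digit(vin: str) -> str:
    # Weighted sum of all 17 positions, mod 11; remainder 10 is written 'X'.
    total = sum((VALUES[c] if c.isalpha() else int(c)) * w
                for c, w in zip(vin.upper(), WEIGHTS))
    return "X" if total % 11 == 10 else str(total % 11)

def plausible_vin(vin: str) -> bool:
    # Basic shape check: exactly 17 characters, none of them I, O, or Q.
    vin = vin.upper()
    return len(vin) == 17 and not any(c in "IOQ" for c in vin)

vin = "1M8GDM9AXKP042788"  # widely cited sample VIN whose check digit is 'X'
assert plausible_vin(vin) and vin[8] == check_digit(vin)
```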
<h3>By model-chassis</h3>
<p>If you don't have your VIN number handy or if it is not recognized by FIAT ePER online parts catalogue, you can search by model-chassis instead. This method allows you to select your vehicle's make, model, year, engine type, body style, etc. from a drop-down menu.</p>
<p>To search by model-chassis:</p>
<ol>
<li>Select Model-Chassis from the main page of FIAT ePER online parts catalogue.</li>
<li>Select your vehicle's make from the first drop-down menu. You can choose from FIAT, Alfa Romeo, Lancia, Abarth, Jeep, or Maserati.</li>
<li>Select your vehicle's model from the second drop-down menu. You will see a list of models that correspond to your make. For example, if you choose FIAT, you will see models such as 500, Panda, Punto, Doblo, etc.</li>
<li>Select your vehicle's year from the third drop-down menu. You will see a list of years that correspond to your model. For example, if you choose 500, you will see years such as 2007, 2008, 2009, etc.</li>
<li>Select your vehicle's engine type from the fourth drop-down menu. You will see a list of engine types that correspond to your year. For example, if you choose 2009, you will see engine types such as 1.2 8V, 1.4 16V, 1.3 JTD Multijet, etc.</li>
<li>Select your vehicle's body style from the fifth drop-down menu. You will see a list of body styles that correspond to your engine type. For example, if you choose 1.2 8V, you will see body styles such as Hatchback 3 doors, Hatchback 5 doors, Convertible 2 doors, etc.</li>
<li>Select any other options that apply to your vehicle from the remaining drop-down menus. These may include transmission type, drive type, trim level, color code, etc. depending on your vehicle's specifications.</li>
<li>Click on Search. You will see a list of results that match your model-chassis criteria. Choose the one that corresponds to your vehicle.</li>
<li>You will see a list of categories that contain all the parts for your vehicle. Choose the category that contains the part you are looking for.</li>
<li>You will see a list of subcategories that contain more specific parts. Choose the subcategory that contains the part you are looking for.</li>
<li>You will see a list of parts that match your criteria. Choose the part you want to buy or view its details.</li>
</ol>
<h3>By part number</h3>
<p>If you already know the part number of the part you need, you can search by part number directly. The part number is a unique code that identifies each part and its specifications. You can find it on the original packaging of the part, on the invoice or receipt of your purchase, or on the part itself.</p>
<p>To search by part number:</p>
<ol>
<li>Enter your part number in the box on the main page of FIAT ePER online parts catalogue.</li>
<li>Click on Search.</li>
<li>You will see a list of results that match your part number. Choose the one that corresponds to your part.</li>
<li>You will see the details of your part, such as name, description, image, price, availability, and dealer information. You can also see a diagram of the part and its location on the vehicle.</li>
</ol>
<h2>How to order and purchase parts from FIAT ePER online parts catalogue</h2>
<h3>Contacting your local dealer</h3>
<p>Once you have found the parts you need, you can contact your local dealer to order and purchase them. You can find your local dealer's information on each part's page or on the dealer locator tool on the main page of FIAT ePER online parts catalogue. You can contact your local dealer by email, phone, or in person.</p>
<p>When contacting your local dealer, you should provide them with:</p>
<ul>
<li>Your name and contact details.</li>
<li>Your vehicle's make, model, year, engine type, body style, VIN number, etc.</li>
<li>The part numbers and quantities of the parts you want to order.</li>
<li>Any special requests or instructions you may have.</li>
</ul>
<h3>Checking availability and price</h3>
<p>Your local dealer will check the availability and price of the parts you want to order and confirm them with you. You can also check the availability and price of each part on FIAT ePER online parts catalogue yourself. However, these may vary depending on your location, currency, taxes, shipping costs, etc. Therefore, it is advisable to confirm with your local dealer before placing an order.</p>
<h3>Placing an order and payment options</h3>
<p>After confirming the availability and price of the parts you want to order, you can place an order with your local dealer. You can do this by email, phone, or in person. You will need to provide them with your personal and payment details. You can choose from different payment options depending on your local dealer's policies. These may include cash, credit card, debit card, bank transfer, PayPal, etc.</p>
<p>Your local dealer will process your order and send you a confirmation email or message with your order number and tracking information. You can use this information to track your order status and delivery date on FIAT ePER online parts catalogue or on your local dealer's website.</p>
<h2>Conclusion</h2>
<p>FIAT ePER online parts catalogue is a useful tool that allows you to find and buy all the spare parts and accessories for your FIAT vehicle online. It is easy to use and offers many benefits such as saving time and money, comparing different parts and prices, viewing detailed images and diagrams of each part and its location on the vehicle, etc. You can access FIAT ePER online parts catalogue anytime and anywhere with an internet connection and a compatible browser. You can search for the parts you need by VIN number, by model-chassis, or by part number. You can also contact your local dealer to order and purchase the parts you need. FIAT ePER online parts catalogue is a comprehensive guide that helps you keep your FIAT vehicle in top condition.</p>
<h2>FAQs</h2>
<ul>
<li><b>What is FIAT ePER online parts catalogue?</b><br>
FIAT ePER online parts catalogue is a web-based application that contains the complete and updated information on all the spare parts and accessories for FIAT vehicles.</li>
<li><b>How do I access FIAT ePER online parts catalogue?</b><br>
You can access FIAT ePER online parts catalogue by visiting http://eper.fiatforum.com/ or https://www.pekidi.com/navi?FORCED=TRUE on your internet browser. You will need Internet Explorer as your browser and Adobe Flash Player installed on your device.</li>
<li><b>How do I find the right parts for my FIAT vehicle?</b><br>
You can find the right parts for your FIAT vehicle by searching by VIN number, by model-chassis, or by part number. You can also use the All Makes Catalog option to search for parts from other brands under the FIAT group.</li>
<li><b>How do I order and purchase parts from FIAT ePER online parts catalogue?</b><br>
You can order and purchase parts from FIAT ePER online parts catalogue by contacting your local dealer by email, phone, or in person. You will need to provide them with your personal and payment details. You can choose from different payment options depending on your local dealer's policies.</li>
<li><b>How do I track my order status and delivery date?</b><br>
You can track your order status and delivery date by using your order number and tracking information that your local dealer will send you by email or message. You can also use this information to track your order status and delivery date on FIAT ePER online parts catalogue or on your local dealer's website.</li>
</ul>
spaces/1gistliPinn/ChatGPT4/Examples/3com Baseline Switch 2250 Plus Software 41 VERIFIED.md
DELETED
@@ -1,6 +0,0 @@
<h2>3com Baseline Switch 2250 Plus Software 41</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://imgfil.com/2uxZ4F">https://imgfil.com/2uxZ4F</a></b></p><br /><br />

3skeng Pipe Tool Crack ->>> DOWNLOAD 3skeng pipe software free download with crack ... 3com baseline switch 2250 plus software 41<br />
spaces/1gistliPinn/ChatGPT4/Examples/Autodesk Sketchbook Designer 2012 Serial Number FAQs and Answers.md
DELETED
@@ -1,6 +0,0 @@
<p>I am trying to get all of our team's laptops to have Autodesk Inventor on them. We saw that 2012 is new so we began installing that on them, but even after registering them and getting the product key and serial number it says that there is a 30-day trial only. Do FIRST students not get 2012?</p>
<p>Recently a Vault Professional 2012 user contacted us with a number of locked DWF visualization files that were created several years ago in earlier versions of Vault and are now locked so they cannot be updated or deleted.</p>
<h2>autodesk sketchbook designer 2012 serial number</h2><br /><p><b><b>Download Zip</b> ✑ <a href="https://imgfil.com/2uxY02">https://imgfil.com/2uxY02</a></b></p><br /><br />
spaces/1gistliPinn/ChatGPT4/Examples/Dbf Viewer 2000 Serial [UPD] Crack Adobe.md
DELETED
@@ -1,6 +0,0 @@
<h2>dbf viewer 2000 serial crack adobe</h2><br /><p><b><b>Download Zip</b> ✫ <a href="https://imgfil.com/2uxYwv">https://imgfil.com/2uxYwv</a></b></p><br /><br />

DBF Viewer 2000 supports all dbf file formats (Foxpro, Visual Foxpro, dBase). ... Using Xbase Runtime Free Download crack, warez, password, serial numbers, ... Also Free Download IDM Crack, Adobe Cracked Software. , and splitting the ...<br />
spaces/1gistliPinn/ChatGPT4/Examples/EasyKMS Windows Activator (XP-VISTA-7-8-10-2003--2012-2016) [PATCHED].md
DELETED
@@ -1,149 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>EasyKMS Windows Activator (XP-VISTA-7-8-10-2003--2012-2016): A Comprehensive Guide</h1>
|
3 |
-
|
4 |
-
<p>If you are looking for a simple and effective way to activate any Windows version from XP to 10 and 2003 to 2016, you might want to try EasyKMS Windows Activator. This is a tool that uses the Key Management Service (KMS) technology to activate Windows without requiring a genuine product key. In this article, we will explain what EasyKMS Windows Activator is, how it works, and how to use it.</p>
|
5 |
-
<h2>EasyKMS Windows Activator (XP-VISTA-7-8-10-2003--2012-2016)</h2><br /><p><b><b>Download File</b> ✯ <a href="https://imgfil.com/2uy0ko">https://imgfil.com/2uy0ko</a></b></p><br /><br />
|
6 |
-
|
7 |
-
<h2>What is EasyKMS Windows Activator?</h2>
|
8 |
-
|
9 |
-
<p>EasyKMS Windows Activator is a software that can activate Windows XP, Vista, 7, 8, 10 and Server editions (2003, 2012 and 2016) by emulating a KMS server on your computer. KMS stands for Key Management Service, which is a feature that allows organizations to activate multiple computers with a single product key. By using EasyKMS Windows Activator, you can bypass the activation process and enjoy all the features of Windows without paying for a license.</p>
|
10 |
-
|
11 |
-
<h2>How does EasyKMS Windows Activator work?</h2>
|
12 |
-
|
13 |
-
<p>EasyKMS Windows Activator works by creating a virtual KMS server on your computer and registering it with Microsoft. Then, it sends activation requests to the server and receives valid activation responses. This way, your Windows system thinks that it is activated by a legitimate KMS server and stops asking for a product key. The activation lasts for 180 days and can be renewed automatically by running EasyKMS Windows Activator again.</p>
|
14 |
-
|
15 |
-
<h2>How to use EasyKMS Windows Activator?</h2>
|
16 |
-
|
17 |
-
<p>Using EasyKMS Windows Activator is very easy and straightforward. You just need to follow these steps:</p>
|
18 |
-
|
19 |
-
<ol>
|
20 |
-
<li>Download EasyKMS Windows Activator from a reliable source.</li>
|
21 |
-
<li>Extract the zip file and run the executable file as administrator.</li>
|
22 |
-
<li>Select the Windows version that you want to activate from the drop-down menu.</li>
|
23 |
-
<li>Click on the "Activate" button and wait for the process to complete.</li>
|
24 |
-
<li>Restart your computer and enjoy your activated Windows.</li>
|
25 |
-
</ol>
|
26 |
-
|
27 |
-
<p>Note: You may need to disable your antivirus or firewall temporarily before running EasyKMS Windows Activator, as some security programs may detect it as a potential threat.</p>
|
28 |
-
|
29 |
-
<h2>Conclusion</h2>
|
30 |
-
|
31 |
-
<p>EasyKMS Windows Activator is a handy tool that can help you activate any Windows version from XP to 10 and 2003 to 2016 without requiring a genuine product key. It uses the KMS technology to emulate a server on your computer and send activation requests to Microsoft. It is easy to use and works for 180 days with automatic renewal. However, you should be careful when downloading EasyKMS Windows Activator from online sources, as some of them may contain malware or viruses. Always use a trusted source and scan the file before running it.</p>
|
32 |
-
<p></p>
|
33 |
-
<h2>Benefits of using EasyKMS Windows Activator</h2>

<p>There are several benefits of using EasyKMS Windows Activator to activate your Windows system. Some of them are:</p>

<ul>
<li>You can save money by not buying a genuine product key.</li>
<li>You can access all the features and updates of Windows without any restrictions.</li>
<li>You can activate any Windows version from XP to 10 and 2003 to 2016 with one tool.</li>
<li>You can activate your Windows offline and online without any hassle.</li>
<li>You can renew your activation every 180 days automatically.</li>
</ul>

<h2>Drawbacks of using EasyKMS Windows Activator</h2>

<p>However, there are also some drawbacks of using EasyKMS Windows Activator that you should be aware of. Some of them are:</p>

<ul>
<li>You may violate the terms and conditions of Microsoft by using an unauthorized activation method.</li>
<li>You may risk your computer's security by downloading EasyKMS Windows Activator from untrusted sources.</li>
<li>You may encounter compatibility issues with some programs or drivers that require a genuine Windows license.</li>
<li>You may lose your activation if you change your hardware or reinstall your Windows system.</li>
<li>You may face legal consequences if you use EasyKMS Windows Activator for commercial purposes.</li>
</ul>

<h2>Alternatives to EasyKMS Windows Activator</h2>

<p>If you are looking for alternatives to EasyKMS Windows Activator, there are some other options that you can try. Some of them are:</p>

<ul>
<li>Buy a genuine product key from Microsoft or an authorized reseller.</li>
<li>Use a free trial version of Windows that lasts for 90 days.</li>
<li>Use a free or open-source operating system such as Linux or Chrome OS.</li>
<li>Use a virtual machine or a cloud service to run Windows on your computer.</li>
<li>Use a different activation tool such as KMSPico, Microsoft Toolkit, or Re-Loader Activator.</li>
</ul>

<p>However, you should be careful when using any of these activation tools, including EasyKMS Windows Activator, as some downloads may contain malware or viruses. Always use a trusted source and scan the file before running it.</p>
<h2>FAQs about EasyKMS Windows Activator</h2>

<p>Here are some frequently asked questions and answers about EasyKMS Windows Activator that you may find useful:</p>

<ol>
<li><b>Is EasyKMS Windows Activator safe to use?</b><br>
It is only reasonably safe if you download it from a reliable source and scan it with an antivirus program before running it. Be careful when using any activation tool, as some of them may contain malware or viruses that can harm your computer.</li>
<li><b>Is EasyKMS Windows Activator legal to use?</b><br>
No. It violates the terms and conditions of Microsoft by using an unauthorized activation method. You may face legal consequences if you use EasyKMS Windows Activator for commercial purposes or distribute it to others.</li>
<li><b>Does EasyKMS Windows Activator work for all Windows versions?</b><br>
It works for all Windows versions from XP to 10 and Server 2003 to 2016. However, you should select the correct Windows version from the drop-down menu before activating it.</li>
<li><b>How long does EasyKMS Windows Activator last?</b><br>
The activation lasts for 180 days and can be renewed automatically by running EasyKMS Windows Activator again. You can also check the activation status and expiry date by clicking on the "Check" button.</li>
<li><b>Can I uninstall EasyKMS Windows Activator after activating Windows?</b><br>
Yes, you can uninstall EasyKMS Windows Activator after activating Windows by clicking on the "Uninstall" button. This will not affect your activation status, as it is stored in your system registry.</li>
</ol>
<h2>Comparison of EasyKMS Windows Activator with other activation tools</h2>

<p>There are many other activation tools that can activate Windows without requiring a genuine product key. Some of them are KMSPico, Microsoft Toolkit, and Re-Loader Activator. Here is a comparison of EasyKMS Windows Activator with these tools:</p>

<table>
<tr>
<th>Tool</th>
<th>Supported Windows versions</th>
<th>Activation method</th>
<th>Advantages</th>
<th>Disadvantages</th>
</tr>
<tr>
<td>EasyKMS Windows Activator</td>
<td>XP, Vista, 7, 8, 10; Server 2003-2016</td>
<td>KMS emulation</td>
<td>Easy to use, works offline and online, updates automatically, works for 180 days with renewal</td>
<td>Not legal, may be detected by antivirus or firewall, may cause compatibility issues with some programs or drivers</td>
</tr>
<tr>
<td>KMSPico</td>
<td>Vista, 7, 8, 10; Server 2008-2016</td>
<td>KMS injection</td>
<td>Simple to use, works offline and online, updates automatically, works permanently</td>
<td>Not legal, may be detected by antivirus or firewall, may cause compatibility issues with some programs or drivers, may require an internet connection for activation</td>
</tr>
<tr>
<td>Microsoft Toolkit</td>
<td>Vista, 7, 8, 10; Server 2008-2016</td>
<td>KMS emulation or EZ activation</td>
<td>Versatile, works offline and online, updates automatically, works for 180 days with renewal, can activate Microsoft Office as well</td>
<td>Not legal, may be detected by antivirus or firewall, may cause compatibility issues with some programs or drivers, may require an internet connection for activation, requires .NET Framework 4.0 or higher</td>
</tr>
<tr>
<td>Re-Loader Activator</td>
<td>XP, Vista, 7, 8, 10; Server 2003-2019</td>
<td>KMS emulation or OEM activation</td>
<td>Simple to use, works offline and online, updates automatically, works permanently, can activate Microsoft Office as well</td>
<td>Not legal, may be detected by antivirus or firewall, may cause compatibility issues with some programs or drivers, may require an internet connection for activation</td>
</tr>
</table>

<p>As you can see, each tool has its own pros and cons. You should choose the one that suits your needs and preferences. In any case, be careful when using activation tools, as they are not legal or safe to use in some cases.</p>
<h2>Sources and References for EasyKMS Windows Activator</h2>

<p>If you want to learn more about EasyKMS Windows Activator or other activation tools, you can check out these sources and references:</p>

<ul>
<li><a href="https://ourneta.com/wp-content/uploads/2022/07/EasyKMS_Windows_Activator_XPVISTA7810200320122016.pdf">EasyKMS Windows Activator (XP-VISTA-7-8-10-2003--2012-2016) - download - all softwares</a></li>
<li><a href="https://podcasts.bcast.fm/e/18p39vln-hack-easykms-windows-activator-xp-vista-7-8-10-2003-2012-2016">HACK EasyKMS Windows Activator (XP-VISTA-7-8-10-2003--2012-2016)</a></li>
<li><a href="https://www.kmspico.info/">KMSPico - Official Site - Download KMSPico 11 Final [2021]</a></li>
<li><a href="https://microsofttoolkitofficial.info/">Microsoft Toolkit 2.6.8 Official Download - Windows & Office Activator 2021</a></li>
<li><a href="https://reloaderactivator.net/">ReLoader Activator 6.6 Download For Windows & Office [2021]</a></li>
<li><a href="https://support.microsoft.com/en-us/windows/activate-windows-10-c39005d4-ed02-b81e-a9ea-a9d88b5f1920">Activate Windows 10 - Microsoft Support</a></li>
</ul>
<h2>Conclusion</h2>

<p>In conclusion, EasyKMS Windows Activator is a tool that can activate any Windows version from XP to 10 and Server 2003 to 2016 without requiring a genuine product key. It uses KMS technology to emulate an activation server on your computer that answers your system's activation requests. It is easy to use and works for 180 days with automatic renewal. However, you should be careful when downloading EasyKMS Windows Activator from online sources, as some of them may contain malware or viruses; always use a trusted source and scan the file before running it. You should also weigh the drawbacks and alternatives described above, as the tool is neither legal nor safe to use in some cases. We hope this article has helped you understand what EasyKMS Windows Activator is, how it works, and how to use it.</p>
spaces/1line/AutoGPT/autogpt/speech/eleven_labs.py
DELETED
@@ -1,86 +0,0 @@
"""ElevenLabs speech module"""
import os

import requests
from playsound import playsound

from autogpt.config import Config
from autogpt.speech.base import VoiceBase

PLACEHOLDERS = {"your-voice-id"}


class ElevenLabsSpeech(VoiceBase):
    """ElevenLabs speech class"""

    def _setup(self) -> None:
        """Set up the voices, API key, etc.

        Returns:
            None: None
        """

        cfg = Config()
        default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
        voice_options = {
            "Rachel": "21m00Tcm4TlvDq8ikWAM",
            "Domi": "AZnzlk1XvdvUeBnXmlld",
            "Bella": "EXAVITQu4vr4xnSDxMaL",
            "Antoni": "ErXwobaYiN019PkySvjV",
            "Elli": "MF3mGyEYCl7XYWbV9V6O",
            "Josh": "TxGEqnHWrfWFTfGW9XjX",
            "Arnold": "VR6AewLTigWG4xSOukaG",
            "Adam": "pNInz6obpgDQGcFmaJgB",
            "Sam": "yoZ06aMxZJJ28mfd3POQ",
        }
        self._headers = {
            "Content-Type": "application/json",
            "xi-api-key": cfg.elevenlabs_api_key,
        }
        self._voices = default_voices.copy()
        # Map friendly voice names (e.g. "Rachel") to their ElevenLabs voice IDs.
        if cfg.elevenlabs_voice_1_id in voice_options:
            cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id]
        if cfg.elevenlabs_voice_2_id in voice_options:
            cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
        self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0)
        self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)

    def _use_custom_voice(self, voice, voice_index) -> None:
        """Use a custom voice if provided and not a placeholder

        Args:
            voice (str): The voice ID
            voice_index (int): The voice index

        Returns:
            None: None
        """
        # Placeholder values that should be treated as empty
        if voice and voice not in PLACEHOLDERS:
            self._voices[voice_index] = voice

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        """Speak text using elevenlabs.io's API

        Args:
            text (str): The text to speak
            voice_index (int, optional): The voice to use. Defaults to 0.

        Returns:
            bool: True if the request was successful, False otherwise
        """
        tts_url = (
            f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
        )
        response = requests.post(tts_url, headers=self._headers, json={"text": text})

        if response.status_code == 200:
            # Write the returned MPEG audio to a temp file, play it, then clean up.
            with open("speech.mpeg", "wb") as f:
                f.write(response.content)
            playsound("speech.mpeg", True)
            os.remove("speech.mpeg")
            return True
        else:
            print("Request failed with status code:", response.status_code)
            print("Response content:", response.content)
            return False
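A hypothetical usage sketch (not part of the deleted file): it assumes an ElevenLabs API key and voice IDs are already configured in AutoGPT's Config, and calls the internal _speech() entry point directly.

# Hypothetical driver for ElevenLabsSpeech; in AutoGPT itself the class is
# normally invoked through the speech subsystem rather than directly.
from autogpt.speech.eleven_labs import ElevenLabsSpeech

voice = ElevenLabsSpeech()
if voice._speech("Hello from AutoGPT", voice_index=0):
    print("Spoke successfully")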
spaces/1phancelerku/anime-remove-background/Download and install Go the ultimate guide to the open-source programming language.md
DELETED
@@ -1,154 +0,0 @@
<h1>Download Go By: A Guide to the Go Programming Language</h1>
<p>Are you interested in learning a new programming language that is simple, secure, and scalable? If so, you might want to download Go. Along the way, we will also look at "go by", a phrasal verb that has multiple meanings in English. In this article, we will explain what go by means in different contexts, and how you can download and learn the Go programming language.</p>
<h2>What is Go and why should you learn it?</h2>
<p>Go is an open-source programming language that was created by Google in 2009. It is designed to make it easy to build simple, reliable, and efficient software systems. Here are some of the reasons why you should learn Go:</p>
<h2>download go by</h2>
<p><b>DOWNLOAD: <a href="https://jinyurl.com/2uNUh7">https://jinyurl.com/2uNUh7</a></b></p>
<h3>Go is an open-source programming language supported by Google</h3>
<p>Go is free to use and modify, and it has a large and active community of developers who contribute to its development and improvement. You can find the source code, documentation, tutorials, and other resources on go.dev, the official website for Go. You can also access the Go module mirror and checksum database run by Google, which provide fast and secure downloads of modules from third-party sources.</p>
<h3>Go is easy to learn and great for teams</h3>
<p>Go has a simple and consistent syntax that makes it easy to read and write. It has only 25 keywords and a few basic data types, such as numbers, strings, booleans, arrays, slices, maps, and structs. It also supports common programming features such as functions, methods, interfaces, pointers, and error handling. Go has a built-in tool called gofmt that automatically formats your code according to a standard style guide, which helps to maintain code quality and consistency across teams.</p>
<h3>Go has built-in concurrency and a robust standard library</h3>
<p>Go supports concurrency, which means that it can run multiple tasks at the same time without blocking or waiting for each other. This makes it ideal for developing applications that need to handle high volumes of requests or data in parallel. Go uses a concept called goroutines, which are lightweight threads that can communicate with each other through channels. Go also has a rich and comprehensive standard library that provides packages for common tasks such as input/output, networking, cryptography, testing, debugging, and more.</p>
<h3>Go has a large ecosystem of partners, communities, and tools</h3>
<p>Go is used by many organizations in various industries, such as Google, Uber, Netflix, Dropbox, Docker, Kubernetes, and more. You can find many examples of how they use Go to power their software and services on go.dev/solutions. You can also join the global community of Go developers who share their knowledge and experience through blogs, podcasts, newsletters, forums, meetups, conferences, and more. You can find many resources to help you learn and improve your skills on go.dev/learn. Additionally, you can use many tools and frameworks that are compatible with Go, such as editors, IDEs, debuggers, testing tools, web frameworks, and databases.</p>
<h2>How to download and install Go on your computer</h2>
<p>If you want to start programming in Go, you need to download and install the Go tools on your computer. Here are the steps to do that:</p>
<h3>Download the official binary distributions from go.dev/dl</h3>
<p>Go to go.dev/dl and click on the download button for your operating system. You will get a file with a .msi extension for Windows, a .pkg extension for Mac, or a .tar.gz extension for Linux. Save the file to your preferred location.</p>
<h3>Follow the installation instructions for your operating system</h3>
<p>Depending on your operating system, you will need to follow different installation instructions. For Windows, open the .msi file and follow the prompts to install Go; by default, the installer will install Go to Program Files or Program Files (x86), and you can change the location as needed. For Mac, open the .pkg file and follow the prompts; by default, the installer will install Go to /usr/local/go. For Linux, extract the .tar.gz file into /usr/local, creating a Go tree in /usr/local/go. You can use the following command to do that: <code>rm -rf /usr/local/go && tar -C /usr/local -xzf go1.20.4.linux-amd64.tar.gz</code></p>
<h3>Verify that you've installed Go by typing go version in a command prompt</h3>
<p>To check that you've installed Go correctly, open a command prompt and type <code>go version</code>. This command should print the installed version of Go. If you see an error message or no output, it means that something went wrong with the installation. You may need to check your environment variables or reinstall Go.</p>
<h2>How to write and run your first Go program</h2>
<p>Now that you have Go installed on your computer, you can write and run your first Go program. Here are the steps to do that:</p>
<h3>Create a file named hello.go with a simple Hello World program</h3>
<p>In your text editor, create a file named hello.go in which to write your code. Paste the following code into your hello.go file and save the file.</p>
<code>
package main

import "fmt"

func main() {
    fmt.Println("Hello, World!")
}
</code>
<p>This is your Go code. In this code, you:</p>
<ul>
<li>Declare a main package (a package is a way to group functions, and it's made up of all the files in the same directory).</li>
<li>Import the popular fmt package, which contains functions for formatting text, including printing to the console. This package is one of the standard library packages you got when you installed Go.</li>
<li>Implement a main function to print a message to the console. The main function is the entry point of any executable program in Go.</li>
</ul>
<h3>Use the go run command to compile and execute your program</h3>
<p>To run your program, use the go run command, which compiles and executes your code in one step. Open a command prompt, navigate to the location of your hello.go file, and type <code>go run hello.go</code>. This command should print Hello, World! in the command prompt.</p>
<h3>Use the go build command to create an executable file from your program</h3>
<p>If you want to create an executable file from your program, use the go build command, which compiles your code and produces a binary file that you can run without using go run. Type <code>go build hello.go</code>; this creates a file named hello.exe on Windows, or hello on Mac and Linux. You can run this file by typing its name in the command prompt: <code>./hello</code>. This command should also print Hello, World! in the command prompt.</p>
<h2>How to use go by as a phrasal verb in English</h2>
<p>Besides containing the name of a programming language, go by is also a phrasal verb in English that has multiple meanings depending on how it is used. Here are some of the common meanings of go by:</p>
<h3>Go by can mean to move past, in space or time</h3>
<p>You can use go by to describe something or someone moving past another thing or person, either physically or temporally. For example:</p>
<ul>
<li>I saw her car going by my house this morning.</li>
<li>The bus goes by every 15 minutes.</li>
<li>The years went by so fast.</li>
</ul>
<h3>Go by can mean to follow or use information provided by something or someone</h3>
<p>You can use go by to describe something or someone following or using information provided by another thing or person, such as a rule, a law, a sign, a clock, a name, etc. For example:</p>
<ul>
<li>You have to go by the speed limit or you'll get a ticket.</li>
<li>I go by what the doctor tells me.</li>
<li>Go by the directions on the map and you'll find the place.</li>
<li>I don't go by the clock, I go by the sun.</li>
<li>He goes by John, but his real name is Jonathan.</li>
</ul>
<h3>Go by can mean to be known or called by a particular name</h3>
<p>You can use go by to describe something or someone being known or called by a particular name, such as a nickname, a pseudonym, an alias, etc. For example:</p>
<ul>
<li>She goes by the name of Lady Gaga.</li>
<li>He goes by the initials J.K. Rowling.</li>
<li>They go by the code name of Alpha Team.</li>
</ul>
<h2>Conclusion and FAQs</h2>
<p>In this article, we have learned how to download Go and what go by, a phrasal verb with multiple meanings in English, can express. We have seen what Go is and why you should learn it, how to download and install Go on your computer, how to write and run your first Go program, and how to use go by in different contexts. We hope that this article has helped you to understand and appreciate the power and simplicity of Go, and that you are ready to start your journey as a Go developer. Here are some frequently asked questions about Go and go by:</p>
<h3>What are some of the advantages of Go over other programming languages?</h3>
<p>Some of the advantages of Go over other programming languages are:</p>
<ul>
<li>It is fast and efficient, both in terms of compilation and execution.</li>
<li>It is cross-platform and portable, meaning that it can run on different operating systems and architectures without much modification.</li>
<li>It is safe and secure, meaning that it has features such as garbage collection, memory management, type safety, and error handling that prevent common errors and vulnerabilities.</li>
<li>It is expressive and concise, meaning that it has a clear and simple syntax that allows you to write less code with more meaning.</li>
</ul>
<h3>What are some of the challenges or drawbacks of Go?</h3>
<p>Some of the challenges or drawbacks of Go are:</p>
<ul>
<li>It is relatively young and evolving, meaning that it may not have all the features or libraries that other languages have.</li>
<li>It is opinionated and strict, meaning that it may not suit everyone's preferences or styles of programming.</li>
<li>It is not very compatible with other languages, meaning that it may not be easy to integrate with existing code bases or frameworks written in other languages.</li>
</ul>
<h3>What are some of the best resources to learn Go?</h3>
<p>Some of the best resources to learn Go are:</p>
<ul>
<li>The official website for Go: go.dev, where you can find documentation, tutorials, solutions, modules, events, blogs, podcasts, newsletters, forums, meetups, conferences, and more.</li>
<li>The official tour of Go: tour.golang.org, where you can learn the basics of Go through interactive exercises and examples.</li>
<li>The official blog for Go: blog.golang.org, where you can read articles about Go news, updates, features, tips, tricks, best practices, case studies, etc.</li>
<li>The official YouTube channel for Go: youtube.com/golang, where you can watch videos about Go talks, tutorials, demos, interviews, etc.</li>
</ul>
<h3>What are some of the common uses of go by in English?</h3>
<p>Some of the common uses of go by in English are:</p>
<table border="1">
<tr><th>Meaning</th><th>Example</th></tr>
<tr><td>To move past something or someone</td><td>The train goes by every hour.</td></tr>
<tr><td>To follow or use information provided by something or someone</td><td>I go by what my teacher says.</td></tr>
<tr><td>To be known or called by a particular name</td><td>She goes by Lizzy.</td></tr>
</table>
<h3>How can I practice using go by in English?</h3>
<p>You can practice using go by in English by making sentences with different meanings and contexts of the phrasal verb. You can also try to use go by in different tenses and forms, such as past, present, future, continuous, and perfect. For example:</p>
<ul>
<li>I went by the library on my way home.</li>
<li>He is going by the instructions in the manual.</li>
<li>She will go by her maiden name after the divorce.</li>
<li>We are going by the bus stop right now.</li>
<li>They have gone by the rules all their lives.</li>
</ul>
<p>I hope you enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy coding!</p>
spaces/2023Liu2023/bingo/src/components/ui/textarea.tsx
DELETED
@@ -1,24 +0,0 @@
import * as React from 'react'

import { cn } from '@/lib/utils'

export interface TextareaProps
  extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}

const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
  ({ className, ...props }, ref) => {
    return (
      <textarea
        className={cn(
          'flex min-h-[80px] w-full rounded-md border border-input bg-transparent px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50',
          className
        )}
        ref={ref}
        {...props}
      />
    )
  }
)
Textarea.displayName = 'Textarea'

export { Textarea }
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Chatgpt4Online.py
DELETED
@@ -1,39 +0,0 @@
from __future__ import annotations

import json
from aiohttp import ClientSession

from ..typing import AsyncGenerator
from .base_provider import AsyncGeneratorProvider


class Chatgpt4Online(AsyncGeneratorProvider):
    url = "https://chatgpt4online.org"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        async with ClientSession() as session:
            data = {
                "botId": "default",
                "customId": None,
                "session": "N/A",
                "chatId": "",
                "contextId": 58,
                "messages": messages,
                "newMessage": messages[-1]["content"],
                "stream": True
            }
            async with session.post(cls.url + "/wp-json/mwai-ui/v1/chats/submit", json=data) as response:
                response.raise_for_status()
                # The endpoint streams server-sent events; yield only "live" chunks.
                async for line in response.content:
                    if line.startswith(b"data: "):
                        line = json.loads(line[6:])
                        if line["type"] == "live":
                            yield line["data"]
spaces/AchyuthGamer/OpenGPT/g4f/Provider/OpenaiChat.py
DELETED
@@ -1,88 +0,0 @@
from __future__ import annotations

from curl_cffi.requests import AsyncSession
import uuid
import json

from .base_provider import AsyncProvider, get_cookies, format_prompt
from ..typing import AsyncGenerator


class OpenaiChat(AsyncProvider):
    url = "https://chat.openai.com"
    needs_auth = True
    working = True
    supports_gpt_35_turbo = True
    _access_token = None

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        access_token: str = None,
        cookies: dict = None,
        **kwargs: dict
    ) -> AsyncGenerator:
        proxies = {"https": proxy}
        if not access_token:
            access_token = await cls.get_access_token(cookies, proxies)
        headers = {
            "Accept": "text/event-stream",
            "Authorization": f"Bearer {access_token}",
        }
        async with AsyncSession(proxies=proxies, headers=headers, impersonate="chrome107") as session:
            messages = [
                {
                    "id": str(uuid.uuid4()),
                    "author": {"role": "user"},
                    "content": {"content_type": "text", "parts": [format_prompt(messages)]},
                },
            ]
            data = {
                "action": "next",
                "messages": messages,
                "conversation_id": None,
                "parent_message_id": str(uuid.uuid4()),
                "model": "text-davinci-002-render-sha",
                "history_and_training_disabled": True,
            }
            response = await session.post("https://chat.openai.com/backend-api/conversation", json=data)
            response.raise_for_status()
            last_message = None
            # Parse the SSE stream and keep the most recent message payload.
            for line in response.content.decode().splitlines():
                if line.startswith("data: "):
                    line = line[6:]
                    if line == "[DONE]":
                        break
                    line = json.loads(line)
                    if "message" in line:
                        last_message = line["message"]["content"]["parts"][0]
            return last_message

    @classmethod
    async def get_access_token(cls, cookies: dict = None, proxies: dict = None) -> str:
        if not cls._access_token:
            cookies = cookies if cookies else get_cookies("chat.openai.com")
            async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session:
                response = await session.get("https://chat.openai.com/api/auth/session")
                response.raise_for_status()
                cls._access_token = response.json()["accessToken"]
        return cls._access_token

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("access_token", "str"),
            ("cookies", "dict[str, str]")
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/AItianhu.py
DELETED
@@ -1,77 +0,0 @@
from __future__ import annotations

import json

from ..typing import AsyncGenerator
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies


class AItianhu(AsyncGeneratorProvider):
    url = "https://www.aitianhu.com"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        proxy: str = None,
        cookies: dict = None,
        timeout: int = 30,
        **kwargs
    ) -> AsyncGenerator:
        if not cookies:
            cookies = get_cookies("www.aitianhu.com")
        data = {
            "prompt": format_prompt(messages),
            "options": {},
            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
            "temperature": 0.8,
            "top_p": 1,
            **kwargs
        }
        headers = {
            "Authority": cls.url,
            "Accept": "application/json, text/plain, */*",
            "Origin": cls.url,
            "Referer": f"{cls.url}/"
        }
        async with StreamSession(
            headers=headers,
            cookies=cookies,
            timeout=timeout,
            proxies={"https": proxy},
            impersonate="chrome107",
            verify=False
        ) as session:
            async with session.post(f"{cls.url}/api/chat-process", json=data) as response:
                response.raise_for_status()
                async for line in response.iter_lines():
                    # The site returns an HTML challenge page instead of JSON
                    # when its anti-bot protection triggers.
                    if line == b"<script>":
                        raise RuntimeError("Solve challenge and pass cookies")
                    if b"platform's risk control" in line:
                        raise RuntimeError("Platform's Risk Control")
                    line = json.loads(line)
                    if "detail" in line:
                        content = line["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    else:
                        raise RuntimeError(f"Response: {line}")

    @classmethod
    @property
    def params(cls):
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("proxy", "str"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/classroom.py
DELETED
@@ -1,40 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any, List
from string import Template

from . import describer_registry as DescriberRegistry
from .basic import BasicDescriber

if TYPE_CHECKING:
    from agentverse.environments import BaseEnvironment


@DescriberRegistry.register("classroom")
class ClassroomDescriber(BasicDescriber):
    start_prompt: str
    end_prompt: str

    def get_env_description(self, environment: BaseEnvironment) -> List[str]:
        if not environment.rule_params.get("is_grouped", False):
            if environment.rule_params.get("is_grouped_ended", False):
                # If the group discussion has just ended
                environment.rule_params["is_grouped_ended"] = False
                return [self.end_prompt for _ in range(len(environment.agents))]
            else:
                return super().get_env_description(environment)
        description = []
        for i, agent in enumerate(environment.agents):
            if i == 0:
                # The professor does not participate in group discussion
                description.append("")
            else:
                description.append(
                    Template(self.start_prompt).safe_substitute(
                        {"receiver_name": ", ".join(agent.receiver)}
                    )
                )
        return description

    def reset(self) -> None:
        pass
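A hypothetical sketch (not part of the deleted file) of how the same registry pattern could be used to plug in another describer; the "lab" name and prompt strings are invented for illustration.

# Hypothetical: register a custom describer under a new key.
@DescriberRegistry.register("lab")
class LabDescriber(BasicDescriber):
    start_prompt: str = "You are now talking to $receiver_name."
    end_prompt: str = "The lab session has ended."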
spaces/AlexMo/audio_summarizer/app.py
DELETED
@@ -1,91 +0,0 @@
import whisper
from pytube import YouTube
from transformers import pipeline
import gradio as gr
import os
import re

# model = whisper.load_model("base")
# model = pipeline(model="AlexMo/FIFA_WC22_WINNER_LANGUAGE_MODEL")
model = pipeline(model="AlexMo/improved_whisper_model")
summarizer = pipeline("summarization")


def transcribe_inp(microphone, file_upload):
    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "NOTE: The audio file will be discarded after this run.\n"
        )
    elif (microphone is None) and (file_upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    file = microphone if microphone is not None else file_upload
    text = model(file, batch_size=1024)["text"]
    return warn_output + text


def getAudio(url):
    # Download the audio track of a YouTube video and save it as an .mp3 file.
    link = YouTube(url)
    video = link.streams.filter(only_audio=True).first()
    file = video.download(output_path=".")
    base, ext = os.path.splitext(file)
    file_ext = base + '.mp3'
    os.rename(file, file_ext)
    return file_ext


def getText(url):
    if url != '':
        res = model(getAudio(url))
        return res['text'].strip()


def getSummary(article):
    # header = ' '.join(re.split(r'(?<=[.:;])\s', article)[:5])
    b = summarizer(article, min_length=15, max_length=120, do_sample=False)
    b = b[0]['summary_text'].replace(' .', '.').strip()
    return b


with gr.Blocks() as demo:
    gr.HTML(
        """
        <div style="text-align: center; max-width: 500px; margin: 0 auto;">
          <div>
            <h1>Dutch whisperer</h1>
          </div>
          <p style="margin-bottom: 10px; font-size: 94%">
            Summarize audio files, mic input or Youtube videos using OpenAI's Whisper
          </p>
        </div>
        """
    )
    with gr.Tab('Get a summary from your own mic or audio file'):
        input_audio = [
            gr.inputs.Audio(source="microphone", type="filepath", optional=True),
            gr.inputs.Audio(source="upload", type="filepath", optional=True),
        ]
        result_button_transcribe_audio = gr.Button('1. Transcribe')
        output_text_transcribe_audio = gr.Textbox(placeholder='Transcript of the audio file.', label='Transcript')

        result_button_summary_audio = gr.Button('2. Get a summary')
        output_text_summary_audio = gr.Textbox(placeholder='Summary of the audio file.', label='Summary')

        result_button_transcribe_audio.click(transcribe_inp, inputs=input_audio, outputs=output_text_transcribe_audio)
        result_button_summary_audio.click(getSummary, inputs=output_text_transcribe_audio, outputs=output_text_summary_audio)
    with gr.Tab('Summary of Youtube video'):
        input_text_url = gr.Textbox(placeholder='Youtube video URL', label='URL')
        result_button_transcribe = gr.Button('1. Transcribe')
        output_text_transcribe = gr.Textbox(placeholder='Transcript of the YouTube video.', label='Transcript')

        result_button_summary = gr.Button('2. Create Summary')
        output_text_summary = gr.Textbox(placeholder='Summary of the YouTube video transcript.', label='Summary')

        result_button_transcribe.click(getText, inputs=input_text_url, outputs=output_text_transcribe)
        result_button_summary.click(getSummary, inputs=output_text_transcribe, outputs=output_text_summary)

demo.launch(debug=True)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_music_spectrogram_to_diffusers.py
DELETED
@@ -1,213 +0,0 @@
|
|
1 |
-
#!/usr/bin/env python3
|
2 |
-
import argparse
|
3 |
-
import os
|
4 |
-
|
5 |
-
import jax as jnp
|
6 |
-
import numpy as onp
|
7 |
-
import torch
|
8 |
-
import torch.nn as nn
|
9 |
-
from music_spectrogram_diffusion import inference
|
10 |
-
from t5x import checkpoints
|
11 |
-
|
12 |
-
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
|
13 |
-
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
|
14 |
-
|
15 |
-
|
16 |
-
MODEL = "base_with_context"
|
17 |
-
|
18 |
-
|
19 |
-
def load_notes_encoder(weights, model):
|
20 |
-
model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
|
21 |
-
model.position_encoding.weight = nn.Parameter(
|
22 |
-
torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
|
23 |
-
)
|
24 |
-
for lyr_num, lyr in enumerate(model.encoders):
|
25 |
-
ly_weight = weights[f"layers_{lyr_num}"]
|
26 |
-
lyr.layer[0].layer_norm.weight = nn.Parameter(
|
27 |
-
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
|
28 |
-
)
|
29 |
-
|
30 |
-
attention_weights = ly_weight["attention"]
|
31 |
-
lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
|
32 |
-
lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
|
33 |
-
lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
|
34 |
-
lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
|
35 |
-
|
36 |
-
lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
|
37 |
-
|
38 |
-
lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
|
39 |
-
lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
|
40 |
-
lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
|
41 |
-
|
42 |
-
model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
|
43 |
-
return model
|
44 |
-
|
45 |
-
|
46 |
-
def load_continuous_encoder(weights, model):
|
47 |
-
model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
|
48 |
-
|
49 |
-
model.position_encoding.weight = nn.Parameter(
|
50 |
-
torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
|
51 |
-
)
|
52 |
-
|
53 |
-
for lyr_num, lyr in enumerate(model.encoders):
|
54 |
-
ly_weight = weights[f"layers_{lyr_num}"]
|
55 |
-
attention_weights = ly_weight["attention"]
|
56 |
-
|
57 |
-
lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
|
58 |
-
lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
|
59 |
-
lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
|
60 |
-
lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
|
61 |
-
lyr.layer[0].layer_norm.weight = nn.Parameter(
|
62 |
-
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
|
63 |
-
)
|
64 |
-
|
65 |
-
lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
|
66 |
-
lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
|
67 |
-
lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
|
68 |
-
lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
|
69 |
-
|
70 |
-
model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
|
71 |
-
|
72 |
-
return model
|
73 |
-
|
74 |
-
|
75 |
-
def load_decoder(weights, model):
|
76 |
-
model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
|
77 |
-
model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
|
78 |
-
|
79 |
-
model.position_encoding.weight = nn.Parameter(
|
80 |
-
torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
|
81 |
-
)
|
82 |
-
|
83 |
-
model.continuous_inputs_projection.weight = nn.Parameter(
|
84 |
-
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
|
85 |
-
)
|
86 |
-
|
87 |
-
for lyr_num, lyr in enumerate(model.decoders):
|
88 |
-
ly_weight = weights[f"layers_{lyr_num}"]
|
89 |
-
lyr.layer[0].layer_norm.weight = nn.Parameter(
|
90 |
-
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
|
91 |
-
)
|
92 |
-
|
93 |
-
lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
|
94 |
-
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
|
95 |
-
)
|
96 |
-
|
97 |
-
attention_weights = ly_weight["self_attention"]
|
98 |
-
lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
|
99 |
-
lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
|
100 |
-
lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
|
101 |
-
lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
|
102 |
-
|
103 |
-
attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
|
104 |
-
lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
|
105 |
-
lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
|
106 |
-
lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
|
107 |
-
lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
|
108 |
-
lyr.layer[1].layer_norm.weight = nn.Parameter(
|
109 |
-
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
|
110 |
-
)
|
111 |
-
|
112 |
-
lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
|
113 |
-
lyr.layer[2].film.scale_bias.weight = nn.Parameter(
|
114 |
-
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
|
115 |
-
)
|
116 |
-
lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
|
117 |
-
lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
|
118 |
-
lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
|
119 |
-
|
120 |
-
model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
|
121 |
-
|
122 |
-
model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
|
123 |
-
|
124 |
-
return model
|
125 |
-
|
126 |
-
|
127 |
-
def main(args):
|
128 |
-
t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
|
129 |
-
t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)
|
130 |
-
|
131 |
-
gin_overrides = [
|
132 |
-
"from __gin__ import dynamic_registration",
|
133 |
-
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
|
134 |
-
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
|
135 |
-
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
|
136 |
-
]
|
137 |
-
|
138 |
-
gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
|
139 |
-
gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
|
140 |
-
synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
|
141 |
-
|
142 |
-
scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
|
143 |
-
|
144 |
-
notes_encoder = SpectrogramNotesEncoder(
|
145 |
-
max_length=synth_model.sequence_length["inputs"],
|
146 |
-
vocab_size=synth_model.model.module.config.vocab_size,
|
147 |
-
d_model=synth_model.model.module.config.emb_dim,
|
148 |
-
dropout_rate=synth_model.model.module.config.dropout_rate,
|
149 |
-
num_layers=synth_model.model.module.config.num_encoder_layers,
|
150 |
-
num_heads=synth_model.model.module.config.num_heads,
|
151 |
-
d_kv=synth_model.model.module.config.head_dim,
|
152 |
-
d_ff=synth_model.model.module.config.mlp_dim,
|
153 |
-
feed_forward_proj="gated-gelu",
|
154 |
-
)
|
155 |
-
|
156 |
-
continuous_encoder = SpectrogramContEncoder(
|
157 |
-
input_dims=synth_model.audio_codec.n_dims,
|
158 |
-
targets_context_length=synth_model.sequence_length["targets_context"],
|
159 |
-
d_model=synth_model.model.module.config.emb_dim,
|
160 |
-
dropout_rate=synth_model.model.module.config.dropout_rate,
|
161 |
-
num_layers=synth_model.model.module.config.num_encoder_layers,
|
162 |
-
num_heads=synth_model.model.module.config.num_heads,
|
163 |
-
d_kv=synth_model.model.module.config.head_dim,
|
164 |
-
d_ff=synth_model.model.module.config.mlp_dim,
|
165 |
-
feed_forward_proj="gated-gelu",
|
166 |
-
)
|
167 |
-
|
168 |
-
decoder = T5FilmDecoder(
|
169 |
-
input_dims=synth_model.audio_codec.n_dims,
|
170 |
-
targets_length=synth_model.sequence_length["targets_context"],
|
171 |
-
max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
|
172 |
-
d_model=synth_model.model.module.config.emb_dim,
|
173 |
-
num_layers=synth_model.model.module.config.num_decoder_layers,
|
174 |
-
num_heads=synth_model.model.module.config.num_heads,
|
175 |
-
d_kv=synth_model.model.module.config.head_dim,
|
176 |
-
d_ff=synth_model.model.module.config.mlp_dim,
|
177 |
-
dropout_rate=synth_model.model.module.config.dropout_rate,
|
178 |
-
)
|
179 |
-
|
180 |
-
notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
|
181 |
-
continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
|
182 |
-
decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)
|
183 |
-
|
184 |
-
melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
|
185 |
-
|
186 |
-
pipe = SpectrogramDiffusionPipeline(
|
187 |
-
notes_encoder=notes_encoder,
|
188 |
-
continuous_encoder=continuous_encoder,
|
189 |
-
decoder=decoder,
|
190 |
-
scheduler=scheduler,
|
191 |
-
melgan=melgan,
|
192 |
-
)
|
193 |
-
if args.save:
|
194 |
-
pipe.save_pretrained(args.output_path)
|
195 |
-
|
196 |
-
|
197 |
-
if __name__ == "__main__":
|
198 |
-
parser = argparse.ArgumentParser()
|
199 |
-
|
200 |
-
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
|
201 |
-
parser.add_argument(
|
202 |
-
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
|
203 |
-
)
|
204 |
-
parser.add_argument(
|
205 |
-
"--checkpoint_path",
|
206 |
-
default=f"{MODEL}/checkpoint_500000",
|
207 |
-
type=str,
|
208 |
-
required=False,
|
209 |
-
help="Path to the original jax model checkpoint.",
|
210 |
-
)
|
211 |
-
args = parser.parse_args()
|
212 |
-
|
213 |
-
main(args)
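Once converted, the saved pipeline can be reloaded through the standard diffusers loading API. A minimal sketch, assuming the model was written to ./converted (a placeholder for --output_path above) and that the installed diffusers version still ships SpectrogramDiffusionPipeline:

# Usage sketch only; "./converted" is an assumed stand-in for --output_path.
from diffusers import SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("./converted")
# The components registered by the constructor above should round-trip intact.
print(sorted(pipe.components))  # continuous_encoder, decoder, melgan, notes_encoder, scheduler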
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/utils/weight_init.py
DELETED
@@ -1,684 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import copy
-import math
-import warnings
-
-import numpy as np
-import torch
-import torch.nn as nn
-from torch import Tensor
-
-from annotator.uniformer.mmcv.utils import Registry, build_from_cfg, get_logger, print_log
-
-INITIALIZERS = Registry('initializer')
-
-
-def update_init_info(module, init_info):
-    """Update the `_params_init_info` in the module if the value of parameters
-    are changed.
-
-    Args:
-        module (obj:`nn.Module`): The module of PyTorch with a user-defined
-            attribute `_params_init_info` which records the initialization
-            information.
-        init_info (str): The string that describes the initialization.
-    """
-    assert hasattr(
-        module,
-        '_params_init_info'), f'Can not find `_params_init_info` in {module}'
-    for name, param in module.named_parameters():
-
-        assert param in module._params_init_info, (
-            f'Find a new :obj:`Parameter` '
-            f'named `{name}` during executing the '
-            f'`init_weights` of '
-            f'`{module.__class__.__name__}`. '
-            f'Please do not add or '
-            f'replace parameters during executing '
-            f'the `init_weights`. ')
-
-        # The parameter has been changed during executing the
-        # `init_weights` of module
-        mean_value = param.data.mean()
-        if module._params_init_info[param]['tmp_mean_value'] != mean_value:
-            module._params_init_info[param]['init_info'] = init_info
-            module._params_init_info[param]['tmp_mean_value'] = mean_value
-
-
-def constant_init(module, val, bias=0):
-    if hasattr(module, 'weight') and module.weight is not None:
-        nn.init.constant_(module.weight, val)
-    if hasattr(module, 'bias') and module.bias is not None:
-        nn.init.constant_(module.bias, bias)
-
-
-def xavier_init(module, gain=1, bias=0, distribution='normal'):
-    assert distribution in ['uniform', 'normal']
-    if hasattr(module, 'weight') and module.weight is not None:
-        if distribution == 'uniform':
-            nn.init.xavier_uniform_(module.weight, gain=gain)
-        else:
-            nn.init.xavier_normal_(module.weight, gain=gain)
-    if hasattr(module, 'bias') and module.bias is not None:
-        nn.init.constant_(module.bias, bias)
-
-
-def normal_init(module, mean=0, std=1, bias=0):
-    if hasattr(module, 'weight') and module.weight is not None:
-        nn.init.normal_(module.weight, mean, std)
-    if hasattr(module, 'bias') and module.bias is not None:
-        nn.init.constant_(module.bias, bias)
-
-
-def trunc_normal_init(module: nn.Module,
-                      mean: float = 0,
-                      std: float = 1,
-                      a: float = -2,
-                      b: float = 2,
-                      bias: float = 0) -> None:
-    if hasattr(module, 'weight') and module.weight is not None:
-        trunc_normal_(module.weight, mean, std, a, b)  # type: ignore
-    if hasattr(module, 'bias') and module.bias is not None:
-        nn.init.constant_(module.bias, bias)  # type: ignore
-
-
-def uniform_init(module, a=0, b=1, bias=0):
-    if hasattr(module, 'weight') and module.weight is not None:
-        nn.init.uniform_(module.weight, a, b)
-    if hasattr(module, 'bias') and module.bias is not None:
-        nn.init.constant_(module.bias, bias)
-
-
-def kaiming_init(module,
-                 a=0,
-                 mode='fan_out',
-                 nonlinearity='relu',
-                 bias=0,
-                 distribution='normal'):
-    assert distribution in ['uniform', 'normal']
-    if hasattr(module, 'weight') and module.weight is not None:
-        if distribution == 'uniform':
-            nn.init.kaiming_uniform_(
-                module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
-        else:
-            nn.init.kaiming_normal_(
-                module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
-    if hasattr(module, 'bias') and module.bias is not None:
-        nn.init.constant_(module.bias, bias)
-
-
-def caffe2_xavier_init(module, bias=0):
-    # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
-    # Acknowledgment to FAIR's internal code
-    kaiming_init(
-        module,
-        a=1,
-        mode='fan_in',
-        nonlinearity='leaky_relu',
-        bias=bias,
-        distribution='uniform')
-
-
-def bias_init_with_prob(prior_prob):
-    """initialize conv/fc bias value according to a given probability value."""
-    bias_init = float(-np.log((1 - prior_prob) / prior_prob))
-    return bias_init
-
-
-def _get_bases_name(m):
-    return [b.__name__ for b in m.__class__.__bases__]
-
-
-class BaseInit(object):
-
-    def __init__(self, *, bias=0, bias_prob=None, layer=None):
-        self.wholemodule = False
-        if not isinstance(bias, (int, float)):
-            raise TypeError(f'bias must be a number, but got a {type(bias)}')
-
-        if bias_prob is not None:
-            if not isinstance(bias_prob, float):
-                raise TypeError(f'bias_prob type must be float, \
-                    but got {type(bias_prob)}')
-
-        if layer is not None:
-            if not isinstance(layer, (str, list)):
-                raise TypeError(f'layer must be a str or a list of str, \
-                    but got a {type(layer)}')
-        else:
-            layer = []
-
-        if bias_prob is not None:
-            self.bias = bias_init_with_prob(bias_prob)
-        else:
-            self.bias = bias
-        self.layer = [layer] if isinstance(layer, str) else layer
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='Constant')
-class ConstantInit(BaseInit):
-    """Initialize module parameters with constant values.
-
-    Args:
-        val (int | float): the value to fill the weights in the module with
-        bias (int | float): the value to fill the bias. Defaults to 0.
-        bias_prob (float, optional): the probability for bias initialization.
-            Defaults to None.
-        layer (str | list[str], optional): the layer will be initialized.
-            Defaults to None.
-    """
-
-    def __init__(self, val, **kwargs):
-        super().__init__(**kwargs)
-        self.val = val
-
-    def __call__(self, module):
-
-        def init(m):
-            if self.wholemodule:
-                constant_init(m, self.val, self.bias)
-            else:
-                layername = m.__class__.__name__
-                basesname = _get_bases_name(m)
-                if len(set(self.layer) & set([layername] + basesname)):
-                    constant_init(m, self.val, self.bias)
-
-        module.apply(init)
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='Xavier')
-class XavierInit(BaseInit):
-    r"""Initialize module parameters with values according to the method
-    described in `Understanding the difficulty of training deep feedforward
-    neural networks - Glorot, X. & Bengio, Y. (2010).
-    <http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf>`_
-
-    Args:
-        gain (int | float): an optional scaling factor. Defaults to 1.
-        bias (int | float): the value to fill the bias. Defaults to 0.
-        bias_prob (float, optional): the probability for bias initialization.
-            Defaults to None.
-        distribution (str): distribution either be ``'normal'``
-            or ``'uniform'``. Defaults to ``'normal'``.
-        layer (str | list[str], optional): the layer will be initialized.
-            Defaults to None.
-    """
-
-    def __init__(self, gain=1, distribution='normal', **kwargs):
-        super().__init__(**kwargs)
-        self.gain = gain
-        self.distribution = distribution
-
-    def __call__(self, module):
-
-        def init(m):
-            if self.wholemodule:
-                xavier_init(m, self.gain, self.bias, self.distribution)
-            else:
-                layername = m.__class__.__name__
-                basesname = _get_bases_name(m)
-                if len(set(self.layer) & set([layername] + basesname)):
-                    xavier_init(m, self.gain, self.bias, self.distribution)
-
-        module.apply(init)
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: gain={self.gain}, ' \
-               f'distribution={self.distribution}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='Normal')
-class NormalInit(BaseInit):
-    r"""Initialize module parameters with the values drawn from the normal
-    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
-
-    Args:
-        mean (int | float):the mean of the normal distribution. Defaults to 0.
-        std (int | float): the standard deviation of the normal distribution.
-            Defaults to 1.
-        bias (int | float): the value to fill the bias. Defaults to 0.
-        bias_prob (float, optional): the probability for bias initialization.
-            Defaults to None.
-        layer (str | list[str], optional): the layer will be initialized.
-            Defaults to None.
-
-    """
-
-    def __init__(self, mean=0, std=1, **kwargs):
-        super().__init__(**kwargs)
-        self.mean = mean
-        self.std = std
-
-    def __call__(self, module):
-
-        def init(m):
-            if self.wholemodule:
-                normal_init(m, self.mean, self.std, self.bias)
-            else:
-                layername = m.__class__.__name__
-                basesname = _get_bases_name(m)
-                if len(set(self.layer) & set([layername] + basesname)):
-                    normal_init(m, self.mean, self.std, self.bias)
-
-        module.apply(init)
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: mean={self.mean},' \
-               f' std={self.std}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='TruncNormal')
-class TruncNormalInit(BaseInit):
-    r"""Initialize module parameters with the values drawn from the normal
-    distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values
-    outside :math:`[a, b]`.
-
-    Args:
-        mean (float): the mean of the normal distribution. Defaults to 0.
-        std (float): the standard deviation of the normal distribution.
-            Defaults to 1.
-        a (float): The minimum cutoff value.
-        b ( float): The maximum cutoff value.
-        bias (float): the value to fill the bias. Defaults to 0.
-        bias_prob (float, optional): the probability for bias initialization.
-            Defaults to None.
-        layer (str | list[str], optional): the layer will be initialized.
-            Defaults to None.
-
-    """
-
-    def __init__(self,
-                 mean: float = 0,
-                 std: float = 1,
-                 a: float = -2,
-                 b: float = 2,
-                 **kwargs) -> None:
-        super().__init__(**kwargs)
-        self.mean = mean
-        self.std = std
-        self.a = a
-        self.b = b
-
-    def __call__(self, module: nn.Module) -> None:
-
-        def init(m):
-            if self.wholemodule:
-                trunc_normal_init(m, self.mean, self.std, self.a, self.b,
-                                  self.bias)
-            else:
-                layername = m.__class__.__name__
-                basesname = _get_bases_name(m)
-                if len(set(self.layer) & set([layername] + basesname)):
-                    trunc_normal_init(m, self.mean, self.std, self.a, self.b,
-                                      self.bias)
-
-        module.apply(init)
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \
-               f' mean={self.mean}, std={self.std}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='Uniform')
-class UniformInit(BaseInit):
-    r"""Initialize module parameters with values drawn from the uniform
-    distribution :math:`\mathcal{U}(a, b)`.
-
-    Args:
-        a (int | float): the lower bound of the uniform distribution.
-            Defaults to 0.
-        b (int | float): the upper bound of the uniform distribution.
-            Defaults to 1.
-        bias (int | float): the value to fill the bias. Defaults to 0.
-        bias_prob (float, optional): the probability for bias initialization.
-            Defaults to None.
-        layer (str | list[str], optional): the layer will be initialized.
-            Defaults to None.
-    """
-
-    def __init__(self, a=0, b=1, **kwargs):
-        super().__init__(**kwargs)
-        self.a = a
-        self.b = b
-
-    def __call__(self, module):
-
-        def init(m):
-            if self.wholemodule:
-                uniform_init(m, self.a, self.b, self.bias)
-            else:
-                layername = m.__class__.__name__
-                basesname = _get_bases_name(m)
-                if len(set(self.layer) & set([layername] + basesname)):
-                    uniform_init(m, self.a, self.b, self.bias)
-
-        module.apply(init)
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: a={self.a},' \
-               f' b={self.b}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='Kaiming')
-class KaimingInit(BaseInit):
-    r"""Initialize module parameters with the values according to the method
-    described in `Delving deep into rectifiers: Surpassing human-level
-    performance on ImageNet classification - He, K. et al. (2015).
-    <https://www.cv-foundation.org/openaccess/content_iccv_2015/
-    papers/He_Delving_Deep_into_ICCV_2015_paper.pdf>`_
-
-    Args:
-        a (int | float): the negative slope of the rectifier used after this
-            layer (only used with ``'leaky_relu'``). Defaults to 0.
-        mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
-            ``'fan_in'`` preserves the magnitude of the variance of the weights
-            in the forward pass. Choosing ``'fan_out'`` preserves the
-            magnitudes in the backwards pass. Defaults to ``'fan_out'``.
-        nonlinearity (str): the non-linear function (`nn.functional` name),
-            recommended to use only with ``'relu'`` or ``'leaky_relu'`` .
-            Defaults to 'relu'.
-        bias (int | float): the value to fill the bias. Defaults to 0.
-        bias_prob (float, optional): the probability for bias initialization.
-            Defaults to None.
-        distribution (str): distribution either be ``'normal'`` or
-            ``'uniform'``. Defaults to ``'normal'``.
-        layer (str | list[str], optional): the layer will be initialized.
-            Defaults to None.
-    """
-
-    def __init__(self,
-                 a=0,
-                 mode='fan_out',
-                 nonlinearity='relu',
-                 distribution='normal',
-                 **kwargs):
-        super().__init__(**kwargs)
-        self.a = a
-        self.mode = mode
-        self.nonlinearity = nonlinearity
-        self.distribution = distribution
-
-    def __call__(self, module):
-
-        def init(m):
-            if self.wholemodule:
-                kaiming_init(m, self.a, self.mode, self.nonlinearity,
-                             self.bias, self.distribution)
-            else:
-                layername = m.__class__.__name__
-                basesname = _get_bases_name(m)
-                if len(set(self.layer) & set([layername] + basesname)):
-                    kaiming_init(m, self.a, self.mode, self.nonlinearity,
-                                 self.bias, self.distribution)
-
-        module.apply(init)
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
-               f'nonlinearity={self.nonlinearity}, ' \
-               f'distribution ={self.distribution}, bias={self.bias}'
-        return info
-
-
-@INITIALIZERS.register_module(name='Caffe2Xavier')
-class Caffe2XavierInit(KaimingInit):
-    # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
-    # Acknowledgment to FAIR's internal code
-    def __init__(self, **kwargs):
-        super().__init__(
-            a=1,
-            mode='fan_in',
-            nonlinearity='leaky_relu',
-            distribution='uniform',
-            **kwargs)
-
-    def __call__(self, module):
-        super().__call__(module)
-
-
-@INITIALIZERS.register_module(name='Pretrained')
-class PretrainedInit(object):
-    """Initialize module by loading a pretrained model.
-
-    Args:
-        checkpoint (str): the checkpoint file of the pretrained model should
-            be load.
-        prefix (str, optional): the prefix of a sub-module in the pretrained
-            model. it is for loading a part of the pretrained model to
-            initialize. For example, if we would like to only load the
-            backbone of a detector model, we can set ``prefix='backbone.'``.
-            Defaults to None.
-        map_location (str): map tensors into proper locations.
-    """
-
-    def __init__(self, checkpoint, prefix=None, map_location=None):
-        self.checkpoint = checkpoint
-        self.prefix = prefix
-        self.map_location = map_location
-
-    def __call__(self, module):
-        from annotator.uniformer.mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint,
-                                                     load_state_dict)
-        logger = get_logger('mmcv')
-        if self.prefix is None:
-            print_log(f'load model from: {self.checkpoint}', logger=logger)
-            load_checkpoint(
-                module,
-                self.checkpoint,
-                map_location=self.map_location,
-                strict=False,
-                logger=logger)
-        else:
-            print_log(
-                f'load {self.prefix} in model from: {self.checkpoint}',
-                logger=logger)
-            state_dict = _load_checkpoint_with_prefix(
-                self.prefix, self.checkpoint, map_location=self.map_location)
-            load_state_dict(module, state_dict, strict=False, logger=logger)
-
-        if hasattr(module, '_params_init_info'):
-            update_init_info(module, init_info=self._get_init_info())
-
-    def _get_init_info(self):
-        info = f'{self.__class__.__name__}: load from {self.checkpoint}'
-        return info
-
-
-def _initialize(module, cfg, wholemodule=False):
-    func = build_from_cfg(cfg, INITIALIZERS)
-    # wholemodule flag is for override mode, there is no layer key in override
-    # and initializer will give init values for the whole module with the name
-    # in override.
-    func.wholemodule = wholemodule
-    func(module)
-
-
-def _initialize_override(module, override, cfg):
-    if not isinstance(override, (dict, list)):
-        raise TypeError(f'override must be a dict or a list of dict, \
-                but got {type(override)}')
-
-    override = [override] if isinstance(override, dict) else override
-
-    for override_ in override:
-
-        cp_override = copy.deepcopy(override_)
-        name = cp_override.pop('name', None)
-        if name is None:
-            raise ValueError('`override` must contain the key "name",'
-                             f'but got {cp_override}')
-        # if override only has name key, it means use args in init_cfg
-        if not cp_override:
-            cp_override.update(cfg)
-        # if override has name key and other args except type key, it will
-        # raise error
-        elif 'type' not in cp_override.keys():
-            raise ValueError(
-                f'`override` need "type" key, but got {cp_override}')
-
-        if hasattr(module, name):
-            _initialize(getattr(module, name), cp_override, wholemodule=True)
-        else:
-            raise RuntimeError(f'module did not have attribute {name}, '
-                               f'but init_cfg is {cp_override}.')
-
-
-def initialize(module, init_cfg):
-    """Initialize a module.
-
-    Args:
-        module (``torch.nn.Module``): the module will be initialized.
-        init_cfg (dict | list[dict]): initialization configuration dict to
-            define initializer. OpenMMLab has implemented 6 initializers
-            including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
-            ``Kaiming``, and ``Pretrained``.
-    Example:
-        >>> module = nn.Linear(2, 3, bias=True)
-        >>> init_cfg = dict(type='Constant', layer='Linear', val =1 , bias =2)
-        >>> initialize(module, init_cfg)
-
-        >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
-        >>> # define key ``'layer'`` for initializing layer with different
-        >>> # configuration
-        >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
-                dict(type='Constant', layer='Linear', val=2)]
-        >>> initialize(module, init_cfg)
-
-        >>> # define key``'override'`` to initialize some specific part in
-        >>> # module
-        >>> class FooNet(nn.Module):
-        >>>     def __init__(self):
-        >>>         super().__init__()
-        >>>         self.feat = nn.Conv2d(3, 16, 3)
-        >>>         self.reg = nn.Conv2d(16, 10, 3)
-        >>>         self.cls = nn.Conv2d(16, 5, 3)
-        >>> model = FooNet()
-        >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
-        >>>     override=dict(type='Constant', name='reg', val=3, bias=4))
-        >>> initialize(model, init_cfg)
-
-        >>> model = ResNet(depth=50)
-        >>> # Initialize weights with the pretrained model.
-        >>> init_cfg = dict(type='Pretrained',
-                checkpoint='torchvision://resnet50')
-        >>> initialize(model, init_cfg)
-
-        >>> # Initialize weights of a sub-module with the specific part of
-        >>> # a pretrained model by using "prefix".
-        >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
-        >>>     'retinanet_r50_fpn_1x_coco/'\
-        >>>     'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
-        >>> init_cfg = dict(type='Pretrained',
-                checkpoint=url, prefix='backbone.')
-    """
-    if not isinstance(init_cfg, (dict, list)):
-        raise TypeError(f'init_cfg must be a dict or a list of dict, \
-                but got {type(init_cfg)}')
-
-    if isinstance(init_cfg, dict):
-        init_cfg = [init_cfg]
-
-    for cfg in init_cfg:
-        # should deeply copy the original config because cfg may be used by
-        # other modules, e.g., one init_cfg shared by multiple bottleneck
-        # blocks, the expected cfg will be changed after pop and will change
-        # the initialization behavior of other modules
-        cp_cfg = copy.deepcopy(cfg)
-        override = cp_cfg.pop('override', None)
-        _initialize(module, cp_cfg)
-
-        if override is not None:
-            cp_cfg.pop('layer', None)
-            _initialize_override(module, override, cp_cfg)
-        else:
-            # All attributes in module have same initialization.
-            pass
-
-
-def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
-                           b: float) -> Tensor:
-    # Method based on
-    # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
-    # Modified from
-    # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
-    def norm_cdf(x):
-        # Computes standard normal cumulative distribution function
-        return (1. + math.erf(x / math.sqrt(2.))) / 2.
-
-    if (mean < a - 2 * std) or (mean > b + 2 * std):
-        warnings.warn(
-            'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
-            'The distribution of values may be incorrect.',
-            stacklevel=2)
-
-    with torch.no_grad():
-        # Values are generated by using a truncated uniform distribution and
-        # then using the inverse CDF for the normal distribution.
-        # Get upper and lower cdf values
-        lower = norm_cdf((a - mean) / std)
-        upper = norm_cdf((b - mean) / std)
-
-        # Uniformly fill tensor with values from [lower, upper], then translate
-        # to [2lower-1, 2upper-1].
-        tensor.uniform_(2 * lower - 1, 2 * upper - 1)
-
-        # Use inverse cdf transform for normal distribution to get truncated
-        # standard normal
-        tensor.erfinv_()
-
-        # Transform to proper mean, std
-        tensor.mul_(std * math.sqrt(2.))
-        tensor.add_(mean)
-
-        # Clamp to ensure it's in the proper range
-        tensor.clamp_(min=a, max=b)
-        return tensor
-
-
-def trunc_normal_(tensor: Tensor,
-                  mean: float = 0.,
-                  std: float = 1.,
-                  a: float = -2.,
-                  b: float = 2.) -> Tensor:
-    r"""Fills the input Tensor with values drawn from a truncated
-    normal distribution. The values are effectively drawn from the
-    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
-    with values outside :math:`[a, b]` redrawn until they are within
-    the bounds. The method used for generating the random values works
-    best when :math:`a \leq \text{mean} \leq b`.
-
-    Modified from
-    https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
-
-    Args:
-        tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
-        mean (float): the mean of the normal distribution.
-        std (float): the standard deviation of the normal distribution.
-        a (float): the minimum cutoff value.
-        b (float): the maximum cutoff value.
-    """
-    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
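For reference, the registry-driven entry point above is exercised through init_cfg dicts. A minimal sketch mirroring the docstring examples, assuming upstream mmcv is importable as mmcv (this repo vendors it under annotator.uniformer.mmcv):

import torch.nn as nn
from mmcv.cnn import initialize  # assumption: plain upstream mmcv install

model = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1, 2))
# Per-layer-type configs are matched by class name inside the BaseInit subclasses.
init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
            dict(type='Constant', layer='Linear', val=2)]
initialize(model, init_cfg)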
spaces/Artrajz/vits-simple-api/app.py
DELETED
@@ -1,544 +0,0 @@
-import os
-import time
-import uuid
-
-from contants import ModelType
-from logger import logger
-from flask import Flask, request, send_file, jsonify, make_response, render_template
-from werkzeug.utils import secure_filename
-from flask_apscheduler import APScheduler
-from functools import wraps
-from utils.data_utils import save_audio, clean_folder, check_is_none
-from utils.load_model import load_model
-from io import BytesIO
-
-app = Flask(__name__)
-app.config.from_pyfile("config.py")
-
-scheduler = APScheduler()
-scheduler.init_app(app)
-if app.config.get("CLEAN_INTERVAL_SECONDS", 3600) > 0:
-    scheduler.start()
-
-for path in (app.config['LOGS_PATH'], app.config['UPLOAD_FOLDER'], app.config['CACHE_PATH']):
-    try:
-        os.makedirs(path, exist_ok=True)
-    except Exception as e:
-        logger.error(f"Unable to create directory {path}: {str(e)}")
-
-# load model
-tts = load_model(app.config["MODEL_LIST"])
-
-
-def require_api_key(func):
-    @wraps(func)
-    def check_api_key(*args, **kwargs):
-        if not app.config.get('API_KEY_ENABLED', False):
-            return func(*args, **kwargs)
-        else:
-            api_key = request.args.get('api_key') or request.headers.get('X-API-KEY')
-            if api_key and api_key == app.config['API_KEY']:
-                return func(*args, **kwargs)
-            else:
-                return make_response(jsonify({"status": "error", "message": "Invalid API Key"}), 401)
-
-    return check_api_key
-
-
-@app.route('/', methods=["GET", "POST"])
-def index():
-    kwargs = {
-        "speakers": tts.voice_speakers,
-        "speakers_count": tts.speakers_count,
-        "vits_speakers_count": tts.vits_speakers_count,
-        "w2v2_speakers_count": tts.w2v2_speakers_count,
-        "w2v2_emotion_count": tts.w2v2_emotion_count,
-        "bert_vits2_speakers_count": tts.bert_vits2_speakers_count
-    }
-    return render_template("index.html", **kwargs)
-
-
-@app.route('/voice/speakers', methods=["GET", "POST"])
-def voice_speakers_api():
-    return jsonify(tts.voice_speakers)
-
-
-@app.route('/voice', methods=["GET", "POST"])
-@app.route('/voice/vits', methods=["GET", "POST"])
-@require_api_key
-def voice_vits_api():
-    try:
-        if request.method == "GET":
-            request_data = request.args
-        elif request.method == "POST":
-            content_type = request.headers.get('Content-Type')
-            if content_type == 'application/json':
-                request_data = request.get_json()
-            else:
-                request_data = request.form
-
-        text = request_data.get("text", "")
-        id = int(request_data.get("id", app.config.get("ID", 0)))
-        format = request_data.get("format", app.config.get("FORMAT", "wav"))
-        lang = request_data.get("lang", app.config.get("LANG", "auto")).lower()
-        length = float(request_data.get("length", app.config.get("LENGTH", 1)))
-        noise = float(request_data.get("noise", app.config.get("NOISE", 0.667)))
-        noisew = float(request_data.get("noisew", app.config.get("NOISEW", 0.8)))
-        max = int(request_data.get("max", app.config.get("MAX", 50)))
-        use_streaming = request_data.get('streaming', False, type=bool)
-    except Exception as e:
-        logger.error(f"[{ModelType.VITS.value}] {e}")
-        return make_response("parameter error", 400)
-
-    logger.info(
-        f"[{ModelType.VITS.value}] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew}")
-    logger.info(f"[{ModelType.VITS.value}] len:{len(text)} text:{text}")
-
-    if check_is_none(text):
-        logger.info(f"[{ModelType.VITS.value}] text is empty")
-        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
-
-    if check_is_none(id):
-        logger.info(f"[{ModelType.VITS.value}] speaker id is empty")
-        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
-
-    if id < 0 or id >= tts.vits_speakers_count:
-        logger.info(f"[{ModelType.VITS.value}] speaker id {id} does not exist")
-        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
-
-    # Check whether the model supports the requested language
-    speaker_lang = tts.voice_speakers[ModelType.VITS.value][id].get('lang')
-    if lang not in ["auto", "mix"] and len(speaker_lang) != 1 and lang not in speaker_lang:
-        logger.info(f"[{ModelType.VITS.value}] lang \"{lang}\" is not in {speaker_lang}")
-        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
-
-    # If LANGUAGE_AUTOMATIC_DETECT is set in the config file, force speaker_lang to it
-    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
-        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
-
-    if use_streaming and format.upper() != "MP3":
-        format = "mp3"
-        logger.warning("Streaming response only supports MP3 format.")
-
-    fname = f"{str(uuid.uuid1())}.{format}"
-    file_type = f"audio/{format}"
-    task = {"text": text,
-            "id": id,
-            "format": format,
-            "length": length,
-            "noise": noise,
-            "noisew": noisew,
-            "max": max,
-            "lang": lang,
-            "speaker_lang": speaker_lang}
-
-    if use_streaming:
-        audio = tts.stream_vits_infer(task)
-        response = make_response(audio)
-        response.headers['Content-Disposition'] = f'attachment; filename={fname}'
-        response.headers['Content-Type'] = file_type
-        return response
-    else:
-        t1 = time.time()
-        audio = tts.vits_infer(task)
-        t2 = time.time()
-        logger.info(f"[{ModelType.VITS.value}] finish in {(t2 - t1):.2f}s")
-
-        if app.config.get("SAVE_AUDIO", False):
-            logger.debug(f"[{ModelType.VITS.value}] {fname}")
-            path = os.path.join(app.config.get('CACHE_PATH'), fname)
-            save_audio(audio.getvalue(), path)
-
-        return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/hubert-vits', methods=["POST"])
-@require_api_key
-def voice_hubert_api():
-    if request.method == "POST":
-        try:
-            voice = request.files['upload']
-            id = int(request.form.get("id"))
-            format = request.form.get("format", app.config.get("LANG", "auto"))
-            length = float(request.form.get("length", app.config.get("LENGTH", 1)))
-            noise = float(request.form.get("noise", app.config.get("NOISE", 0.667)))
-            noisew = float(request.form.get("noisew", app.config.get("NOISEW", 0.8)))
-            use_streaming = request.form.get('streaming', False, type=bool)
-        except Exception as e:
-            logger.error(f"[{ModelType.HUBERT_VITS.value}] {e}")
-            return make_response("parameter error", 400)
-
-        logger.info(
-            f"[{ModelType.HUBERT_VITS.value}] id:{id} format:{format} length:{length} noise:{noise} noisew:{noisew}")
-
-        fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
-        voice.save(os.path.join(app.config['UPLOAD_FOLDER'], fname))
-
-        if check_is_none(id):
-            logger.info(f"[{ModelType.HUBERT_VITS.value}] speaker id is empty")
-            return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
-
-        if id < 0 or id >= tts.hubert_speakers_count:
-            logger.info(f"[{ModelType.HUBERT_VITS.value}] speaker id {id} does not exist")
-            return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
-
-        file_type = f"audio/{format}"
-        task = {"id": id,
-                "format": format,
-                "length": length,
-                "noise": noise,
-                "noisew": noisew,
-                "audio_path": os.path.join(app.config['UPLOAD_FOLDER'], fname)}
-
-        t1 = time.time()
-        audio = tts.hubert_vits_infer(task)
-        t2 = time.time()
-        logger.info(f"[{ModelType.HUBERT_VITS.value}] finish in {(t2 - t1):.2f}s")
-
-        if app.config.get("SAVE_AUDIO", False):
-            logger.debug(f"[{ModelType.HUBERT_VITS.value}] {fname}")
-            path = os.path.join(app.config.get('CACHE_PATH'), fname)
-            save_audio(audio.getvalue(), path)
-
-        if use_streaming:
-            audio = tts.generate_audio_chunks(audio)
-            response = make_response(audio)
-            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
-            response.headers['Content-Type'] = file_type
-            return response
-        else:
-            return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/w2v2-vits', methods=["GET", "POST"])
-@require_api_key
-def voice_w2v2_api():
-    try:
-        if request.method == "GET":
-            request_data = request.args
-        elif request.method == "POST":
-            content_type = request.headers.get('Content-Type')
-            if content_type == 'application/json':
-                request_data = request.get_json()
-            else:
-                request_data = request.form
-
-        text = request_data.get("text", "")
-        id = int(request_data.get("id", app.config.get("ID", 0)))
-        format = request_data.get("format", app.config.get("FORMAT", "wav"))
-        lang = request_data.get("lang", app.config.get("LANG", "auto")).lower()
-        length = float(request_data.get("length", app.config.get("LENGTH", 1)))
-        noise = float(request_data.get("noise", app.config.get("NOISE", 0.667)))
-        noisew = float(request_data.get("noisew", app.config.get("NOISEW", 0.8)))
-        max = int(request_data.get("max", app.config.get("MAX", 50)))
-        emotion = int(request_data.get("emotion", app.config.get("EMOTION", 0)))
-        use_streaming = request_data.get('streaming', False, type=bool)
-    except Exception as e:
-        logger.error(f"[{ModelType.W2V2_VITS.value}] {e}")
-        return make_response("parameter error", 400)
-
-    logger.info(f"[{ModelType.W2V2_VITS.value}] id:{id} format:{format} lang:{lang} "
-                f"length:{length} noise:{noise} noisew:{noisew} emotion:{emotion}")
-    logger.info(f"[{ModelType.W2V2_VITS.value}] len:{len(text)} text:{text}")
-
-    if check_is_none(text):
-        logger.info(f"[{ModelType.W2V2_VITS.value}] text is empty")
-        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
-
-    if check_is_none(id):
-        logger.info(f"[{ModelType.W2V2_VITS.value}] speaker id is empty")
-        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
-
-    if id < 0 or id >= tts.w2v2_speakers_count:
-        logger.info(f"[{ModelType.W2V2_VITS.value}] speaker id {id} does not exist")
-        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
-
-    # Check whether the model supports the requested language
-    speaker_lang = tts.voice_speakers[ModelType.W2V2_VITS.value][id].get('lang')
-    if lang not in ["auto", "mix"] and len(speaker_lang) != 1 and lang not in speaker_lang:
-        logger.info(f"[{ModelType.W2V2_VITS.value}] lang \"{lang}\" is not in {speaker_lang}")
-        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
-
-    # If LANGUAGE_AUTOMATIC_DETECT is set in the config file, force speaker_lang to it
-    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
-        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
-
-    if use_streaming and format.upper() != "MP3":
-        format = "mp3"
-        logger.warning("Streaming response only supports MP3 format.")
-
-    fname = f"{str(uuid.uuid1())}.{format}"
-    file_type = f"audio/{format}"
-    task = {"text": text,
-            "id": id,
-            "format": format,
-            "length": length,
-            "noise": noise,
-            "noisew": noisew,
-            "max": max,
-            "lang": lang,
-            "emotion": emotion,
-            "speaker_lang": speaker_lang}
-
-    t1 = time.time()
-    audio = tts.w2v2_vits_infer(task)
-    t2 = time.time()
-    logger.info(f"[{ModelType.W2V2_VITS.value}] finish in {(t2 - t1):.2f}s")
-
-    if app.config.get("SAVE_AUDIO", False):
-        logger.debug(f"[{ModelType.W2V2_VITS.value}] {fname}")
-        path = os.path.join(app.config.get('CACHE_PATH'), fname)
-        save_audio(audio.getvalue(), path)
-
-    if use_streaming:
-        audio = tts.generate_audio_chunks(audio)
-        response = make_response(audio)
-        response.headers['Content-Disposition'] = f'attachment; filename={fname}'
-        response.headers['Content-Type'] = file_type
-        return response
-    else:
-        return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/conversion', methods=["POST"])
-@app.route('/voice/vits/conversion', methods=["POST"])
-@require_api_key
-def vits_voice_conversion_api():
-    if request.method == "POST":
-        try:
-            voice = request.files['upload']
-            original_id = int(request.form["original_id"])
-            target_id = int(request.form["target_id"])
-            format = request.form.get("format", voice.filename.split(".")[1])
-            use_streaming = request.form.get('streaming', False, type=bool)
-        except Exception as e:
-            logger.error(f"[vits_voice_conversion] {e}")
-            return make_response("parameter error", 400)
-
-        logger.info(f"[vits_voice_conversion] original_id:{original_id} target_id:{target_id}")
-        fname = secure_filename(str(uuid.uuid1()) + "." + voice.filename.split(".")[1])
-        audio_path = os.path.join(app.config['UPLOAD_FOLDER'], fname)
-        voice.save(audio_path)
-        file_type = f"audio/{format}"
-        task = {"audio_path": audio_path,
-                "original_id": original_id,
-                "target_id": target_id,
-                "format": format}
-
-        t1 = time.time()
-        audio = tts.vits_voice_conversion(task)
-        t2 = time.time()
-        logger.info(f"[Voice conversion] finish in {(t2 - t1):.2f}s")
-
-        if app.config.get("SAVE_AUDIO", False):
-            logger.debug(f"[Voice conversion] {fname}")
-            path = os.path.join(app.config.get('CACHE_PATH'), fname)
-            save_audio(audio.getvalue(), path)
-
-        if use_streaming:
-            audio = tts.generate_audio_chunks(audio)
-            response = make_response(audio)
-            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
-            response.headers['Content-Type'] = file_type
-            return response
-        else:
-            return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/ssml', methods=["POST"])
-@require_api_key
-def ssml_api():
-    try:
-        content_type = request.headers.get('Content-Type')
-        if content_type == 'application/json':
-            request_data = request.get_json()
-        else:
-            request_data = request.form
-
-        ssml = request_data.get("ssml")
-    except Exception as e:
-        logger.info(f"[ssml] {e}")
-        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)
-
-    logger.debug(ssml)
-    voice_tasks, format = tts.parse_ssml(ssml)
-    fname = f"{str(uuid.uuid1())}.{format}"
-    file_type = f"audio/{format}"
-
-    t1 = time.time()
-    audio = tts.process_ssml_infer_task(voice_tasks, format)
-    t2 = time.time()
-    logger.info(f"[ssml] finish in {(t2 - t1):.2f}s")
-
-    if app.config.get("SAVE_AUDIO", False):
-        logger.debug(f"[ssml] {fname}")
-        path = os.path.join(app.config.get('CACHE_PATH'), fname)
-        save_audio(audio.getvalue(), path)
-
-    return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/dimension-emotion', methods=["POST"])
-@require_api_key
-def dimensional_emotion():
-    if request.method == "POST":
-        try:
-            audio = request.files['upload']
-            use_streaming = request.form.get('streaming', False, type=bool)
-        except Exception as e:
-            logger.error(f"[dimensional_emotion] {e}")
-            return make_response("parameter error", 400)
-
-        content = BytesIO(audio.read())
-
-        file_type = "application/octet-stream; charset=ascii"
-        fname = os.path.splitext(audio.filename)[0] + ".npy"
-        emotion_npy = tts.get_dimensional_emotion_npy(content)
-        if use_streaming:
-            emotion_npy = tts.generate_audio_chunks(emotion_npy)
-            response = make_response(emotion_npy)
-            response.headers['Content-Disposition'] = f'attachment; filename={fname}'
-            response.headers['Content-Type'] = file_type
-            return response
-        else:
-            return send_file(path_or_file=emotion_npy, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/bert-vits2', methods=["GET", "POST"])
-@require_api_key
-def voice_bert_vits2_api():
-    try:
-        if request.method == "GET":
-            request_data = request.args
-        elif request.method == "POST":
-            content_type = request.headers.get('Content-Type')
-            if content_type == 'application/json':
-                request_data = request.get_json()
-            else:
-                request_data = request.form
-
-        text = request_data.get("text", "")
-        id = int(request_data.get("id", app.config.get("ID", 0)))
-        format = request_data.get("format", app.config.get("FORMAT", "wav"))
-        lang = request_data.get("lang", "auto").lower()
-        length = float(request_data.get("length", app.config.get("LENGTH", 1)))
-        noise = float(request_data.get("noise", app.config.get("NOISE", 0.667)))
-        noisew = float(request_data.get("noisew", app.config.get("NOISEW", 0.8)))
-        sdp_ratio = float(request_data.get("sdp_ratio", app.config.get("SDP_RATIO", 0.2)))
-        max = int(request_data.get("max", app.config.get("MAX", 50)))
-    except Exception as e:
-        logger.error(f"[{ModelType.BERT_VITS2.value}] {e}")
-        return make_response("parameter error", 400)
-
-    logger.info(
-        f"[{ModelType.BERT_VITS2.value}] id:{id} format:{format} lang:{lang} length:{length} noise:{noise} noisew:{noisew} sdp_ratio:{sdp_ratio}")
-    logger.info(f"[{ModelType.BERT_VITS2.value}] len:{len(text)} text:{text}")
-
-    if check_is_none(text):
-        logger.info(f"[{ModelType.BERT_VITS2.value}] text is empty")
-        return make_response(jsonify({"status": "error", "message": "text is empty"}), 400)
-
-    if check_is_none(id):
-        logger.info(f"[{ModelType.BERT_VITS2.value}] speaker id is empty")
-        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
-
-    if id < 0 or id >= tts.bert_vits2_speakers_count:
-        logger.info(f"[{ModelType.BERT_VITS2.value}] speaker id {id} does not exist")
-        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
-
-    # Check whether the model supports the requested language
-    speaker_lang = tts.voice_speakers[ModelType.BERT_VITS2.value][id].get('lang')
-    if lang not in ["auto", "mix"] and len(speaker_lang) != 1 and lang not in speaker_lang:
-        logger.info(f"[{ModelType.BERT_VITS2.value}] lang \"{lang}\" is not in {speaker_lang}")
-        return make_response(jsonify({"status": "error", "message": f"lang '{lang}' is not in {speaker_lang}"}), 400)
-
-    # If LANGUAGE_AUTOMATIC_DETECT is set in the config file, force speaker_lang to it
-    if app.config.get("LANGUAGE_AUTOMATIC_DETECT", []) != []:
-        speaker_lang = app.config.get("LANGUAGE_AUTOMATIC_DETECT")
-
-    fname = f"{str(uuid.uuid1())}.{format}"
-    file_type = f"audio/{format}"
-    task = {"text": text,
-            "id": id,
-            "format": format,
-            "length": length,
-            "noise": noise,
-            "noisew": noisew,
-            "sdp_ratio": sdp_ratio,
-            "max": max,
-            "lang": lang,
-            "speaker_lang": speaker_lang}
-
-    t1 = time.time()
-    audio = tts.bert_vits2_infer(task)
-    t2 = time.time()
-    logger.info(f"[{ModelType.BERT_VITS2.value}] finish in {(t2 - t1):.2f}s")
-
-    if app.config.get("SAVE_AUDIO", False):
-        logger.debug(f"[{ModelType.BERT_VITS2.value}] {fname}")
-        path = os.path.join(app.config.get('CACHE_PATH'), fname)
-        save_audio(audio.getvalue(), path)
-
-    return send_file(path_or_file=audio, mimetype=file_type, download_name=fname)
-
-
-@app.route('/voice/check', methods=["GET", "POST"])
-def check():
-    try:
-        if request.method == "GET":
-            request_data = request.args
-        elif request.method == "POST":
-            content_type = request.headers.get('Content-Type')
-            if content_type == 'application/json':
-                request_data = request.get_json()
-            else:
-                request_data = request.form
-
-        model_type_str = request_data.get("model_type", request_data.get("model")).upper()
-        id = int(request_data.get("id"))
-    except Exception as e:
-        logger.info(f"[check] {e}")
-        return make_response(jsonify({"status": "error", "message": "parameter error"}), 400)
-
-    if check_is_none(model_type_str):
-        logger.info(f"[check] model {model_type_str} is empty")
-        return make_response(jsonify({"status": "error", "message": "model is empty"}), 400)
-
-    if model_type_str not in ModelType._value2member_map_:
-        res = make_response(jsonify({"status": "error", "message": f"model {model_type_str} does not exist"}))
-        res.status = 404
-        logger.info(f"[check] speaker id {id} error")
-        return res
-
-    if check_is_none(id):
-        logger.info(f"[check] speaker id is empty")
-        return make_response(jsonify({"status": "error", "message": "speaker id is empty"}), 400)
-
-    model_type = ModelType(model_type_str)
-    speaker_list = tts.voice_speakers[model_type.value]
-
-    if len(speaker_list) == 0:
-        logger.info(f"[check] {model_type_str} not loaded")
-        return make_response(jsonify({"status": "error", "message": f"{model_type_str} not loaded"}), 400)
-
-    if id < 0 or id >= len(speaker_list):
-        logger.info(f"[check] speaker id {id} does not exist")
-        return make_response(jsonify({"status": "error", "message": f"id {id} does not exist"}), 400)
-    name = str(speaker_list[id]["name"])
-    lang = speaker_list[id]["lang"]
-    logger.info(f"[check] check id:{id} name:{name} lang:{lang}")
-
-    return make_response(jsonify({"status": "success", "id": id, "name": name, "lang": lang}), 200)
-
-
-# regular cleaning
-@scheduler.task('interval', id='clean_task', seconds=app.config.get("CLEAN_INTERVAL_SECONDS", 3600),
-                misfire_grace_time=900)
-def clean_task():
-    clean_folder(app.config["UPLOAD_FOLDER"])
-    clean_folder(app.config["CACHE_PATH"])
-
-
-if __name__ == '__main__':
-    app.run(host='0.0.0.0', port=app.config.get("PORT", 23456), debug=app.config.get("DEBUG", False))  # expose to the network
-    # app.run(host='127.0.0.1', port=app.config.get("PORT",23456), debug=True)  # run locally for debugging
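The /voice/vits route above takes its parameters from the query string or a form/JSON body. A minimal client sketch, assuming the server runs locally on the default port 23456 from the config fallback above:

import requests

# "text", "id", "format" and "lang" match the request_data keys parsed above.
resp = requests.get(
    "http://127.0.0.1:23456/voice/vits",
    params={"text": "hello", "id": 0, "format": "wav", "lang": "auto"},
)
with open("out.wav", "wb") as f:
    f.write(resp.content)  # raw audio bytes returned by send_file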
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/models/GroundingDINO/groundingdino.py
DELETED
@@ -1,412 +0,0 @@
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR model and criterion classes.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
import copy
from typing import List

import torch
import torch.nn.functional as F
from torch import nn
from torchvision.ops.boxes import nms
from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast

from groundingdino.util import box_ops, get_tokenlizer
from groundingdino.util.misc import (
    NestedTensor,
    accuracy,
    get_world_size,
    interpolate,
    inverse_sigmoid,
    is_dist_avail_and_initialized,
    nested_tensor_from_tensor_list,
)
from groundingdino.util.utils import get_phrases_from_posmap
from groundingdino.util.visualizer import COCOVisualizer
from groundingdino.util.vl_utils import create_positive_map_from_span

from ..registry import MODULE_BUILD_FUNCS
from .backbone import build_backbone
from .bertwarper import (
    BertModelWarper,
    generate_masks_with_special_tokens,
    generate_masks_with_special_tokens_and_transfer_map,
)
from .transformer import build_transformer
from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss


class GroundingDINO(nn.Module):
    """This is the Cross-Attention Detector module that performs object detection"""

    def __init__(
        self,
        backbone,
        transformer,
        num_queries,
        aux_loss=False,
        iter_update=False,
        query_dim=2,
        num_feature_levels=1,
        nheads=8,
        # two stage
        two_stage_type="no",  # ['no', 'standard']
        dec_pred_bbox_embed_share=True,
        two_stage_class_embed_share=True,
        two_stage_bbox_embed_share=True,
        num_patterns=0,
        dn_number=100,
        dn_box_noise_scale=0.4,
        dn_label_noise_ratio=0.5,
        dn_labelbook_size=100,
        text_encoder_type="bert-base-uncased",
        sub_sentence_present=True,
        max_text_len=256,
    ):
        """Initializes the model.
        Parameters:
            backbone: torch module of the backbone to be used. See backbone.py
            transformer: torch module of the transformer architecture. See transformer.py
            num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
                         Conditional DETR can detect in a single image. For COCO, we recommend 100 queries.
            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
        """
        super().__init__()
        self.num_queries = num_queries
        self.transformer = transformer
        self.hidden_dim = hidden_dim = transformer.d_model
        self.num_feature_levels = num_feature_levels
        self.nheads = nheads
        self.max_text_len = 256
        self.sub_sentence_present = sub_sentence_present

        # setting query dim
        self.query_dim = query_dim
        assert query_dim == 4

        # for dn training
        self.num_patterns = num_patterns
        self.dn_number = dn_number
        self.dn_box_noise_scale = dn_box_noise_scale
        self.dn_label_noise_ratio = dn_label_noise_ratio
        self.dn_labelbook_size = dn_labelbook_size

        # bert
        self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type)
        self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type)
        self.bert.pooler.dense.weight.requires_grad_(False)
        self.bert.pooler.dense.bias.requires_grad_(False)
        self.bert = BertModelWarper(bert_model=self.bert)

        self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True)
        nn.init.constant_(self.feat_map.bias.data, 0)
        nn.init.xavier_uniform_(self.feat_map.weight.data)
        # freeze

        # special tokens
        self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"])

        # prepare input projection layers
        if num_feature_levels > 1:
            num_backbone_outs = len(backbone.num_channels)
            input_proj_list = []
            for _ in range(num_backbone_outs):
                in_channels = backbone.num_channels[_]
                input_proj_list.append(
                    nn.Sequential(
                        nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
                        nn.GroupNorm(32, hidden_dim),
                    )
                )
            for _ in range(num_feature_levels - num_backbone_outs):
                input_proj_list.append(
                    nn.Sequential(
                        nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
                        nn.GroupNorm(32, hidden_dim),
                    )
                )
                in_channels = hidden_dim
            self.input_proj = nn.ModuleList(input_proj_list)
        else:
            assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!"
            self.input_proj = nn.ModuleList(
                [
                    nn.Sequential(
                        nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
                        nn.GroupNorm(32, hidden_dim),
                    )
                ]
            )

        self.backbone = backbone
        self.aux_loss = aux_loss
        self.box_pred_damping = box_pred_damping = None

        self.iter_update = iter_update
        assert iter_update, "Why not iter_update?"

        # prepare pred layers
        self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
        # prepare class & box embed
        _class_embed = ContrastiveEmbed()

        _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
        nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
        nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)

        if dec_pred_bbox_embed_share:
            box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]
        else:
            box_embed_layerlist = [
                copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)
            ]
        class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]
        self.bbox_embed = nn.ModuleList(box_embed_layerlist)
        self.class_embed = nn.ModuleList(class_embed_layerlist)
        self.transformer.decoder.bbox_embed = self.bbox_embed
        self.transformer.decoder.class_embed = self.class_embed

        # two stage
        self.two_stage_type = two_stage_type
        assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(
            two_stage_type
        )
        if two_stage_type != "no":
            if two_stage_bbox_embed_share:
                assert dec_pred_bbox_embed_share
                self.transformer.enc_out_bbox_embed = _bbox_embed
            else:
                self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)

            if two_stage_class_embed_share:
                assert dec_pred_bbox_embed_share
                self.transformer.enc_out_class_embed = _class_embed
            else:
                self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)

        self.refpoint_embed = None

        self._reset_parameters()

    def _reset_parameters(self):
        # init input_proj
        for proj in self.input_proj:
            nn.init.xavier_uniform_(proj[0].weight, gain=1)
            nn.init.constant_(proj[0].bias, 0)

    def set_image_tensor(self, samples: NestedTensor):
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        self.features, self.poss = self.backbone(samples)

    def unset_image_tensor(self):
        if hasattr(self, 'features'):
            del self.features
        if hasattr(self, 'poss'):
            del self.poss

    def set_image_features(self, features, poss):
        self.features = features
        self.poss = poss

    def init_ref_points(self, use_num_queries):
        self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim)

    def forward(self, samples: NestedTensor, targets: List = None, **kw):
        """The forward expects a NestedTensor, which consists of:
           - samples.tensor: batched images, of shape [batch_size x 3 x H x W]
           - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels

        It returns a dict with the following elements:
           - "pred_logits": the classification logits (including no-object) for all queries.
                            Shape= [batch_size x num_queries x num_classes]
           - "pred_boxes": The normalized boxes coordinates for all queries, represented as
                           (center_x, center_y, width, height). These values are normalized in [0, 1],
                           relative to the size of each individual image (disregarding possible padding).
                           See PostProcess for information on how to retrieve the unnormalized bounding box.
           - "aux_outputs": Optional, only returned when auxiliary losses are activated. It is a list of
                            dictionaries containing the two above keys for each decoder layer.
        """
        if targets is None:
            captions = kw["captions"]
        else:
            captions = [t["caption"] for t in targets]

        # encoder texts
        tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to(
            samples.device
        )
        (
            text_self_attention_masks,
            position_ids,
            cate_to_token_mask_list,
        ) = generate_masks_with_special_tokens_and_transfer_map(
            tokenized, self.specical_tokens, self.tokenizer
        )

        if text_self_attention_masks.shape[1] > self.max_text_len:
            text_self_attention_masks = text_self_attention_masks[
                :, : self.max_text_len, : self.max_text_len
            ]
            position_ids = position_ids[:, : self.max_text_len]
            tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len]
            tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len]
            tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len]

        # extract text embeddings
        if self.sub_sentence_present:
            tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"}
            tokenized_for_encoder["attention_mask"] = text_self_attention_masks
            tokenized_for_encoder["position_ids"] = position_ids
        else:
            # import ipdb; ipdb.set_trace()
            tokenized_for_encoder = tokenized

        bert_output = self.bert(**tokenized_for_encoder)  # bs, 195, 768

        encoded_text = self.feat_map(bert_output["last_hidden_state"])  # bs, 195, d_model
        text_token_mask = tokenized.attention_mask.bool()  # bs, 195
        # text_token_mask: True for nomask, False for mask
        # text_self_attention_masks: True for nomask, False for mask

        if encoded_text.shape[1] > self.max_text_len:
            encoded_text = encoded_text[:, : self.max_text_len, :]
            text_token_mask = text_token_mask[:, : self.max_text_len]
            position_ids = position_ids[:, : self.max_text_len]
            text_self_attention_masks = text_self_attention_masks[
                :, : self.max_text_len, : self.max_text_len
            ]

        text_dict = {
            "encoded_text": encoded_text,  # bs, 195, d_model
            "text_token_mask": text_token_mask,  # bs, 195
            "position_ids": position_ids,  # bs, 195
            "text_self_attention_masks": text_self_attention_masks,  # bs, 195,195
        }

        # import ipdb; ipdb.set_trace()
        if isinstance(samples, (list, torch.Tensor)):
            samples = nested_tensor_from_tensor_list(samples)
        if not hasattr(self, 'features') or not hasattr(self, 'poss'):
            self.set_image_tensor(samples)

        srcs = []
        masks = []
        for l, feat in enumerate(self.features):
            src, mask = feat.decompose()
            srcs.append(self.input_proj[l](src))
            masks.append(mask)
            assert mask is not None
        if self.num_feature_levels > len(srcs):
            _len_srcs = len(srcs)
            for l in range(_len_srcs, self.num_feature_levels):
                if l == _len_srcs:
                    src = self.input_proj[l](self.features[-1].tensors)
                else:
                    src = self.input_proj[l](srcs[-1])
                m = samples.mask
                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
                pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
                srcs.append(src)
                masks.append(mask)
                self.poss.append(pos_l)

        input_query_bbox = input_query_label = attn_mask = dn_meta = None
        hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(
            srcs, masks, input_query_bbox, self.poss, input_query_label, attn_mask, text_dict
        )

        # deformable-detr-like anchor update
        outputs_coord_list = []
        for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(
            zip(reference[:-1], self.bbox_embed, hs)
        ):
            layer_delta_unsig = layer_bbox_embed(layer_hs)
            layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
            layer_outputs_unsig = layer_outputs_unsig.sigmoid()
            outputs_coord_list.append(layer_outputs_unsig)
        outputs_coord_list = torch.stack(outputs_coord_list)

        # output
        outputs_class = torch.stack(
            [
                layer_cls_embed(layer_hs, text_dict)
                for layer_cls_embed, layer_hs in zip(self.class_embed, hs)
            ]
        )
        out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]}

        # # for intermediate outputs
        # if self.aux_loss:
        #     out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list)

        # # for encoder output
        # if hs_enc is not None:
        #     # prepare intermediate outputs
        #     interm_coord = ref_enc[-1]
        #     interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict)
        #     out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord}
        #     out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal}
        unset_image_tensor = kw.get('unset_image_tensor', True)
        if unset_image_tensor:
            self.unset_image_tensor()  # if necessary
        return out

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_coord):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        return [
            {"pred_logits": a, "pred_boxes": b}
            for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
        ]


@MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino")
def build_groundingdino(args):

    backbone = build_backbone(args)
    transformer = build_transformer(args)

    dn_labelbook_size = args.dn_labelbook_size
    dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share
    sub_sentence_present = args.sub_sentence_present

    model = GroundingDINO(
        backbone,
        transformer,
        num_queries=args.num_queries,
        aux_loss=True,
        iter_update=True,
        query_dim=4,
        num_feature_levels=args.num_feature_levels,
        nheads=args.nheads,
        dec_pred_bbox_embed_share=dec_pred_bbox_embed_share,
        two_stage_type=args.two_stage_type,
        two_stage_bbox_embed_share=args.two_stage_bbox_embed_share,
        two_stage_class_embed_share=args.two_stage_class_embed_share,
        num_patterns=args.num_patterns,
        dn_number=0,
        dn_box_noise_scale=args.dn_box_noise_scale,
        dn_label_noise_ratio=args.dn_label_noise_ratio,
        dn_labelbook_size=dn_labelbook_size,
        text_encoder_type=args.text_encoder_type,
        sub_sentence_present=sub_sentence_present,
        max_text_len=args.max_text_len,
    )

    return model
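
A note on the "deformable-detr-like anchor update" loop in forward() above: each decoder layer predicts an unsigmoided box offset, adds it to the reference box in logit space, and squashes back through a sigmoid. Here is a self-contained sketch of that single step; the tensor values are made up for illustration, and the local inverse_sigmoid is a stand-in assumed to match the clamped-logit behavior of the imported groundingdino.util.misc.inverse_sigmoid.

import torch

def inverse_sigmoid(x, eps=1e-5):
    # clamped logit: avoids infinities at exactly 0 or 1
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

# one reference box in normalized (cx, cy, w, h) form and a predicted offset
layer_ref_sig = torch.tensor([[0.50, 0.50, 0.20, 0.30]])
layer_delta_unsig = torch.tensor([[0.10, -0.10, 0.00, 0.20]])

# the per-decoder-layer update: offset + logit(reference), then sigmoid
layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
print(layer_outputs_unsig.sigmoid())  # refined box, still in [0, 1]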
spaces/Arvi/feedback_generator/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: Feedback Generator
emoji: 🏢
colorFrom: purple
colorTo: blue
sdk: gradio
sdk_version: 3.41.2
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Banbri/zcvzcv/src/components/ui/accordion.tsx
DELETED
@@ -1,60 +0,0 @@
"use client"

import * as React from "react"
import * as AccordionPrimitive from "@radix-ui/react-accordion"
import { ChevronDown } from "lucide-react"

import { cn } from "@/lib/utils"

const Accordion = AccordionPrimitive.Root

const AccordionItem = React.forwardRef<
  React.ElementRef<typeof AccordionPrimitive.Item>,
  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Item>
>(({ className, ...props }, ref) => (
  <AccordionPrimitive.Item
    ref={ref}
    className={cn("border-b", className)}
    {...props}
  />
))
AccordionItem.displayName = "AccordionItem"

const AccordionTrigger = React.forwardRef<
  React.ElementRef<typeof AccordionPrimitive.Trigger>,
  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Trigger>
>(({ className, children, ...props }, ref) => (
  <AccordionPrimitive.Header className="flex">
    <AccordionPrimitive.Trigger
      ref={ref}
      className={cn(
        "flex flex-1 items-center justify-between py-4 font-medium transition-all hover:underline [&[data-state=open]>svg]:rotate-180",
        className
      )}
      {...props}
    >
      {children}
      <ChevronDown className="h-4 w-4 shrink-0 transition-transform duration-200" />
    </AccordionPrimitive.Trigger>
  </AccordionPrimitive.Header>
))
AccordionTrigger.displayName = AccordionPrimitive.Trigger.displayName

const AccordionContent = React.forwardRef<
  React.ElementRef<typeof AccordionPrimitive.Content>,
  React.ComponentPropsWithoutRef<typeof AccordionPrimitive.Content>
>(({ className, children, ...props }, ref) => (
  <AccordionPrimitive.Content
    ref={ref}
    className={cn(
      "overflow-hidden text-sm transition-all data-[state=closed]:animate-accordion-up data-[state=open]:animate-accordion-down",
      className
    )}
    {...props}
  >
    <div className="pb-4 pt-0">{children}</div>
  </AccordionPrimitive.Content>
))
AccordionContent.displayName = AccordionPrimitive.Content.displayName

export { Accordion, AccordionItem, AccordionTrigger, AccordionContent }
spaces/Bart92/RVC_HF/utils/i18n.py
DELETED
@@ -1,28 +0,0 @@
import locale
import json
import os


def load_language_list(language):
    with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f:
        language_list = json.load(f)
    return language_list


class I18nAuto:
    def __init__(self, language=None):
        if language in ["Auto", None]:
            language = "es_ES"
        if not os.path.exists(f"./i18n/{language}.json"):
            language = "es_ES"
        language = "es_ES"  # NOTE: this fork forces Spanish, making the two checks above dead code
        self.language = language
        # print("Use Language:", language)
        self.language_map = load_language_list(language)

    def __call__(self, key):
        return self.language_map.get(key, key)

    def print(self):
        # print("Use Language:", self.language)
        print("")
spaces/Benson/text-generation/Examples/Cmo Descargar El Simulador De Bus Ultimate.md
DELETED
@@ -1,100 +0,0 @@

<h1>How to Download Special Forces Group 2 Versi Lama</h1>
<p>If you are a fan of first-person shooter games, you may have heard of <strong>Special Forces Group 2</strong>, a popular Android game that lets you play online or offline with different modes, weapons, maps, and characters. But did you know that you can also play an older version of the game, called the <strong>versi lama</strong>, which has some features that are not available in the latest version? In this article, we will show you how to download Special Forces Group 2 versi lama and enjoy the classic gameplay.</p>
<h2>how to download bus simulator ultimate</h2><br /><p><b><b>Download File</b> ✯ <a href="https://bltlly.com/2v6LwB">https://bltlly.com/2v6LwB</a></b></p><br /><br />
<h2>What is Special Forces Group 2?</h2>
<p>Special Forces Group 2 is a 3D first-person shooter game developed by ForgeGames. It was released in 2016 and has been updated regularly with new content and improvements. The game offers several modes, such as classic, resurrection, capture the flag, zombie mode, bomb mode, knives, deathmatch, arms race, and sniper. You can play solo against bots or multiplayer online and over a wifi router. You can also customize your weapon skins, characters, and maps. The game has more than 100 million downloads on the Google Play Store and has received positive reviews from users and critics alike.</p>
<h2>Why download an older version of the game?</h2>
<p>While the latest version of Special Forces Group 2 has many advantages, such as better graphics, performance, and compatibility, some players may prefer the older version for various reasons. For example:</p>
<ul>
<li>You have an older device that cannot run the latest version smoothly, or at all.</li>
<li>You like the old user interface or gameplay better than the new one.</li>
<li>You want to play with friends who have not updated their game yet.</li>
<li>You want to experience the nostalgia of playing the game as it was when it first came out.</li>
<li>You want to try some features that were removed or changed in newer versions.</li>
</ul>

<h2>How to find and install older versions of Android apps</h2>
<p>The Google Play Store does not let you roll an app back to its previous versions. However, several third-party sources offer older versions of Android apps for download. These sources are usually websites that host APK files, which are the installation files for Android apps. You can download these APK files and install them on your device manually. Before you do so, however, you should make sure that:</p>
<ul>
<li>You have enough storage space on your device.</li>
<li>You have enabled the "Unknown sources" option in your Android Settings > Security. This will allow you to install apps from sources other than the Play Store.</li>
<li>You have checked the authenticity and safety of the APK file. Some APK files may be corrupted, modified, or infected with malware. You can use tools such as VirusTotal or APKPure Verifier to scan the APK file before installing it.</li>
<li>You have backed up your data and settings in case something goes wrong.</li>
</ul>
<p>Once you have completed these steps, you can proceed to find and install the older version of Special Forces Group 2. Many websites offer APK files, but we will focus on three of the most popular and reliable ones: APKMirror, uptodown, and APK4Fun. Here is how to use each of them to download Special Forces Group 2 versi lama.</p>
<h3>APKMirror</h3>
<p>APKMirror is one of the most trusted sources of APK files on the web. It has a large collection of apps and games, including older versions. It also verifies the signature and hash of each APK file to guarantee its authenticity and integrity. To download Special Forces Group 2 versi lama from APKMirror, follow these steps:</p>
<ol>
<li>Go to <a href=">APKMirror.com</a> and search for "Special Forces Group 2" in the search bar.</li>
<li>Select the game from the search results and click on it.</li>
<li>On the next page, click the "Download APK" button. You will see a pop-up with some information and warnings. Read them carefully and click "I understand" if you agree.</li>
<li>The download will start automatically. You can watch its progress in your browser's download manager or notification bar.</li>
<li>Once the download has finished, locate the APK file in your device's storage and tap on it to install it. You may see a message asking you to confirm the installation. Tap "Install" and wait for the process to finish.</li>
<li>You can now launch the game and enjoy playing Special Forces Group 2 versi lama.</li>
</ol>
<h3>uptodown</h3>
<p>uptodown is another popular website that offers APK files for Android apps and games. It has a user-friendly interface and a wide range of categories and genres. It also scans every APK file for viruses and malware using VirusTotal. To download Special Forces Group 2 versi lama from uptodown, follow these steps:</p>
<ol>
<li>Go to <a href=">uptodown.com</a> and search for "Special Forces Group 2" in the search bar.</li>
<li>Select the game from the search results and click on it.</li>
<li>On the game page, scroll down and find the "Previous versions" section. You will see the list of older versions available for download. For example, if you want to download version 4.21, click the green "Download" button next to it.</li>
<li>The download will start automatically. You can watch its progress in your browser's download manager or notification bar.</li>
<li>Once the download has finished, locate the APK file in your device's storage and tap on it to install it. You may see a message asking you to confirm the installation. Tap "Install" and wait for the process to finish.</li>
<li>You can now launch the game and enjoy playing Special Forces Group 2 versi lama.</li>
</ol>
<h3>APK4Fun</h3>
<ol>
<li>Go to <a href=">apk4fun.com</a> and search for "Special Forces Group 2" in the search bar.</li>
<li>Select the game from the search results and click on it.</li>
<li>On the game page, scroll down and find the "Old versions" section. You will see the list of older versions available for download. For example, if you want to download version 4.21, click the blue "Download APK" button next to it.</li>
<li>The download will start automatically. You can watch its progress in your browser's download manager or notification bar.</li>
<li>Once the download has finished, locate the APK file in your device's storage and tap on it to install it. You may see a message asking you to confirm the installation. Tap "Install" and wait for the process to finish.</li>
<li>You can now launch the game and enjoy playing Special Forces Group 2 versi lama.</li>
</ol>
<h2>How to prevent automatic updates and keep the versi lama</h2>
<p>Now that you have downloaded and installed Special Forces Group 2 versi lama, you may want to keep it that way and stop the game from updating itself to the latest version automatically. The Play Store may detect that you have an outdated version of the game and prompt you to update it. If you do, you will lose the versi lama and have to repeat the whole process again. To avoid this, disable automatic updates for the game by following these steps:</p>
<p></p>
<ol>
<li>Open the Play Store app on your device and tap the menu icon (three horizontal lines) in the top-left corner.</li>
<li>Tap "My apps & games" and find Special Forces Group 2 in the list of installed apps.</li>
<li>Tap the game, then tap the menu icon (three vertical dots) in the top-right corner.</li>
<li>Uncheck the box that says "Enable auto-update". This will stop the game from updating automatically.</li>
</ol>
<p>By following these steps, you can enjoy playing Special Forces Group 2 versi lama without worrying about losing it.</p>
<h2>Conclusion</h2>
<p>In this article, we have shown you how to download Special Forces Group 2 versi lama, an older version of a popular Android game. We explained what the game is, why you might want to play the versi lama, how to find and install older versions of Android apps from third-party sources, and how to prevent automatic updates and keep the versi lama. We hope you found this article helpful and informative. If you have any questions or comments, please leave a comment below. Happy gaming!</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions and answers about downloading Special Forces Group 2 versi lama:</p>
<ul>
<li><strong>Q: Is it legal to download older versions of Android apps?</strong></li>
<li>A: It depends on the app and its license agreement. Some apps may allow you to download and use older versions, while others may prohibit it. You should always check an app's terms and conditions before downloading it from third-party sources.</li>
<li><strong>Q: Is it safe to download older versions of Android apps?</strong></li>
<li>A: Not always. Older versions of Android apps may have security vulnerabilities, bugs, or compatibility issues that could harm your device or data. You should always scan the APK file for viruses and malware before installing it. You should also back up your data and settings in case something goes wrong.</li>
<li><strong>Q: What are the benefits of playing Special Forces Group 2 versi lama?</strong></li>
<li>A: Some of the benefits of playing Special Forces Group 2 versi lama are:</li>
<ul>
<li>You can play with friends who have not updated their game yet.</li>
<li>You can experience the nostalgia of playing the game as it was when it first came out.</li>
<li>You can try some features that were removed or changed in newer versions.</li>
</ul>
<li><strong>Q: What are the drawbacks of playing Special Forces Group 2 versi lama?</strong></li>
<li>A: Some of the drawbacks of playing Special Forces Group 2 versi lama are:</li>
<ul>
<li>You may miss out on new content and improvements that are available in the latest version.</li>
<li>You may encounter bugs or glitches that have been fixed in the latest version.</li>
<li>You may not be able to play online with players who have updated their game.</li>
</ul>
<li><strong>Q: How can I update my game to the latest version if I change my mind?</strong></li>
<li>A: If you want to update your game to the latest version, you can do so by following these steps:</li>
<ol>
<li>Remove the versi lama from your device by going to Settings > Apps > Special Forces Group 2 > Uninstall.</li>
<li>Go to the Play Store and search for Special Forces Group 2.</li>
<li>Tap the game, then tap "Update". The latest version will be downloaded and installed on your device.</li>
<li>You can now launch the game and enjoy playing with the new features.</li>
</ol>
</ul></p> 64aa2da5cf<br />
<br />
<br />
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/paginator.py
DELETED
@@ -1,243 +0,0 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os

from botocore import xform_name
from botocore.compat import OrderedDict
from botocore.docs.bcdoc.restdoc import DocumentStructure
from botocore.docs.method import document_model_driven_method
from botocore.docs.utils import DocumentedShape
from botocore.utils import get_service_module_name


class PaginatorDocumenter:
    def __init__(self, client, service_paginator_model, root_docs_path):
        self._client = client
        self._client_class_name = self._client.__class__.__name__
        self._service_name = self._client.meta.service_model.service_name
        self._service_paginator_model = service_paginator_model
        self._root_docs_path = root_docs_path
        self._USER_GUIDE_LINK = (
            'https://boto3.amazonaws.com/'
            'v1/documentation/api/latest/guide/paginators.html'
        )

    def document_paginators(self, section):
        """Documents the various paginators for a service

        :param section: The section to write to.
        """
        section.style.h2('Paginators')
        self._add_overview(section)
        section.style.new_line()
        section.writeln('The available paginators are:')
        section.style.toctree()

        paginator_names = sorted(
            self._service_paginator_model._paginator_config
        )

        # List the available paginators and then document each paginator.
        for paginator_name in paginator_names:
            section.style.tocitem(
                f'{self._service_name}/paginator/{paginator_name}'
            )
            # Create a new DocumentStructure for each paginator and add contents.
            paginator_doc_structure = DocumentStructure(
                paginator_name, target='html'
            )
            self._add_paginator(paginator_doc_structure, paginator_name)
            # Write paginators in individual/nested files.
            # Path: <root>/reference/services/<service>/paginator/<paginator_name>.rst
            paginator_dir_path = os.path.join(
                self._root_docs_path, self._service_name, 'paginator'
            )
            paginator_doc_structure.write_to_file(
                paginator_dir_path, paginator_name
            )

    def _add_paginator(self, section, paginator_name):
        breadcrumb_section = section.add_new_section('breadcrumb')
        breadcrumb_section.style.ref(
            self._client_class_name, f'../../{self._service_name}'
        )
        breadcrumb_section.write(f' / Paginator / {paginator_name}')
        section.add_title_section(paginator_name)

        # Document the paginator class
        paginator_section = section.add_new_section(paginator_name)
        paginator_section.style.start_sphinx_py_class(
            class_name=(
                f'{self._client_class_name}.Paginator.{paginator_name}'
            )
        )
        paginator_section.style.start_codeblock()
        paginator_section.style.new_line()

        # Document how to instantiate the paginator.
        paginator_section.write(
            f"paginator = client.get_paginator('{xform_name(paginator_name)}')"
        )
        paginator_section.style.end_codeblock()
        paginator_section.style.new_line()
        # Get the pagination model for the particular paginator.
        paginator_config = self._service_paginator_model.get_paginator(
            paginator_name
        )
        document_paginate_method(
            section=paginator_section,
            paginator_name=paginator_name,
            event_emitter=self._client.meta.events,
            service_model=self._client.meta.service_model,
            paginator_config=paginator_config,
        )

    def _add_overview(self, section):
        section.style.new_line()
        section.write(
            'Paginators are available on a client instance '
            'via the ``get_paginator`` method. For more detailed instructions '
            'and examples on the usage of paginators, see the '
            'paginators '
        )
        section.style.external_link(
            title='user guide',
            link=self._USER_GUIDE_LINK,
        )
        section.write('.')
        section.style.new_line()


def document_paginate_method(
    section,
    paginator_name,
    event_emitter,
    service_model,
    paginator_config,
    include_signature=True,
):
    """Documents the paginate method of a paginator

    :param section: The section to write to

    :param paginator_name: The name of the paginator. It is snake cased.

    :param event_emitter: The event emitter to use to emit events

    :param service_model: The service model

    :param paginator_config: The paginator config associated to a particular
        paginator.

    :param include_signature: Whether or not to include the signature.
        It is useful for generating docstrings.
    """
    # Retrieve the operation model of the underlying operation.
    operation_model = service_model.operation_model(paginator_name)

    # Add representations of the request and response parameters
    # we want to include in the description of the paginate method.
    # These are parameters we expose via the botocore interface.
    pagination_config_members = OrderedDict()

    pagination_config_members['MaxItems'] = DocumentedShape(
        name='MaxItems',
        type_name='integer',
        documentation=(
            '<p>The total number of items to return. If the total '
            'number of items available is more than the value '
            'specified in max-items then a <code>NextToken</code> '
            'will be provided in the output that you can use to '
            'resume pagination.</p>'
        ),
    )

    if paginator_config.get('limit_key', None):
        pagination_config_members['PageSize'] = DocumentedShape(
            name='PageSize',
            type_name='integer',
            documentation='<p>The size of each page.</p>',
        )

    pagination_config_members['StartingToken'] = DocumentedShape(
        name='StartingToken',
        type_name='string',
        documentation=(
            '<p>A token to specify where to start paginating. '
            'This is the <code>NextToken</code> from a previous '
            'response.</p>'
        ),
    )

    botocore_pagination_params = [
        DocumentedShape(
            name='PaginationConfig',
            type_name='structure',
            documentation=(
                '<p>A dictionary that provides parameters to control '
                'pagination.</p>'
            ),
            members=pagination_config_members,
        )
    ]

    botocore_pagination_response_params = [
        DocumentedShape(
            name='NextToken',
            type_name='string',
            documentation=('<p>A token to resume pagination.</p>'),
        )
    ]

    service_pagination_params = []

    # Add the normal input token of the method to a list
    # of input parameters that we wish to hide since we expose our own.
    if isinstance(paginator_config['input_token'], list):
        service_pagination_params += paginator_config['input_token']
    else:
        service_pagination_params.append(paginator_config['input_token'])

    # Hide the limit key in the documentation.
    if paginator_config.get('limit_key', None):
        service_pagination_params.append(paginator_config['limit_key'])

    # Hide the output tokens in the documentation.
    service_pagination_response_params = []
    if isinstance(paginator_config['output_token'], list):
        service_pagination_response_params += paginator_config['output_token']
    else:
        service_pagination_response_params.append(
            paginator_config['output_token']
        )

    paginate_description = (
        'Creates an iterator that will paginate through responses '
        'from :py:meth:`{}.Client.{}`.'.format(
            get_service_module_name(service_model), xform_name(paginator_name)
        )
    )

    document_model_driven_method(
        section,
        'paginate',
        operation_model,
        event_emitter=event_emitter,
        method_description=paginate_description,
        example_prefix='response_iterator = paginator.paginate',
        include_input=botocore_pagination_params,
        include_output=botocore_pagination_response_params,
        exclude_input=service_pagination_params,
        exclude_output=service_pagination_response_params,
        include_signature=include_signature,
    )
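
The module above only generates documentation, but the interface it documents is the standard boto3 paginator pattern. Here is a minimal sketch of that pattern from the caller's side; the S3 client and the bucket name are illustrative assumptions, while the PaginationConfig keys are exactly the MaxItems / PageSize / StartingToken shapes declared in document_paginate_method.

import boto3

s3 = boto3.client("s3")
paginator = s3.get_paginator("list_objects_v2")

# PaginationConfig carries the documented MaxItems / PageSize / StartingToken keys.
response_iterator = paginator.paginate(
    Bucket="my-example-bucket",  # hypothetical bucket name
    PaginationConfig={"MaxItems": 50, "PageSize": 10},
)
for page in response_iterator:
    for obj in page.get("Contents", []):
        print(obj["Key"])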
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/evaluation/lvis_evaluation.py
DELETED
@@ -1,350 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import itertools
import json
import logging
import os
import pickle
from collections import OrderedDict
import torch
from fvcore.common.file_io import PathManager

import detectron2.utils.comm as comm
from detectron2.data import MetadataCatalog
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.logger import create_small_table

from .coco_evaluation import instances_to_coco_json
from .evaluator import DatasetEvaluator


class LVISEvaluator(DatasetEvaluator):
    """
    Evaluate object proposal and instance detection/segmentation outputs using
    LVIS's metrics and evaluation API.
    """

    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have the following corresponding metadata:
                "json_file": the path to the LVIS format annotation
            cfg (CfgNode): config instance
            distributed (True): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump results.
        """
        from lvis import LVIS

        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._output_dir = output_dir

        self._cpu_device = torch.device("cpu")
        self._logger = logging.getLogger(__name__)

        self._metadata = MetadataCatalog.get(dataset_name)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        self._lvis_api = LVIS(json_file)
        # Test set json files do not contain annotations (evaluation must be
        # performed using the LVIS evaluation server).
        self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0

    def reset(self):
        self._predictions = []

    def _tasks_from_config(self, cfg):
        """
        Returns:
            tuple[str]: tasks that can be evaluated under the given configuration.
        """
        tasks = ("bbox",)
        if cfg.MODEL.MASK_ON:
            tasks = tasks + ("segm",)
        return tasks

    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN).
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a LVIS model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            prediction = {"image_id": input["image_id"]}

            if "instances" in output:
                instances = output["instances"].to(self._cpu_device)
                prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
            if "proposals" in output:
                prediction["proposals"] = output["proposals"].to(self._cpu_device)
            self._predictions.append(prediction)

    def evaluate(self):
        if self._distributed:
            comm.synchronize()
            predictions = comm.gather(self._predictions, dst=0)
            predictions = list(itertools.chain(*predictions))

            if not comm.is_main_process():
                return
        else:
            predictions = self._predictions

        if len(predictions) == 0:
            self._logger.warning("[LVISEvaluator] Did not receive valid predictions.")
            return {}

        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(predictions, f)

        self._results = OrderedDict()
        if "proposals" in predictions[0]:
            self._eval_box_proposals(predictions)
        if "instances" in predictions[0]:
            self._eval_predictions(set(self._tasks), predictions)
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)

    def _eval_predictions(self, tasks, predictions):
        """
        Evaluate predictions on the given tasks.
        Fill self._results with the metrics of the tasks.

        Args:
            predictions (list[dict]): list of outputs from the model
        """
        self._logger.info("Preparing results in the LVIS format ...")
        lvis_results = list(itertools.chain(*[x["instances"] for x in predictions]))

        # LVIS evaluator can be used to evaluate results for COCO dataset categories.
        # In this case `_metadata` variable will have a field with COCO-specific category mapping.
        if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
            reverse_id_mapping = {
                v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
            }
            for result in lvis_results:
                result["category_id"] = reverse_id_mapping[result["category_id"]]
        else:
            # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
            for result in lvis_results:
                result["category_id"] += 1

        if self._output_dir:
            file_path = os.path.join(self._output_dir, "lvis_instances_results.json")
            self._logger.info("Saving results to {}".format(file_path))
            with PathManager.open(file_path, "w") as f:
                f.write(json.dumps(lvis_results))
                f.flush()

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info("Evaluating predictions ...")
        for task in sorted(tasks):
            res = _evaluate_predictions_on_lvis(
                self._lvis_api, lvis_results, task, class_names=self._metadata.get("thing_classes")
            )
            self._results[task] = res

    def _eval_box_proposals(self, predictions):
        """
        Evaluate the box proposals in predictions.
        Fill self._results with the metrics for "box_proposals" task.
        """
        if self._output_dir:
            # Saving generated box proposals to file.
            # Predicted box_proposals are in XYXY_ABS mode.
            bbox_mode = BoxMode.XYXY_ABS.value
            ids, boxes, objectness_logits = [], [], []
            for prediction in predictions:
                ids.append(prediction["image_id"])
                boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
                objectness_logits.append(prediction["proposals"].objectness_logits.numpy())

            proposal_data = {
                "boxes": boxes,
                "objectness_logits": objectness_logits,
                "ids": ids,
                "bbox_mode": bbox_mode,
            }
            with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
                pickle.dump(proposal_data, f)

        if not self._do_evaluation:
            self._logger.info("Annotations are not available for evaluation.")
            return

        self._logger.info("Evaluating bbox proposals ...")
        res = {}
        areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
        for limit in [100, 1000]:
            for area, suffix in areas.items():
                stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit)
                key = "AR{}@{:d}".format(suffix, limit)
                res[key] = float(stats["ar"].item() * 100)
        self._logger.info("Proposal metrics: \n" + create_small_table(res))
        self._results["box_proposals"] = res


# inspired from Detectron:
# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255  # noqa
def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None):
    """
    Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official LVIS API recall evaluation code. However,
    it produces slightly different results.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0 ** 2, 1e5 ** 2],  # all
        [0 ** 2, 32 ** 2],  # small
        [32 ** 2, 96 ** 2],  # medium
        [96 ** 2, 1e5 ** 2],  # large
        [96 ** 2, 128 ** 2],  # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],
    ]  # 512-inf
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0

    for prediction_dict in dataset_predictions:
        predictions = prediction_dict["proposals"]

        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = predictions.objectness_logits.sort(descending=True)[1]
        predictions = predictions[inds]

        ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]])
        anno = lvis_api.load_anns(ann_ids)
        gt_boxes = [
            BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno
        ]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
gt_boxes = Boxes(gt_boxes)
|
247 |
-
gt_areas = torch.as_tensor([obj["area"] for obj in anno])
|
248 |
-
|
249 |
-
if len(gt_boxes) == 0 or len(predictions) == 0:
|
250 |
-
continue
|
251 |
-
|
252 |
-
valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
|
253 |
-
gt_boxes = gt_boxes[valid_gt_inds]
|
254 |
-
|
255 |
-
num_pos += len(gt_boxes)
|
256 |
-
|
257 |
-
if len(gt_boxes) == 0:
|
258 |
-
continue
|
259 |
-
|
260 |
-
if limit is not None and len(predictions) > limit:
|
261 |
-
predictions = predictions[:limit]
|
262 |
-
|
263 |
-
overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
|
264 |
-
|
265 |
-
_gt_overlaps = torch.zeros(len(gt_boxes))
|
266 |
-
for j in range(min(len(predictions), len(gt_boxes))):
|
267 |
-
# find which proposal box maximally covers each gt box
|
268 |
-
# and get the iou amount of coverage for each gt box
|
269 |
-
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
|
270 |
-
|
271 |
-
# find which gt box is 'best' covered (i.e. 'best' = most iou)
|
272 |
-
gt_ovr, gt_ind = max_overlaps.max(dim=0)
|
273 |
-
assert gt_ovr >= 0
|
274 |
-
# find the proposal box that covers the best covered gt box
|
275 |
-
box_ind = argmax_overlaps[gt_ind]
|
276 |
-
# record the iou coverage of this gt box
|
277 |
-
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
|
278 |
-
assert _gt_overlaps[j] == gt_ovr
|
279 |
-
# mark the proposal box and the gt box as used
|
280 |
-
overlaps[box_ind, :] = -1
|
281 |
-
overlaps[:, gt_ind] = -1
|
282 |
-
|
283 |
-
# append recorded iou coverage level
|
284 |
-
gt_overlaps.append(_gt_overlaps)
|
285 |
-
gt_overlaps = (
|
286 |
-
torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
|
287 |
-
)
|
288 |
-
gt_overlaps, _ = torch.sort(gt_overlaps)
|
289 |
-
|
290 |
-
if thresholds is None:
|
291 |
-
step = 0.05
|
292 |
-
thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
|
293 |
-
recalls = torch.zeros_like(thresholds)
|
294 |
-
# compute recall for each iou threshold
|
295 |
-
for i, t in enumerate(thresholds):
|
296 |
-
recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
|
297 |
-
# ar = 2 * np.trapz(recalls, thresholds)
|
298 |
-
ar = recalls.mean()
|
299 |
-
return {
|
300 |
-
"ar": ar,
|
301 |
-
"recalls": recalls,
|
302 |
-
"thresholds": thresholds,
|
303 |
-
"gt_overlaps": gt_overlaps,
|
304 |
-
"num_pos": num_pos,
|
305 |
-
}
|
306 |
-
|
307 |
-
|
308 |
-
def _evaluate_predictions_on_lvis(lvis_gt, lvis_results, iou_type, class_names=None):
|
309 |
-
"""
|
310 |
-
Args:
|
311 |
-
iou_type (str):
|
312 |
-
kpt_oks_sigmas (list[float]):
|
313 |
-
class_names (None or list[str]): if provided, will use it to predict
|
314 |
-
per-category AP.
|
315 |
-
|
316 |
-
Returns:
|
317 |
-
a dict of {metric name: score}
|
318 |
-
"""
|
319 |
-
metrics = {
|
320 |
-
"bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
|
321 |
-
"segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
|
322 |
-
}[iou_type]
|
323 |
-
|
324 |
-
logger = logging.getLogger(__name__)
|
325 |
-
|
326 |
-
if len(lvis_results) == 0: # TODO: check if needed
|
327 |
-
logger.warn("No predictions from the model!")
|
328 |
-
return {metric: float("nan") for metric in metrics}
|
329 |
-
|
330 |
-
if iou_type == "segm":
|
331 |
-
lvis_results = copy.deepcopy(lvis_results)
|
332 |
-
# When evaluating mask AP, if the results contain bbox, LVIS API will
|
333 |
-
# use the box area as the area of the instance, instead of the mask area.
|
334 |
-
# This leads to a different definition of small/medium/large.
|
335 |
-
# We remove the bbox field to let mask AP use mask area.
|
336 |
-
for c in lvis_results:
|
337 |
-
c.pop("bbox", None)
|
338 |
-
|
339 |
-
from lvis import LVISEval, LVISResults
|
340 |
-
|
341 |
-
lvis_results = LVISResults(lvis_gt, lvis_results)
|
342 |
-
lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
|
343 |
-
lvis_eval.run()
|
344 |
-
lvis_eval.print_results()
|
345 |
-
|
346 |
-
# Pull the standard metrics from the LVIS results
|
347 |
-
results = lvis_eval.get_results()
|
348 |
-
results = {metric: float(results[metric] * 100) for metric in metrics}
|
349 |
-
logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results))
|
350 |
-
return results
|
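
For context, the evaluator above follows detectron2's DatasetEvaluator life cycle: reset(), process() once per batch, then evaluate(). A minimal usage sketch under stated assumptions: detectron2 is installed, an LVIS split such as "lvis_v0.5_val" is registered, and the config and weight paths are hypothetical placeholders; none of these values come from this commit.

from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import DefaultPredictor
from detectron2.evaluation import LVISEvaluator, inference_on_dataset

cfg = get_cfg()
cfg.merge_from_file("configs/mask_rcnn_R_50_FPN_1x.yaml")  # hypothetical config path
cfg.MODEL.WEIGHTS = "model_final.pth"                      # hypothetical weights file

predictor = DefaultPredictor(cfg)
loader = build_detection_test_loader(cfg, "lvis_v0.5_val")
evaluator = LVISEvaluator("lvis_v0.5_val", cfg, distributed=False, output_dir="./eval")

# Drives reset()/process()/evaluate() exactly as implemented in the class above.
results = inference_on_dataset(predictor.model, loader, evaluator)
print(results["bbox"]["AP"])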
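
At its core, _evaluate_box_proposals records, for every ground-truth box, the IoU of the best not-yet-used proposal, then averages recall over the IoU thresholds 0.5, 0.55, ..., 0.95. The sketch below reproduces only that final averaging step on synthetic overlap values, not on data from this repository.

import torch

# Synthetic best-IoU values, one per ground-truth box (illustrative only).
gt_overlaps = torch.tensor([0.92, 0.55, 0.41, 0.77, 0.63])
num_pos = len(gt_overlaps)

thresholds = torch.arange(0.5, 0.95 + 1e-5, 0.05)
recalls = torch.zeros_like(thresholds)
for i, t in enumerate(thresholds):
    # fraction of gt boxes matched by some proposal at IoU >= t
    recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)

ar = recalls.mean()  # AR = mean recall over the ten IoU thresholds
print("AR = {:.1f}".format(100 * ar))  # scaled by 100, like the "AR{suffix}@{limit}" keys above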
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/temporary_buffer.h
DELETED
@@ -1,22 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special temporary buffer functions
-
spaces/CVPR/lama-example/saicinpainting/training/data/aug.py
DELETED
@@ -1,84 +0,0 @@
-from albumentations import DualIAATransform, to_tuple
-import imgaug.augmenters as iaa
-
-class IAAAffine2(DualIAATransform):
-    """Apply affine transformations to the input: scaling, translation (by percent or pixels),
-    rotation and shear, mirroring the parameters of imgaug's Affine augmenter.
-
-    Note: This class introduces interpolation artifacts to the mask if it has values other than {0;1}.
-
-    Args:
-        p (float): probability of applying the transform. Default: 0.5.
-
-    Targets:
-        image, mask
-    """
-
-    def __init__(
-        self,
-        scale=(0.7, 1.3),
-        translate_percent=None,
-        translate_px=None,
-        rotate=0.0,
-        shear=(-0.1, 0.1),
-        order=1,
-        cval=0,
-        mode="reflect",
-        always_apply=False,
-        p=0.5,
-    ):
-        super(IAAAffine2, self).__init__(always_apply, p)
-        self.scale = dict(x=scale, y=scale)
-        self.translate_percent = to_tuple(translate_percent, 0)
-        self.translate_px = to_tuple(translate_px, 0)
-        self.rotate = to_tuple(rotate)
-        self.shear = dict(x=shear, y=shear)
-        self.order = order
-        self.cval = cval
-        self.mode = mode
-
-    @property
-    def processor(self):
-        return iaa.Affine(
-            self.scale,
-            self.translate_percent,
-            self.translate_px,
-            self.rotate,
-            self.shear,
-            self.order,
-            self.cval,
-            self.mode,
-        )
-
-    def get_transform_init_args_names(self):
-        return ("scale", "translate_percent", "translate_px", "rotate", "shear", "order", "cval", "mode")
-
-
-class IAAPerspective2(DualIAATransform):
-    """Perform a random four point perspective transform of the input.
-
-    Note: This class introduces interpolation artifacts to the mask if it has values other than {0;1}.
-
-    Args:
-        scale ((float, float)): standard deviation of the normal distributions. These are used to sample
-            the random distances of the subimage's corners from the full image's corners. Default: (0.05, 0.1).
-        p (float): probability of applying the transform. Default: 0.5.
-
-    Targets:
-        image, mask
-    """
-
-    def __init__(self, scale=(0.05, 0.1), keep_size=True, always_apply=False, p=0.5,
-                 order=1, cval=0, mode="replicate"):
-        super(IAAPerspective2, self).__init__(always_apply, p)
-        self.scale = to_tuple(scale, 1.0)
-        self.keep_size = keep_size
-        self.cval = cval
-        self.mode = mode
-
-    @property
-    def processor(self):
-        return iaa.PerspectiveTransform(self.scale, keep_size=self.keep_size, mode=self.mode, cval=self.cval)
-
-    def get_transform_init_args_names(self):
-        return ("scale", "keep_size")
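
Both classes above implement the DualIAATransform interface from pre-1.0 albumentations, when the imgaug-backed transforms were still available (later releases removed them). A plausible usage sketch under that assumption, with a synthetic image and a binary mask:

import numpy as np
import albumentations as A

# Assumes the lama repository is on PYTHONPATH; the import path matches the file above.
from saicinpainting.training.data.aug import IAAAffine2, IAAPerspective2

transform = A.Compose([
    IAAPerspective2(scale=(0.0, 0.06)),
    IAAAffine2(scale=(0.7, 1.3), rotate=(-40, 40), shear=(-0.1, 0.1)),
])

image = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)
mask = np.zeros((256, 256), dtype=np.uint8)  # values in {0;1} avoid interpolation artifacts
mask[64:192, 64:192] = 1

out = transform(image=image, mask=mask)  # image and mask receive the same random geometry
aug_image, aug_mask = out["image"], out["mask"]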
spaces/ChrisPreston/diff-svc_minato_aqua/modules/hubert/hubert_onnx.py
DELETED
@@ -1,19 +0,0 @@
-import time
-
-import torch
-import torchaudio
-
-
-def get_onnx_units(hbt_soft, raw_wav_path):
-    source, sr = torchaudio.load(raw_wav_path)
-    source = torchaudio.functional.resample(source, sr, 16000)
-    if len(source.shape) == 2 and source.shape[0] >= 2:  # downmix multi-channel audio (channels first) to mono
-        source = torch.mean(source, dim=0).unsqueeze(0)
-    source = source.unsqueeze(0)
-    # Run inference with ONNX Runtime
-    start = time.time()
-    units = hbt_soft.run(output_names=["units"],
-                         input_feed={"wav": source.numpy()})[0]
-    use_time = time.time() - start
-    print("hubert_onnx_session.run time:{}".format(use_time))
-    return units
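
get_onnx_units expects hbt_soft to be an ONNX Runtime session whose graph takes a "wav" input and exposes a "units" output, as the run call above shows. A minimal sketch of constructing such a session; the model filename is a placeholder, since the exported model is not part of this diff.

import onnxruntime as ort

# Placeholder path: the actual exported HuBERT ONNX model name is not in this snapshot.
hbt_soft = ort.InferenceSession("hubert_soft.onnx", providers=["CPUExecutionProvider"])

units = get_onnx_units(hbt_soft, "input.wav")  # numpy array of HuBERT unit features
print(units.shape)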
spaces/CikeyQI/Yunzai/Yunzai/lib/tools/name.js
DELETED
@@ -1,35 +0,0 @@
-import fs from "node:fs"
-import childProcess from "child_process"
-
-const _path = process.cwd()
-
-fs.readFile(`${_path}/config/pm2/pm2.json`, `utf8`, (err, data) => {
-  if (err) {
-    console.log('Error reading pm2.json:', err)
-    return
-  }
-
-  try {
-    const config = JSON.parse(data)
-    if (config.apps && config.apps.length > 0 && config.apps[0].name) {
-      const appName = config.apps[0].name
-      console.log(config.apps[0].name)
-      runPm2Logs(appName)
-    } else {
-      console.log('Read failed: could not find a "name" entry in pm2.json')
-    }
-  } catch (parseError) {
-    console.log('Read failed: error while parsing the JSON file', parseError)
-  }
-})
-
-function runPm2Logs(appName) {
-  const command = process.platform === 'win32' ? 'pm2.cmd' : 'pm2'
-  const args = ['logs', '--lines', '400', appName]
-  const pm2LogsProcess = childProcess.spawn(command, args, { stdio: 'inherit' })
-  pm2LogsProcess.on('exit', (code) => {
-    if (code !== 0) {
-      console.error(`pm2 logs process exited with code ${code}`)
-    }
-  })
-}
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/T_S_I__1.py
DELETED
@@ -1,164 +0,0 @@
-""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
-tool to store its hinting source data.
-
-TSI1 contains the text of the glyph programs in the form of low-level assembly
-code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
-"""
-from . import DefaultTable
-from fontTools.misc.loggingTools import LogMixin
-from fontTools.misc.textTools import strjoin, tobytes, tostr
-
-
-class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
-
-    extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}
-
-    indextable = "TSI0"
-
-    def decompile(self, data, ttFont):
-        totalLength = len(data)
-        indextable = ttFont[self.indextable]
-        for indices, isExtra in zip(
-            (indextable.indices, indextable.extra_indices), (False, True)
-        ):
-            programs = {}
-            for i, (glyphID, textLength, textOffset) in enumerate(indices):
-                if isExtra:
-                    name = self.extras[glyphID]
-                else:
-                    name = ttFont.getGlyphName(glyphID)
-                if textOffset > totalLength:
-                    self.log.warning("textOffset > totalLength; %r skipped" % name)
-                    continue
-                if textLength < 0x8000:
-                    # If the length stored in the record is less than 32768, then use
-                    # that as the length of the record.
-                    pass
-                elif textLength == 0x8000:
-                    # If the length is 32768, compute the actual length as follows:
-                    isLast = i == (len(indices) - 1)
-                    if isLast:
-                        if isExtra:
-                            # For the last "extra" record (the very last record of the
-                            # table), the length is the difference between the total
-                            # length of the TSI1 table and the textOffset of the final
-                            # record.
-                            nextTextOffset = totalLength
-                        else:
-                            # For the last "normal" record (the last record just prior
-                            # to the record containing the "magic number"), the length
-                            # is the difference between the textOffset of the record
-                            # following the "magic number" (0xFFFE) record (i.e. the
-                            # first "extra" record), and the textOffset of the last
-                            # "normal" record.
-                            nextTextOffset = indextable.extra_indices[0][2]
-                    else:
-                        # For all other records with a length of 0x8000, the length is
-                        # the difference between the textOffset of the record in
-                        # question and the textOffset of the next record.
-                        nextTextOffset = indices[i + 1][2]
-                    assert nextTextOffset >= textOffset, "entries not sorted by offset"
-                    if nextTextOffset > totalLength:
-                        self.log.warning(
-                            "nextTextOffset > totalLength; %r truncated" % name
-                        )
-                        nextTextOffset = totalLength
-                    textLength = nextTextOffset - textOffset
-                else:
-                    from fontTools import ttLib
-
-                    raise ttLib.TTLibError(
-                        "%r textLength (%d) must not be > 32768" % (name, textLength)
-                    )
-                text = data[textOffset : textOffset + textLength]
-                assert len(text) == textLength
-                text = tostr(text, encoding="utf-8")
-                if text:
-                    programs[name] = text
-            if isExtra:
-                self.extraPrograms = programs
-            else:
-                self.glyphPrograms = programs
-
-    def compile(self, ttFont):
-        if not hasattr(self, "glyphPrograms"):
-            self.glyphPrograms = {}
-            self.extraPrograms = {}
-        data = b""
-        indextable = ttFont[self.indextable]
-        glyphNames = ttFont.getGlyphOrder()
-
-        indices = []
-        for i in range(len(glyphNames)):
-            if len(data) % 2:
-                data = (
-                    data + b"\015"
-                )  # align on 2-byte boundaries, fill with return chars. Yum.
-            name = glyphNames[i]
-            if name in self.glyphPrograms:
-                text = tobytes(self.glyphPrograms[name], encoding="utf-8")
-            else:
-                text = b""
-            textLength = len(text)
-            if textLength >= 0x8000:
-                textLength = 0x8000
-            indices.append((i, textLength, len(data)))
-            data = data + text
-
-        extra_indices = []
-        codes = sorted(self.extras.items())
-        for i in range(len(codes)):
-            if len(data) % 2:
-                data = (
-                    data + b"\015"
-                )  # align on 2-byte boundaries, fill with return chars.
-            code, name = codes[i]
-            if name in self.extraPrograms:
-                text = tobytes(self.extraPrograms[name], encoding="utf-8")
-            else:
-                text = b""
-            textLength = len(text)
-            if textLength >= 0x8000:
-                textLength = 0x8000
-            extra_indices.append((code, textLength, len(data)))
-            data = data + text
-        indextable.set(indices, extra_indices)
-        return data
-
-    def toXML(self, writer, ttFont):
-        names = sorted(self.glyphPrograms.keys())
-        writer.newline()
-        for name in names:
-            text = self.glyphPrograms[name]
-            if not text:
-                continue
-            writer.begintag("glyphProgram", name=name)
-            writer.newline()
-            writer.write_noindent(text.replace("\r", "\n"))
-            writer.newline()
-            writer.endtag("glyphProgram")
-            writer.newline()
-            writer.newline()
-        extra_names = sorted(self.extraPrograms.keys())
-        for name in extra_names:
-            text = self.extraPrograms[name]
-            if not text:
-                continue
-            writer.begintag("extraProgram", name=name)
-            writer.newline()
-            writer.write_noindent(text.replace("\r", "\n"))
-            writer.newline()
-            writer.endtag("extraProgram")
-            writer.newline()
-            writer.newline()
-
-    def fromXML(self, name, attrs, content, ttFont):
-        if not hasattr(self, "glyphPrograms"):
-            self.glyphPrograms = {}
-            self.extraPrograms = {}
-        lines = strjoin(content).replace("\r", "\n").split("\n")
-        text = "\r".join(lines[1:-1])
-        if name == "glyphProgram":
-            self.glyphPrograms[attrs["name"]] = text
-        elif name == "extraProgram":
-            self.extraPrograms[attrs["name"]] = text
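
Because TSI1 stores the VTT hinting source as plain text keyed by glyph name, it can be inspected directly through fontTools once a font is loaded. A small sketch, assuming a VTT-hinted font file; the filename is a placeholder.

from fontTools.ttLib import TTFont

font = TTFont("vtt_hinted.ttf")  # placeholder path; the font must carry TSI0/TSI1
if "TSI1" in font:
    tsi1 = font["TSI1"]
    # Per-glyph assembly source, decompiled via the length rules implemented above.
    print(tsi1.glyphPrograms.get("A", "<no program for glyph 'A'>"))
    # The 'extra' programs: ppgm (prep), cvt, fpgm.
    print(sorted(tsi1.extraPrograms))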
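
The 0x8000 sentinel handling in decompile can also be restated as a standalone helper, which may make the three cases easier to follow. This is a sketch of the same rules applied to toy records, not a fontTools API.

def resolve_text_length(i, indices, total_length, first_extra_offset=None):
    """Resolve the stored textLength of record i under the TSI1 rules above.

    indices: list of (glyph_id, text_length, text_offset) records of one block.
    first_extra_offset: textOffset of the first 'extra' record, used when record i
    is the last 'normal' record. None means indices already is the extra block.
    """
    _, text_length, text_offset = indices[i]
    if text_length < 0x8000:
        return text_length  # stored length is used as-is
    if text_length > 0x8000:
        raise ValueError("textLength must not be > 32768")
    # 0x8000 sentinel: derive the real length from the next offset.
    if i + 1 < len(indices):
        next_offset = indices[i + 1][2]       # any non-final record
    elif first_extra_offset is not None:
        next_offset = first_extra_offset      # last "normal" record
    else:
        next_offset = total_length            # very last record of the table
    return min(next_offset, total_length) - text_offset

# Toy records: the second one uses the sentinel and runs to the table boundary.
records = [(0, 10, 0), (1, 0x8000, 10)]
print(resolve_text_length(1, records, total_length=40010))  # -> 40000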
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/wrapper-6f348d45-38be7a64.js
DELETED
@@ -1,8 +0,0 @@
import S from"./__vite-browser-external-b25bb000.js";function z(s){return s&&s.__esModule&&Object.prototype.hasOwnProperty.call(s,"default")?s.default:s}function gt(s){if(s.__esModule)return s;var e=s.default;if(typeof e=="function"){var t=function r(){if(this instanceof r){var i=[null];i.push.apply(i,arguments);var n=Function.bind.apply(e,i);return new n}return e.apply(this,arguments)};t.prototype=e.prototype}else t={};return Object.defineProperty(t,"__esModule",{value:!0}),Object.keys(s).forEach(function(r){var i=Object.getOwnPropertyDescriptor(s,r);Object.defineProperty(t,r,i.get?i:{enumerable:!0,get:function(){return s[r]}})}),t}const{Duplex:yt}=S;function Oe(s){s.emit("close")}function vt(){!this.destroyed&&this._writableState.finished&&this.destroy()}function Qe(s){this.removeListener("error",Qe),this.destroy(),this.listenerCount("error")===0&&this.emit("error",s)}function St(s,e){let t=!0;const r=new yt({...e,autoDestroy:!1,emitClose:!1,objectMode:!1,writableObjectMode:!1});return s.on("message",function(n,o){const l=!o&&r._readableState.objectMode?n.toString():n;r.push(l)||s.pause()}),s.once("error",function(n){r.destroyed||(t=!1,r.destroy(n))}),s.once("close",function(){r.destroyed||r.push(null)}),r._destroy=function(i,n){if(s.readyState===s.CLOSED){n(i),process.nextTick(Oe,r);return}let o=!1;s.once("error",function(f){o=!0,n(f)}),s.once("close",function(){o||n(i),process.nextTick(Oe,r)}),t&&s.terminate()},r._final=function(i){if(s.readyState===s.CONNECTING){s.once("open",function(){r._final(i)});return}s._socket!==null&&(s._socket._writableState.finished?(i(),r._readableState.endEmitted&&r.destroy()):(s._socket.once("finish",function(){i()}),s.close()))},r._read=function(){s.isPaused&&s.resume()},r._write=function(i,n,o){if(s.readyState===s.CONNECTING){s.once("open",function(){r._write(i,n,o)});return}s.send(i,o)},r.on("end",vt),r.on("error",Qe),r}var Et=St;const Vs=z(Et);var te={exports:{}},U={BINARY_TYPES:["nodebuffer","arraybuffer","fragments"],EMPTY_BUFFER:Buffer.alloc(0),GUID:"258EAFA5-E914-47DA-95CA-C5AB0DC85B11",kForOnEventAttribute:Symbol("kIsForOnEventAttribute"),kListener:Symbol("kListener"),kStatusCode:Symbol("status-code"),kWebSocket:Symbol("websocket"),NOOP:()=>{}},bt,xt;const{EMPTY_BUFFER:kt}=U,Se=Buffer[Symbol.species];function wt(s,e){if(s.length===0)return kt;if(s.length===1)return s[0];const t=Buffer.allocUnsafe(e);let r=0;for(let i=0;i<s.length;i++){const n=s[i];t.set(n,r),r+=n.length}return r<e?new Se(t.buffer,t.byteOffset,r):t}function Je(s,e,t,r,i){for(let n=0;n<i;n++)t[r+n]=s[n]^e[n&3]}function et(s,e){for(let t=0;t<s.length;t++)s[t]^=e[t&3]}function Ot(s){return s.length===s.buffer.byteLength?s.buffer:s.buffer.slice(s.byteOffset,s.byteOffset+s.length)}function Ee(s){if(Ee.readOnly=!0,Buffer.isBuffer(s))return s;let e;return s instanceof ArrayBuffer?e=new Se(s):ArrayBuffer.isView(s)?e=new Se(s.buffer,s.byteOffset,s.byteLength):(e=Buffer.from(s),Ee.readOnly=!1),e}te.exports={concat:wt,mask:Je,toArrayBuffer:Ot,toBuffer:Ee,unmask:et};if(!{}.WS_NO_BUFFER_UTIL)try{const s=require("bufferutil");xt=te.exports.mask=function(e,t,r,i,n){n<48?Je(e,t,r,i,n):s.mask(e,t,r,i,n)},bt=te.exports.unmask=function(e,t){e.length<32?et(e,t):s.unmask(e,t)}}catch{}var ne=te.exports;const Ce=Symbol("kDone"),ue=Symbol("kRun");let Ct=class{constructor(e){this[Ce]=()=>{this.pending--,this[ue]()},this.concurrency=e||1/0,this.jobs=[],this.pending=0}add(e){this.jobs.push(e),this[ue]()}[ue](){if(this.pending!==this.concurrency&&this.jobs.length){const 
e=this.jobs.shift();this.pending++,e(this[Ce])}}};var Tt=Ct;const W=S,Te=ne,Lt=Tt,{kStatusCode:tt}=U,Nt=Buffer[Symbol.species],Pt=Buffer.from([0,0,255,255]),se=Symbol("permessage-deflate"),w=Symbol("total-length"),V=Symbol("callback"),C=Symbol("buffers"),J=Symbol("error");let K,Rt=class{constructor(e,t,r){if(this._maxPayload=r|0,this._options=e||{},this._threshold=this._options.threshold!==void 0?this._options.threshold:1024,this._isServer=!!t,this._deflate=null,this._inflate=null,this.params=null,!K){const i=this._options.concurrencyLimit!==void 0?this._options.concurrencyLimit:10;K=new Lt(i)}}static get extensionName(){return"permessage-deflate"}offer(){const e={};return this._options.serverNoContextTakeover&&(e.server_no_context_takeover=!0),this._options.clientNoContextTakeover&&(e.client_no_context_takeover=!0),this._options.serverMaxWindowBits&&(e.server_max_window_bits=this._options.serverMaxWindowBits),this._options.clientMaxWindowBits?e.client_max_window_bits=this._options.clientMaxWindowBits:this._options.clientMaxWindowBits==null&&(e.client_max_window_bits=!0),e}accept(e){return e=this.normalizeParams(e),this.params=this._isServer?this.acceptAsServer(e):this.acceptAsClient(e),this.params}cleanup(){if(this._inflate&&(this._inflate.close(),this._inflate=null),this._deflate){const e=this._deflate[V];this._deflate.close(),this._deflate=null,e&&e(new Error("The deflate stream was closed while data was being processed"))}}acceptAsServer(e){const t=this._options,r=e.find(i=>!(t.serverNoContextTakeover===!1&&i.server_no_context_takeover||i.server_max_window_bits&&(t.serverMaxWindowBits===!1||typeof t.serverMaxWindowBits=="number"&&t.serverMaxWindowBits>i.server_max_window_bits)||typeof t.clientMaxWindowBits=="number"&&!i.client_max_window_bits));if(!r)throw new Error("None of the extension offers can be accepted");return t.serverNoContextTakeover&&(r.server_no_context_takeover=!0),t.clientNoContextTakeover&&(r.client_no_context_takeover=!0),typeof t.serverMaxWindowBits=="number"&&(r.server_max_window_bits=t.serverMaxWindowBits),typeof t.clientMaxWindowBits=="number"?r.client_max_window_bits=t.clientMaxWindowBits:(r.client_max_window_bits===!0||t.clientMaxWindowBits===!1)&&delete r.client_max_window_bits,r}acceptAsClient(e){const t=e[0];if(this._options.clientNoContextTakeover===!1&&t.client_no_context_takeover)throw new Error('Unexpected parameter "client_no_context_takeover"');if(!t.client_max_window_bits)typeof this._options.clientMaxWindowBits=="number"&&(t.client_max_window_bits=this._options.clientMaxWindowBits);else if(this._options.clientMaxWindowBits===!1||typeof this._options.clientMaxWindowBits=="number"&&t.client_max_window_bits>this._options.clientMaxWindowBits)throw new Error('Unexpected or invalid parameter "client_max_window_bits"');return t}normalizeParams(e){return e.forEach(t=>{Object.keys(t).forEach(r=>{let i=t[r];if(i.length>1)throw new Error(`Parameter "${r}" must have only a single value`);if(i=i[0],r==="client_max_window_bits"){if(i!==!0){const n=+i;if(!Number.isInteger(n)||n<8||n>15)throw new TypeError(`Invalid value for parameter "${r}": ${i}`);i=n}else if(!this._isServer)throw new TypeError(`Invalid value for parameter "${r}": ${i}`)}else if(r==="server_max_window_bits"){const n=+i;if(!Number.isInteger(n)||n<8||n>15)throw new TypeError(`Invalid value for parameter "${r}": ${i}`);i=n}else if(r==="client_no_context_takeover"||r==="server_no_context_takeover"){if(i!==!0)throw new TypeError(`Invalid value for parameter "${r}": ${i}`)}else throw new Error(`Unknown 
parameter "${r}"`);t[r]=i})}),e}decompress(e,t,r){K.add(i=>{this._decompress(e,t,(n,o)=>{i(),r(n,o)})})}compress(e,t,r){K.add(i=>{this._compress(e,t,(n,o)=>{i(),r(n,o)})})}_decompress(e,t,r){const i=this._isServer?"client":"server";if(!this._inflate){const n=`${i}_max_window_bits`,o=typeof this.params[n]!="number"?W.Z_DEFAULT_WINDOWBITS:this.params[n];this._inflate=W.createInflateRaw({...this._options.zlibInflateOptions,windowBits:o}),this._inflate[se]=this,this._inflate[w]=0,this._inflate[C]=[],this._inflate.on("error",Bt),this._inflate.on("data",st)}this._inflate[V]=r,this._inflate.write(e),t&&this._inflate.write(Pt),this._inflate.flush(()=>{const n=this._inflate[J];if(n){this._inflate.close(),this._inflate=null,r(n);return}const o=Te.concat(this._inflate[C],this._inflate[w]);this._inflate._readableState.endEmitted?(this._inflate.close(),this._inflate=null):(this._inflate[w]=0,this._inflate[C]=[],t&&this.params[`${i}_no_context_takeover`]&&this._inflate.reset()),r(null,o)})}_compress(e,t,r){const i=this._isServer?"server":"client";if(!this._deflate){const n=`${i}_max_window_bits`,o=typeof this.params[n]!="number"?W.Z_DEFAULT_WINDOWBITS:this.params[n];this._deflate=W.createDeflateRaw({...this._options.zlibDeflateOptions,windowBits:o}),this._deflate[w]=0,this._deflate[C]=[],this._deflate.on("data",Ut)}this._deflate[V]=r,this._deflate.write(e),this._deflate.flush(W.Z_SYNC_FLUSH,()=>{if(!this._deflate)return;let n=Te.concat(this._deflate[C],this._deflate[w]);t&&(n=new Nt(n.buffer,n.byteOffset,n.length-4)),this._deflate[V]=null,this._deflate[w]=0,this._deflate[C]=[],t&&this.params[`${i}_no_context_takeover`]&&this._deflate.reset(),r(null,n)})}};var oe=Rt;function Ut(s){this[C].push(s),this[w]+=s.length}function st(s){if(this[w]+=s.length,this[se]._maxPayload<1||this[w]<=this[se]._maxPayload){this[C].push(s);return}this[J]=new RangeError("Max payload size exceeded"),this[J].code="WS_ERR_UNSUPPORTED_MESSAGE_LENGTH",this[J][tt]=1009,this.removeListener("data",st),this.reset()}function Bt(s){this[se]._inflate=null,s[tt]=1007,this[V](s)}var re={exports:{}};const $t={},Mt=Object.freeze(Object.defineProperty({__proto__:null,default:$t},Symbol.toStringTag,{value:"Module"})),It=gt(Mt);var Le;const{isUtf8:Ne}=S,Dt=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,1,1,0,0,1,1,0,1,1,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,0];function Wt(s){return s>=1e3&&s<=1014&&s!==1004&&s!==1005&&s!==1006||s>=3e3&&s<=4999}function be(s){const e=s.length;let t=0;for(;t<e;)if(!(s[t]&128))t++;else if((s[t]&224)===192){if(t+1===e||(s[t+1]&192)!==128||(s[t]&254)===192)return!1;t+=2}else if((s[t]&240)===224){if(t+2>=e||(s[t+1]&192)!==128||(s[t+2]&192)!==128||s[t]===224&&(s[t+1]&224)===128||s[t]===237&&(s[t+1]&224)===160)return!1;t+=3}else if((s[t]&248)===240){if(t+3>=e||(s[t+1]&192)!==128||(s[t+2]&192)!==128||(s[t+3]&192)!==128||s[t]===240&&(s[t+1]&240)===128||s[t]===244&&s[t+1]>143||s[t]>244)return!1;t+=4}else return!1;return!0}re.exports={isValidStatusCode:Wt,isValidUTF8:be,tokenChars:Dt};if(Ne)Le=re.exports.isValidUTF8=function(s){return s.length<24?be(s):Ne(s)};else if(!{}.WS_NO_UTF_8_VALIDATE)try{const s=It;Le=re.exports.isValidUTF8=function(e){return e.length<32?be(e):s(e)}}catch{}var 
ae=re.exports;const{Writable:At}=S,Pe=oe,{BINARY_TYPES:Ft,EMPTY_BUFFER:Re,kStatusCode:jt,kWebSocket:Gt}=U,{concat:de,toArrayBuffer:Vt,unmask:Ht}=ne,{isValidStatusCode:zt,isValidUTF8:Ue}=ae,X=Buffer[Symbol.species],A=0,Be=1,$e=2,Me=3,_e=4,Yt=5;let qt=class extends At{constructor(e={}){super(),this._binaryType=e.binaryType||Ft[0],this._extensions=e.extensions||{},this._isServer=!!e.isServer,this._maxPayload=e.maxPayload|0,this._skipUTF8Validation=!!e.skipUTF8Validation,this[Gt]=void 0,this._bufferedBytes=0,this._buffers=[],this._compressed=!1,this._payloadLength=0,this._mask=void 0,this._fragmented=0,this._masked=!1,this._fin=!1,this._opcode=0,this._totalPayloadLength=0,this._messageLength=0,this._fragments=[],this._state=A,this._loop=!1}_write(e,t,r){if(this._opcode===8&&this._state==A)return r();this._bufferedBytes+=e.length,this._buffers.push(e),this.startLoop(r)}consume(e){if(this._bufferedBytes-=e,e===this._buffers[0].length)return this._buffers.shift();if(e<this._buffers[0].length){const r=this._buffers[0];return this._buffers[0]=new X(r.buffer,r.byteOffset+e,r.length-e),new X(r.buffer,r.byteOffset,e)}const t=Buffer.allocUnsafe(e);do{const r=this._buffers[0],i=t.length-e;e>=r.length?t.set(this._buffers.shift(),i):(t.set(new Uint8Array(r.buffer,r.byteOffset,e),i),this._buffers[0]=new X(r.buffer,r.byteOffset+e,r.length-e)),e-=r.length}while(e>0);return t}startLoop(e){let t;this._loop=!0;do switch(this._state){case A:t=this.getInfo();break;case Be:t=this.getPayloadLength16();break;case $e:t=this.getPayloadLength64();break;case Me:this.getMask();break;case _e:t=this.getData(e);break;default:this._loop=!1;return}while(this._loop);e(t)}getInfo(){if(this._bufferedBytes<2){this._loop=!1;return}const e=this.consume(2);if(e[0]&48)return this._loop=!1,g(RangeError,"RSV2 and RSV3 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_2_3");const t=(e[0]&64)===64;if(t&&!this._extensions[Pe.extensionName])return this._loop=!1,g(RangeError,"RSV1 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_1");if(this._fin=(e[0]&128)===128,this._opcode=e[0]&15,this._payloadLength=e[1]&127,this._opcode===0){if(t)return this._loop=!1,g(RangeError,"RSV1 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_1");if(!this._fragmented)return this._loop=!1,g(RangeError,"invalid opcode 0",!0,1002,"WS_ERR_INVALID_OPCODE");this._opcode=this._fragmented}else if(this._opcode===1||this._opcode===2){if(this._fragmented)return this._loop=!1,g(RangeError,`invalid opcode ${this._opcode}`,!0,1002,"WS_ERR_INVALID_OPCODE");this._compressed=t}else if(this._opcode>7&&this._opcode<11){if(!this._fin)return this._loop=!1,g(RangeError,"FIN must be set",!0,1002,"WS_ERR_EXPECTED_FIN");if(t)return this._loop=!1,g(RangeError,"RSV1 must be clear",!0,1002,"WS_ERR_UNEXPECTED_RSV_1");if(this._payloadLength>125||this._opcode===8&&this._payloadLength===1)return this._loop=!1,g(RangeError,`invalid payload length ${this._payloadLength}`,!0,1002,"WS_ERR_INVALID_CONTROL_PAYLOAD_LENGTH")}else return this._loop=!1,g(RangeError,`invalid opcode ${this._opcode}`,!0,1002,"WS_ERR_INVALID_OPCODE");if(!this._fin&&!this._fragmented&&(this._fragmented=this._opcode),this._masked=(e[1]&128)===128,this._isServer){if(!this._masked)return this._loop=!1,g(RangeError,"MASK must be set",!0,1002,"WS_ERR_EXPECTED_MASK")}else if(this._masked)return this._loop=!1,g(RangeError,"MASK must be clear",!0,1002,"WS_ERR_UNEXPECTED_MASK");if(this._payloadLength===126)this._state=Be;else if(this._payloadLength===127)this._state=$e;else return 
this.haveLength()}getPayloadLength16(){if(this._bufferedBytes<2){this._loop=!1;return}return this._payloadLength=this.consume(2).readUInt16BE(0),this.haveLength()}getPayloadLength64(){if(this._bufferedBytes<8){this._loop=!1;return}const e=this.consume(8),t=e.readUInt32BE(0);return t>Math.pow(2,53-32)-1?(this._loop=!1,g(RangeError,"Unsupported WebSocket frame: payload length > 2^53 - 1",!1,1009,"WS_ERR_UNSUPPORTED_DATA_PAYLOAD_LENGTH")):(this._payloadLength=t*Math.pow(2,32)+e.readUInt32BE(4),this.haveLength())}haveLength(){if(this._payloadLength&&this._opcode<8&&(this._totalPayloadLength+=this._payloadLength,this._totalPayloadLength>this._maxPayload&&this._maxPayload>0))return this._loop=!1,g(RangeError,"Max payload size exceeded",!1,1009,"WS_ERR_UNSUPPORTED_MESSAGE_LENGTH");this._masked?this._state=Me:this._state=_e}getMask(){if(this._bufferedBytes<4){this._loop=!1;return}this._mask=this.consume(4),this._state=_e}getData(e){let t=Re;if(this._payloadLength){if(this._bufferedBytes<this._payloadLength){this._loop=!1;return}t=this.consume(this._payloadLength),this._masked&&this._mask[0]|this._mask[1]|this._mask[2]|this._mask[3]&&Ht(t,this._mask)}if(this._opcode>7)return this.controlMessage(t);if(this._compressed){this._state=Yt,this.decompress(t,e);return}return t.length&&(this._messageLength=this._totalPayloadLength,this._fragments.push(t)),this.dataMessage()}decompress(e,t){this._extensions[Pe.extensionName].decompress(e,this._fin,(i,n)=>{if(i)return t(i);if(n.length){if(this._messageLength+=n.length,this._messageLength>this._maxPayload&&this._maxPayload>0)return t(g(RangeError,"Max payload size exceeded",!1,1009,"WS_ERR_UNSUPPORTED_MESSAGE_LENGTH"));this._fragments.push(n)}const o=this.dataMessage();if(o)return t(o);this.startLoop(t)})}dataMessage(){if(this._fin){const e=this._messageLength,t=this._fragments;if(this._totalPayloadLength=0,this._messageLength=0,this._fragmented=0,this._fragments=[],this._opcode===2){let r;this._binaryType==="nodebuffer"?r=de(t,e):this._binaryType==="arraybuffer"?r=Vt(de(t,e)):r=t,this.emit("message",r,!0)}else{const r=de(t,e);if(!this._skipUTF8Validation&&!Ue(r))return this._loop=!1,g(Error,"invalid UTF-8 sequence",!0,1007,"WS_ERR_INVALID_UTF8");this.emit("message",r,!1)}}this._state=A}controlMessage(e){if(this._opcode===8)if(this._loop=!1,e.length===0)this.emit("conclude",1005,Re),this.end();else{const t=e.readUInt16BE(0);if(!zt(t))return g(RangeError,`invalid status code ${t}`,!0,1002,"WS_ERR_INVALID_CLOSE_CODE");const r=new X(e.buffer,e.byteOffset+2,e.length-2);if(!this._skipUTF8Validation&&!Ue(r))return g(Error,"invalid UTF-8 sequence",!0,1007,"WS_ERR_INVALID_UTF8");this.emit("conclude",t,r),this.end()}else this._opcode===9?this.emit("ping",e):this.emit("pong",e);this._state=A}};var rt=qt;function g(s,e,t,r,i){const n=new s(t?`Invalid WebSocket frame: ${e}`:e);return Error.captureStackTrace(n,g),n.code=i,n[jt]=r,n}const qs=z(rt),{randomFillSync:Kt}=S,Ie=oe,{EMPTY_BUFFER:Xt}=U,{isValidStatusCode:Zt}=ae,{mask:De,toBuffer:M}=ne,x=Symbol("kByteLength"),Qt=Buffer.alloc(4);let Jt=class P{constructor(e,t,r){this._extensions=t||{},r&&(this._generateMask=r,this._maskBuffer=Buffer.alloc(4)),this._socket=e,this._firstFragment=!0,this._compress=!1,this._bufferedBytes=0,this._deflating=!1,this._queue=[]}static frame(e,t){let r,i=!1,n=2,o=!1;t.mask&&(r=t.maskBuffer||Qt,t.generateMask?t.generateMask(r):Kt(r,0,4),o=(r[0]|r[1]|r[2]|r[3])===0,n=6);let l;typeof e=="string"?(!t.mask||o)&&t[x]!==void 
0?l=t[x]:(e=Buffer.from(e),l=e.length):(l=e.length,i=t.mask&&t.readOnly&&!o);let f=l;l>=65536?(n+=8,f=127):l>125&&(n+=2,f=126);const a=Buffer.allocUnsafe(i?l+n:n);return a[0]=t.fin?t.opcode|128:t.opcode,t.rsv1&&(a[0]|=64),a[1]=f,f===126?a.writeUInt16BE(l,2):f===127&&(a[2]=a[3]=0,a.writeUIntBE(l,4,6)),t.mask?(a[1]|=128,a[n-4]=r[0],a[n-3]=r[1],a[n-2]=r[2],a[n-1]=r[3],o?[a,e]:i?(De(e,r,a,n,l),[a]):(De(e,r,e,0,l),[a,e])):[a,e]}close(e,t,r,i){let n;if(e===void 0)n=Xt;else{if(typeof e!="number"||!Zt(e))throw new TypeError("First argument must be a valid error code number");if(t===void 0||!t.length)n=Buffer.allocUnsafe(2),n.writeUInt16BE(e,0);else{const l=Buffer.byteLength(t);if(l>123)throw new RangeError("The message must not be greater than 123 bytes");n=Buffer.allocUnsafe(2+l),n.writeUInt16BE(e,0),typeof t=="string"?n.write(t,2):n.set(t,2)}}const o={[x]:n.length,fin:!0,generateMask:this._generateMask,mask:r,maskBuffer:this._maskBuffer,opcode:8,readOnly:!1,rsv1:!1};this._deflating?this.enqueue([this.dispatch,n,!1,o,i]):this.sendFrame(P.frame(n,o),i)}ping(e,t,r){let i,n;if(typeof e=="string"?(i=Buffer.byteLength(e),n=!1):(e=M(e),i=e.length,n=M.readOnly),i>125)throw new RangeError("The data size must not be greater than 125 bytes");const o={[x]:i,fin:!0,generateMask:this._generateMask,mask:t,maskBuffer:this._maskBuffer,opcode:9,readOnly:n,rsv1:!1};this._deflating?this.enqueue([this.dispatch,e,!1,o,r]):this.sendFrame(P.frame(e,o),r)}pong(e,t,r){let i,n;if(typeof e=="string"?(i=Buffer.byteLength(e),n=!1):(e=M(e),i=e.length,n=M.readOnly),i>125)throw new RangeError("The data size must not be greater than 125 bytes");const o={[x]:i,fin:!0,generateMask:this._generateMask,mask:t,maskBuffer:this._maskBuffer,opcode:10,readOnly:n,rsv1:!1};this._deflating?this.enqueue([this.dispatch,e,!1,o,r]):this.sendFrame(P.frame(e,o),r)}send(e,t,r){const i=this._extensions[Ie.extensionName];let n=t.binary?2:1,o=t.compress,l,f;if(typeof e=="string"?(l=Buffer.byteLength(e),f=!1):(e=M(e),l=e.length,f=M.readOnly),this._firstFragment?(this._firstFragment=!1,o&&i&&i.params[i._isServer?"server_no_context_takeover":"client_no_context_takeover"]&&(o=l>=i._threshold),this._compress=o):(o=!1,n=0),t.fin&&(this._firstFragment=!0),i){const a={[x]:l,fin:t.fin,generateMask:this._generateMask,mask:t.mask,maskBuffer:this._maskBuffer,opcode:n,readOnly:f,rsv1:o};this._deflating?this.enqueue([this.dispatch,e,this._compress,a,r]):this.dispatch(e,this._compress,a,r)}else this.sendFrame(P.frame(e,{[x]:l,fin:t.fin,generateMask:this._generateMask,mask:t.mask,maskBuffer:this._maskBuffer,opcode:n,readOnly:f,rsv1:!1}),r)}dispatch(e,t,r,i){if(!t){this.sendFrame(P.frame(e,r),i);return}const n=this._extensions[Ie.extensionName];this._bufferedBytes+=r[x],this._deflating=!0,n.compress(e,r.fin,(o,l)=>{if(this._socket.destroyed){const f=new Error("The socket was closed while data was being compressed");typeof i=="function"&&i(f);for(let a=0;a<this._queue.length;a++){const c=this._queue[a],h=c[c.length-1];typeof h=="function"&&h(f)}return}this._bufferedBytes-=r[x],this._deflating=!1,r.readOnly=!1,this.sendFrame(P.frame(l,r),i),this.dequeue()})}dequeue(){for(;!this._deflating&&this._queue.length;){const e=this._queue.shift();this._bufferedBytes-=e[3][x],Reflect.apply(e[0],this,e.slice(1))}}enqueue(e){this._bufferedBytes+=e[3][x],this._queue.push(e)}sendFrame(e,t){e.length===2?(this._socket.cork(),this._socket.write(e[0]),this._socket.write(e[1],t),this._socket.uncork()):this._socket.write(e[0],t)}};var it=Jt;const 
Ks=z(it),{kForOnEventAttribute:F,kListener:pe}=U,We=Symbol("kCode"),Ae=Symbol("kData"),Fe=Symbol("kError"),je=Symbol("kMessage"),Ge=Symbol("kReason"),I=Symbol("kTarget"),Ve=Symbol("kType"),He=Symbol("kWasClean");class B{constructor(e){this[I]=null,this[Ve]=e}get target(){return this[I]}get type(){return this[Ve]}}Object.defineProperty(B.prototype,"target",{enumerable:!0});Object.defineProperty(B.prototype,"type",{enumerable:!0});class Y extends B{constructor(e,t={}){super(e),this[We]=t.code===void 0?0:t.code,this[Ge]=t.reason===void 0?"":t.reason,this[He]=t.wasClean===void 0?!1:t.wasClean}get code(){return this[We]}get reason(){return this[Ge]}get wasClean(){return this[He]}}Object.defineProperty(Y.prototype,"code",{enumerable:!0});Object.defineProperty(Y.prototype,"reason",{enumerable:!0});Object.defineProperty(Y.prototype,"wasClean",{enumerable:!0});class le extends B{constructor(e,t={}){super(e),this[Fe]=t.error===void 0?null:t.error,this[je]=t.message===void 0?"":t.message}get error(){return this[Fe]}get message(){return this[je]}}Object.defineProperty(le.prototype,"error",{enumerable:!0});Object.defineProperty(le.prototype,"message",{enumerable:!0});class xe extends B{constructor(e,t={}){super(e),this[Ae]=t.data===void 0?null:t.data}get data(){return this[Ae]}}Object.defineProperty(xe.prototype,"data",{enumerable:!0});const es={addEventListener(s,e,t={}){for(const i of this.listeners(s))if(!t[F]&&i[pe]===e&&!i[F])return;let r;if(s==="message")r=function(n,o){const l=new xe("message",{data:o?n:n.toString()});l[I]=this,Z(e,this,l)};else if(s==="close")r=function(n,o){const l=new Y("close",{code:n,reason:o.toString(),wasClean:this._closeFrameReceived&&this._closeFrameSent});l[I]=this,Z(e,this,l)};else if(s==="error")r=function(n){const o=new le("error",{error:n,message:n.message});o[I]=this,Z(e,this,o)};else if(s==="open")r=function(){const n=new B("open");n[I]=this,Z(e,this,n)};else return;r[F]=!!t[F],r[pe]=e,t.once?this.once(s,r):this.on(s,r)},removeEventListener(s,e){for(const t of this.listeners(s))if(t[pe]===e&&!t[F]){this.removeListener(s,t);break}}};var ts={CloseEvent:Y,ErrorEvent:le,Event:B,EventTarget:es,MessageEvent:xe};function Z(s,e,t){typeof s=="object"&&s.handleEvent?s.handleEvent.call(s,t):s.call(e,t)}const{tokenChars:j}=ae;function k(s,e,t){s[e]===void 0?s[e]=[t]:s[e].push(t)}function ss(s){const e=Object.create(null);let t=Object.create(null),r=!1,i=!1,n=!1,o,l,f=-1,a=-1,c=-1,h=0;for(;h<s.length;h++)if(a=s.charCodeAt(h),o===void 0)if(c===-1&&j[a]===1)f===-1&&(f=h);else if(h!==0&&(a===32||a===9))c===-1&&f!==-1&&(c=h);else if(a===59||a===44){if(f===-1)throw new SyntaxError(`Unexpected character at index ${h}`);c===-1&&(c=h);const v=s.slice(f,c);a===44?(k(e,v,t),t=Object.create(null)):o=v,f=c=-1}else throw new SyntaxError(`Unexpected character at index ${h}`);else if(l===void 0)if(c===-1&&j[a]===1)f===-1&&(f=h);else if(a===32||a===9)c===-1&&f!==-1&&(c=h);else if(a===59||a===44){if(f===-1)throw new SyntaxError(`Unexpected character at index ${h}`);c===-1&&(c=h),k(t,s.slice(f,c),!0),a===44&&(k(e,o,t),t=Object.create(null),o=void 0),f=c=-1}else if(a===61&&f!==-1&&c===-1)l=s.slice(f,h),f=c=-1;else throw new SyntaxError(`Unexpected character at index ${h}`);else if(i){if(j[a]!==1)throw new SyntaxError(`Unexpected character at index ${h}`);f===-1?f=h:r||(r=!0),i=!1}else if(n)if(j[a]===1)f===-1&&(f=h);else if(a===34&&f!==-1)n=!1,c=h;else if(a===92)i=!0;else throw new SyntaxError(`Unexpected character at index ${h}`);else if(a===34&&s.charCodeAt(h-1)===61)n=!0;else 
if(c===-1&&j[a]===1)f===-1&&(f=h);else if(f!==-1&&(a===32||a===9))c===-1&&(c=h);else if(a===59||a===44){if(f===-1)throw new SyntaxError(`Unexpected character at index ${h}`);c===-1&&(c=h);let v=s.slice(f,c);r&&(v=v.replace(/\\/g,""),r=!1),k(t,l,v),a===44&&(k(e,o,t),t=Object.create(null),o=void 0),l=void 0,f=c=-1}else throw new SyntaxError(`Unexpected character at index ${h}`);if(f===-1||n||a===32||a===9)throw new SyntaxError("Unexpected end of input");c===-1&&(c=h);const p=s.slice(f,c);return o===void 0?k(e,p,t):(l===void 0?k(t,p,!0):r?k(t,l,p.replace(/\\/g,"")):k(t,l,p),k(e,o,t)),e}function rs(s){return Object.keys(s).map(e=>{let t=s[e];return Array.isArray(t)||(t=[t]),t.map(r=>[e].concat(Object.keys(r).map(i=>{let n=r[i];return Array.isArray(n)||(n=[n]),n.map(o=>o===!0?i:`${i}=${o}`).join("; ")})).join("; ")).join(", ")}).join(", ")}var nt={format:rs,parse:ss};const is=S,ns=S,os=S,ot=S,as=S,{randomBytes:ls,createHash:fs}=S,{URL:me}=S,T=oe,hs=rt,cs=it,{BINARY_TYPES:ze,EMPTY_BUFFER:Q,GUID:us,kForOnEventAttribute:ge,kListener:ds,kStatusCode:_s,kWebSocket:y,NOOP:at}=U,{EventTarget:{addEventListener:ps,removeEventListener:ms}}=ts,{format:gs,parse:ys}=nt,{toBuffer:vs}=ne,Ss=30*1e3,lt=Symbol("kAborted"),ye=[8,13],O=["CONNECTING","OPEN","CLOSING","CLOSED"],Es=/^[!#$%&'*+\-.0-9A-Z^_`|a-z~]+$/;let m=class d extends is{constructor(e,t,r){super(),this._binaryType=ze[0],this._closeCode=1006,this._closeFrameReceived=!1,this._closeFrameSent=!1,this._closeMessage=Q,this._closeTimer=null,this._extensions={},this._paused=!1,this._protocol="",this._readyState=d.CONNECTING,this._receiver=null,this._sender=null,this._socket=null,e!==null?(this._bufferedAmount=0,this._isServer=!1,this._redirects=0,t===void 0?t=[]:Array.isArray(t)||(typeof t=="object"&&t!==null?(r=t,t=[]):t=[t]),ht(this,e,t,r)):this._isServer=!0}get binaryType(){return this._binaryType}set binaryType(e){ze.includes(e)&&(this._binaryType=e,this._receiver&&(this._receiver._binaryType=e))}get bufferedAmount(){return this._socket?this._socket._writableState.length+this._sender._bufferedBytes:this._bufferedAmount}get extensions(){return Object.keys(this._extensions).join()}get isPaused(){return this._paused}get onclose(){return null}get onerror(){return null}get onopen(){return null}get onmessage(){return null}get protocol(){return this._protocol}get readyState(){return this._readyState}get url(){return this._url}setSocket(e,t,r){const i=new hs({binaryType:this.binaryType,extensions:this._extensions,isServer:this._isServer,maxPayload:r.maxPayload,skipUTF8Validation:r.skipUTF8Validation});this._sender=new cs(e,this._extensions,r.generateMask),this._receiver=i,this._socket=e,i[y]=this,e[y]=this,i.on("conclude",ks),i.on("drain",ws),i.on("error",Os),i.on("message",Cs),i.on("ping",Ts),i.on("pong",Ls),e.setTimeout(0),e.setNoDelay(),t.length>0&&e.unshift(t),e.on("close",ut),e.on("data",fe),e.on("end",dt),e.on("error",_t),this._readyState=d.OPEN,this.emit("open")}emitClose(){if(!this._socket){this._readyState=d.CLOSED,this.emit("close",this._closeCode,this._closeMessage);return}this._extensions[T.extensionName]&&this._extensions[T.extensionName].cleanup(),this._receiver.removeAllListeners(),this._readyState=d.CLOSED,this.emit("close",this._closeCode,this._closeMessage)}close(e,t){if(this.readyState!==d.CLOSED){if(this.readyState===d.CONNECTING){const r="WebSocket was closed before the connection was 
established";b(this,this._req,r);return}if(this.readyState===d.CLOSING){this._closeFrameSent&&(this._closeFrameReceived||this._receiver._writableState.errorEmitted)&&this._socket.end();return}this._readyState=d.CLOSING,this._sender.close(e,t,!this._isServer,r=>{r||(this._closeFrameSent=!0,(this._closeFrameReceived||this._receiver._writableState.errorEmitted)&&this._socket.end())}),this._closeTimer=setTimeout(this._socket.destroy.bind(this._socket),Ss)}}pause(){this.readyState===d.CONNECTING||this.readyState===d.CLOSED||(this._paused=!0,this._socket.pause())}ping(e,t,r){if(this.readyState===d.CONNECTING)throw new Error("WebSocket is not open: readyState 0 (CONNECTING)");if(typeof e=="function"?(r=e,e=t=void 0):typeof t=="function"&&(r=t,t=void 0),typeof e=="number"&&(e=e.toString()),this.readyState!==d.OPEN){ve(this,e,r);return}t===void 0&&(t=!this._isServer),this._sender.ping(e||Q,t,r)}pong(e,t,r){if(this.readyState===d.CONNECTING)throw new Error("WebSocket is not open: readyState 0 (CONNECTING)");if(typeof e=="function"?(r=e,e=t=void 0):typeof t=="function"&&(r=t,t=void 0),typeof e=="number"&&(e=e.toString()),this.readyState!==d.OPEN){ve(this,e,r);return}t===void 0&&(t=!this._isServer),this._sender.pong(e||Q,t,r)}resume(){this.readyState===d.CONNECTING||this.readyState===d.CLOSED||(this._paused=!1,this._receiver._writableState.needDrain||this._socket.resume())}send(e,t,r){if(this.readyState===d.CONNECTING)throw new Error("WebSocket is not open: readyState 0 (CONNECTING)");if(typeof t=="function"&&(r=t,t={}),typeof e=="number"&&(e=e.toString()),this.readyState!==d.OPEN){ve(this,e,r);return}const i={binary:typeof e!="string",mask:!this._isServer,compress:!0,fin:!0,...t};this._extensions[T.extensionName]||(i.compress=!1),this._sender.send(e||Q,i,r)}terminate(){if(this.readyState!==d.CLOSED){if(this.readyState===d.CONNECTING){const e="WebSocket was closed before the connection was established";b(this,this._req,e);return}this._socket&&(this._readyState=d.CLOSING,this._socket.destroy())}}};Object.defineProperty(m,"CONNECTING",{enumerable:!0,value:O.indexOf("CONNECTING")});Object.defineProperty(m.prototype,"CONNECTING",{enumerable:!0,value:O.indexOf("CONNECTING")});Object.defineProperty(m,"OPEN",{enumerable:!0,value:O.indexOf("OPEN")});Object.defineProperty(m.prototype,"OPEN",{enumerable:!0,value:O.indexOf("OPEN")});Object.defineProperty(m,"CLOSING",{enumerable:!0,value:O.indexOf("CLOSING")});Object.defineProperty(m.prototype,"CLOSING",{enumerable:!0,value:O.indexOf("CLOSING")});Object.defineProperty(m,"CLOSED",{enumerable:!0,value:O.indexOf("CLOSED")});Object.defineProperty(m.prototype,"CLOSED",{enumerable:!0,value:O.indexOf("CLOSED")});["binaryType","bufferedAmount","extensions","isPaused","protocol","readyState","url"].forEach(s=>{Object.defineProperty(m.prototype,s,{enumerable:!0})});["open","error","close","message"].forEach(s=>{Object.defineProperty(m.prototype,`on${s}`,{enumerable:!0,get(){for(const e of this.listeners(s))if(e[ge])return e[ds];return null},set(e){for(const t of this.listeners(s))if(t[ge]){this.removeListener(s,t);break}typeof e=="function"&&this.addEventListener(s,e,{[ge]:!0})}})});m.prototype.addEventListener=ps;m.prototype.removeEventListener=ms;var ft=m;function ht(s,e,t,r){const i={protocolVersion:ye[1],maxPayload:104857600,skipUTF8Validation:!1,perMessageDeflate:!0,followRedirects:!1,maxRedirects:10,...r,createConnection:void 0,socketPath:void 0,hostname:void 0,protocol:void 0,timeout:void 0,method:"GET",host:void 0,path:void 0,port:void 
0};if(!ye.includes(i.protocolVersion))throw new RangeError(`Unsupported protocol version: ${i.protocolVersion} (supported versions: ${ye.join(", ")})`);let n;if(e instanceof me)n=e,s._url=e.href;else{try{n=new me(e)}catch{throw new SyntaxError(`Invalid URL: ${e}`)}s._url=e}const o=n.protocol==="wss:",l=n.protocol==="ws+unix:";let f;if(n.protocol!=="ws:"&&!o&&!l?f=`The URL's protocol must be one of "ws:", "wss:", or "ws+unix:"`:l&&!n.pathname?f="The URL's pathname is empty":n.hash&&(f="The URL contains a fragment identifier"),f){const u=new SyntaxError(f);if(s._redirects===0)throw u;ee(s,u);return}const a=o?443:80,c=ls(16).toString("base64"),h=o?ns.request:os.request,p=new Set;let v;if(i.createConnection=o?xs:bs,i.defaultPort=i.defaultPort||a,i.port=n.port||a,i.host=n.hostname.startsWith("[")?n.hostname.slice(1,-1):n.hostname,i.headers={...i.headers,"Sec-WebSocket-Version":i.protocolVersion,"Sec-WebSocket-Key":c,Connection:"Upgrade",Upgrade:"websocket"},i.path=n.pathname+n.search,i.timeout=i.handshakeTimeout,i.perMessageDeflate&&(v=new T(i.perMessageDeflate!==!0?i.perMessageDeflate:{},!1,i.maxPayload),i.headers["Sec-WebSocket-Extensions"]=gs({[T.extensionName]:v.offer()})),t.length){for(const u of t){if(typeof u!="string"||!Es.test(u)||p.has(u))throw new SyntaxError("An invalid or duplicated subprotocol was specified");p.add(u)}i.headers["Sec-WebSocket-Protocol"]=t.join(",")}if(i.origin&&(i.protocolVersion<13?i.headers["Sec-WebSocket-Origin"]=i.origin:i.headers.Origin=i.origin),(n.username||n.password)&&(i.auth=`${n.username}:${n.password}`),l){const u=i.path.split(":");i.socketPath=u[0],i.path=u[1]}let _;if(i.followRedirects){if(s._redirects===0){s._originalIpc=l,s._originalSecure=o,s._originalHostOrSocketPath=l?i.socketPath:n.host;const u=r&&r.headers;if(r={...r,headers:{}},u)for(const[E,$]of Object.entries(u))r.headers[E.toLowerCase()]=$}else if(s.listenerCount("redirect")===0){const u=l?s._originalIpc?i.socketPath===s._originalHostOrSocketPath:!1:s._originalIpc?!1:n.host===s._originalHostOrSocketPath;(!u||s._originalSecure&&!o)&&(delete i.headers.authorization,delete i.headers.cookie,u||delete i.headers.host,i.auth=void 0)}i.auth&&!r.headers.authorization&&(r.headers.authorization="Basic "+Buffer.from(i.auth).toString("base64")),_=s._req=h(i),s._redirects&&s.emit("redirect",s.url,_)}else _=s._req=h(i);i.timeout&&_.on("timeout",()=>{b(s,_,"Opening handshake has timed out")}),_.on("error",u=>{_===null||_[lt]||(_=s._req=null,ee(s,u))}),_.on("response",u=>{const E=u.headers.location,$=u.statusCode;if(E&&i.followRedirects&&$>=300&&$<400){if(++s._redirects>i.maxRedirects){b(s,_,"Maximum redirects exceeded");return}_.abort();let q;try{q=new me(E,e)}catch{const L=new SyntaxError(`Invalid URL: ${E}`);ee(s,L);return}ht(s,q,t,r)}else s.emit("unexpected-response",_,u)||b(s,_,`Unexpected server response: ${u.statusCode}`)}),_.on("upgrade",(u,E,$)=>{if(s.emit("upgrade",u),s.readyState!==m.CONNECTING)return;if(_=s._req=null,u.headers.upgrade.toLowerCase()!=="websocket"){b(s,E,"Invalid Upgrade header");return}const q=fs("sha1").update(c+us).digest("base64");if(u.headers["sec-websocket-accept"]!==q){b(s,E,"Invalid Sec-WebSocket-Accept header");return}const D=u.headers["sec-websocket-protocol"];let L;if(D!==void 0?p.size?p.has(D)||(L="Server sent an invalid subprotocol"):L="Server sent a subprotocol but none was requested":p.size&&(L="Server sent no subprotocol"),L){b(s,E,L);return}D&&(s._protocol=D);const ke=u.headers["sec-websocket-extensions"];if(ke!==void 0){if(!v){b(s,E,"Server sent a 
Sec-WebSocket-Extensions header but no extension was requested");return}let he;try{he=ys(ke)}catch{b(s,E,"Invalid Sec-WebSocket-Extensions header");return}const we=Object.keys(he);if(we.length!==1||we[0]!==T.extensionName){b(s,E,"Server indicated an extension that was not requested");return}try{v.accept(he[T.extensionName])}catch{b(s,E,"Invalid Sec-WebSocket-Extensions header");return}s._extensions[T.extensionName]=v}s.setSocket(E,$,{generateMask:i.generateMask,maxPayload:i.maxPayload,skipUTF8Validation:i.skipUTF8Validation})}),i.finishRequest?i.finishRequest(_,s):_.end()}function ee(s,e){s._readyState=m.CLOSING,s.emit("error",e),s.emitClose()}function bs(s){return s.path=s.socketPath,ot.connect(s)}function xs(s){return s.path=void 0,!s.servername&&s.servername!==""&&(s.servername=ot.isIP(s.host)?"":s.host),as.connect(s)}function b(s,e,t){s._readyState=m.CLOSING;const r=new Error(t);Error.captureStackTrace(r,b),e.setHeader?(e[lt]=!0,e.abort(),e.socket&&!e.socket.destroyed&&e.socket.destroy(),process.nextTick(ee,s,r)):(e.destroy(r),e.once("error",s.emit.bind(s,"error")),e.once("close",s.emitClose.bind(s)))}function ve(s,e,t){if(e){const r=vs(e).length;s._socket?s._sender._bufferedBytes+=r:s._bufferedAmount+=r}if(t){const r=new Error(`WebSocket is not open: readyState ${s.readyState} (${O[s.readyState]})`);process.nextTick(t,r)}}function ks(s,e){const t=this[y];t._closeFrameReceived=!0,t._closeMessage=e,t._closeCode=s,t._socket[y]!==void 0&&(t._socket.removeListener("data",fe),process.nextTick(ct,t._socket),s===1005?t.close():t.close(s,e))}function ws(){const s=this[y];s.isPaused||s._socket.resume()}function Os(s){const e=this[y];e._socket[y]!==void 0&&(e._socket.removeListener("data",fe),process.nextTick(ct,e._socket),e.close(s[_s])),e.emit("error",s)}function Ye(){this[y].emitClose()}function Cs(s,e){this[y].emit("message",s,e)}function Ts(s){const e=this[y];e.pong(s,!e._isServer,at),e.emit("ping",s)}function Ls(s){this[y].emit("pong",s)}function ct(s){s.resume()}function ut(){const s=this[y];this.removeListener("close",ut),this.removeListener("data",fe),this.removeListener("end",dt),s._readyState=m.CLOSING;let e;!this._readableState.endEmitted&&!s._closeFrameReceived&&!s._receiver._writableState.errorEmitted&&(e=s._socket.read())!==null&&s._receiver.write(e),s._receiver.end(),this[y]=void 0,clearTimeout(s._closeTimer),s._receiver._writableState.finished||s._receiver._writableState.errorEmitted?s.emitClose():(s._receiver.on("error",Ye),s._receiver.on("finish",Ye))}function fe(s){this[y]._receiver.write(s)||this.pause()}function dt(){const s=this[y];s._readyState=m.CLOSING,s._receiver.end(),this.end()}function _t(){const s=this[y];this.removeListener("error",_t),this.on("error",at),s&&(s._readyState=m.CLOSING,this.destroy())}const Xs=z(ft),{tokenChars:Ns}=ae;function Ps(s){const e=new Set;let t=-1,r=-1,i=0;for(i;i<s.length;i++){const o=s.charCodeAt(i);if(r===-1&&Ns[o]===1)t===-1&&(t=i);else if(i!==0&&(o===32||o===9))r===-1&&t!==-1&&(r=i);else if(o===44){if(t===-1)throw new SyntaxError(`Unexpected character at index ${i}`);r===-1&&(r=i);const l=s.slice(t,r);if(e.has(l))throw new SyntaxError(`The "${l}" subprotocol is duplicated`);e.add(l),t=r=-1}else throw new SyntaxError(`Unexpected character at index ${i}`)}if(t===-1||r!==-1)throw new SyntaxError("Unexpected end of input");const n=s.slice(t,i);if(e.has(n))throw new SyntaxError(`The "${n}" subprotocol is duplicated`);return e.add(n),e}var Rs={parse:Ps};const 
Us=S,ie=S,{createHash:Bs}=S,qe=nt,N=oe,$s=Rs,Ms=ft,{GUID:Is,kWebSocket:Ds}=U,Ws=/^[+/0-9A-Za-z]{22}==$/,Ke=0,Xe=1,pt=2;class As extends Us{constructor(e,t){if(super(),e={maxPayload:100*1024*1024,skipUTF8Validation:!1,perMessageDeflate:!1,handleProtocols:null,clientTracking:!0,verifyClient:null,noServer:!1,backlog:null,server:null,host:null,path:null,port:null,WebSocket:Ms,...e},e.port==null&&!e.server&&!e.noServer||e.port!=null&&(e.server||e.noServer)||e.server&&e.noServer)throw new TypeError('One and only one of the "port", "server", or "noServer" options must be specified');if(e.port!=null?(this._server=ie.createServer((r,i)=>{const n=ie.STATUS_CODES[426];i.writeHead(426,{"Content-Length":n.length,"Content-Type":"text/plain"}),i.end(n)}),this._server.listen(e.port,e.host,e.backlog,t)):e.server&&(this._server=e.server),this._server){const r=this.emit.bind(this,"connection");this._removeListeners=js(this._server,{listening:this.emit.bind(this,"listening"),error:this.emit.bind(this,"error"),upgrade:(i,n,o)=>{this.handleUpgrade(i,n,o,r)}})}e.perMessageDeflate===!0&&(e.perMessageDeflate={}),e.clientTracking&&(this.clients=new Set,this._shouldEmitClose=!1),this.options=e,this._state=Ke}address(){if(this.options.noServer)throw new Error('The server is operating in "noServer" mode');return this._server?this._server.address():null}close(e){if(this._state===pt){e&&this.once("close",()=>{e(new Error("The server is not running"))}),process.nextTick(G,this);return}if(e&&this.once("close",e),this._state!==Xe)if(this._state=Xe,this.options.noServer||this.options.server)this._server&&(this._removeListeners(),this._removeListeners=this._server=null),this.clients?this.clients.size?this._shouldEmitClose=!0:process.nextTick(G,this):process.nextTick(G,this);else{const t=this._server;this._removeListeners(),this._removeListeners=this._server=null,t.close(()=>{G(this)})}}shouldHandle(e){if(this.options.path){const t=e.url.indexOf("?");if((t!==-1?e.url.slice(0,t):e.url)!==this.options.path)return!1}return!0}handleUpgrade(e,t,r,i){t.on("error",Ze);const n=e.headers["sec-websocket-key"],o=+e.headers["sec-websocket-version"];if(e.method!=="GET"){R(this,e,t,405,"Invalid HTTP method");return}if(e.headers.upgrade.toLowerCase()!=="websocket"){R(this,e,t,400,"Invalid Upgrade header");return}if(!n||!Ws.test(n)){R(this,e,t,400,"Missing or invalid Sec-WebSocket-Key header");return}if(o!==8&&o!==13){R(this,e,t,400,"Missing or invalid Sec-WebSocket-Version header");return}if(!this.shouldHandle(e)){H(t,400);return}const l=e.headers["sec-websocket-protocol"];let f=new Set;if(l!==void 0)try{f=$s.parse(l)}catch{R(this,e,t,400,"Invalid Sec-WebSocket-Protocol header");return}const a=e.headers["sec-websocket-extensions"],c={};if(this.options.perMessageDeflate&&a!==void 0){const h=new N(this.options.perMessageDeflate,!0,this.options.maxPayload);try{const p=qe.parse(a);p[N.extensionName]&&(h.accept(p[N.extensionName]),c[N.extensionName]=h)}catch{R(this,e,t,400,"Invalid or unacceptable Sec-WebSocket-Extensions header");return}}if(this.options.verifyClient){const h={origin:e.headers[`${o===8?"sec-websocket-origin":"origin"}`],secure:!!(e.socket.authorized||e.socket.encrypted),req:e};if(this.options.verifyClient.length===2){this.options.verifyClient(h,(p,v,_,u)=>{if(!p)return H(t,v||401,_,u);this.completeUpgrade(c,n,f,e,t,r,i)});return}if(!this.options.verifyClient(h))return H(t,401)}this.completeUpgrade(c,n,f,e,t,r,i)}completeUpgrade(e,t,r,i,n,o,l){if(!n.readable||!n.writable)return n.destroy();if(n[Ds])throw new 
Error("server.handleUpgrade() was called more than once with the same socket, possibly due to a misconfiguration");if(this._state>Ke)return H(n,503);const a=["HTTP/1.1 101 Switching Protocols","Upgrade: websocket","Connection: Upgrade",`Sec-WebSocket-Accept: ${Bs("sha1").update(t+Is).digest("base64")}`],c=new this.options.WebSocket(null);if(r.size){const h=this.options.handleProtocols?this.options.handleProtocols(r,i):r.values().next().value;h&&(a.push(`Sec-WebSocket-Protocol: ${h}`),c._protocol=h)}if(e[N.extensionName]){const h=e[N.extensionName].params,p=qe.format({[N.extensionName]:[h]});a.push(`Sec-WebSocket-Extensions: ${p}`),c._extensions=e}this.emit("headers",a,i),n.write(a.concat(`\r
`).join(`\r
`)),n.removeListener("error",Ze),c.setSocket(n,o,{maxPayload:this.options.maxPayload,skipUTF8Validation:this.options.skipUTF8Validation}),this.clients&&(this.clients.add(c),c.on("close",()=>{this.clients.delete(c),this._shouldEmitClose&&!this.clients.size&&process.nextTick(G,this)})),l(c,i)}}var Fs=As;function js(s,e){for(const t of Object.keys(e))s.on(t,e[t]);return function(){for(const r of Object.keys(e))s.removeListener(r,e[r])}}function G(s){s._state=pt,s.emit("close")}function Ze(){this.destroy()}function H(s,e,t,r){t=t||ie.STATUS_CODES[e],r={Connection:"close","Content-Type":"text/html","Content-Length":Buffer.byteLength(t),...r},s.once("finish",s.destroy),s.end(`HTTP/1.1 ${e} ${ie.STATUS_CODES[e]}\r
`+Object.keys(r).map(i=>`${i}: ${r[i]}`).join(`\r
`)+`\r
\r
`+t)}function R(s,e,t,r,i){if(s.listenerCount("wsClientError")){const n=new Error(i);Error.captureStackTrace(n,R),s.emit("wsClientError",n,t,e)}else H(t,r,i)}const Zs=z(Fs);export{qs as Receiver,Ks as Sender,Xs as WebSocket,Zs as WebSocketServer,Vs as createWebSocketStream,Xs as default};
//# sourceMappingURL=wrapper-6f348d45-38be7a64.js.map
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/test_data/__init__.py
DELETED
File without changes
spaces/Dao3/Top-20-Models/cake.css
DELETED
@@ -1,34 +0,0 @@
.lg.svelte-1ma3u5b{
    line-height:5em;
    background:cornflowerblue;
    color:white;!important;
}
.gradio-container-3-18-0 .prose h1{
    font-size:4rem;!important;
}
#component-1188.lg.svelte-1ma3u5b{
    background:#fd5050;!important;
    color:white;!important;
}
#component-1187.lg.svelte-1ma3u5b{
    background:#63b163;!important;
    color:white;!important;
}

#component-1137 h4{
    font-size:2rem;!important;
    margin:0;!important;
}
#component-1138 h4{
    font-size:2rem;!important;
    margin:0;!important;
}
#component-1160.block.svelte-faijhx{
    height:500px;!important;
}
#component-1160 div.svelte-116rqfv{
    height:500px;!important;
}
#component-1160.fixed-height.svelte-zyic3i{
    height:500px;!important;
}
spaces/Demi2809/rvc-models/app-full.py
DELETED
@@ -1,254 +0,0 @@
import os
import json
import argparse
import traceback
import logging
import gradio as gr
import numpy as np
import librosa
import torch
import asyncio
import edge_tts
import yt_dlp
import ffmpeg
import subprocess
import sys
import io
import wave
from datetime import datetime
from fairseq import checkpoint_utils
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from vc_infer_pipeline import VC
from config import (
    is_half,
    device
)
logging.getLogger("numba").setLevel(logging.WARNING)
limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in huggingface spaces

def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
    def vc_fn(
        input_audio,
        upload_audio,
        upload_mode,
        f0_up_key,
        f0_method,
        index_rate,
        tts_mode,
        tts_text,
        tts_voice
    ):
        try:
            if tts_mode:
                if len(tts_text) > 100 and limitation:
                    return "Text is too long", None
                if tts_text is None or tts_voice is None:
                    return "You need to enter text and select a voice", None
                asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
                audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
            else:
                if upload_mode:
                    if input_audio is None:
                        return "You need to upload an audio", None
                    sampling_rate, audio = upload_audio
                    duration = audio.shape[0] / sampling_rate
                    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
                    if len(audio.shape) > 1:
                        audio = librosa.to_mono(audio.transpose(1, 0))
                    if sampling_rate != 16000:
                        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
                else:
                    audio, sr = librosa.load(input_audio, sr=16000, mono=True)
            times = [0, 0, 0]
            f0_up_key = int(f0_up_key)
            audio_opt = vc.pipeline(
                hubert_model,
                net_g,
                0,
                audio,
                times,
                f0_up_key,
                f0_method,
                file_index,
                file_big_npy,
                index_rate,
                if_f0,
            )
            print(
                f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
            )
            return "Success", (tgt_sr, audio_opt)
        except:
            info = traceback.format_exc()
            print(info)
            return info, (None, None)
    return vc_fn

def cut_vocal_and_inst(yt_url):
    if yt_url != "":
        if not os.path.exists("youtube_audio"):
            os.mkdir("youtube_audio")
        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'wav',
            }],
            "outtmpl": 'youtube_audio/audio',
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([yt_url])
        yt_audio_path = "youtube_audio/audio.wav"
        command = f"demucs --two-stems=vocals {yt_audio_path}"
        result = subprocess.run(command.split(), stdout=subprocess.PIPE)
        print(result.stdout.decode())
        return ("separated/htdemucs/audio/vocals.wav", "separated/htdemucs/audio/no_vocals.wav", yt_audio_path, "separated/htdemucs/audio/vocals.wav")

def combine_vocal_and_inst(audio_data, audio_volume):
    print(audio_data)
    if not os.path.exists("result"):
        os.mkdir("result")
    vocal_path = "result/output.wav"
    inst_path = "separated/htdemucs/audio/no_vocals.wav"
    output_path = "result/combine.mp3"
    with wave.open(vocal_path, "w") as wave_file:
        wave_file.setnchannels(1)
        wave_file.setsampwidth(2)
        wave_file.setframerate(audio_data[0])
        wave_file.writeframes(audio_data[1].tobytes())
    command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [1:a]volume={audio_volume}dB[v];[0:a][v]amix=inputs=2:duration=longest -b:a 320k -c:a libmp3lame {output_path}'
    result = subprocess.run(command.split(), stdout=subprocess.PIPE)
    return output_path

def load_hubert():
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(device)
    if is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    hubert_model.eval()

def change_to_tts_mode(tts_mode, upload_mode):
    if tts_mode:
        return gr.Textbox.update(visible=False), gr.Audio.update(visible=False), gr.Checkbox.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
    else:
        if upload_mode:
            return gr.Textbox.update(visible=False), gr.Audio.update(visible=True), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
        else:
            return gr.Textbox.update(visible=True), gr.Audio.update(visible=False), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)

def change_to_upload_mode(upload_mode):
    if upload_mode:
        return gr.Textbox().update(visible=False), gr.Audio().update(visible=True)
    else:
        return gr.Textbox().update(visible=True), gr.Audio().update(visible=False)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--api', action="store_true", default=False)
    parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
    args, unknown = parser.parse_known_args()
    load_hubert()
    models = []
    tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
    voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
    with open("weights/model_info.json", "r", encoding="utf-8") as f:
        models_info = json.load(f)
    for name, info in models_info.items():
        if not info['enable']:
            continue
        title = info['title']
        author = info.get("author", None)
        cover = f"weights/{name}/{info['cover']}"
        index = f"weights/{name}/{info['feature_retrieval_library']}"
        npy = f"weights/{name}/{info['feature_file']}"
        cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
        tgt_sr = cpt["config"][-1]
        cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
        if_f0 = cpt.get("f0", 1)
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
        del net_g.enc_q
        print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the state is not cleaned up properly, oddly enough
        net_g.eval().to(device)
        if is_half:
            net_g = net_g.half()
        else:
            net_g = net_g.float()
        vc = VC(tgt_sr, device, is_half)
        models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
    with gr.Blocks() as app:
        gr.Markdown(
            "# <center> RVC Models\n"
            "## <center> The input audio should be clean and pure voice without background music.\n"
            "### <center> More feature will be added soon... \n"
            "[](https://colab.research.google.com/drive/1hx6kKvIuv5XNY1Gai2PEuZhpO5z6xpVh?usp=sharing)\n\n"
            "[](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
        )
        with gr.Tabs():
            for (name, title, author, cover, vc_fn) in models:
                with gr.TabItem(name):
                    with gr.Row():
                        gr.Markdown(
                            '<div align="center">'
                            f'<div>{title}</div>\n'+
                            (f'<div>Model author: {author}</div>' if author else "")+
                            (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
                            '</div>'
                        )
                    with gr.Row():
                        with gr.Column():
                            vc_youtube = gr.Textbox(label="Youtube URL")
                            vc_convert = gr.Button("Convert", variant="primary")
                            vc_vocal_preview = gr.Audio(label="Vocal Preview")
                            vc_inst_preview = gr.Audio(label="Instrumental Preview")
                            vc_audio_preview = gr.Audio(label="Audio Preview")
                        with gr.Column():
                            vc_input = gr.Textbox(label="Input audio path")
                            vc_upload = gr.Audio(label="Upload audio file", visible=False, interactive=True)
                            upload_mode = gr.Checkbox(label="Upload mode", value=False)
                            vc_transpose = gr.Number(label="Transpose", value=0)
                            vc_f0method = gr.Radio(
                                label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies",
                                choices=["pm", "harvest"],
                                value="pm",
                                interactive=True,
                            )
                            vc_index_ratio = gr.Slider(
                                minimum=0,
                                maximum=1,
                                label="Retrieval feature ratio",
                                value=0.6,
                                interactive=True,
                            )
                            tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
                            tts_text = gr.Textbox(visible=False, label="TTS text (100 words limitation)" if limitation else "TTS text")
                            tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
                            vc_output1 = gr.Textbox(label="Output Message")
                            vc_output2 = gr.Audio(label="Output Audio")
                            vc_submit = gr.Button("Generate", variant="primary")
                        with gr.Column():
                            vc_volume = gr.Slider(
                                minimum=0,
                                maximum=10,
                                label="Vocal volume",
                                value=4,
                                interactive=True,
                                step=1
                            )
                            vc_outputCombine = gr.Audio(label="Output Combined Audio")
                            vc_combine = gr.Button("Combine", variant="primary")
                    vc_submit.click(vc_fn, [vc_input, vc_upload, upload_mode, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
                    vc_convert.click(cut_vocal_and_inst, vc_youtube, [vc_vocal_preview, vc_inst_preview, vc_audio_preview, vc_input])
                    vc_combine.click(combine_vocal_and_inst, [vc_output2, vc_volume], vc_outputCombine)
                    tts_mode.change(change_to_tts_mode, [tts_mode, upload_mode], [vc_input, vc_upload, upload_mode, tts_text, tts_voice])
                    upload_mode.change(change_to_upload_mode, [upload_mode], [vc_input, vc_upload])
    app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.colab)
spaces/Detomo/ai-comic-generation/next.config.js
DELETED
@@ -1,11 +0,0 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
  output: 'standalone',

  experimental: {
    serverActions: true,
    serverActionsBodySizeLimit: '8mb',
  },
}

module.exports = nextConfig
spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/dataset_tool.py
DELETED
@@ -1,645 +0,0 @@
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.

"""Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN."""

# pylint: disable=too-many-lines
import os
import sys
import glob
import argparse
import threading
import six.moves.queue as Queue # pylint: disable=import-error
import traceback
import numpy as np
import tensorflow as tf
import PIL.Image
import dnnlib.tflib as tflib

from training import dataset

#----------------------------------------------------------------------------

def error(msg):
    print('Error: ' + msg)
    exit(1)

#----------------------------------------------------------------------------

class TFRecordExporter:
    def __init__(self, tfrecord_dir, expected_images, print_progress=True, progress_interval=10):
        self.tfrecord_dir = tfrecord_dir
        self.tfr_prefix = os.path.join(self.tfrecord_dir, os.path.basename(self.tfrecord_dir))
        self.expected_images = expected_images
        self.cur_images = 0
        self.shape = None
        self.resolution_log2 = None
        self.tfr_writers = []
        self.print_progress = print_progress
        self.progress_interval = progress_interval

        if self.print_progress:
            print('Creating dataset "%s"' % tfrecord_dir)
        if not os.path.isdir(self.tfrecord_dir):
            os.makedirs(self.tfrecord_dir)
        assert os.path.isdir(self.tfrecord_dir)

    def close(self):
        if self.print_progress:
            print('%-40s\r' % 'Flushing data...', end='', flush=True)
        for tfr_writer in self.tfr_writers:
            tfr_writer.close()
        self.tfr_writers = []
        if self.print_progress:
            print('%-40s\r' % '', end='', flush=True)
            print('Added %d images.' % self.cur_images)

    def choose_shuffled_order(self): # Note: Images and labels must be added in shuffled order.
        order = np.arange(self.expected_images)
        np.random.RandomState(123).shuffle(order)
        return order

    def add_image(self, img):
        if self.print_progress and self.cur_images % self.progress_interval == 0:
            print('%d / %d\r' % (self.cur_images, self.expected_images), end='', flush=True)
        if self.shape is None:
            self.shape = img.shape
            self.resolution_log2 = int(np.log2(self.shape[1]))
            assert self.shape[0] in [1, 3]
            assert self.shape[1] == self.shape[2]
            assert self.shape[1] == 2**self.resolution_log2
            tfr_opt = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.NONE)
            for lod in range(self.resolution_log2 - 1):
                tfr_file = self.tfr_prefix + '-r%02d.tfrecords' % (self.resolution_log2 - lod)
                self.tfr_writers.append(tf.python_io.TFRecordWriter(tfr_file, tfr_opt))
        assert img.shape == self.shape
        for lod, tfr_writer in enumerate(self.tfr_writers):
            if lod:
                img = img.astype(np.float32)
                img = (img[:, 0::2, 0::2] + img[:, 0::2, 1::2] + img[:, 1::2, 0::2] + img[:, 1::2, 1::2]) * 0.25
            quant = np.rint(img).clip(0, 255).astype(np.uint8)
            ex = tf.train.Example(features=tf.train.Features(feature={
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=quant.shape)),
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(value=[quant.tostring()]))}))
            tfr_writer.write(ex.SerializeToString())
        self.cur_images += 1

    def add_labels(self, labels):
        if self.print_progress:
            print('%-40s\r' % 'Saving labels...', end='', flush=True)
        assert labels.shape[0] == self.cur_images
        with open(self.tfr_prefix + '-rxx.labels', 'wb') as f:
            np.save(f, labels.astype(np.float32))

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

#----------------------------------------------------------------------------

class ExceptionInfo(object):
    def __init__(self):
        self.value = sys.exc_info()[1]
        self.traceback = traceback.format_exc()

#----------------------------------------------------------------------------

class WorkerThread(threading.Thread):
    def __init__(self, task_queue):
        threading.Thread.__init__(self)
        self.task_queue = task_queue

    def run(self):
        while True:
            func, args, result_queue = self.task_queue.get()
            if func is None:
                break
            try:
                result = func(*args)
            except:
                result = ExceptionInfo()
            result_queue.put((result, args))

#----------------------------------------------------------------------------

class ThreadPool(object):
    def __init__(self, num_threads):
        assert num_threads >= 1
        self.task_queue = Queue.Queue()
        self.result_queues = dict()
        self.num_threads = num_threads
        for _idx in range(self.num_threads):
            thread = WorkerThread(self.task_queue)
            thread.daemon = True
            thread.start()

    def add_task(self, func, args=()):
        assert hasattr(func, '__call__') # must be a function
        if func not in self.result_queues:
            self.result_queues[func] = Queue.Queue()
        self.task_queue.put((func, args, self.result_queues[func]))

    def get_result(self, func): # returns (result, args)
        result, args = self.result_queues[func].get()
        if isinstance(result, ExceptionInfo):
            print('\n\nWorker thread caught an exception:\n' + result.traceback)
            raise result.value
        return result, args

    def finish(self):
        for _idx in range(self.num_threads):
            self.task_queue.put((None, (), None))

    def __enter__(self): # for 'with' statement
        return self

    def __exit__(self, *excinfo):
        self.finish()

    def process_items_concurrently(self, item_iterator, process_func=lambda x: x, pre_func=lambda x: x, post_func=lambda x: x, max_items_in_flight=None):
        if max_items_in_flight is None: max_items_in_flight = self.num_threads * 4
        assert max_items_in_flight >= 1
        results = []
        retire_idx = [0]

        def task_func(prepared, _idx):
            return process_func(prepared)

        def retire_result():
            processed, (_prepared, idx) = self.get_result(task_func)
            results[idx] = processed
            while retire_idx[0] < len(results) and results[retire_idx[0]] is not None:
                yield post_func(results[retire_idx[0]])
                results[retire_idx[0]] = None
                retire_idx[0] += 1

        for idx, item in enumerate(item_iterator):
            prepared = pre_func(item)
            results.append(None)
            self.add_task(func=task_func, args=(prepared, idx))
            while retire_idx[0] < idx - max_items_in_flight + 2:
                for res in retire_result(): yield res
        while retire_idx[0] < len(results):
            for res in retire_result(): yield res

#----------------------------------------------------------------------------

def display(tfrecord_dir):
    print('Loading dataset "%s"' % tfrecord_dir)
    tflib.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size='full', repeat=False, shuffle_mb=0)
    tflib.init_uninitialized_vars()
    import cv2 # pip install opencv-python

    idx = 0
    while True:
        try:
            images, labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            break
        if idx == 0:
            print('Displaying images')
            cv2.namedWindow('dataset_tool')
            print('Press SPACE or ENTER to advance, ESC to exit')
        print('\nidx = %-8d\nlabel = %s' % (idx, labels[0].tolist()))
        cv2.imshow('dataset_tool', images[0].transpose(1, 2, 0)[:, :, ::-1]) # CHW => HWC, RGB => BGR
        idx += 1
        if cv2.waitKey() == 27:
            break
    print('\nDisplayed %d images.' % idx)

#----------------------------------------------------------------------------

def extract(tfrecord_dir, output_dir):
    print('Loading dataset "%s"' % tfrecord_dir)
    tflib.init_tf({'gpu_options.allow_growth': True})
    dset = dataset.TFRecordDataset(tfrecord_dir, max_label_size=0, repeat=False, shuffle_mb=0)
    tflib.init_uninitialized_vars()

    print('Extracting images to "%s"' % output_dir)
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    idx = 0
    while True:
        if idx % 10 == 0:
            print('%d\r' % idx, end='', flush=True)
        try:
            images, _labels = dset.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            break
        if images.shape[1] == 1:
            img = PIL.Image.fromarray(images[0][0], 'L')
        else:
            img = PIL.Image.fromarray(images[0].transpose(1, 2, 0), 'RGB')
        img.save(os.path.join(output_dir, 'img%08d.png' % idx))
        idx += 1
    print('Extracted %d images.' % idx)

#----------------------------------------------------------------------------

def compare(tfrecord_dir_a, tfrecord_dir_b, ignore_labels):
    max_label_size = 0 if ignore_labels else 'full'
    print('Loading dataset "%s"' % tfrecord_dir_a)
    tflib.init_tf({'gpu_options.allow_growth': True})
    dset_a = dataset.TFRecordDataset(tfrecord_dir_a, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
    print('Loading dataset "%s"' % tfrecord_dir_b)
    dset_b = dataset.TFRecordDataset(tfrecord_dir_b, max_label_size=max_label_size, repeat=False, shuffle_mb=0)
    tflib.init_uninitialized_vars()

    print('Comparing datasets')
    idx = 0
    identical_images = 0
    identical_labels = 0
    while True:
        if idx % 100 == 0:
            print('%d\r' % idx, end='', flush=True)
        try:
            images_a, labels_a = dset_a.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            images_a, labels_a = None, None
        try:
            images_b, labels_b = dset_b.get_minibatch_np(1)
        except tf.errors.OutOfRangeError:
            images_b, labels_b = None, None
        if images_a is None or images_b is None:
            if images_a is not None or images_b is not None:
                print('Datasets contain different number of images')
            break
        if images_a.shape == images_b.shape and np.all(images_a == images_b):
            identical_images += 1
        else:
            print('Image %d is different' % idx)
        if labels_a.shape == labels_b.shape and np.all(labels_a == labels_b):
            identical_labels += 1
        else:
            print('Label %d is different' % idx)
        idx += 1
    print('Identical images: %d / %d' % (identical_images, idx))
    if not ignore_labels:
        print('Identical labels: %d / %d' % (identical_labels, idx))

#----------------------------------------------------------------------------

def create_mnist(tfrecord_dir, mnist_dir):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    with gzip.open(os.path.join(mnist_dir, 'train-labels-idx1-ubyte.gz'), 'rb') as file:
        labels = np.frombuffer(file.read(), np.uint8, offset=8)
    images = images.reshape(-1, 1, 28, 28)
    images = np.pad(images, [(0,0), (0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 1, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (60000,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------

def create_mnistrgb(tfrecord_dir, mnist_dir, num_images=1000000, random_seed=123):
    print('Loading MNIST from "%s"' % mnist_dir)
    import gzip
    with gzip.open(os.path.join(mnist_dir, 'train-images-idx3-ubyte.gz'), 'rb') as file:
        images = np.frombuffer(file.read(), np.uint8, offset=16)
    images = images.reshape(-1, 28, 28)
    images = np.pad(images, [(0,0), (2,2), (2,2)], 'constant', constant_values=0)
    assert images.shape == (60000, 32, 32) and images.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255

    with TFRecordExporter(tfrecord_dir, num_images) as tfr:
        rnd = np.random.RandomState(random_seed)
        for _idx in range(num_images):
            tfr.add_image(images[rnd.randint(images.shape[0], size=3)])

#----------------------------------------------------------------------------

def create_cifar10(tfrecord_dir, cifar10_dir):
    print('Loading CIFAR-10 from "%s"' % cifar10_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 6):
        with open(os.path.join(cifar10_dir, 'data_batch_%d' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data['data'].reshape(-1, 3, 32, 32))
        labels.append(data['labels'])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------

def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------

def create_svhn(tfrecord_dir, svhn_dir):
    print('Loading SVHN from "%s"' % svhn_dir)
    import pickle
    images = []
    labels = []
    for batch in range(1, 4):
        with open(os.path.join(svhn_dir, 'train_%d.pkl' % batch), 'rb') as file:
            data = pickle.load(file, encoding='latin1')
        images.append(data[0])
        labels.append(data[1])
    images = np.concatenate(images)
    labels = np.concatenate(labels)
    assert images.shape == (73257, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (73257,) and labels.dtype == np.uint8
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 9
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#----------------------------------------------------------------------------

def create_lsun(tfrecord_dir, lmdb_dir, resolution=256, max_images=None):
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb # pip install lmdb # pylint: disable=import-error
    import cv2 # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter
        if max_images is None:
            max_images = total_images
        with TFRecordExporter(tfrecord_dir, max_images) as tfr:
            for _idx, (_key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1] # BGR => RGB
                    except IOError:
                        img = np.asarray(PIL.Image.open(io.BytesIO(value)))
                    crop = np.min(img.shape[:2])
                    img = img[(img.shape[0] - crop) // 2 : (img.shape[0] + crop) // 2, (img.shape[1] - crop) // 2 : (img.shape[1] + crop) // 2]
                    img = PIL.Image.fromarray(img, 'RGB')
                    img = img.resize((resolution, resolution), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1]) # HWC => CHW
                    tfr.add_image(img)
                except:
                    print(sys.exc_info()[1])
                if tfr.cur_images == max_images:
                    break

#----------------------------------------------------------------------------

def create_lsun_wide(tfrecord_dir, lmdb_dir, width=512, height=384, max_images=None):
    assert width == 2 ** int(np.round(np.log2(width)))
    assert height <= width
    print('Loading LSUN dataset from "%s"' % lmdb_dir)
    import lmdb # pip install lmdb # pylint: disable=import-error
    import cv2 # pip install opencv-python
    import io
    with lmdb.open(lmdb_dir, readonly=True).begin(write=False) as txn:
        total_images = txn.stat()['entries'] # pylint: disable=no-value-for-parameter
        if max_images is None:
            max_images = total_images
        with TFRecordExporter(tfrecord_dir, max_images, print_progress=False) as tfr:
            for idx, (_key, value) in enumerate(txn.cursor()):
                try:
                    try:
                        img = cv2.imdecode(np.fromstring(value, dtype=np.uint8), 1)
                        if img is None:
                            raise IOError('cv2.imdecode failed')
                        img = img[:, :, ::-1] # BGR => RGB
                    except IOError:
                        img = np.asarray(PIL.Image.open(io.BytesIO(value)))

                    ch = int(np.round(width * img.shape[0] / img.shape[1]))
                    if img.shape[1] < width or ch < height:
                        continue

                    img = img[(img.shape[0] - ch) // 2 : (img.shape[0] + ch) // 2]
                    img = PIL.Image.fromarray(img, 'RGB')
                    img = img.resize((width, height), PIL.Image.ANTIALIAS)
                    img = np.asarray(img)
                    img = img.transpose([2, 0, 1]) # HWC => CHW

                    canvas = np.zeros([3, width, width], dtype=np.uint8)
                    canvas[:, (width - height) // 2 : (width + height) // 2] = img
                    tfr.add_image(canvas)
                    print('\r%d / %d => %d ' % (idx + 1, total_images, tfr.cur_images), end='')

                except:
                    print(sys.exc_info()[1])
                if tfr.cur_images == max_images:
                    break
            print()

#----------------------------------------------------------------------------

def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
    print('Loading CelebA from "%s"' % celeba_dir)
    glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
    image_filenames = sorted(glob.glob(glob_pattern))
    expected_images = 202599
    if len(image_filenames) != expected_images:
        error('Expected to find %d images' % expected_images)

    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            assert img.shape == (218, 178, 3)
            img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
            img = img.transpose(2, 0, 1) # HWC => CHW
            tfr.add_image(img)

#----------------------------------------------------------------------------

def create_from_images(tfrecord_dir, image_dir, shuffle):
    print('Loading images from "%s"' % image_dir)
    image_filenames = sorted(glob.glob(os.path.join(image_dir, '*')))
    if len(image_filenames) == 0:
        error('No input images found')

    img = np.asarray(PIL.Image.open(image_filenames[0]))
    resolution = img.shape[0]
    channels = img.shape[2] if img.ndim == 3 else 1
    if img.shape[1] != resolution:
        error('Input images must have the same width and height')
    if resolution != 2 ** int(np.floor(np.log2(resolution))):
        error('Input image resolution must be a power-of-two')
    if channels not in [1, 3]:
        error('Input images must be stored as RGB or grayscale')

    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order() if shuffle else np.arange(len(image_filenames))
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            if channels == 1:
                img = img[np.newaxis, :, :] # HW => CHW
            else:
                img = img.transpose([2, 0, 1]) # HWC => CHW
            tfr.add_image(img)

#----------------------------------------------------------------------------

def create_from_hdf5(tfrecord_dir, hdf5_filename, shuffle):
    print('Loading HDF5 archive from "%s"' % hdf5_filename)
    import h5py # conda install h5py
    with h5py.File(hdf5_filename, 'r') as hdf5_file:
        hdf5_data = max([value for key, value in hdf5_file.items() if key.startswith('data')], key=lambda lod: lod.shape[3])
        with TFRecordExporter(tfrecord_dir, hdf5_data.shape[0]) as tfr:
            order = tfr.choose_shuffled_order() if shuffle else np.arange(hdf5_data.shape[0])
            for idx in range(order.size):
                tfr.add_image(hdf5_data[order[idx]])
            npy_filename = os.path.splitext(hdf5_filename)[0] + '-labels.npy'
            if os.path.isfile(npy_filename):
                tfr.add_labels(np.load(npy_filename)[order])

#----------------------------------------------------------------------------

def execute_cmdline(argv):
    prog = argv[0]
    parser = argparse.ArgumentParser(
        prog = prog,
        description = 'Tool for creating multi-resolution TFRecords datasets for StyleGAN and ProGAN.',
        epilog = 'Type "%s <command> -h" for more information.' % prog)

    subparsers = parser.add_subparsers(dest='command')
    subparsers.required = True
    def add_command(cmd, desc, example=None):
        epilog = 'Example: %s %s' % (prog, example) if example is not None else None
        return subparsers.add_parser(cmd, description=desc, help=desc, epilog=epilog)

    p = add_command( 'display', 'Display images in dataset.',
                     'display datasets/mnist')
    p.add_argument( 'tfrecord_dir', help='Directory containing dataset')

    p = add_command( 'extract', 'Extract images from dataset.',
                     'extract datasets/mnist mnist-images')
    p.add_argument( 'tfrecord_dir', help='Directory containing dataset')
    p.add_argument( 'output_dir', help='Directory to extract the images into')

    p = add_command( 'compare', 'Compare two datasets.',
                     'compare datasets/mydataset datasets/mnist')
    p.add_argument( 'tfrecord_dir_a', help='Directory containing first dataset')
    p.add_argument( 'tfrecord_dir_b', help='Directory containing second dataset')
    p.add_argument( '--ignore_labels', help='Ignore labels (default: 0)', type=int, default=0)

    p = add_command( 'create_mnist', 'Create dataset for MNIST.',
                     'create_mnist datasets/mnist ~/downloads/mnist')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'mnist_dir', help='Directory containing MNIST')

    p = add_command( 'create_mnistrgb', 'Create dataset for MNIST-RGB.',
                     'create_mnistrgb datasets/mnistrgb ~/downloads/mnist')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'mnist_dir', help='Directory containing MNIST')
    p.add_argument( '--num_images', help='Number of composite images to create (default: 1000000)', type=int, default=1000000)
    p.add_argument( '--random_seed', help='Random seed (default: 123)', type=int, default=123)

    p = add_command( 'create_cifar10', 'Create dataset for CIFAR-10.',
                     'create_cifar10 datasets/cifar10 ~/downloads/cifar10')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'cifar10_dir', help='Directory containing CIFAR-10')

    p = add_command( 'create_cifar100', 'Create dataset for CIFAR-100.',
                     'create_cifar100 datasets/cifar100 ~/downloads/cifar100')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'cifar100_dir', help='Directory containing CIFAR-100')

    p = add_command( 'create_svhn', 'Create dataset for SVHN.',
                     'create_svhn datasets/svhn ~/downloads/svhn')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'svhn_dir', help='Directory containing SVHN')

    p = add_command( 'create_lsun', 'Create dataset for single LSUN category.',
                     'create_lsun datasets/lsun-car-100k ~/downloads/lsun/car_lmdb --resolution 256 --max_images 100000')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
    p.add_argument( '--resolution', help='Output resolution (default: 256)', type=int, default=256)
    p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)

    p = add_command( 'create_lsun_wide', 'Create LSUN dataset with non-square aspect ratio.',
                     'create_lsun_wide datasets/lsun-car-512x384 ~/downloads/lsun/car_lmdb --width 512 --height 384')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'lmdb_dir', help='Directory containing LMDB database')
    p.add_argument( '--width', help='Output width (default: 512)', type=int, default=512)
    p.add_argument( '--height', help='Output height (default: 384)', type=int, default=384)
    p.add_argument( '--max_images', help='Maximum number of images (default: none)', type=int, default=None)

    p = add_command( 'create_celeba', 'Create dataset for CelebA.',
                     'create_celeba datasets/celeba ~/downloads/celeba')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'celeba_dir', help='Directory containing CelebA')
    p.add_argument( '--cx', help='Center X coordinate (default: 89)', type=int, default=89)
    p.add_argument( '--cy', help='Center Y coordinate (default: 121)', type=int, default=121)

    p = add_command( 'create_from_images', 'Create dataset from a directory full of images.',
                     'create_from_images datasets/mydataset myimagedir')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'image_dir', help='Directory containing the images')
    p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)

    p = add_command( 'create_from_hdf5', 'Create dataset from legacy HDF5 archive.',
                     'create_from_hdf5 datasets/celebahq ~/downloads/celeba-hq-1024x1024.h5')
    p.add_argument( 'tfrecord_dir', help='New dataset directory to be created')
    p.add_argument( 'hdf5_filename', help='HDF5 archive containing the images')
    p.add_argument( '--shuffle', help='Randomize image order (default: 1)', type=int, default=1)

    args = parser.parse_args(argv[1:] if len(argv) > 1 else ['-h'])
    func = globals()[args.command]
    del args.command
    func(**vars(args))

#----------------------------------------------------------------------------

if __name__ == "__main__":
    execute_cmdline(sys.argv)

#----------------------------------------------------------------------------
spaces/DragGan/DragGan/stylegan_human/utils/data_utils.py
DELETED
@@ -1,37 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-
-import os
-
-from PIL import Image
-
-IMG_EXTENSIONS = [
-    '.jpg', '.JPG', '.jpeg', '.JPEG',
-    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tiff'
-]
-
-
-def is_image_file(filename):
-    return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
-
-
-def tensor2im(var):
-    # var shape: (3, H, W)
-    var = var.cpu().detach().transpose(0, 2).transpose(0, 1).numpy()
-    var = ((var + 1) / 2)
-    var[var < 0] = 0
-    var[var > 1] = 1
-    var = var * 255
-    return Image.fromarray(var.astype('uint8'))
-
-
-def make_dataset(dir):
-    images = []
-    assert os.path.isdir(dir), '%s is not a valid directory' % dir
-    for root, _, fnames in sorted(os.walk(dir)):
-        for fname in fnames:
-            if is_image_file(fname):
-                path = os.path.join(root, fname)
-                fname = fname.split('.')[0]
-                images.append((fname, path))
-    return images
spaces/Eddycrack864/Applio-Inference/infer/lib/infer_pack/modules/F0Predictor/F0Predictor.py
DELETED
@@ -1,16 +0,0 @@
-class F0Predictor(object):
-    def compute_f0(self, wav, p_len):
-        """
-        input: wav:[signal_length]
-               p_len:int
-        output: f0:[signal_length//hop_length]
-        """
-        pass
-
-    def compute_f0_uv(self, wav, p_len):
-        """
-        input: wav:[signal_length]
-               p_len:int
-        output: f0:[signal_length//hop_length],uv:[signal_length//hop_length]
-        """
-        pass
spaces/Eddycrack864/Applio-Inference/infer/lib/uvr5_pack/lib_v5/layers_537227KB.py
DELETED
@@ -1,126 +0,0 @@
-import torch
-import torch.nn.functional as F
-from torch import nn
-
-from . import spec_utils
-
-
-class Conv2DBNActiv(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-        super(Conv2DBNActiv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                nin,
-                nout,
-                kernel_size=ksize,
-                stride=stride,
-                padding=pad,
-                dilation=dilation,
-                bias=False,
-            ),
-            nn.BatchNorm2d(nout),
-            activ(),
-        )
-
-    def __call__(self, x):
-        return self.conv(x)
-
-
-class SeperableConv2DBNActiv(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU):
-        super(SeperableConv2DBNActiv, self).__init__()
-        self.conv = nn.Sequential(
-            nn.Conv2d(
-                nin,
-                nin,
-                kernel_size=ksize,
-                stride=stride,
-                padding=pad,
-                dilation=dilation,
-                groups=nin,
-                bias=False,
-            ),
-            nn.Conv2d(nin, nout, kernel_size=1, bias=False),
-            nn.BatchNorm2d(nout),
-            activ(),
-        )
-
-    def __call__(self, x):
-        return self.conv(x)
-
-
-class Encoder(nn.Module):
-    def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU):
-        super(Encoder, self).__init__()
-        self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-        self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ)
-
-    def __call__(self, x):
-        skip = self.conv1(x)
-        h = self.conv2(skip)
-
-        return h, skip
-
-
-class Decoder(nn.Module):
-    def __init__(
-        self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False
-    ):
-        super(Decoder, self).__init__()
-        self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ)
-        self.dropout = nn.Dropout2d(0.1) if dropout else None
-
-    def __call__(self, x, skip=None):
-        x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
-        if skip is not None:
-            skip = spec_utils.crop_center(skip, x)
-            x = torch.cat([x, skip], dim=1)
-        h = self.conv(x)
-
-        if self.dropout is not None:
-            h = self.dropout(h)
-
-        return h
-
-
-class ASPPModule(nn.Module):
-    def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU):
-        super(ASPPModule, self).__init__()
-        self.conv1 = nn.Sequential(
-            nn.AdaptiveAvgPool2d((1, None)),
-            Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ),
-        )
-        self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ)
-        self.conv3 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[0], dilations[0], activ=activ
-        )
-        self.conv4 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[1], dilations[1], activ=activ
-        )
-        self.conv5 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.conv6 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.conv7 = SeperableConv2DBNActiv(
-            nin, nin, 3, 1, dilations[2], dilations[2], activ=activ
-        )
-        self.bottleneck = nn.Sequential(
-            Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1)
-        )
-
-    def forward(self, x):
-        _, _, h, w = x.size()
-        feat1 = F.interpolate(
-            self.conv1(x), size=(h, w), mode="bilinear", align_corners=True
-        )
-        feat2 = self.conv2(x)
-        feat3 = self.conv3(x)
-        feat4 = self.conv4(x)
-        feat5 = self.conv5(x)
-        feat6 = self.conv6(x)
-        feat7 = self.conv7(x)
-        out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1)
-        bottle = self.bottleneck(out)
-        return bottle
spaces/EinfachOlder/ChatGPT-prompt-generator/app.py
DELETED
@@ -1,19 +0,0 @@
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-import gradio as gr
-
-tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long")
-model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompts-bart-long", from_tf=True)
-
-def generate(prompt):
-
-    batch = tokenizer(prompt, return_tensors="pt")
-    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
-    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
-    return output[0]
-
-input_component = gr.Textbox(label = "Input a persona, e.g. photographer", value = "photographer")
-output_component = gr.Textbox(label = "Prompt")
-examples = [["photographer"], ["developer"]]
-description = "This app generates ChatGPT prompts, it's based on a BART model trained on [this dataset](https://huggingface.co/datasets/fka/awesome-chatgpt-prompts). 📓 Simply enter a persona that you want the prompt to be generated based on. 🧙🏻🧑🏻🚀🧑🏻🎨🧑🏻🔬🧑🏻💻🧑🏼🏫🧑🏽🌾"
-gr.Interface(generate, inputs = input_component, outputs=output_component, examples=examples, title = "👨🏻🎤 ChatGPT Prompt Generator 👨🏻🎤", description=description).launch()
-
spaces/Elbhnasy/Foodvision_mini/app.py
DELETED
@@ -1,73 +0,0 @@
-### 1. Imports and class names setup ###
-import gradio as gr
-import os
-import torch
-
-from model import create_effnetb1_model
-from timeit import default_timer as timer
-from typing import Tuple, Dict
-
-# Setup class names
-class_names = ["pizza", "steak", "sushi"]
-
-### 2. Model and transforms preparation ###
-
-# Create EffNetB2 model
-effnetb1, effnetb1_transforms = create_effnetb1_model(num_classes=len(class_names))
-
-# Load saved weights
-effnetb1.load_state_dict(torch.load(f="pretrained_effnetb1_feature_extractor_pizza_steak_sushi_20_percent.pth",
-                                    map_location=torch.device("cpu"),))
-### 3. Predict function ###
-# Create predict function
-def predict(img) -> Tuple[Dict, float]:
-    """
-    Transforms and performs a prediction on img.
-    :param img: target image .
-    :return: prediction and time taken.
-    """
-    # Start the timer
-    start_time = timer()
-
-    # Transform the target image and add a batch dimension
-    img = effnetb1_transforms(img).unsqueeze(0)
-
-    # Put model into evaluation mode and turn on inference mode
-    effnetb1.eval()
-    with torch.inference_mode():
-        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
-        pred_probs = torch.softmax(effnetb1(img), dim=1)
-
-    # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
-    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
-
-    # Calculate the prediction time
-    pred_time = round(timer() - start_time, 5)
-
-    # Return the prediction dictionary and prediction time
-    return pred_labels_and_probs, pred_time
-
-### 4. Gradio app ###
-
-# Create title, description and article strings
-title = "FoodVision Mini 🍕🥩🍣"
-description = "An EfficientNetB1 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
-article = "I will add it soon wait.."
-
-# Create examples list from "examples/" directory
-example_list = [["examples/" + example] for example in os.listdir("examples")]
-inputs = gr.inputs.Image(type='pil', label="upload Image", source="upload")
-
-# Create the Gradio demo
-demo = gr.Interface(fn=predict, # mapping function from input to output
-                    inputs=inputs, # what are the inputs?
-                    outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
-                             gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
-                    # Create examples list from "examples/" directory
-                    examples=example_list,
-                    title=title,
-                    description=description,
-                    article=article)
-
-# Launch the demo!
-demo.launch()
spaces/FFZG-cleopatra/latvian-twitter-sentiment-classifier/config.py
DELETED
@@ -1,37 +0,0 @@
-import transformers
-import os
-import torch
-import requests
-
-MAX_LEN = 150 #256
-TRAIN_BATCH_SIZE = 8
-VALID_BATCH_SIZE = 4
-EPOCHS = 5
-
-# Folder to contain all the datasets
-from huggingface_hub import hf_hub_download
-
-DATASET_LOCATION = "" #
-print("hi")
-
-MODEL_PATH = hf_hub_download(repo_id="thak123/bert-emoji-latvian-twitter-classifier", filename="model.bin")
-
-# from huggingface_hub import snapshot_download
-# snapshot_download(repo_id="thak123/bert-emoji-latvian-twitter-classifier", allow_patterns="*.bin")
-
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-# 7 EPOCH Version
-BERT_PATH = "FFZG-cleopatra/bert-emoji-latvian-twitter"
-
-
-# TODO check if lower casing is required
-# BertTokenizer
-TOKENIZER = transformers.BertTokenizer.from_pretrained(
-    BERT_PATH,
-    do_lower_case=True
-)
-
-####################################################################################################################################
-
spaces/GT-RIPL/GPT-K/knowledge/utils.py
DELETED
@@ -1,38 +0,0 @@
-import numpy as np
-import hashlib
-import torch
-
-
-def file_hash(file):
-    # Ref: https://stackoverflow.com/a/59056837
-    with open(file, "rb") as f:
-        hash_fn = hashlib.blake2b()
-        chunk = f.read(8192)
-        while chunk:
-            hash_fn.update(chunk)
-            chunk = f.read(8192)
-
-    return hash_fn.hexdigest()
-
-
-@torch.no_grad()
-def refine_cosine(Xa, Xq, I, device, k=None):
-    if k is not None:
-        assert k <= I.shape[1]
-    else:
-        k = I.shape[1]
-
-    Xi = torch.tensor(Xq, device=device).unsqueeze(1)  # bs x 1 x d
-    Xj = torch.tensor(Xa[I.flatten()], device=device)  # K * bs x d
-    Xj = Xj.reshape(*I.shape, Xq.shape[-1])  # bs x K x d
-
-    sim = torch.sum(Xi * Xj, dim=-1)  # bs x K
-    sort_idx = torch.argsort(sim, dim=1, descending=True).cpu().numpy()
-    I_refined, S_refined = [], []
-    for idx_i, sim_i, sort_i in zip(I, sim.cpu().numpy(), sort_idx):
-        I_refined.append(idx_i[sort_i][:k])
-        S_refined.append(sim_i[sort_i][:k])
-    I_refined = np.stack(I_refined)
-    S_refined = np.stack(S_refined)
-
-    return S_refined, I_refined
return S_refined, I_refined
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/GXSA/bingo/src/components/ui/button.tsx
DELETED
@@ -1,57 +0,0 @@
-import * as React from 'react'
-import { Slot } from '@radix-ui/react-slot'
-import { cva, type VariantProps } from 'class-variance-authority'
-
-import { cn } from '@/lib/utils'
-
-const buttonVariants = cva(
-  'inline-flex items-center justify-center rounded-md text-sm font-medium shadow ring-offset-background transition-colors outline-none disabled:pointer-events-none disabled:opacity-50',
-  {
-    variants: {
-      variant: {
-        default:
-          'bg-primary text-primary-foreground shadow-md hover:bg-primary/90',
-        destructive:
-          'bg-destructive text-destructive-foreground hover:bg-destructive/90',
-        outline:
-          'border border-input hover:bg-accent hover:text-accent-foreground',
-        secondary:
-          'bg-secondary text-secondary-foreground hover:bg-secondary/80',
-        ghost: 'shadow-none hover:bg-accent hover:text-accent-foreground',
-        link: 'text-primary underline-offset-4 shadow-none hover:underline'
-      },
-      size: {
-        default: 'h-8 px-4 py-2',
-        sm: 'h-8 rounded-md px-3',
-        lg: 'h-11 rounded-md px-8',
-        icon: 'h-8 w-8 p-0'
-      }
-    },
-    defaultVariants: {
-      variant: 'default',
-      size: 'default'
-    }
-  }
-)
-
-export interface ButtonProps
-  extends React.ButtonHTMLAttributes<HTMLButtonElement>,
-    VariantProps<typeof buttonVariants> {
-  asChild?: boolean
-}
-
-const Button = React.forwardRef<HTMLButtonElement, ButtonProps>(
-  ({ className, variant, size, asChild = false, ...props }, ref) => {
-    const Comp = asChild ? Slot : 'button'
-    return (
-      <Comp
-        className={cn(buttonVariants({ variant, size, className }))}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Button.displayName = 'Button'
-
-export { Button, buttonVariants }
spaces/Gen-Sim/Gen-Sim/misc/job_query.py
DELETED
@@ -1,11 +0,0 @@
-import os
-import openai
-import pandas as pd
-
-openai.api_key = os.getenv("OPENAI_API_KEY")
-print(openai.FineTuningJob.list(limit=10))
-print("==============================================")
-latest_job = openai.FineTuningJob.list(limit=10)["data"][0]["id"]
-print(openai.FineTuningJob.retrieve(latest_job))
-print("==============================================")
-print(openai.FineTuningJob.list_events(id=latest_job, limit=1))
spaces/Goutam982/RVC_V2_voice_clone/i18n/locale_diff.py
DELETED
@@ -1,45 +0,0 @@
-import json
-import os
-from collections import OrderedDict
-
-# Define the standard file name
-standard_file = "zh_CN.json"
-
-# Find all JSON files in the directory
-dir_path = "./"
-languages = [
-    f for f in os.listdir(dir_path) if f.endswith(".json") and f != standard_file
-]
-
-# Load the standard file
-with open(standard_file, "r", encoding="utf-8") as f:
-    standard_data = json.load(f, object_pairs_hook=OrderedDict)
-
-# Loop through each language file
-for lang_file in languages:
-    # Load the language file
-    with open(lang_file, "r", encoding="utf-8") as f:
-        lang_data = json.load(f, object_pairs_hook=OrderedDict)
-
-    # Find the difference between the language file and the standard file
-    diff = set(standard_data.keys()) - set(lang_data.keys())
-
-    miss = set(lang_data.keys()) - set(standard_data.keys())
-
-    # Add any missing keys to the language file
-    for key in diff:
-        lang_data[key] = key
-
-    # Del any extra keys to the language file
-    for key in miss:
-        del lang_data[key]
-
-    # Sort the keys of the language file to match the order of the standard file
-    lang_data = OrderedDict(
-        sorted(lang_data.items(), key=lambda x: list(standard_data.keys()).index(x[0]))
-    )
-
-    # Save the updated language file
-    with open(lang_file, "w", encoding="utf-8") as f:
-        json.dump(lang_data, f, ensure_ascii=False, indent=4)
-        f.write("\n")
spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/data/mmcif_parsing.py
DELETED
@@ -1,384 +0,0 @@
-# Copyright 2021 DeepMind Technologies Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Parses the mmCIF file format."""
-import collections
-import dataclasses
-import io
-from typing import Any, Mapping, Optional, Sequence, Tuple
-
-from absl import logging
-from Bio import PDB
-from Bio.Data import SCOPData
-
-# Type aliases:
-ChainId = str
-PdbHeader = Mapping[str, Any]
-PdbStructure = PDB.Structure.Structure
-SeqRes = str
-MmCIFDict = Mapping[str, Sequence[str]]
-
-
-@dataclasses.dataclass(frozen=True)
-class Monomer:
-  id: str
-  num: int
-
-
-# Note - mmCIF format provides no guarantees on the type of author-assigned
-# sequence numbers. They need not be integers.
-@dataclasses.dataclass(frozen=True)
-class AtomSite:
-  residue_name: str
-  author_chain_id: str
-  mmcif_chain_id: str
-  author_seq_num: str
-  mmcif_seq_num: int
-  insertion_code: str
-  hetatm_atom: str
-  model_num: int
-
-
-# Used to map SEQRES index to a residue in the structure.
-@dataclasses.dataclass(frozen=True)
-class ResiduePosition:
-  chain_id: str
-  residue_number: int
-  insertion_code: str
-
-
-@dataclasses.dataclass(frozen=True)
-class ResidueAtPosition:
-  position: Optional[ResiduePosition]
-  name: str
-  is_missing: bool
-  hetflag: str
-
-
-@dataclasses.dataclass(frozen=True)
-class MmcifObject:
-  """Representation of a parsed mmCIF file.
-
-  Contains:
-    file_id: A meaningful name, e.g. a pdb_id. Should be unique amongst all
-      files being processed.
-    header: Biopython header.
-    structure: Biopython structure.
-    chain_to_seqres: Dict mapping chain_id to 1 letter amino acid sequence. E.g.
-      {'A': 'ABCDEFG'}
-    seqres_to_structure: Dict; for each chain_id contains a mapping between
-      SEQRES index and a ResidueAtPosition. e.g. {'A': {0: ResidueAtPosition,
-                                                        1: ResidueAtPosition,
-                                                        ...}}
-    raw_string: The raw string used to construct the MmcifObject.
-  """
-  file_id: str
-  header: PdbHeader
-  structure: PdbStructure
-  chain_to_seqres: Mapping[ChainId, SeqRes]
-  seqres_to_structure: Mapping[ChainId, Mapping[int, ResidueAtPosition]]
-  raw_string: Any
-
-
-@dataclasses.dataclass(frozen=True)
-class ParsingResult:
-  """Returned by the parse function.
-
-  Contains:
-    mmcif_object: A MmcifObject, may be None if no chain could be successfully
-      parsed.
-    errors: A dict mapping (file_id, chain_id) to any exception generated.
-  """
-  mmcif_object: Optional[MmcifObject]
-  errors: Mapping[Tuple[str, str], Any]
-
-
-class ParseError(Exception):
-  """An error indicating that an mmCIF file could not be parsed."""
-
-
-def mmcif_loop_to_list(prefix: str,
-                       parsed_info: MmCIFDict) -> Sequence[Mapping[str, str]]:
-  """Extracts loop associated with a prefix from mmCIF data as a list.
-
-  Reference for loop_ in mmCIF:
-    http://mmcif.wwpdb.org/docs/tutorials/mechanics/pdbx-mmcif-syntax.html
-
-  Args:
-    prefix: Prefix shared by each of the data items in the loop.
-      e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
-      _entity_poly_seq.mon_id. Should include the trailing period.
-    parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
-      parser.
-
-  Returns:
-    Returns a list of dicts; each dict represents 1 entry from an mmCIF loop.
-  """
-  cols = []
-  data = []
-  for key, value in parsed_info.items():
-    if key.startswith(prefix):
-      cols.append(key)
-      data.append(value)
-
-  assert all([len(xs) == len(data[0]) for xs in data]), (
-      'mmCIF error: Not all loops are the same length: %s' % cols)
-
-  return [dict(zip(cols, xs)) for xs in zip(*data)]
-
-
-def mmcif_loop_to_dict(prefix: str,
-                       index: str,
-                       parsed_info: MmCIFDict,
-                       ) -> Mapping[str, Mapping[str, str]]:
-  """Extracts loop associated with a prefix from mmCIF data as a dictionary.
-
-  Args:
-    prefix: Prefix shared by each of the data items in the loop.
-      e.g. '_entity_poly_seq.', where the data items are _entity_poly_seq.num,
-      _entity_poly_seq.mon_id. Should include the trailing period.
-    index: Which item of loop data should serve as the key.
-    parsed_info: A dict of parsed mmCIF data, e.g. _mmcif_dict from a Biopython
-      parser.
-
-  Returns:
-    Returns a dict of dicts; each dict represents 1 entry from an mmCIF loop,
-    indexed by the index column.
-  """
-  entries = mmcif_loop_to_list(prefix, parsed_info)
-  return {entry[index]: entry for entry in entries}
-
-
-def parse(*,
-          file_id: str,
-          mmcif_string: str,
-          catch_all_errors: bool = True) -> ParsingResult:
-  """Entry point, parses an mmcif_string.
-
-  Args:
-    file_id: A string identifier for this file. Should be unique within the
-      collection of files being processed.
-    mmcif_string: Contents of an mmCIF file.
-    catch_all_errors: If True, all exceptions are caught and error messages are
-      returned as part of the ParsingResult. If False exceptions will be allowed
-      to propagate.
-
-  Returns:
-    A ParsingResult.
-  """
-  errors = {}
-  try:
-    parser = PDB.MMCIFParser(QUIET=True)
-    handle = io.StringIO(mmcif_string)
-    full_structure = parser.get_structure('', handle)
-    first_model_structure = _get_first_model(full_structure)
-    # Extract the _mmcif_dict from the parser, which contains useful fields not
-    # reflected in the Biopython structure.
-    parsed_info = parser._mmcif_dict  # pylint:disable=protected-access
-
-    # Ensure all values are lists, even if singletons.
-    for key, value in parsed_info.items():
-      if not isinstance(value, list):
-        parsed_info[key] = [value]
-
-    header = _get_header(parsed_info)
-
-    # Determine the protein chains, and their start numbers according to the
-    # internal mmCIF numbering scheme (likely but not guaranteed to be 1).
-    valid_chains = _get_protein_chains(parsed_info=parsed_info)
-    if not valid_chains:
-      return ParsingResult(
-          None, {(file_id, ''): 'No protein chains found in this file.'})
-    seq_start_num = {chain_id: min([monomer.num for monomer in seq])
-                     for chain_id, seq in valid_chains.items()}
-
-    # Loop over the atoms for which we have coordinates. Populate two mappings:
-    # -mmcif_to_author_chain_id (maps internal mmCIF chain ids to chain ids used
-    # the authors / Biopython).
-    # -seq_to_structure_mappings (maps idx into sequence to ResidueAtPosition).
-    mmcif_to_author_chain_id = {}
-    seq_to_structure_mappings = {}
-    for atom in _get_atom_site_list(parsed_info):
-      if atom.model_num != '1':
-        # We only process the first model at the moment.
-        continue
-
-      mmcif_to_author_chain_id[atom.mmcif_chain_id] = atom.author_chain_id
-
-      if atom.mmcif_chain_id in valid_chains:
-        hetflag = ' '
-        if atom.hetatm_atom == 'HETATM':
-          # Water atoms are assigned a special hetflag of W in Biopython. We
-          # need to do the same, so that this hetflag can be used to fetch
-          # a residue from the Biopython structure by id.
-          if atom.residue_name in ('HOH', 'WAT'):
-            hetflag = 'W'
-          else:
-            hetflag = 'H_' + atom.residue_name
-        insertion_code = atom.insertion_code
-        if not _is_set(atom.insertion_code):
-          insertion_code = ' '
-        position = ResiduePosition(chain_id=atom.author_chain_id,
-                                   residue_number=int(atom.author_seq_num),
-                                   insertion_code=insertion_code)
-        seq_idx = int(atom.mmcif_seq_num) - seq_start_num[atom.mmcif_chain_id]
-        current = seq_to_structure_mappings.get(atom.author_chain_id, {})
-        current[seq_idx] = ResidueAtPosition(position=position,
-                                             name=atom.residue_name,
-                                             is_missing=False,
-                                             hetflag=hetflag)
-        seq_to_structure_mappings[atom.author_chain_id] = current
-
-    # Add missing residue information to seq_to_structure_mappings.
-    for chain_id, seq_info in valid_chains.items():
-      author_chain = mmcif_to_author_chain_id[chain_id]
-      current_mapping = seq_to_structure_mappings[author_chain]
-      for idx, monomer in enumerate(seq_info):
-        if idx not in current_mapping:
-          current_mapping[idx] = ResidueAtPosition(position=None,
-                                                   name=monomer.id,
-                                                   is_missing=True,
-                                                   hetflag=' ')
-
-    author_chain_to_sequence = {}
-    for chain_id, seq_info in valid_chains.items():
-      author_chain = mmcif_to_author_chain_id[chain_id]
-      seq = []
-      for monomer in seq_info:
-        code = SCOPData.protein_letters_3to1.get(monomer.id, 'X')
-        seq.append(code if len(code) == 1 else 'X')
-      seq = ''.join(seq)
-      author_chain_to_sequence[author_chain] = seq
-
-    mmcif_object = MmcifObject(
-        file_id=file_id,
-        header=header,
-        structure=first_model_structure,
-        chain_to_seqres=author_chain_to_sequence,
-        seqres_to_structure=seq_to_structure_mappings,
-        raw_string=parsed_info)
-
-    return ParsingResult(mmcif_object=mmcif_object, errors=errors)
-  except Exception as e:  # pylint:disable=broad-except
-    errors[(file_id, '')] = e
-    if not catch_all_errors:
-      raise
-    return ParsingResult(mmcif_object=None, errors=errors)
-
-
-def _get_first_model(structure: PdbStructure) -> PdbStructure:
-  """Returns the first model in a Biopython structure."""
-  return next(structure.get_models())
-
-_MIN_LENGTH_OF_CHAIN_TO_BE_COUNTED_AS_PEPTIDE = 21
-
-
-def get_release_date(parsed_info: MmCIFDict) -> str:
-  """Returns the oldest revision date."""
-  revision_dates = parsed_info['_pdbx_audit_revision_history.revision_date']
-  return min(revision_dates)
-
-
-def _get_header(parsed_info: MmCIFDict) -> PdbHeader:
-  """Returns a basic header containing method, release date and resolution."""
-  header = {}
-
-  experiments = mmcif_loop_to_list('_exptl.', parsed_info)
-  header['structure_method'] = ','.join([
-      experiment['_exptl.method'].lower() for experiment in experiments])
-
-  # Note: The release_date here corresponds to the oldest revision. We prefer to
-  # use this for dataset filtering over the deposition_date.
-  if '_pdbx_audit_revision_history.revision_date' in parsed_info:
-    header['release_date'] = get_release_date(parsed_info)
-  else:
-    logging.warning('Could not determine release_date: %s',
-                    parsed_info['_entry.id'])
-
-  header['resolution'] = 0.00
-  for res_key in ('_refine.ls_d_res_high', '_em_3d_reconstruction.resolution',
-                  '_reflns.d_resolution_high'):
-    if res_key in parsed_info:
-      try:
-        raw_resolution = parsed_info[res_key][0]
-        header['resolution'] = float(raw_resolution)
-      except ValueError:
-        logging.warning('Invalid resolution format: %s', parsed_info[res_key])
-
-  return header
-
-
-def _get_atom_site_list(parsed_info: MmCIFDict) -> Sequence[AtomSite]:
-  """Returns list of atom sites; contains data not present in the structure."""
-  return [AtomSite(*site) for site in zip(  # pylint:disable=g-complex-comprehension
-      parsed_info['_atom_site.label_comp_id'],
-      parsed_info['_atom_site.auth_asym_id'],
-      parsed_info['_atom_site.label_asym_id'],
-      parsed_info['_atom_site.auth_seq_id'],
-      parsed_info['_atom_site.label_seq_id'],
-      parsed_info['_atom_site.pdbx_PDB_ins_code'],
-      parsed_info['_atom_site.group_PDB'],
-      parsed_info['_atom_site.pdbx_PDB_model_num'],
-  )]
-
-
-def _get_protein_chains(
-    *, parsed_info: Mapping[str, Any]) -> Mapping[ChainId, Sequence[Monomer]]:
-  """Extracts polymer information for protein chains only.
-
-  Args:
-    parsed_info: _mmcif_dict produced by the Biopython parser.
-
-  Returns:
-    A dict mapping mmcif chain id to a list of Monomers.
-  """
-  # Get polymer information for each entity in the structure.
-  entity_poly_seqs = mmcif_loop_to_list('_entity_poly_seq.', parsed_info)
-
-  polymers = collections.defaultdict(list)
-  for entity_poly_seq in entity_poly_seqs:
-    polymers[entity_poly_seq['_entity_poly_seq.entity_id']].append(
-        Monomer(id=entity_poly_seq['_entity_poly_seq.mon_id'],
-                num=int(entity_poly_seq['_entity_poly_seq.num'])))
-
-  # Get chemical compositions. Will allow us to identify which of these polymers
-  # are proteins.
-  chem_comps = mmcif_loop_to_dict('_chem_comp.', '_chem_comp.id', parsed_info)
-
-  # Get chains information for each entity. Necessary so that we can return a
-  # dict keyed on chain id rather than entity.
-  struct_asyms = mmcif_loop_to_list('_struct_asym.', parsed_info)
-
-  entity_to_mmcif_chains = collections.defaultdict(list)
-  for struct_asym in struct_asyms:
-    chain_id = struct_asym['_struct_asym.id']
-    entity_id = struct_asym['_struct_asym.entity_id']
-    entity_to_mmcif_chains[entity_id].append(chain_id)
-
-  # Identify and return the valid protein chains.
-  valid_chains = {}
-  for entity_id, seq_info in polymers.items():
-    chain_ids = entity_to_mmcif_chains[entity_id]
-
-    # Reject polymers without any peptide-like components, such as DNA/RNA.
-    if any(['peptide' in chem_comps[monomer.id]['_chem_comp.type']
-            for monomer in seq_info]):
-      for chain_id in chain_ids:
-        valid_chains[chain_id] = seq_info
-  return valid_chains
-
-
-def _is_set(data: str) -> bool:
-  """Returns False if data is a special mmCIF character indicating 'unset'."""
-  return data not in ('.', '?')
spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/prng.py
DELETED
@@ -1,69 +0,0 @@
-# Copyright 2021 DeepMind Technologies Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""A collection of utilities surrounding PRNG usage in protein folding."""
-
-import haiku as hk
-import jax
-
-
-def safe_dropout(*, tensor, safe_key, rate, is_deterministic, is_training):
-  if is_training and rate != 0.0 and not is_deterministic:
-    return hk.dropout(safe_key.get(), rate, tensor)
-  else:
-    return tensor
-
-
-class SafeKey:
-  """Safety wrapper for PRNG keys."""
-
-  def __init__(self, key):
-    self._key = key
-    self._used = False
-
-  def _assert_not_used(self):
-    if self._used:
-      raise RuntimeError('Random key has been used previously.')
-
-  def get(self):
-    self._assert_not_used()
-    self._used = True
-    return self._key
-
-  def split(self, num_keys=2):
-    self._assert_not_used()
-    self._used = True
-    new_keys = jax.random.split(self._key, num_keys)
-    return jax.tree_map(SafeKey, tuple(new_keys))
-
-  def duplicate(self, num_keys=2):
-    self._assert_not_used()
-    self._used = True
-    return tuple(SafeKey(self._key) for _ in range(num_keys))
-
-
-def _safe_key_flatten(safe_key):
-  # Flatten transfers "ownership" to the tree
-  return (safe_key._key,), safe_key._used  # pylint: disable=protected-access
-
-
-def _safe_key_unflatten(aux_data, children):
-  ret = SafeKey(children[0])
-  ret._used = aux_data  # pylint: disable=protected-access
-  return ret
-
-
-jax.tree_util.register_pytree_node(
-    SafeKey, _safe_key_flatten, _safe_key_unflatten)
-
spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/htc.py
DELETED
@@ -1,15 +0,0 @@
-from ..builder import DETECTORS
-from .cascade_rcnn import CascadeRCNN
-
-
-@DETECTORS.register_module()
-class HybridTaskCascade(CascadeRCNN):
-    """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
-
-    def __init__(self, **kwargs):
-        super(HybridTaskCascade, self).__init__(**kwargs)
-
-    @property
-    def with_semantic(self):
-        """bool: whether the detector has a semantic head"""
-        return self.roi_head.with_semantic
spaces/Gradio-Blocks/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_40k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
-_base_ = [
-    '../_base_/models/danet_r50-d8.py',
-    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
-    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))