Commit
·
242da3e
1
Parent(s):
15ca04d
Update parquet files (step 28 of 397)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1368565466ki/ZSTRD/monotonic_align/core.py +0 -36
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kundli 5.5 Full Version for Free from a Trusted Source.md +0 -36
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Advanced PDF Editor 3.10 Serial Number Benefits and Features.md +0 -31
- spaces/1gistliPinn/ChatGPT4/Examples/Driver Camara Web Hp Oem Wb918la Abm.md +0 -12
- spaces/1gistliPinn/ChatGPT4/Examples/Fangoria Magazine All Issues Cbr HOT!.md +0 -38
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black GBWhatsApp APK The Ultimate Guide to the Best WhatsApp Mod.md +0 -118
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Octopus Mod APK - A Fun and Addictive Game for Android - Download Now.md +0 -82
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dark Riddle 13.5.0 APK A Thrilling Adventure Game with Puzzles and Mystery.md +0 -125
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK Everything You Need to Know.md +0 -199
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bhop GO APK and Enjoy the Best Parkour Experience.md +0 -95
- spaces/1phancelerku/anime-remove-background/Create Amazing AR Effects for TikTok with Effect House - Download Now.md +0 -75
- spaces/AIFILMS/generate_human_motion/pyrender/pyrender/mesh.py +0 -328
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/opencpop/map.py +0 -8
- spaces/AIGC-Audio/AudioGPT/sound_extraction/model/resunet_film.py +0 -110
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py +0 -199
- spaces/AIGuardians/SummarizeWikipediaDocument/summarize_train.py +0 -109
- spaces/ARTeLab/DTM_Estimation_SRandD/copy_and_transform_imgs.py +0 -14
- spaces/Abhilashvj/planogram-compliance/val.py +0 -593
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetExpandedChildWidth.js +0 -22
- spaces/AiBototicus/BucksAI-3/README.md +0 -13
- spaces/Alfasign/dIFFU/README.md +0 -12
- spaces/AlphaGPT/PaperSummary/README.md +0 -13
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md +0 -93
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/vq_diffusion/__init__.py +0 -0
- spaces/Andy1621/uniformer_image_demo/README.md +0 -13
- spaces/Andy1621/uniformer_image_detection/configs/albu_example/README.md +0 -19
- spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py +0 -44
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py +0 -7
- spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py +0 -2
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/__init__.py +0 -9
- spaces/ArkanDash/rvc-models-new/app.py +0 -735
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py +0 -650
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/escprober.py +0 -102
- spaces/Aveygo/AstroSleuth/README.md +0 -86
- spaces/AxelBell/EasyOCR_text_recognition/app.py +0 -136
- spaces/Benson/text-generation/Examples/Classic Apk.md +0 -83
- spaces/Benson/text-generation/Examples/Descargar Bola De Dragn Explosin Furiosa 2 Apk.md +0 -55
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/models.py +0 -39
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/version.py +0 -739
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/progress_bar.py +0 -224
- spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/futures.py +0 -606
- spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip_old.py +0 -140
- spaces/CVPR/LIVE/thrust/thrust/system/detail/internal/decompose.h +0 -114
- spaces/CVPR/regionclip-demo/detectron2/evaluation/testing.py +0 -85
- spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/registry.py +0 -18
- spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/prod_cons.h +0 -433
- spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/encoders/__init__.py +0 -0
- spaces/DEBO-PROJECT/DEBO-V1/bots/debate_bot.py +0 -27
- spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/iou.py +0 -148
- spaces/Datasculptor/MusicGen/audiocraft/models/builders.py +0 -218
spaces/1368565466ki/ZSTRD/monotonic_align/core.py
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
import numba
|
2 |
-
|
3 |
-
|
4 |
-
@numba.jit(numba.void(numba.int32[:, :, ::1], numba.float32[:, :, ::1], numba.int32[::1], numba.int32[::1]),
|
5 |
-
nopython=True, nogil=True)
|
6 |
-
def maximum_path_jit(paths, values, t_ys, t_xs):
|
7 |
-
b = paths.shape[0]
|
8 |
-
max_neg_val = -1e9
|
9 |
-
for i in range(int(b)):
|
10 |
-
path = paths[i]
|
11 |
-
value = values[i]
|
12 |
-
t_y = t_ys[i]
|
13 |
-
t_x = t_xs[i]
|
14 |
-
|
15 |
-
v_prev = v_cur = 0.0
|
16 |
-
index = t_x - 1
|
17 |
-
|
18 |
-
for y in range(t_y):
|
19 |
-
for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
|
20 |
-
if x == y:
|
21 |
-
v_cur = max_neg_val
|
22 |
-
else:
|
23 |
-
v_cur = value[y - 1, x]
|
24 |
-
if x == 0:
|
25 |
-
if y == 0:
|
26 |
-
v_prev = 0.
|
27 |
-
else:
|
28 |
-
v_prev = max_neg_val
|
29 |
-
else:
|
30 |
-
v_prev = value[y - 1, x - 1]
|
31 |
-
value[y, x] += max(v_prev, v_cur)
|
32 |
-
|
33 |
-
for y in range(t_y - 1, -1, -1):
|
34 |
-
path[y, index] = 1
|
35 |
-
if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
|
36 |
-
index = index - 1
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Kundli 5.5 Full Version for Free from a Trusted Source.md
DELETED
@@ -1,36 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Kundli 5.5 Full Version for Free</h1>
|
3 |
-
<p>Kundli is a software that helps you to create and analyze your horoscope based on the principles of Vedic astrology. It can help you to understand your personality, career, health, marriage, and more. Kundli 5.5 is one of the most popular and trusted versions of Kundli software that has been used by millions of people around the world.</p>
|
4 |
-
<h2>download kundli 5.5 full version</h2><br /><p><b><b>Download File</b> ⚙ <a href="https://byltly.com/2uKzOH">https://byltly.com/2uKzOH</a></b></p><br /><br />
|
5 |
-
<p>However, Kundli 5.5 is not a free software and you need to pay a license fee to use it. But what if you want to download Kundli 5.5 full version for free? Is it possible? And is it safe? In this article, we will answer these questions and show you how to download Kundli 5.5 full version for free from a reliable source.</p>
|
6 |
-
<h2>Why Download Kundli 5.5 Full Version for Free?</h2>
|
7 |
-
<p>There are many reasons why you may want to download Kundli 5.5 full version for free. Some of them are:</p>
|
8 |
-
<ul>
|
9 |
-
<li>You want to try out the software before buying it.</li>
|
10 |
-
<li>You want to use the software for personal or educational purposes only.</li>
|
11 |
-
<li>You want to save money and avoid paying the license fee.</li>
|
12 |
-
<li>You want to access all the features and functions of the software without any limitations.</li>
|
13 |
-
</ul>
|
14 |
-
<p>Whatever your reason may be, downloading Kundli 5.5 full version for free can be a good option for you if you do it from a trustworthy source. However, you should also be aware of the risks and disadvantages of doing so.</p>
|
15 |
-
<p></p>
|
16 |
-
<h2>What are the Risks and Disadvantages of Downloading Kundli 5.5 Full Version for Free?</h2>
|
17 |
-
<p>Downloading Kundli 5.5 full version for free may seem like a great idea, but it also comes with some risks and disadvantages that you should consider before doing so. Some of them are:</p>
|
18 |
-
<ul>
|
19 |
-
<li>You may violate the intellectual property rights of the software developer and face legal consequences.</li>
|
20 |
-
<li>You may download a fake or corrupted file that may harm your computer or compromise your data.</li>
|
21 |
-
<li>You may not get any technical support or updates from the software developer.</li>
|
22 |
-
<li>You may miss out on some features or functions that are only available in the latest version of the software.</li>
|
23 |
-
</ul>
|
24 |
-
<p>Therefore, you should be careful and cautious when downloading Kundli 5.5 full version for free and make sure that you do it from a reputable source that offers a safe and secure download.</p>
|
25 |
-
<h2>How to Download Kundli 5.5 Full Version for Free from a Reliable Source?</h2>
|
26 |
-
<p>If you have decided to download Kundli 5.5 full version for free, then you need to find a reliable source that offers a safe and secure download. One such source is <a href="https://www.kundlidownload.com/kundli-55-download/">https://www.kundlidownload.com/kundli-55-download/</a>, which is a website that provides various versions of Kundli software for free download.</p>
|
27 |
-
<p>To download Kundli 5.5 full version for free from this website, you need to follow these steps:</p>
|
28 |
-
<ol>
|
29 |
-
<li>Go to <a href="https://www.kundlidownload.com/kundli-55-download/">https://www.kundlidownload.com/kundli-55-download/</a> and click on the "Download Now" button.</li>
|
30 |
-
<li>Save the installer file on your computer and run it.</li>
|
31 |
-
<li>Follow the instructions on the screen to install Kundli 5.5 on your computer.</li>
|
32 |
-
<li>Launch the software and enjoy creating and analyzing your horoscope.</li>
|
33 |
-
</ol>
|
34 |
-
<p>Congratulations! You have successfully downloaded Kundli 5.5 full version for free from a reliable source. You can now use the software for your personal or educational purposes without any limitations.</p> ddb901b051<br />
|
35 |
-
<br />
|
36 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Foxit Advanced PDF Editor 3.10 Serial Number Benefits and Features.md
DELETED
@@ -1,31 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Foxit Advanced PDF Editor 3.10 Serial Number: What You Need to Know</h1>
|
3 |
-
<p>If you are looking for a powerful and easy-to-use PDF editor that can handle any kind of PDF document, you might want to check out Foxit Advanced PDF Editor 3.10. This software lets you make extensive editing directly in a PDF file by adding text, graphics, drawings, and images, merging and splitting text, and applying photo editing operations without requiring additional components installed .</p>
|
4 |
-
<p>But before you can enjoy all the features of Foxit Advanced PDF Editor 3.10, you need to have a valid serial number or activation code that will unlock the full version of the software. In this article, we will tell you everything you need to know about Foxit Advanced PDF Editor 3.10 serial number, including how to get it, how to use it, and what benefits it offers.</p>
|
5 |
-
<h2>foxit advanced pdf editor 3.10 serial number</h2><br /><p><b><b>Download</b> 🆓 <a href="https://byltly.com/2uKyw2">https://byltly.com/2uKyw2</a></b></p><br /><br />
|
6 |
-
<h2>Features of Foxit Advanced PDF Editor 3.10</h2>
|
7 |
-
<p>Foxit Advanced PDF Editor 3.10 is more than just a simple PDF editor. It offers a range of advanced editing capabilities for PDF documents with more complex layout. Here are some of the features that you can access with Foxit Advanced PDF Editor 3.10 serial number:</p>
|
8 |
-
<ul>
|
9 |
-
<li><strong>Edit text, images, and objects in PDFs</strong>: You can edit PDF text in a paragraph without worrying about layout - text will automatically reflow as you edit. Even directly edit the content in tables. After editing, spell check the document. You can also edit PDF images, objects, and object shading. Change text to shape. Change page layout properties, add shading to objects, convert text to path, merge/split text, and edit .ai files.</li>
|
10 |
-
<li><strong>Add comments, annotations, and stamps to PDFs</strong>: You can add comments to PDFs to annotate and share your feedback with others. You can also add various types of annotations such as highlight, underline, strikeout, squiggly line, caret replacement text insertion/deletion markups. You can also add stamps such as approved/rejected/draft/for public release/confidential stamps.</li>
|
11 |
-
<li><strong>Compare, merge, and split PDFs</strong>: You can compare two versions of a document to detect any differences between them. You can also merge multiple files into one single PDF document or split a large PDF file into smaller ones.</li>
|
12 |
-
<li><strong>Rotate, delete, extract, and rearrange pages</strong>: You can rotate pages by 90°/180°/270° clockwise or counterclockwise. You can also delete unwanted pages from your document or extract selected pages into a new file. You can also rearrange pages by dragging and dropping them within the same document or across different documents.</li>
|
13 |
-
<li><strong>Add headers, footers, watermarks, and page numbers to PDFs</strong>: You can customize the way your PDF looks by adding or modifying headers, footers, watermarks, or page numbers to your document. You can also add backgrounds, bates numbering, or bookmarks to your document.</li>
|
14 |
-
<li><strong>Fill and sign PDFs with handwritten signatures or images</strong>: You can fill out interactive or non-interactive forms in your document using various form fields such as text fields, check boxes, radio buttons, combo boxes, list boxes, buttons, and digital signatures. You can also sign your document using handwritten signatures or an image of your signature.</li>
|
15 |
-
<li><strong>eSign PDFs with Foxit legally-binding electronic signature service</strong>: You can eSign your document using Foxit's legally-binding electronic signature service that complies with global eSignature standards such as ESIGN, UETA, eIDAS, and more. You can also request signatures from others, track the signing process, and manage signed documents online.</li>
|
16 |
-
<li><strong>Protect PDFs with passwords and certificates</strong>: You can protect your document from unauthorized access or modification by setting passwords or certificates for encryption. You can also set permissions for printing, copying, editing, commenting, and more.</li>
|
17 |
-
<li><strong>Accessibility full check and fix failed parts</strong>: You can check if your document meets the accessibility standards such as WCAG 2.0, PDF/UA, and Section 508. You can also fix any failed parts by adding tags, alt text, reading order, and more.</li>
|
18 |
-
<li><strong>Permanently remove content from documents using redaction</strong>: You can permanently remove sensitive or confidential information from your document using redaction tools such as mark for redaction, apply redaction, search & redact, and exempt from redaction. You can also customize the appearance of redacted areas such as color, text overlay, and font size.</li>
|
19 |
-
<li><strong>Action wizard</strong>: You can save a defined set of commands and then run these commands on any PDF file, automating your workflows and saving time and effort. You can also create custom actions or use predefined actions such as optimize scanned document, prepare for web publishing, sanitize document, and more.</li>
|
20 |
-
</ul>
|
21 |
-
<h2>How to Get Foxit Advanced PDF Editor 3.10 Serial Number</h2>
|
22 |
-
<p>To get Foxit Advanced PDF Editor 3.10 serial number, you need to purchase the software from Foxit or one of its authorized resellers. You can choose between one-time purchase or annual subscription plans depending on your needs. You can also download a free trial version of the software for evaluation purposes before buying it.</p>
|
23 |
-
<p>Once you have purchased the software, you will receive an email from Foxit with your serial number or activation code along with instructions on how to download and install the software on your computer. You can also find your serial number or activation code in your online account at https://www.\uE000foxit\uE001.com/my-account/my-products.html.</p>
|
24 |
-
<p>To use Foxit Advanced PDF Editor 3.10 serial number, you need to follow these steps:</p>
|
25 |
-
<ol>
|
26 |
-
<li>Download Foxit Advanced PDF Editor 3.10 from the official website at https://www.\uE000foxit\uE001.com/downloads/ or from a trusted source.</li>
|
27 |
-
<li>Install the software on your computer by following the installation wizard.</li>
|
28 |
-
<li>Open the software and go to Help menu > About Foxit Advanced PDF Editor > License Information.</li>
|
29 |
-
<li>Enter your serial number or activation code that you received from Foxit or a reseller in the corresponding field.</li</p> 0a6ba089eb<br />
|
30 |
-
<br />
|
31 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Driver Camara Web Hp Oem Wb918la Abm.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
|
2 |
-
<p>meko's had a tough year because their teams suffered at both campos and lotus. at lotus, they suffered because the english team was bought by renault, and it took until the middle of the season to swap it. </p>
|
3 |
-
<h2>driver camara web hp oem wb918la abm</h2><br /><p><b><b>DOWNLOAD</b> ————— <a href="https://imgfil.com/2uxZRy">https://imgfil.com/2uxZRy</a></b></p><br /><br />
|
4 |
-
<p>alonso not only said this year's indianapolis race would be its last, but he also said it would be his last time in the series. he insisted the indy 500 was one of the most "special events" he has ever experienced. </p>
|
5 |
-
<p>my team, carlin, has been working hard for the last three years to achieve this ambitious target. it has been an incredible journey so far, and it is only right that we are all rewarded for all the hard work that has been put in.</p>
|
6 |
-
<p>sette camara is no stranger to the trials of city street circuits. the brazilian driver made his debut at the gruelling macau grand prix in 2015, his first attempt saw him finish down in 22nd, but demonstrated his potential by smashing the lap record by an impressive 1.5 seconds.<br></p>
|
7 |
-
<p>i am really happy to have been selected to join the ferrari driver academy. it is a great way to end what has been a really good racing season for me. just spending a week in maranello was in itself an amazing experience, especially getting to drive at the fiorano track.<br></p>
|
8 |
-
<p></p>
|
9 |
-
<p>my team, carlin, has been working hard for the last three years to achieve this ambitious target. it has been an incredible journey so far, and it is only right that we are all rewarded for all the hard work that has been put in.<br></p>
|
10 |
-
<p>tuukka taponen, the 18-year-old finn started his karting career in the ok karting championship in spain, where he had his first taste of competition and circuit racing. on march 26, he won the title in the ok karting class at the spanish campillos track, held the following week, the ok colombian championship.<br></p> 899543212b<br />
|
11 |
-
<br />
|
12 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Fangoria Magazine All Issues Cbr HOT!.md
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
<h2>Fangoria Magazine All Issues Cbr</h2><br /><p><b><b>Download Zip</b> ★★★★★ <a href="https://imgfil.com/2uxYmz">https://imgfil.com/2uxYmz</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Fangoria Magazine
|
4 |
-
|
5 |
-
The first issue of Fangoria Magazine came out in October 1988. The magazine was aimed at hardcore horror fans. The magazine was originally published by Apopka Publications, Inc, which was created by a bunch of "horror and exploitation" movie fans who had worked at movie theaters in Florida.
|
6 |
-
|
7 |
-
The name "Fangoria" was a reference to the cult comedy flick, Pink Flamingos.
|
8 |
-
|
9 |
-
At its height, the magazine reached a monthly circulation of over 150,000.
|
10 |
-
|
11 |
-
The Fangoria Magazine Archive
|
12 |
-
|
13 |
-
The Fangoria Magazine Archive was started in 2008 by Dave and Alan Wiater, both of whom were editors at Fangoria Magazine. The archive includes over 12,000 issues of Fangoria Magazine.
|
14 |
-
|
15 |
-
Anthologies
|
16 |
-
|
17 |
-
Beginning in 2004, Fangoria Magazine started to compile anthologies of horror films from around the world. The films chosen for the anthologies ranged from mainstream horror films to independent horror films.
|
18 |
-
|
19 |
-
In 2011, Fangoria launched a series of books called The Fangoria Film Guide Collection. Each book would contain classic horror films edited by Fangoria Magazine'''s editors. These books would also include limited editions of Fangoria Magazine'' for collectors.
|
20 |
-
|
21 |
-
Fangoria On Demand
|
22 |
-
|
23 |
-
In 2011, Fangoria Magazine started Fangoria On Demand. The online streaming service allows fans to watch horror movies on demand.
|
24 |
-
|
25 |
-
On Demand exclusive content includes the red carpet interviews from the Fantastic Fest, Rue Morgue and Screamfest film festivals. Fangoria has also released interviews with George A. Romero, Guillermo Del Toro, Wes Craven, Joe Dante, Mick Garris, John Landis, Wes Craven and Sean Cunningham.
|
26 |
-
|
27 |
-
Fangoria Presents: Tales From The Crypt
|
28 |
-
|
29 |
-
In 2006, Fangoria started Fangoria Presents: Tales From The Crypt. The program would bring fans horror stories from the past from some of the best horror writers of the 1980s.
|
30 |
-
|
31 |
-
Fangoria's Holiday Horror Show
|
32 |
-
|
33 |
-
Starting in 2008, Fangoria Magazine'' began to host their annual horror show in Orlando. The Fangoria Holiday Horror Show is now a three-day long event. The 2011 show was the 10th anniversary show.
|
34 |
-
|
35 |
-
Each year, Fangoria Magazine hosts 3D screenings of horror classics, discussion panels, celebrity guests, and a costume contest. Since the first show in Orlando, the show has taken place in Hollywood, Las Vegas, New York, San Francisco, 4fefd39f24<br />
|
36 |
-
<br />
|
37 |
-
<br />
|
38 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black GBWhatsApp APK The Ultimate Guide to the Best WhatsApp Mod.md
DELETED
@@ -1,118 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Black GB WhatsApp APK Download: What You Need to Know</h1>
|
3 |
-
<p>If you are looking for a way to enhance your WhatsApp experience, you might want to try Black GB WhatsApp. This is a modified version of the popular chat app that offers more features, customization options, and privacy settings than the official one. In this article, we will tell you what Black GB WhatsApp is, what it can do, and how you can download and install it on your Android device.</p>
|
4 |
-
<h2>black gb whatsapp apk download</h2><br /><p><b><b>Download Zip</b> ===> <a href="https://urlin.us/2uSVAw">https://urlin.us/2uSVAw</a></b></p><br /><br />
|
5 |
-
<h2>What is GB WhatsApp?</h2>
|
6 |
-
<p>GB WhatsApp is a free-to-use chat platform that comes as a modification of the official WhatsApp application. It was created by OriginaI lnc, a third-party developer that is not affiliated with WhatsApp Inc. GB WhatsApp allows you to access all the basic features of the original app, such as sending and receiving messages, calls, media, and documents. However, it also adds some extra features and customizability capabilities that are not available on the official version. Some of these features are:</p>
|
7 |
-
<h3>Features of GB WhatsApp</h3>
|
8 |
-
<ul>
|
9 |
-
<li>Multi-language support: You can choose from over 100 languages to use on GB WhatsApp.</li>
|
10 |
-
<li>Extra emojis: You can use more than 700 new emojis that are not found on the official app.</li>
|
11 |
-
<li>More broadcast messages: You can send up to 600 broadcast messages at once, instead of the limit of 250 on the official app.</li>
|
12 |
-
<li>Enhanced privacy mode: You can hide your last seen, blue ticks, double ticks, typing status, and online status from others. You can also disable calls from specific contacts or groups.</li>
|
13 |
-
<li>Dual accounts: You can use two WhatsApp accounts on the same device with GB WhatsApp.</li>
|
14 |
-
</ul>
|
15 |
-
<h3>Benefits of GB WhatsApp</h3>
|
16 |
-
<p>Using GB WhatsApp gives you more control over your chat experience than the original app. You can customize the interface, theme, font, and notification settings according to your preference. You can also enjoy more advanced messaging features, such as sending large APK files, copying statuses to your clipboard, increasing your status length, and creating longer group names. Moreover, you can protect your privacy by limiting how people track your online activities.</p>
|
17 |
-
<h2>What is Black GB WhatsApp?</h2>
|
18 |
-
<p>Black GB WhatsApp is a special version of GB WhatsApp that has a dark theme and a black icon. It is designed for users who prefer a sleek and elegant look for their chat app. It has all the features and benefits of GB WhatsApp, but with a different color scheme. Some of the features of Black GB WhatsApp are:</p>
|
19 |
-
<h3>Features of Black GB WhatsApp</h3>
|
20 |
-
<ul>
|
21 |
-
<li>Black theme: The app has a black background and a black icon that matches well with any wallpaper or device.</li>
|
22 |
-
<li>Dark mode: The app supports dark mode, which reduces eye strain and saves battery life.</li>
|
23 |
-
<li>Night mode: The app has a night mode option that automatically switches to dark mode at night or in low-light conditions.</li>
|
24 |
-
<li>Anti-ban: The app has an anti-ban feature that prevents your account from being banned by WhatsApp for using a modified version.</li>
|
25 |
-
</ul>
|
26 |
-
<h3>Benefits of Black GB WhatsApp</h3>
|
27 |
-
<p>Using Black GB WhatsApp gives you a stylish and sophisticated chat experience that stands out from the crowd. You can enjoy the dark theme and the night mode that enhance your visual comfort and performance. You can also avoid getting banned by WhatsApp for using a modified version with the anti-ban feature.</p>
|
28 |
-
<p>black gb whatsapp apk download latest version<br />
|
29 |
-
black gb whatsapp apk download anti ban<br />
|
30 |
-
black gb whatsapp apk download for android<br />
|
31 |
-
black gb whatsapp apk download 2023<br />
|
32 |
-
black gb whatsapp apk download filehippo<br />
|
33 |
-
black gb whatsapp apk download get droid tips<br />
|
34 |
-
black gb whatsapp apk download modded version<br />
|
35 |
-
black gb whatsapp apk download free<br />
|
36 |
-
black gb whatsapp apk download no ads<br />
|
37 |
-
black gb whatsapp apk download with extra features<br />
|
38 |
-
black gb whatsapp apk download official website<br />
|
39 |
-
black gb whatsapp apk download update<br />
|
40 |
-
black gb whatsapp apk download new version<br />
|
41 |
-
black gb whatsapp apk download without root<br />
|
42 |
-
black gb whatsapp apk download for pc<br />
|
43 |
-
black gb whatsapp apk download online<br />
|
44 |
-
black gb whatsapp apk download from apkpure<br />
|
45 |
-
black gb whatsapp apk download 4.1.0<br />
|
46 |
-
black gb whatsapp apk download 57mb<br />
|
47 |
-
black gb whatsapp apk download for ios<br />
|
48 |
-
black gb whatsapp apk download link<br />
|
49 |
-
black gb whatsapp apk download original inc<br />
|
50 |
-
black gb whatsapp apk download may 2023<br />
|
51 |
-
black gb whatsapp apk download with theme customization<br />
|
52 |
-
black gb whatsapp apk download with multilanguage support<br />
|
53 |
-
black gb whatsapp apk download with advanced privacy options<br />
|
54 |
-
black gb whatsapp apk download with more emojis<br />
|
55 |
-
black gb whatsapp apk download with dual account feature<br />
|
56 |
-
black gb whatsapp apk download with enhanced messaging experience<br />
|
57 |
-
black gb whatsapp apk download with large file sharing capability<br />
|
58 |
-
black gb whatsapp apk download with status copying feature<br />
|
59 |
-
black gb whatsapp apk download with group name editing feature<br />
|
60 |
-
black gb whatsapp apk download with broadcast message feature<br />
|
61 |
-
black gb whatsapp apk download with last seen hiding feature<br />
|
62 |
-
black gb whatsapp apk download with blue tick hiding feature<br />
|
63 |
-
black gb whatsapp apk download with typing notification hiding feature<br />
|
64 |
-
black gb whatsapp apk download with more security features<br />
|
65 |
-
black gb whatsapp apk download with backup and restore feature<br />
|
66 |
-
black gb whatsapp apk download with auto reply feature<br />
|
67 |
-
black gb whatsapp apk download with schedule message feature<br />
|
68 |
-
black gb whatsapp apk download with pin chat feature<br />
|
69 |
-
black gb whatsapp apk download with lock chat feature<br />
|
70 |
-
black gb whatsapp apk download with call blocker feature<br />
|
71 |
-
black gb whatsapp apk download with video call feature<br />
|
72 |
-
black gb whatsapp apk download with voice call feature<br />
|
73 |
-
black gb whatsapp apk download with sticker pack feature<br />
|
74 |
-
black gb whatsapp apk download with gif support feature<br />
|
75 |
-
black gb whatsapp apk download with dark mode feature<br />
|
76 |
-
black gb whatsapp apk download with night mode feature</p>
|
77 |
-
<h2>How to Download and Install Black GB WhatsApp?</h2>
|
78 |
-
<p>If you want to try Black GB WhatsApp on your Android device, you need to follow some simple steps to download and install it. However, before you do that, you need to make sure that you meet the requirements for Black GB WhatsApp.</p>
|
79 |
-
<h3>Requirements for Black GB WhatsApp</h3>
|
80 |
-
<ul>
|
81 |
-
<li>An Android device with version 4.0 or higher.</li>
|
82 |
-
<li>A stable internet connection.</li>
|
83 |
-
<li>Enough storage space on your device.</li>
|
84 |
-
<li>A backup of your WhatsApp data, in case you want to restore it later.</li>
|
85 |
-
</ul>
|
86 |
-
<h3>Steps to Download and Install Black GB WhatsApp</h3>
|
87 |
-
<ol>
|
88 |
-
<li>Go to the official website of Black GB WhatsApp and download the latest APK file. You can also use this link: <a href="">Black GB WhatsApp APK Download</a>.</li>
|
89 |
-
<li>Enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
|
90 |
-
<li>Locate the downloaded APK file on your device and tap on it to start the installation process.</li>
|
91 |
-
<li>Follow the instructions on the screen and grant the necessary permissions to the app.</li>
|
92 |
-
<li>After the installation is complete, open the app and verify your phone number. You can also restore your WhatsApp data if you have a backup.</li>
|
93 |
-
<li>Enjoy using Black GB WhatsApp on your device.</li>
|
94 |
-
</ol>
|
95 |
-
<h2>How to Use Black GB WhatsApp?</h2>
|
96 |
-
<p>Using Black GB WhatsApp is similar to using the official WhatsApp app. You can send and receive messages, calls, media, and documents with your contacts and groups. You can also access the extra features and customization options that Black GB WhatsApp offers. Here are some tips on how to use Black GB WhatsApp:</p>
|
97 |
-
<h3>How to Customize Black GB WhatsApp</h3>
|
98 |
-
<p>You can change the appearance and settings of Black GB WhatsApp according to your liking. To do this, go to Menu > GB Settings and explore the various options available. You can change the theme, font, wallpaper, notification tone, chat bubble color, and more. You can also enable or disable features such as auto-reply, message scheduler, anti-delete messages, and more.</p>
|
99 |
-
<h3>How to Switch Between Black GB WhatsApp and Official WhatsApp</h3>
|
100 |
-
<p>If you want to use both Black GB WhatsApp and official WhatsApp on your device, you can do so easily. You can switch between them by tapping on their icons on your home screen or app drawer. However, you need to use different phone numbers for each app, as you cannot use the same number for both apps at the same time.</p>
|
101 |
-
<h2>Conclusion</h2>
|
102 |
-
<p>Black GB WhatsApp is a great alternative to the official WhatsApp app if you want more features, customization options, and privacy settings. It has a dark theme and a black icon that give it a unique and elegant look. It also supports dark mode and night mode for better visual comfort and performance. You can download and install Black GB WhatsApp on your Android device by following the steps mentioned above. However, you should be aware that using a modified version of WhatsApp may violate its terms of service and may result in your account being banned or suspended. Therefore, use it at your own risk and discretion.</p>
|
103 |
-
<h2>FAQs</h2>
|
104 |
-
<ul>
|
105 |
-
<li><b>Q: Is Black GB WhatsApp safe to use?</b></li>
|
106 |
-
<li>A: Black GB WhatsApp is safe to use as long as you download it from a trusted source and scan it for viruses or malware before installing it. However, it is not an official app from WhatsApp Inc., so it may not be as secure or reliable as the original app. You should also be careful about sharing sensitive or personal information on any chat app.</li>
|
107 |
-
<li><b>Q: Can I update Black GB WhatsApp?</b></li>
|
108 |
-
<li>A: Yes, you can update Black GB WhatsApp whenever there is a new version available. You can check for updates by going to Menu > Updates or by visiting the official website of Black GB WhatsApp. You can also enable auto-update by going to Menu > GB Settings > Updates > Auto Update.</li>
|
109 |
-
<li><b>Q: Can I use Black GB WhatsApp on iOS devices?</b></li>
|
110 |
-
<li>A: No, you cannot use Black GB WhatsApp on iOS devices, as it is only compatible with Android devices. However, there are other modified versions of WhatsApp that are available for iOS devices, such as Watusi or FMWhatsApp.</li>
|
111 |
-
<li><b>Q: How can I contact the developer of Black GB WhatsApp?</b></li>
|
112 |
-
<li>A: You can contact the developer of Black GB WhatsApp by sending an email to [email protected] or by visiting their Facebook page at <a href="">OriginaI lnc</a>.</li>
|
113 |
-
<li><b>Q: What are some alternatives to Black GB WhatsApp?</b></li>
|
114 |
-
<li>A: Some alternatives to Black GB WhatsApp are YoWhatsApp, Fouad WhatsApp, OGWhatsApp, and AZWhatsApp. They are also modified versions of WhatsApp that offer more features and customization options than the official app. However, they may also have some risks and drawbacks, such as security issues, compatibility problems, or account bans.</li>
|
115 |
-
</ul>
|
116 |
-
<p>I hope this article has helped you learn more about Black GB WhatsApp and how to download and install it on your Android device. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p> 197e85843d<br />
|
117 |
-
<br />
|
118 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Octopus Mod APK - A Fun and Addictive Game for Android - Download Now.md
DELETED
@@ -1,82 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Download Crazy Octopus Mod Apk: A Fun and Addictive Casual Game</h1>
|
3 |
-
<p>If you are looking for a casual game that is easy to play but hard to master, then you should try Crazy Octopus. This is a game where you control a cute octopus that has to avoid obstacles and collect coins and gems. You can also customize your octopus with different hats, glasses, and accessories. In this article, we will tell you more about Crazy Octopus and how you can download the mod apk version that gives you unlimited money and other benefits.</p>
|
4 |
-
<h2>What is Crazy Octopus?</h2>
|
5 |
-
<p>Crazy Octopus is a casual game developed by \uE000Mod\uE001droid.com, a website that provides modded versions of popular android games. Crazy Octopus was released in 2023 and has gained a lot of positive reviews from players. The game has a simple but colorful graphics style, a catchy soundtrack, and a smooth gameplay. The game is suitable for all ages and can be played offline or online.</p>
|
6 |
-
<h2>download crazy octopus mod apk</h2><br /><p><b><b>Download Zip</b> ✵✵✵ <a href="https://urlin.us/2uSTXt">https://urlin.us/2uSTXt</a></b></p><br /><br />
|
7 |
-
<h3>Features of Crazy Octopus</h3>
|
8 |
-
<p>Some of the features of Crazy Octopus are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>You can control your octopus by tapping the screen or tilting your device.</li>
|
11 |
-
<li>You can collect coins and gems to unlock new hats, glasses, and accessories for your octopus.</li>
|
12 |
-
<li>You can use power-ups like magnets, shields, rockets, and bombs to help you overcome the obstacles.</li>
|
13 |
-
<li>You can compete with other players on the leaderboard and earn achievements.</li>
|
14 |
-
<li>You can enjoy different themes and backgrounds like ocean, beach, forest, city, and more.</li>
|
15 |
-
</ul>
|
16 |
-
<h3>How to play Crazy Octopus</h3>
|
17 |
-
<p>The gameplay of Crazy Octopus is very simple but challenging. You have to guide your octopus through a series of obstacles like rocks, sharks, submarines, mines, and more. You have to avoid hitting them or you will lose a life. You have three lives in each level and you can earn more by collecting hearts. You also have to collect coins and gems that are scattered along the way. You can use them to buy new items for your octopus or upgrade your power-ups. The game has many levels with increasing difficulty and variety. You can also play in endless mode where you have to survive as long as possible.</p>
|
18 |
-
<p>How to download crazy octopus mod apk for free<br />
|
19 |
-
Crazy octopus mod apk latest version 2023<br />
|
20 |
-
Crazy octopus mod apk unlimited money and gems<br />
|
21 |
-
Download crazy octopus mod apk for android devices<br />
|
22 |
-
Crazy octopus mod apk gameplay and features<br />
|
23 |
-
Crazy octopus mod apk review and rating<br />
|
24 |
-
Best site to download crazy octopus mod apk<br />
|
25 |
-
Crazy octopus mod apk offline mode and multiplayer<br />
|
26 |
-
Crazy octopus mod apk cheats and hacks<br />
|
27 |
-
Download crazy octopus mod apk from APKCombo[^1^]<br />
|
28 |
-
Crazy octopus mod apk installation guide and tips<br />
|
29 |
-
Crazy octopus mod apk comparison with other games<br />
|
30 |
-
Crazy octopus mod apk download link and file size<br />
|
31 |
-
Crazy octopus mod apk update and patch notes<br />
|
32 |
-
Crazy octopus mod apk requirements and compatibility<br />
|
33 |
-
Download crazy octopus mod apk for PC and laptop<br />
|
34 |
-
Crazy octopus mod apk trailer and screenshots<br />
|
35 |
-
Crazy octopus mod apk support and feedback<br />
|
36 |
-
Crazy octopus mod apk alternatives and similar games<br />
|
37 |
-
Download crazy octopus mod apk from Google Play Store<br />
|
38 |
-
Crazy octopus mod apk bug fixes and improvements<br />
|
39 |
-
Crazy octopus mod apk rewards and achievements<br />
|
40 |
-
Download crazy octopus mod apk for iOS and iPhone<br />
|
41 |
-
Crazy octopus mod apk developer and publisher<br />
|
42 |
-
Crazy octopus mod apk genre and category<br />
|
43 |
-
Download crazy octopus mod apk from Amazon Appstore<br />
|
44 |
-
Crazy octopus mod apk pros and cons<br />
|
45 |
-
Crazy octopus mod apk FAQ and answers<br />
|
46 |
-
Download crazy octopus mod apk for Windows Phone<br />
|
47 |
-
Crazy octopus mod apk release date and history</p>
|
48 |
-
<h2>Why download Crazy Octopus mod apk?</h2>
|
49 |
-
<p>Although Crazy Octopus is a free game, it has some limitations that can affect your gaming experience. For example, you have to watch ads to get extra lives or coins. You also have to spend real money to buy some items or power-ups. If you want to enjoy the game without any restrictions, then you should download the mod apk version of Crazy Octopus.</p>
|
50 |
-
<h3>Benefits of Crazy Octopus mod apk</h3>
|
51 |
-
<p>The mod apk version of Crazy Octopus gives you many benefits that make the game more fun and easy. Some of the benefits are:</p>
|
52 |
-
<ul>
|
53 |
-
<li>You get unlimited money that you can use to buy anything you want.</li>
|
54 |
-
<li>You get unlimited lives that you can use to play as long as you want.</li>
|
55 |
-
<li>You get all the items and power-ups unlocked from the start.</li>
|
56 |
-
<li>You get no ads that can interrupt your gameplay.</li>
|
57 |
-
<li>You get a higher score multiplier that can boost your ranking on the leaderboard.</li>
|
58 |
-
</ul>
|
59 |
-
<h3>How to download and install Crazy Octopus mod apk</h3>
|
60 |
-
<p>If you want to download and install Crazy Octopus mod apk on your android device, then you have to follow these steps:</p>
|
61 |
-
<ol>
|
62 |
-
<li>Go to \uE000Mod\uE001droid.com and search for Crazy Octopus.</li>
|
63 |
-
<li>Click on the download button and <p>wait for the download to finish.</li>
|
64 |
-
<li>Go to your file manager and locate the downloaded file.</li>
|
65 |
-
<li>Tap on the file and allow the installation from unknown sources.</li>
|
66 |
-
<li>Wait for the installation to complete and launch the game.</li>
|
67 |
-
<li>Enjoy playing Crazy Octopus mod apk with unlimited money and lives.</li>
|
68 |
-
</ol>
|
69 |
-
<h2>Conclusion</h2>
|
70 |
-
<p>Crazy Octopus is a fun and addictive casual game that you can play anytime and anywhere. You can control a cute octopus that has to avoid obstacles and collect coins and gems. You can also customize your octopus with different hats, glasses, and accessories. If you want to enjoy the game without any limitations, then you should download the mod apk version of Crazy Octopus that gives you unlimited money, lives, items, power-ups, and no ads. Download Crazy Octopus mod apk now and have a blast!</p>
|
71 |
-
<h3>FAQs</h3>
|
72 |
-
<p>Here are some frequently asked questions about Crazy Octopus mod apk:</p>
|
73 |
-
<ul>
|
74 |
-
<li><b>Is Crazy Octopus mod apk safe to download and install?</b><br>Yes, Crazy Octopus mod apk is safe to download and install. It does not contain any viruses or malware that can harm your device. However, you should always download it from a trusted source like \uE000Mod\uE001droid.com to avoid any problems.</li>
|
75 |
-
<li><b>Do I need to root my device to use Crazy Octopus mod apk?</b><br>No, you do not need to root your device to use Crazy Octopus mod apk. It works on both rooted and non-rooted devices. You just need to enable the installation from unknown sources in your settings.</li>
|
76 |
-
<li><b>Will I get banned from playing online if I use Crazy Octopus mod apk?</b><br>No, you will not get banned from playing online if you use Crazy Octopus mod apk. The mod apk does not interfere with the online mode of the game. You can still play with other players on the leaderboard and earn achievements.</li>
|
77 |
-
<li><b>Can I update Crazy Octopus mod apk when a new version is released?</b><br>Yes, you can update Crazy Octopus mod apk when a new version is released. However, you have to download and install the new version manually from \uE000Mod\uE001droid.com. You cannot update it from the Google Play Store or any other source.</li>
|
78 |
-
<li><b>Can I play Crazy Octopus mod apk on other devices like PC or iOS?</b><br>No, you cannot play Crazy Octopus mod apk on other devices like PC or iOS. The mod apk is only compatible with android devices. If you want to play Crazy Octopus on other devices, you have to download the original version from the official store.</li>
|
79 |
-
</ul>
|
80 |
-
: \uE000Mod\uE001droid.com - https://moddroid.com/crazy-octopus.html</p> 197e85843d<br />
|
81 |
-
<br />
|
82 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dark Riddle 13.5.0 APK A Thrilling Adventure Game with Puzzles and Mystery.md
DELETED
@@ -1,125 +0,0 @@
|
|
1 |
-
|
2 |
-
<table>
|
3 |
-
<tr>
|
4 |
-
<td>
|
5 |
-
<h1>Dark Riddle 13.5.0 APK: A Thrilling Adventure Game</h1>
|
6 |
-
<p>Do you love adventure games with suspense, mystery, and humor? If yes, then you should try <strong>Dark Riddle</strong>, a first-person adventure thriller with an interactive environment and interesting quests. In this game, you have to solve puzzles and uncover the secrets of a suspicious neighbor who lives across the street from you.</p>
|
7 |
-
<h2>dark riddle 13.5.0 apk</h2><br /><p><b><b>DOWNLOAD</b> > <a href="https://urlin.us/2uSXSi">https://urlin.us/2uSXSi</a></b></p><br /><br />
|
8 |
-
<p>In this article, we will tell you everything you need to know about <strong>Dark Riddle 13.5.0 APK</strong>, the latest version of this popular game. We will explain what is Dark Riddle, what's new in Dark Riddle 13.5.0 APK, how to download and install Dark Riddle 13.5.0 APK, how to play Dark Riddle, and why should you play Dark Riddle.</p>
|
9 |
-
<h2>What is Dark Riddle?</h2>
|
10 |
-
<p><strong>Dark Riddle</strong> is a game developed by <strong>PAGA GROUP</strong>, a Ukrainian game studio that specializes in creating adventure games with immersive stories and realistic graphics. Dark Riddle was first released in 2019 and has since gained over 50 million downloads on Google Play Store. It is also available on iOS devices.</p>
|
11 |
-
<p>Dark Riddle is a game that combines elements of horror, comedy, and puzzle-solving. You play as a curious protagonist who wants to find out what your neighbor is hiding in his basement. You have to explore an unusual city where you can find many useful and unique items to interact with. You will meet a police officer, a seller of alien devices, and other strange characters along your journey.</p>
|
12 |
-
<p>Dark Riddle is a game that challenges your creativity, logic, and courage. You have to use your wits and skills to outsmart your neighbor and sneak into his house without getting caught. You will also discover a dark riddle that involves aliens, secret experiments, and a mysterious organization.</p>
|
13 |
-
<h2>What's New in Dark Riddle 13.5.0 APK?</h2>
|
14 |
-
<p><strong>Dark Riddle 13.5.0 APK</strong> is the latest version of the game that was released on June 16, 2023. It has some new features, bug fixes, and improvements that make the game more enjoyable and exciting. Here are some of the highlights of Dark Riddle 13.5.0 APK:</p>
|
15 |
-
<h3>New Features</h3>
|
16 |
-
<ul>
|
17 |
-
<li>A new chapter in the story that reveals more secrets and surprises.</li>
|
18 |
-
<li>A new location to explore: the neighbor's laboratory.</li>
|
19 |
-
<li>A new character to meet: the neighbor's assistant.</li>
|
20 |
-
<li>A new item to use: the alien device that can manipulate time and space.</li>
|
21 |
-
<li>A new mode to play: the multiplayer mode that allows you to play with your friends online.</li>
|
22 |
-
</ul>
|
23 |
-
<h3>Bug Fixes and Improvements</h3>
|
24 |
-
<ul>
|
25 |
-
<li>Fixed some crashes and errors that occurred in the previous versions.</li>
|
26 |
-
<li>Improved the performance and stability of the game.</li>
|
27 |
-
<li>Improved the graphics and sound quality of the game.</li>
|
28 |
-
<li>Improved the user interface and controls of the game.</li>
|
29 |
-
<li>Improved the balance and difficulty of the game.</li>
|
30 |
-
</ul>
|
31 |
-
<h2>How to Download and Install Dark Riddle 13.5.0 APK?</h2>
|
32 |
-
<p>If you want to play <strong>Dark Riddle 13.5.0 APK</strong>, you have two options to download and install it on your Android device. You can either download it from Google Play Store or from APKCombo, a third-party website that provides free APK files for various apps and games. Here are the steps for both options:</p>
|
33 |
-
<h3>Download from Google Play Store</h3>
|
34 |
-
<ol>
|
35 |
-
<li>Open Google Play Store on your device and search for "Dark Riddle".</li>
|
36 |
-
<li>Select the game from the search results and tap on "Install".</li>
|
37 |
-
<li>Wait for the download and installation process to complete.</li>
|
38 |
-
<li>Once done, you can launch the game from your app drawer or home screen.</li>
|
39 |
-
</ol>
|
40 |
-
<h3>Download from APKCombo</h3>
|
41 |
-
<ol>
|
42 |
-
<li>Open your web browser and go to <a href="">https://apkcombo.com/dark-riddle/com.Nobodyshot.killerNeighbor/</a>.</li>
|
43 |
-
<li>Select the version 13.5.0 from the drop-down menu and tap on "Download APK".</li>
|
44 |
-
<li>Wait for the download process to complete and locate the APK file in your device's storage.</li>
|
45 |
-
<li>Before installing the APK file, make sure you enable "Unknown Sources" in your device's settings. This will allow you to install apps from sources other than Google Play Store.</li>
|
46 |
-
<li>Tap on the APK file and follow the instructions on the screen to install it.</li>
|
47 |
-
<li>Once done, you can launch the game from your app drawer or home screen.</li>
|
48 |
-
</ol> <h2>How to Play Dark Riddle?</h2>
|
49 |
-
<p>Now that you have downloaded and installed <strong>Dark Riddle 13.5.0 APK</strong>, you are ready to play this amazing game. Here are some tips and tricks on how to play Dark Riddle:</p>
|
50 |
-
<h3>Explore the City</h3>
|
51 |
-
<p>The game starts with you arriving in a strange city where you have rented an apartment. You can explore the city and interact with various objects and characters. You can find clues, items, and quests that will help you in your adventure. You can also use your phone to call your friends, order pizza, or play mini-games.</p>
|
52 |
-
<p>dark riddle game download apk<br />
|
53 |
-
dark riddle latest version apk<br />
|
54 |
-
dark riddle mod apk unlimited money<br />
|
55 |
-
dark riddle classic apk<br />
|
56 |
-
dark riddle adventure thriller apk<br />
|
57 |
-
dark riddle neighbor secrets apk<br />
|
58 |
-
dark riddle offline apk<br />
|
59 |
-
dark riddle hack apk<br />
|
60 |
-
dark riddle free download apk<br />
|
61 |
-
dark riddle android game apk<br />
|
62 |
-
dark riddle 21.1.0 apk<br />
|
63 |
-
dark riddle 20.0.0 apk<br />
|
64 |
-
dark riddle 19.0.0 apk<br />
|
65 |
-
dark riddle 18.0.0 apk<br />
|
66 |
-
dark riddle 17.0.0 apk<br />
|
67 |
-
dark riddle 16.0.0 apk<br />
|
68 |
-
dark riddle 15.0.0 apk<br />
|
69 |
-
dark riddle 14.0.0 apk<br />
|
70 |
-
dark riddle 13.5.1 apk<br />
|
71 |
-
dark riddle 13.4.0 apk<br />
|
72 |
-
dark riddle 13.3.0 apk<br />
|
73 |
-
dark riddle 13.2.0 apk<br />
|
74 |
-
dark riddle 13.1.0 apk<br />
|
75 |
-
dark riddle 13.0.0 apk<br />
|
76 |
-
dark riddle 12.5.0 apk<br />
|
77 |
-
dark riddle old version apk<br />
|
78 |
-
dark riddle new update apk<br />
|
79 |
-
dark riddle full version apk<br />
|
80 |
-
dark riddle premium apk<br />
|
81 |
-
dark riddle pro apk<br />
|
82 |
-
dark riddle cracked apk<br />
|
83 |
-
dark riddle unlocked apk<br />
|
84 |
-
dark riddle cheats apk<br />
|
85 |
-
dark riddle tips and tricks apk<br />
|
86 |
-
dark riddle walkthrough guide apk<br />
|
87 |
-
dark riddle gameplay video apk<br />
|
88 |
-
dark riddle review and rating apk<br />
|
89 |
-
dark riddle best alternative games apk<br />
|
90 |
-
download and install dark riddle 13.5.0 apk on android device <br />
|
91 |
-
how to play dark riddle 13.5.0 on pc with emulator</p>
|
92 |
-
<h3>Sneak into the Neighbor's House</h3>
|
93 |
-
<p>Your main goal is to sneak into your neighbor's house and find out what he is hiding in his basement. You have to be careful and avoid being detected by him or his security system. You can use different strategies and tactics to distract him, such as throwing objects, making noises, or setting traps. You can also use some of the items you find in the city, such as a crowbar, a flashlight, or a drone.</p>
|
94 |
-
<h3>Solve Puzzles and Uncover Secrets</h3>
|
95 |
-
<p>Once you are inside the neighbor's house, you have to solve various puzzles and riddles that will lead you to his basement. You will also discover some shocking secrets and mysteries that involve aliens, experiments, and a mysterious organization. You will have to make some choices that will affect the outcome of the story.</p>
|
96 |
-
<h2>Why Should You Play Dark Riddle?</h2>
|
97 |
-
<p>If you are still wondering why you should play <strong>Dark Riddle</strong>, here are some reasons why this game is worth your time and attention:</p>
|
98 |
-
<h3>Interactive Environment and Interesting Quests</h3>
|
99 |
-
<p>Dark Riddle has a rich and interactive environment that allows you to interact with almost everything you see. You can open doors, drawers, windows, and cabinets. You can pick up, throw, or use objects. You can also talk to different characters and complete various quests that will reward you with items, money, or information.</p>
|
100 |
-
<h3>Unique Items and Characters</h3>
|
101 |
-
<p>Dark Riddle has a lot of unique items and characters that make the game more fun and entertaining. You can find and use items such as a banana peel, a rubber duck, a fire extinguisher, or a laser pointer. You can also meet characters such as a police officer, a seller of alien devices, a hacker, or an alien.</p>
|
102 |
-
<h3>Free Game with Optional In-App Purchases</h3>
|
103 |
-
<p>Dark Riddle is a free game that you can download and play without spending any money. However, if you want to enhance your gaming experience, you can also buy some optional in-app purchases that will give you access to more items, features, or modes. For example, you can buy coins, gems, skins, weapons, or the premium version of the game.</p>
|
104 |
-
<h2>Conclusion</h2>
|
105 |
-
<p>In conclusion, <strong>Dark Riddle 13.5.0 APK</strong> is a thrilling adventure game that will keep you hooked for hours. You will enjoy exploring the city, sneaking into the neighbor's house, solving puzzles, and uncovering secrets. You will also love the interactive environment, the unique items and characters, and the free game with optional in-app purchases.</p>
|
106 |
-
<p>If you are looking for a game that combines horror, comedy, and puzzle-solving, then you should definitely try Dark Riddle 13.5.0 APK. You will not regret it!</p>
|
107 |
-
<h2>FAQs</h2>
|
108 |
-
<ul>
|
109 |
-
<li><strong>Q: Is Dark Riddle 13.5.0 APK safe to download and install?</strong></li>
|
110 |
-
<li><strong>A: Yes,</strong> Dark Riddle 13.5.0 APK is safe to download and install from Google Play Store or APKCombo. It does not contain any viruses or malware that could harm your device or data.</li>
|
111 |
-
<li><strong>Q: How much storage space does Dark Riddle 13.5.0 APK require?</strong></li>
|
112 |
-
<li><strong>A: Dark Riddle 13.5.0 APK requires about 150 MB of storage space on your device.</strong></li>
|
113 |
-
<li><strong>Q: What are the minimum system requirements for Dark Riddle 13.5.0 APK?</strong></li>
|
114 |
-
<li><strong>A: Dark Riddle 13.5.0 APK requires Android 4.4 or higher and at least 1 GB of RAM to run smoothly.</strong></li>
|
115 |
-
<li><strong>Q: How many chapters are there in Dark Riddle 13.5.0 APK?</strong></li>
|
116 |
-
<li><strong>A: Dark Riddle 13. 5.0 APK has 5 chapters in the story, plus a bonus chapter that can be unlocked by completing certain tasks.</strong></li>
|
117 |
-
<li><strong>Q: Can I play Dark Riddle 13.5.0 APK offline?</strong></li>
|
118 |
-
<li><strong>A: Yes, you can play Dark Riddle 13.5.0 APK offline without an internet connection. However, some features and modes may require an internet connection to work properly.</strong></li>
|
119 |
-
</ul>
|
120 |
-
<p>I hope you enjoyed reading this article and learned something new about Dark Riddle 13.5.0 APK. If you have any questions or feedback, feel free to leave a comment below. Thank you for your time and attention.</p>
|
121 |
-
</td>
|
122 |
-
</tr>
|
123 |
-
</table></p> 197e85843d<br />
|
124 |
-
<br />
|
125 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Dolphin Emulator APK Everything You Need to Know.md
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Dolphin Emulator APK Versions: Everything You Need to Know</h1>
|
3 |
-
<p>If you are a fan of Nintendo GameCube and Wii games, you may have heard of Dolphin Emulator. Dolphin Emulator is a free and open-source software that allows you to play these games on your PC, Mac, Linux, and Android devices. In this article, we will tell you everything you need to know about Dolphin Emulator APK versions, how to install and use them on your Android device, and what are the pros and cons of doing so.</p>
|
4 |
-
<h2>What is Dolphin Emulator?</h2>
|
5 |
-
<p>Dolphin Emulator is an emulator for two recent Nintendo video game consoles: the GameCube and the Wii. It allows PC gamers to enjoy games for these two consoles in full HD (1080p) with several enhancements: compatibility with all PC controllers, turbo speed, networked multiplayer, and even more. Dolphin Emulator was first released in 2003 as a closed-source project, but was later open-sourced in 2008. Since then, it has been constantly updated and improved by a team of developers and contributors from all over the world.</p>
|
6 |
-
<h2>dolphin emulator apk versions</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://urlin.us/2uT1ty">https://urlin.us/2uT1ty</a></b></p><br /><br />
|
7 |
-
<h3>Dolphin Emulator Features</h3>
|
8 |
-
<p>Dolphin Emulator has many features that make it stand out from other emulators. Some of these features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li><b>High compatibility:</b> Dolphin Emulator can run almost all GameCube and Wii games without major issues. It also supports various file formats, such as ISO, WBFS, CISO, GCZ, and more.</li>
|
11 |
-
<li><b>Graphical enhancements:</b> Dolphin Emulator can render games in higher resolutions than the original consoles, up to 5K. It also supports anti-aliasing, anisotropic filtering, texture scaling, post-processing effects, and custom shaders.</li>
|
12 |
-
<li><b>Audio enhancements:</b> Dolphin Emulator can output high-quality audio with surround sound support. It also supports DSP emulation, audio stretching, and volume adjustment.</li>
|
13 |
-
<li><b>Cheat codes:</b> Dolphin Emulator can enable GameShark, Action Replay, and Gecko codes for various games. It also has a built-in cheat manager that allows you to create and edit your own codes.</li>
|
14 |
-
<li><b>Savestates:</b> Dolphin Emulator can save and load your game progress at any point with savestates. You can also use save files from real consoles or other emulators.</li>
|
15 |
-
<li><b>Netplay:</b> Dolphin Emulator can play multiplayer games online with other users or friends using the Netplay feature. You can also use local multiplayer modes with multiple controllers or keyboards.</li>
|
16 |
-
<li><b>Customization:</b> Dolphin Emulator allows you to customize various aspects of the emulation experience, such as controller configuration, graphics settings, audio settings, interface options, and more.</li>
|
17 |
-
<li><b>Cross-platform:</b> Dolphin Emulator is available for Windows (10 and newer), Linux, macOS (10.15 Catalina and up), and Android (5.0 and above). You can also sync your settings and save files across different devices using cloud storage services.</li>
|
18 |
-
</ul>
|
19 |
-
<h3>Dolphin Emulator Compatibility</h3>
|
20 |
-
<p>Dolphin Emulator has a high compatibility rate with GameCube and Wii games. However, some games may not work properly or at all due to various reasons. Some of these reasons are:</p>
|
21 |
-
<ul>
|
22 |
-
<li><b>Hardware limitations:</b> Some games may require more powerful hardware than your device can provide, especially on Android devices. This may result in low performance, graphical glitches, or crashes.</li>
|
23 |
-
<li><b>Software bugs:</b> Some games may have bugs or errors in their original code that prevent them from running correctly on Dolphin Emulator. These bugs may also affect the real consoles or other emulators.</li>
|
24 |
-
<li><b>Emulation inaccuracies:</b> Some games may rely on specific features or behaviors of the original consoles that are not fully emulated by Dolphin Emulator. These features may include timing, memory, graphics, audio, or input.</li>
|
25 |
-
</ul>
|
26 |
-
<p>To check the compatibility of a specific game with Dolphin Emulator, you can visit the <a href="">official compatibility list</a> on the Dolphin Emulator website. You can also search for user reviews and videos online to see how the game runs on different devices and settings.</p>
|
27 |
-
<h2>How to Install Dolphin Emulator on Android</h2>
|
28 |
-
<p>If you want to play GameCube and Wii games on your Android device, you will need to install Dolphin Emulator APK on your device. Dolphin Emulator APK is an Android application package that contains the Dolphin Emulator software for Android devices. You can download the latest version of Dolphin Emulator APK from the <a href="">official download page</a> on the Dolphin Emulator website. Here are the steps to install Dolphin Emulator APK on your Android device:</p>
|
29 |
-
<h3>Downloading the APK File</h3>
|
30 |
-
<p>To download the APK file, you will need a web browser on your Android device. You can use any web browser you prefer, such as Chrome, Firefox, Opera, or Samsung Internet. Follow these steps to download the APK file:</p>
|
31 |
-
<ol>
|
32 |
-
<li>Open your web browser and go to the <a href="">official download page</a> of Dolphin Emulator.</li>
|
33 |
-
<li>Scroll down to the section that says "Download Dolphin 5.0-15503 for Android". You will see a button that says "Download APK". Tap on it.</li>
|
34 |
-
<li>You will be redirected to a page that says "Dolphin 5.0-15503 for Android". You will see a button that says "Download". Tap on it.</li>
|
35 |
-
<li>You will see a pop-up window that asks you to confirm the download. Tap on "OK".</li>
|
36 |
-
<li>The APK file will start downloading to your device. You can check the progress of the download in your notification bar or in your downloads folder.</li>
|
37 |
-
</ol>
|
38 |
-
<h3>Enabling Unknown Sources</h3>
|
39 |
-
<p>To install the APK file, you will need to enable unknown sources on your device. Unknown sources are sources that are not verified by Google Play Store or other official app stores. By default, Android devices do not allow installing apps from unknown sources for security reasons. However, you can enable unknown sources for specific apps or files that you trust. Follow these steps to enable unknown sources for Dolphin Emulator APK:</p>
|
40 |
-
<ol>
|
41 |
-
<li>Go to your device settings and look for an option that says "Security" or "Privacy". Tap on it.</li>
|
42 |
-
<li>Look for an option that says "Unknown sources" or "Install unknown apps". Tap on it.</li>
|
43 |
-
<li>You will see a list of apps that can install unknown apps. Look for your web browser app and tap on it.</li>
|
44 |
-
<li>You will see a toggle switch that says "Allow from this source" or "Install unknown apps". Turn it on.</li>
|
45 |
-
<li>You will see a warning message that says "Your phone and personal data are more vulnerable to attack by apps from unknown sources. You agree that you are solely responsible for any damage to your phone or loss of data that may result from using these apps." Tap on "OK".</li>
|
46 |
-
</ol>
|
47 |
-
<h3>Installing the APK File</h3>
|
48 |
-
<p>To install the APK file, you will need to locate it on your device and open it. Follow these steps to install the APK file:</p>
|
49 |
-
<ol>
|
50 |
-
<li>Go to your downloads folder and look for a file named "dolphin-master-5.0-15503.apk". Tap on it.</li>
|
51 |
-
<li>You will see a pop-up window that asks you to confirm the installation. Tap on "Install".</li>
|
52 |
-
<li>The installation process will begin and may take a few seconds or minutes depending on your device speed and performance.</li>
|
53 |
-
<li>When the installation is complete, you will see a message that says "App installed". Tap on "Open" to launch Dolphin Emulator or tap on "Done" to exit.</li>
|
54 |
-
</ol>
|
55 |
-
<h2>How to Use Dolphin Emulator on Android</h2>
|
56 |
-
<p>After installing Dolphin Emulator on your Android device, you can start playing GameCube and Wii games on it. However, you will need to do some configuration and preparation before you can enjoy the full emulation experience. Here are some steps to use Dolphin Emulator on Android:</p>
|
57 |
-
<p>dolphin emulator apk download latest version<br />
|
58 |
-
dolphin emulator apk for android 10<br />
|
59 |
-
dolphin emulator apk mod<br />
|
60 |
-
dolphin emulator apk no verification<br />
|
61 |
-
dolphin emulator apk old version<br />
|
62 |
-
dolphin emulator apk pro<br />
|
63 |
-
dolphin emulator apk uptodown<br />
|
64 |
-
dolphin emulator apk with bios<br />
|
65 |
-
dolphin emulator beta apk<br />
|
66 |
-
dolphin emulator custom build apk<br />
|
67 |
-
dolphin emulator gamecube and wii games apk<br />
|
68 |
-
dolphin emulator gold apk<br />
|
69 |
-
dolphin emulator mmj apk<br />
|
70 |
-
dolphin emulator premium apk<br />
|
71 |
-
dolphin emulator stable apk<br />
|
72 |
-
download dolphin emulator 5.0 apk<br />
|
73 |
-
download dolphin emulator for android 11 apk<br />
|
74 |
-
download dolphin emulator for android 4.4.2 apk<br />
|
75 |
-
download dolphin emulator for android 6.0 apk<br />
|
76 |
-
download dolphin emulator for android 7.0 apk<br />
|
77 |
-
download dolphin emulator for android 8.1 apk<br />
|
78 |
-
download dolphin emulator for android 9.0 pie apk<br />
|
79 |
-
download dolphin emulator for android tv box apk<br />
|
80 |
-
download dolphin emulator for pc windows 10 64 bit apk<br />
|
81 |
-
download gamecube games for dolphin emulator android apk<br />
|
82 |
-
how to install dolphin emulator on android phone apk<br />
|
83 |
-
how to play wii games on dolphin emulator android apk<br />
|
84 |
-
how to use cheats in dolphin emulator android apk<br />
|
85 |
-
new super mario bros wii dolphin emulator android apk<br />
|
86 |
-
resident evil 4 wii edition dolphin emulator android apk<br />
|
87 |
-
super mario galaxy 2 wii iso for dolphin emulator android apk<br />
|
88 |
-
super smash bros brawl wii iso for dolphin emulator android apk<br />
|
89 |
-
the legend of zelda twilight princess wii iso for dolphin emulator android apk<br />
|
90 |
-
wwe 13 wii iso for dolphin emulator android apk<br />
|
91 |
-
best settings for dolphin emulator android 2021 apk<br />
|
92 |
-
best settings for pokemon colosseum on dolphin emulator android apk<br />
|
93 |
-
best settings for resident evil zero on dolphin emulator android apk<br />
|
94 |
-
best settings for sonic adventure 2 battle on dolphin emulator android apk<br />
|
95 |
-
best settings for super mario sunshine on dolphin emulator android apk<br />
|
96 |
-
best settings for the legend of zelda wind waker on dolphin emulator android apk<br />
|
97 |
-
can you play gamecube games on dolphin emulator android apk<br />
|
98 |
-
can you play wii u games on dolphin emulator android apk<br />
|
99 |
-
does dolphin emulator work on android tablet apk<br />
|
100 |
-
how to add games to dolphin emulator android home screen shortcut apk <br />
|
101 |
-
how to connect ps4 controller to dolphin emulator android bluetooth apk <br />
|
102 |
-
how to fix lag in dolphin emulator android performance boost tweak guide apk <br />
|
103 |
-
how to get gamecube bios files for dolphin emulator android tutorial video link in description apk <br />
|
104 |
-
how to increase fps in dolphin emulator android frame skip option explained in detail with examples apk <br />
|
105 |
-
how to play multiplayer games on dolphin emulator android online netplay feature walkthrough step by step instructions with screenshots and tips and tricks included in the article link below the video description box please like share and subscribe to our channel thank you very much for watching and have a great day bye bye see you next time cheers happy gaming enjoy the video and stay tuned for more awesome content coming soon bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out bye bye see you later alligator in a while crocodile take care god bless peace out (this is just a joke, please don't use this keyword) 😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂😂👍👍👍👍👍👍</p>
|
106 |
-
<h3>Configuring the Settings</h3>
|
107 |
-
<p>Dolphin Emulator has many settings that you can tweak to optimize the performance and quality of the emulation. You can access the settings by tapping on the menu icon (three horizontal lines) on the top left corner of the screen and then tapping on "Settings". You will see several categories of settings, such as General, Graphics, Audio, Controls, and Advanced. You can explore each category and adjust the settings according to your preference and device capability. Some of the most important settings are:</p>
|
108 |
-
<ul>
|
109 |
-
<li><b>General:</b> Here you can change the language, theme, emulation speed, CPU core, and JIT follow branch settings. You can also enable or disable dual core, cheats, analytics, and auto-update.</li>
|
110 |
-
<li><b>Graphics:</b> Here you can change the video backend, aspect ratio, resolution, anti-aliasing, anisotropic filtering, post-processing effect, and shader compilation mode. You can also enable or disable v-sync, skip EFB access from CPU, ignore format changes, store EFB copies to texture only, texture cache accuracy, external frame buffer (XFB), fast depth calculation, bounding box emulation, and force 24-bit color.</li>
|
111 |
-
<li><b>Audio:</b> Here you can change the audio backend, volume, DSP emulation engine, and audio stretching settings. You can also enable or disable DTK music and DSP HLE emulation.</li>
|
112 |
-
<li><b>Controls:</b> Here you can configure the input devices and buttons for each controller port. You can also enable or disable background input and motion controls.</li>
|
113 |
-
<li><b>Advanced:</b> Here you can change the CPU clock override, MMU emulation mode, custom textures loading behavior, prefetch custom textures settings. You can also enable or disable CPU thread quantum, sync GPU thread, sync on skip idle, speed up disc transfer rate, low-level IOS access in WADs only mode.</li>
|
114 |
-
</ul>
|
115 |
-
<p>You may need to experiment with different settings to find the best balance between performance and quality for your device and game. You can also check online for recommended settings for specific games or devices.</p>
|
116 |
-
<h3>Adding and Launching Games</h3>
|
117 |
-
<p>To play games on Dolphin Emulator, you will need to have the game files on your device or external storage. You can obtain the game files from your own discs using a disc drive and PC software such as CleanRip or FriiDump. Alternatively, you can download the game files from online sources such as ROM websites or torrents. However, downloading game files from online sources may be illegal in some countries or regions. Therefore, we do not endorse or encourage such actions.</p>
|
118 |
-
<p>Once you have the game files on your device or external storage, you can add them to Dolphin Emulator by following these steps:</p>
|
119 |
-
<ol>
|
120 |
-
<li>Open Dolphin Emulator and tap on the menu icon (three horizontal lines) on the top left corner of the screen.</li>
|
121 |
-
<li>Tap on "Add folder" and navigate to the folder where your game files are stored.</li>
|
122 |
-
<li>Select the folder and tap on "Select this directory".</li>
|
123 |
-
<li>Dolphin Emulator will scan the folder and add any compatible game files to its library.</li>
|
124 |
-
<li>You will see a list of games on the main screen of Dolphin Emulator. Tap on any game to launch it.</li>
|
125 |
-
</ol>
|
126 |
-
<h3>Using Controllers and Touchscreen</h3>
|
127 |
-
<p>Dolphin Emulator supports various input methods for playing games on Android devices. You can use physical controllers such as Bluetooth controllers or USB controllers with an OTG adapter. You can also use the touchscreen of your device as a virtual controller. To use controllers or touchscreen with Dolphin Emulator, you will need to configure them in the settings. Here are some steps to use controllers or touchscreen with Dolphin Emulator:</p>
|
128 |
-
<ol>
|
129 |
-
<li>Open Dolphin Emulator and tap on the menu icon (three horizontal lines) on the top left corner of the screen.</li>
|
130 |
-
<li>Tap on "Settings" and then tap on "Controls".</li>
|
131 |
-
<li>You will see four controller ports: Port 1 (Wii Remote 1), Port 2 (Wii Remote 2), Port 3 (GameCube Controller 1), and Port 4 (GameCube Controller 2). Tap on any port that you want to configure.</li>
|
132 |
-
<li>You will see a list of input devices that you can use for that port, such as Emulated Wii Remote, Real Wii Remote, Emulated GameCube Controller, or Standard Controller. Tap on the device that you want to use.</li>
|
133 |
-
<li>If you choose Emulated Wii Remote or Emulated GameCube Controller, you will see a screen that shows the button mapping for that device. You can tap on any button to change its mapping or use the default mapping. You can also enable or disable motion controls, rumble, and IR pointer.</li>
|
134 |
-
<li>If you choose Real Wii Remote, you will need to pair your Wii Remote with your Android device via Bluetooth. To do this, press and hold the 1 and 2 buttons on your Wii Remote until the LED lights start blinking. Then, tap on "Refresh" on the Dolphin Emulator screen and select your Wii Remote from the list of devices. You can also enable or disable continuous scanning, speaker data, and Wii Remote motor.</li>
|
135 |
-
<li>If you choose Standard Controller, you will need to connect your controller to your Android device via Bluetooth or USB. To do this, follow the instructions that came with your controller or check online for guides. Then, tap on "Configure" on the Dolphin Emulator screen and select your controller from the list of devices. You can also change the button mapping for your controller or use the default mapping.</li>
|
136 |
-
<li>After configuring your input device for each port, tap on "Back" to return to the main screen of Dolphin Emulator.</li>
|
137 |
-
</ol>
|
138 |
-
<p>To use the touchscreen of your device as a virtual controller, you will need to enable it in the settings. Here are some steps to use the touchscreen with Dolphin Emulator:</p>
|
139 |
-
<ol>
|
140 |
-
<li>Open Dolphin Emulator and tap on the menu icon (three horizontal lines) on the top left corner of the screen.</li>
|
141 |
-
<li>Tap on "Settings" and then tap on "Controls".</li>
|
142 |
-
<li>Tap on "Port 1 (Wii Remote 1)" and then tap on "Emulated Wii Remote".</li>
|
143 |
-
<li>Tap on "Overlay Controls" and turn it on.</li>
|
144 |
-
<li>You will see a screen that shows the overlay controls for the emulated Wii Remote. You can adjust the size, position, opacity, and visibility of each control element by tapping on it and using the sliders. You can also enable or disable motion controls, rumble, and IR pointer.</li>
|
145 |
-
<li>After adjusting the overlay controls, tap on "Back" to return to the main screen of Dolphin Emulator.</li>
|
146 |
-
</ol>
|
147 |
-
<h2>Pros and Cons of Dolphin Emulator on Android</h2>
|
148 |
-
<p>Dolphin Emulator is a great way to play GameCube and Wii games on your Android device. However, it also has some pros and cons that you should be aware of before using it. Here are some of the pros and cons of Dolphin Emulator on Android:</p>
|
149 |
-
<h3>Pros</h3>
|
150 |
-
<ul>
|
151 |
-
<li><b>Portable gaming:</b> Dolphin Emulator allows you to play your favorite GameCube and Wii games anywhere and anytime on your Android device. You don't need to carry around bulky consoles or discs anymore.</li>
|
152 |
-
<li><b>Enhanced gaming:</b> Dolphin Emulator can improve the graphics and audio quality of your games with various settings and options. You can also use cheats, savestates, netplay, and customization features to enhance your gaming experience.</li>
|
153 |
-
<li><b>Free and open-source:</b> Dolphin Emulator is a free and open-source software that anyone can download and use without any restrictions or fees. You can also contribute to its development or support its creators by donating or sharing feedback.</li>
|
154 |
-
</ul>
|
155 |
-
<h3>Cons</h3>
|
156 |
-
<ul>
|
157 |
-
<li><b>Performance issues:</b> Dolphin Emulator may not run smoothly or at all on some Android devices due to hardware limitations or software bugs. You may experience low frame rates, graphical glitches, audio stuttering, or crashes depending on your device and game.</li>
|
158 |
-
<li><b>Battery drain:</b> Dolphin Emulator consumes a lot of battery power when running games on your Android device. You may need to charge your device frequently or use a power bank when playing games for long periods.</li>
|
159 |
-
<li><b>Legal issues:</b> Dolphin Emulator does not provide any game files for download. You will need to obtain them from your own discs or online sources. However, downloading game files from online sources may be illegal in some countries or regions. Therefore, you should check your local laws before doing so.</li>
|
160 |
-
</ul>
|
161 |
-
<h2>Conclusion</h2>
|
162 |
-
<p>Dolphin Emulator is a powerful and convenient way to enjoy GameCube and Wii games on your Android device. However, it also has some drawbacks and challenges that you should be aware of before using it. You may encounter performance issues, battery drain, or legal issues depending on your device and game. Therefore, you should use Dolphin Emulator with caution and responsibility.</p>
|
163 |
-
<p>We hope this article has helped you learn more about Dolphin Emulator APK versions, how to install and use them on your Android device, and what are the pros and cons of doing so. If you have any questions or feedback, feel free to leave a comment below or contact us through our website. Happy gaming!</p>
|
164 |
-
<h2>FAQs</h2>
|
165 |
-
<p>Here are some frequently asked questions about Dolphin Emulator APK versions:</p>
|
166 |
-
<ol>
|
167 |
-
<li><b>What are the minimum requirements for Dolphin Emulator on Android?</b></li>
|
168 |
-
<p>The minimum requirements for Dolphin Emulator on Android are:</p>
|
169 |
-
<ul>
|
170 |
-
<li>Android 5.0 (Lollipop) or higher</li>
|
171 |
-
<li>A 64-bit processor (ARMv8 or x86_64)</li>
|
172 |
-
<li>A graphics processor that supports OpenGL ES 3.0 or higher</li>
|
173 |
-
<li>At least 2 GB of RAM</li>
|
174 |
-
<li>At least 4 GB of free storage space</li>
|
175 |
-
</ul>
|
176 |
-
<li><b>Where can I get the latest version of Dolphin Emulator APK?</b></li>
|
177 |
-
<p>You can get the latest version of Dolphin Emulator APK from the <a href="">official download page</a> on the Dolphin Emulator website. You can also check the <a href="">official blog</a> or the <a href="">official GitHub page</a> for the latest news and updates on Dolphin Emulator.</p>
|
178 |
-
<li><b>How can I update Dolphin Emulator APK on my Android device?</b></li>
|
179 |
-
<p>You can update Dolphin Emulator APK on your Android device by following these steps:</p>
|
180 |
-
<ol>
|
181 |
-
<li>Download the latest version of Dolphin Emulator APK from the <a href="">official download page</a> on the Dolphin Emulator website.</li>
|
182 |
-
<li>Open your file manager app and locate the downloaded APK file.</li>
|
183 |
-
<li>Tap on the APK file and confirm the installation.</li>
|
184 |
-
<li>The installation process will overwrite the previous version of Dolphin Emulator on your device.</li>
|
185 |
-
<li>When the installation is complete, you can launch Dolphin Emulator and enjoy the new features and improvements.</li>
|
186 |
-
</ol>
|
187 |
-
<li><b>How can I uninstall Dolphin Emulator APK from my Android device?</b></li>
|
188 |
-
<p>You can uninstall Dolphin Emulator APK from your Android device by following these steps:</p>
|
189 |
-
<ol>
|
190 |
-
<li>Go to your device settings and look for an option that says "Apps" or "Applications". Tap on it.</li>
|
191 |
-
<li>Look for an app that says "Dolphin" or "Dolphin Emulator". Tap on it.</li>
|
192 |
-
<li>You will see a screen that shows the app information and options. Tap on "Uninstall".</li>
|
193 |
-
<li>You will see a pop-up window that asks you to confirm the uninstallation. Tap on "OK".</li>
|
194 |
-
<li>The uninstallation process will remove Dolphin Emulator from your device.</li>
|
195 |
-
</ol>
|
196 |
-
<li><b>How can I get help or support for Dolphin Emulator on Android?</b></li>
|
197 |
-
<p>You can get help or support for Dolphin Emulator on Android by visiting the <a href="">official forums</a>, the <a href="">official wiki</a>, or the <a href="">official Discord server</a>. You can also report bugs or request features on the <a href="">official issue tracker</a>.</p> 197e85843d<br />
|
198 |
-
<br />
|
199 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bhop GO APK and Enjoy the Best Parkour Experience.md
DELETED
@@ -1,95 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Bhop GO APK: A Parkour Game for Android</h1>
|
3 |
-
<p>Do you love parkour games? Do you want to practice your bunny hopping skills in realistic 3D environments? If yes, then you should download Bhop GO APK, a simulation game that lets you bhop faster in FPS and simulation games. In this article, we will tell you what Bhop GO APK is, how to download and install it, how to play it, what are its features, what are its pros and cons, and some FAQs about it.</p>
|
4 |
-
<h2>bhop go apk</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://urlin.us/2uSTJQ">https://urlin.us/2uSTJQ</a></b></p><br /><br />
|
5 |
-
<h2>What is Bhop GO APK?</h2>
|
6 |
-
<p>Bhop GO APK is a simulation game that lets you practice bunny hopping in 3D maps. Bunny hopping or bhop is a skill to jump faster in FPS and simulation games by turning left and right while jumping to get more speed. It is simply parkour with air strafes. You can avoid falling and obstacles as they can slow your hopping down. You can also use checkpoints to help you finish the maps easily.</p>
|
7 |
-
<h2>How to Download and Install Bhop GO APK?</h2>
|
8 |
-
<h3>Download from Google Play Store</h3>
|
9 |
-
<p>If you want to download Bhop GO APK from Google Play Store, you can follow these steps:</p>
|
10 |
-
<ol>
|
11 |
-
<li>Open Google Play Store on your Android device.</li>
|
12 |
-
<li>Search for Bhop GO in the search bar.</li>
|
13 |
-
<li>Tap on the Bhop GO icon and then tap on Install.</li>
|
14 |
-
<li>Wait for the installation to complete and then open the game.</li>
|
15 |
-
</ol>
|
16 |
-
<h3>Download from APKCombo</h3>
|
17 |
-
<p>If you want to download Bhop GO APK from APKCombo, you can follow these steps:</p>
|
18 |
-
<ol>
|
19 |
-
<li>Open your web browser and go to [APKCombo].</li>
|
20 |
-
<li>Search for Bhop GO in the search bar.</li>
|
21 |
-
<li>Tap on the Bhop GO icon and then tap on Download APK.</li>
|
22 |
-
<li>After the download is complete, open the APK file and tap on Install.</li>
|
23 |
-
<li>You may need to enable unknown sources in your settings to install the APK file.</li>
|
24 |
-
<li>Wait for the installation to complete and then open the game.</li>
|
25 |
-
</ol>
|
26 |
-
<h2>How to Play Bhop GO APK?</h2>
|
27 |
-
<h3>Single Player Mode</h3>
|
28 |
-
<p>Single player mode lets you play offline and practice your bhop skills on various maps. You can choose from different difficulty levels, such as easy, normal, hard, and extreme. You can also create your own maps using the map editor. You can use the joystick to move, jump, and strafe. You can also use the buttons to crouch, sprint, and use items. You can see your speed, time, and checkpoints on the screen. You can pause the game and change the settings anytime.</p>
|
29 |
-
<h3>Multiplayer Mode</h3>
|
30 |
-
<p>Multiplayer mode lets you play online with friends or other players and compete for the best time on different maps. You can join or create a room with up to 10 players. You can chat with other players and see their names and ranks. You can also vote for the next map or kick a player. You can see your position, time, and speed on the screen. You can also see other players' movements and trails. You can pause the game and change the settings anytime.</p>
|
31 |
-
<p>bhop go apk download<br />
|
32 |
-
bhop go apk mod<br />
|
33 |
-
bhop go apk latest version<br />
|
34 |
-
bhop go apk android<br />
|
35 |
-
bhop go apk free<br />
|
36 |
-
bhop go apk offline<br />
|
37 |
-
bhop go apk online<br />
|
38 |
-
bhop go apk multiplayer<br />
|
39 |
-
bhop go apk hack<br />
|
40 |
-
bhop go apk unlimited coins<br />
|
41 |
-
bhop go apk 2023<br />
|
42 |
-
bhop go apk update<br />
|
43 |
-
bhop go apk old version<br />
|
44 |
-
bhop go apk xapk<br />
|
45 |
-
bhop go apk for pc<br />
|
46 |
-
bhop go apk pure<br />
|
47 |
-
bhop go apk mirror<br />
|
48 |
-
bhop go apk uptodown<br />
|
49 |
-
bhop go apk revdl<br />
|
50 |
-
bhop go apk rexdl<br />
|
51 |
-
bhop go apk no ads<br />
|
52 |
-
bhop go apk pro<br />
|
53 |
-
bhop go apk premium<br />
|
54 |
-
bhop go apk full version<br />
|
55 |
-
bhop go apk cracked<br />
|
56 |
-
bhop go game download apk<br />
|
57 |
-
bhop go game mod apk<br />
|
58 |
-
bhop go game hack apk<br />
|
59 |
-
bhop go game online apk<br />
|
60 |
-
bhop go game offline apk<br />
|
61 |
-
download bhop go simulation game 3d online offline mod menu hack cheat unlimited money coins gems skins knives weapons cases crates roulette vip maps membership free latest version 2023 update new android ios mobile phone tablet device app application file install setup play store google play services apkpure apkmirror apknite apptopia apkpure.com apkmirror.com apknite.com apptopia.com shockapp shockapp.com shockapp.bhopgo com.shockapp.bhopgo gmail.mattwilson720.Blop com.gmail.mattwilson720.Blop activegamedev.com/bhopgo activegamedev.com activegamedev Matthew Killoran Wilson Matthew Wilson Killoran Wilson Matthew Killoran active game dev activegamedev blop blopgo blopgo.com blop.go blop.go.com</p>
|
62 |
-
<h2>What are the Features of Bhop GO APK?</h2>
|
63 |
-
<h3>Collecting Loot on Maps</h3>
|
64 |
-
<p>You can find trampolines, bounce pads, knives, weapons, and skins on different maps to make the game more fun. Trampolines and bounce pads can help you jump higher and faster. Knives and weapons can help you attack other players or objects. Skins can help you customize your character's appearance. You can also buy cases with coins and get random loot.</p>
|
65 |
-
<h3>Racing for World Records</h3>
|
66 |
-
<p>You can check your rank and statistics on the leaderboard and try to beat the world records of other bhoppers. You can see your best time, average time, total time, total jumps, total maps, total coins, total kills, total deaths, total wins, total losses, and total cases on your profile. You can also see the top 100 players for each map and mode. You can also share your achievements with your friends on social media.</p>
|
67 |
-
<h3>Customizing Your Character and Inventory</h3>
|
68 |
-
<p>You can earn coins by playing the game or watching ads. You can use coins to buy cases, spin and win cool knives, gloves, weapons, and skins for your character and inventory. You can also sell or trade your items with other players. You can change your character's name, color, model, trail, gravity, speed, jump force, and sound effects in the settings.</p>
|
69 |
-
<h2>What are the Pros and Cons of Bhop GO APK?</h2>
|
70 |
-
<p>Bhop GO APK is a fun and challenging game that lets you practice your bhop skills in realistic 3D environments. However, it also has some drawbacks that you should be aware of before downloading it. Here is a comparison table of the pros and cons of Bhop GO APK:</p>
|
71 |
-
<table>
|
72 |
-
<tr><th>Pros</th><th>Cons</th></tr>
|
73 |
-
<tr><td>- It is free to download and play</td><td>- It contains ads that may be annoying or intrusive</td></tr>
|
74 |
-
<tr><td>- It has realistic physics and graphics</td><td>- It may lag or crash on some devices</td></tr>
|
75 |
-
<tr><td>- It has many maps and modes to choose from</td><td>- It may have some bugs or glitches</td></tr>
|
76 |
-
<tr><td>- It has a lot of loot and customization options</td><td>- It may require internet connection for some features</td></tr>
|
77 |
-
<tr><td>- It has a friendly and active community</td><td>- It may have some toxic or cheating players</td></tr>
|
78 |
-
</table>
|
79 |
-
<h2>Conclusion</h2>
|
80 |
-
<p>Bhop GO APK is a simulation game that lets you practice bunny hopping in 3D maps. It is a great way to improve your bhop skills and have fun with friends or other players. You can download and install it from Google Play Store or APKCombo. You can play it in single player or multiplayer mode. You can collect loot, race for world records, and customize your character and inventory. You can also enjoy the realistic physics and graphics of the game. However, you should also be aware of the ads, bugs, glitches, lag, internet connection, and toxic or cheating players that may affect your gaming experience. Overall, Bhop GO APK is a game that you should try out if you love parkour games. <h2>FAQs</h2>
|
81 |
-
<p>Here are some common questions and answers about Bhop GO APK:</p>
|
82 |
-
<ol>
|
83 |
-
<li>What is the latest version of Bhop GO APK?</li>
|
84 |
-
<p>The latest version of Bhop GO APK is 195, which was updated on June 16, 2023. It has some bug fixes and improvements.</p>
|
85 |
-
<li>How can I contact the developer of Bhop GO APK?</li>
|
86 |
-
<p>You can contact the developer of Bhop GO APK by emailing them at [email protected] or by visiting their website at [ShockApps].</p>
|
87 |
-
<li>How can I report a bug or a cheater in Bhop GO APK?</li>
|
88 |
-
<p>You can report a bug or a cheater in Bhop GO APK by using the feedback button in the game settings or by emailing the developer at [email protected].</p>
|
89 |
-
<li>How can I support the development of Bhop GO APK?</li>
|
90 |
-
<p>You can support the development of Bhop GO APK by rating and reviewing the game on Google Play Store or APKCombo, by sharing the game with your friends on social media, or by donating to the developer via PayPal.</p>
|
91 |
-
<li>Is Bhop GO APK safe to download and install?</li>
|
92 |
-
<p>Bhop GO APK is safe to download and install from Google Play Store or APKCombo, as they are trusted sources that scan the APK files for viruses and malware. However, you should always be careful when downloading and installing any APK file from unknown sources, as they may contain harmful or malicious code.</p>
|
93 |
-
</ol></p><br />
|
94 |
-
<br />
|
95 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Create Amazing AR Effects for TikTok with Effect House - Download Now.md
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>TikTok Effect House Download: How to Create and Share Amazing AR Effects for TikTok</h1>
|
3 |
-
<p>TikTok is a platform where you can express your creativity in many ways, such as making short-form videos, hosting live streams, and now, creating augmented reality (AR) effects. With Effect House, you can design and develop your own Community Effects for TikTok, and share them with millions of users around the world. In this article, we will show you how to download and use Effect House, what kind of effects you can create with it, how to publish and manage your effects on TikTok, how to find and use other creators' effects on TikTok, and how to connect and collaborate with other effect creators.</p>
|
4 |
-
<h2>tiktok effect house download</h2><br /><p><b><b>Download Zip</b> ::: <a href="https://jinyurl.com/2uNRnq">https://jinyurl.com/2uNRnq</a></b></p><br /><br />
|
5 |
-
<h2>What is Effect House?</h2>
|
6 |
-
<p>Effect House is a platform that allows you to design and develop Community Effects for TikTok. Community Effects are AR effects that can be used by anyone on TikTok to enhance their videos with interactive and immersive elements. Effect House is made for beginners as well as professional designers and developers. You can create, publish, and share dynamic effects that can be used by TikTok users around the world. To get started, you need to log in with your TikTok account and download Effect House from the official website.</p>
|
7 |
-
<h2>How to Download and Use Effect House?</h2>
|
8 |
-
<p>To download Effect House, you need to visit the official website and log in with your TikTok account. You will then be able to download the software for Windows or Mac. Once you have installed Effect House on your computer, you can launch it and start creating your own effects. You will have access to guides, tutorials, templates, and other community resources that will help you in your effect creation journey. You can also preview your effects in real-time on your phone by scanning a QR code or using a USB cable.</p>
|
9 |
-
<h2>What Kind of Effects Can You Create with Effect House?</h2>
|
10 |
-
<p>Effect House gives you all the tools you need to create AR effects that inspire creativity across TikTok. Some of the features that Effect House offers include:</p>
|
11 |
-
<ul>
|
12 |
-
<li><strong>Segmentation:</strong> You can segment different parts of the scene, such as hair, landscapes, or clothing, and apply different effects to them.</li>
|
13 |
-
<li><strong>Head Tracking:</strong> You can track the movement and orientation of the user's head and face, and attach 3D objects or animations to them.</li>
|
14 |
-
<li><strong>Visual Scripting:</strong> You can use a graphical interface to create logic and interactions for your effects without coding.</li>
|
15 |
-
<li><strong>And more:</strong> You can also use advanced tracking, rich interactions, audio synthesis, particle systems, shaders, and more to create stunning effects.</li>
|
16 |
-
</ul>
|
17 |
-
<h2>How to Publish and Manage Your Effects on TikTok?</h2>
|
18 |
-
<p>Once you have created your effects with Effect House, you can submit them for approval through the effect management portal. You will receive an email notification from the TikTok team on the status of your effects. If your effects are approved, they will be published on TikTok and available for anyone to use. You can also track how your effects perform through analytics, such as views , likes, comments, and shares. You can also generate shareable links for your effects that you can post on your social media platforms or send to your friends. To manage your effects, you can edit, update, or delete them at any time through the effect management portal.</p>
|
19 |
-
<h2>How to Find and Use Other Creators' Effects on TikTok?</h2>
|
20 |
-
<p>If you want to explore and use other creators' effects on TikTok, you have several options to do so. You can:</p>
|
21 |
-
<ul>
|
22 |
-
<li><strong>Explore the effects tab on their profiles:</strong> You can visit the profiles of other effect creators and tap on the effects tab to see all the effects they have created. You can then try them out by tapping on the effect icon and recording a video with it.</li>
|
23 |
-
<li><strong>Explore the effects detail page:</strong> You can also tap on the effect name or icon on any video that uses an effect to go to the effects detail page. There, you can see more information about the effect, such as its creator, description, and related videos. You can also try it out by tapping on the try it button.</li>
|
24 |
-
<li><strong>Scan the effects QR code:</strong> You can also scan the QR code of any effect that you see on other platforms, such as websites, posters, or flyers. To do so, you need to open the TikTok app and tap on the discover tab. Then, tap on the scan button and point your camera at the QR code. You will then be able to access and use the effect.</li>
|
25 |
-
<li><strong>Use the effect link:</strong> You can also use the effect link that is generated by the effect creator or shared by other users. To do so, you need to copy and paste the link into your browser or tap on it if you see it on another app. You will then be redirected to the TikTok app where you can use the effect.</li>
|
26 |
-
</ul>
|
27 |
-
<h2>How to Connect and Collaborate with Other Effect Creators?</h2>
|
28 |
-
<p>One of the best things about Effect House is that it allows you to connect and collaborate with other effect creators who share your passion and vision. You can join the Effect House Discord community, where you can chat with other creators, share feedback, ask questions, get support, and learn from each other. You can also get inspired by other creators' work by browsing their effects on TikTok or Effect House's website. You can also follow them on TikTok or other social media platforms to stay updated on their latest creations.</p>
|
29 |
-
<h2>Conclusion</h2>
|
30 |
-
<p>Effect House is a platform that enables you to create and share amazing AR effects for TikTok. It is easy to use, powerful, and fun. You can design and develop your own Community Effects for TikTok, and share them with millions of users around the world. You can also explore and use other creators' effects on TikTok, and connect and collaborate with them through the Effect House Discord community. Effect House is a great way to express your creativity and enhance your TikTok experience. So what are you waiting for? Download Effect House today and start creating your own effects!</p>
|
31 |
-
<h3>FAQs</h3>
|
32 |
-
<p>Here are some common questions about Effect House:</p>
|
33 |
-
<ol>
|
34 |
-
<li><strong>Is Effect House free?</strong></li>
|
35 |
-
<p>Yes, Effect House is free to download and use. However, you need to have a TikTok account to log in and create effects.</p>
|
36 |
-
<p>How to create AR effects for TikTok with Effect House<br />
|
37 |
-
Effect House tutorials and guides for TikTok creators<br />
|
38 |
-
Effect House login and download for Mac and Windows<br />
|
39 |
-
TikTok Effect House community and resources<br />
|
40 |
-
Effect House segmentation and head tracking features<br />
|
41 |
-
How to submit and publish effects on TikTok Effect House<br />
|
42 |
-
Effect House visual scripting and interaction tools<br />
|
43 |
-
How to view and use effects from TikTok Effect House<br />
|
44 |
-
Effect House analytics and performance tracking for TikTok effects<br />
|
45 |
-
How to report effects that violate TikTok Community Guidelines<br />
|
46 |
-
Effect House templates and examples for TikTok effects<br />
|
47 |
-
How to join Effect House Discord and connect with other creators<br />
|
48 |
-
Effect House QR code and effect link for TikTok effects<br />
|
49 |
-
How to find effects by name or creator on TikTok Effect House<br />
|
50 |
-
Effect House attribution and profile tab for TikTok effects<br />
|
51 |
-
How to preview and test effects in-app on TikTok Effect House<br />
|
52 |
-
Effect House review and approval process for TikTok effects<br />
|
53 |
-
Effect House effect management portal for TikTok effects<br />
|
54 |
-
How to create dynamic and interactive effects on TikTok Effect House<br />
|
55 |
-
Effect House robust AR capabilities for TikTok effects<br />
|
56 |
-
How to design and develop Community Effects for TikTok with Effect House<br />
|
57 |
-
Effect House changes and updates on the TikTok app<br />
|
58 |
-
How to share feedback and suggestions on Effect House topics<br />
|
59 |
-
Effect House learning resources and video tutorials for TikTok effects<br />
|
60 |
-
How to create effects for different categories on TikTok Effect House<br />
|
61 |
-
How to use Effect House built-in capabilities for TikTok effects<br />
|
62 |
-
How to create effects that inspire creativity on TikTok with Effect House<br />
|
63 |
-
Effect House effect tray and detail page for TikTok effects<br />
|
64 |
-
How to track the status of your effects on TikTok Effect House<br />
|
65 |
-
Effect House powerful, intuitive, and expressive features for TikTok effects</p>
|
66 |
-
<li><strong>What are the system requirements for Effect House?</strong></li>
|
67 |
-
<p>You need to have a Windows 10 or Mac OS 10.15 or higher computer with at least 8 GB of RAM and a dedicated graphics card to run Effect House. You also need to have a smartphone with Android 7.0 or iOS 13.0 or higher to preview your effects.</p>
|
68 |
-
<li><strong>How long does it take to get my effects approved by TikTok?</strong></li>
|
69 |
-
<p>The approval process may vary depending on the volume of submissions and the quality of your effects. Generally, it takes about 24 hours for your effects to be reviewed by the TikTok team. You will receive an email notification once your effects are approved or rejected.</p>
|
70 |
-
<li><strong>How can I monetize my effects on TikTok?</strong></li>
|
71 |
-
<p>TikTok does not currently offer a direct way to monetize your effects on TikTok. However, you can use your effects as a way to showcase your skills and portfolio, attract more followers and engagement, promote your brand or business, or collaborate with other creators or brands.</p>
|
72 |
-
<li><strong>Where can I find more resources and support for Effect House?</strong></li>
|
73 |
-
<p>You can find more resources and support for Effect House on their official website, where you can access guides, tutorials, templates, and other community resources. You can also join the Effect House Discord community, where you can chat with other effect creators, share feedback, ask questions, get support, and learn from each other. You can also contact the Effect House team through email or social media if you have any issues or suggestions.</p><br />
|
74 |
-
<br />
|
75 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/mesh.py
DELETED
@@ -1,328 +0,0 @@
|
|
1 |
-
"""Meshes, conforming to the glTF 2.0 standards as specified in
|
2 |
-
https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-mesh
|
3 |
-
|
4 |
-
Author: Matthew Matl
|
5 |
-
"""
|
6 |
-
import copy
|
7 |
-
|
8 |
-
import numpy as np
|
9 |
-
import trimesh
|
10 |
-
|
11 |
-
from .primitive import Primitive
|
12 |
-
from .constants import GLTF
|
13 |
-
from .material import MetallicRoughnessMaterial
|
14 |
-
|
15 |
-
|
16 |
-
class Mesh(object):
|
17 |
-
"""A set of primitives to be rendered.
|
18 |
-
|
19 |
-
Parameters
|
20 |
-
----------
|
21 |
-
name : str
|
22 |
-
The user-defined name of this object.
|
23 |
-
primitives : list of :class:`Primitive`
|
24 |
-
The primitives associated with this mesh.
|
25 |
-
weights : (k,) float
|
26 |
-
Array of weights to be applied to the Morph Targets.
|
27 |
-
is_visible : bool
|
28 |
-
If False, the mesh will not be rendered.
|
29 |
-
"""
|
30 |
-
|
31 |
-
def __init__(self, primitives, name=None, weights=None, is_visible=True):
|
32 |
-
self.primitives = primitives
|
33 |
-
self.name = name
|
34 |
-
self.weights = weights
|
35 |
-
self.is_visible = is_visible
|
36 |
-
|
37 |
-
self._bounds = None
|
38 |
-
|
39 |
-
@property
|
40 |
-
def name(self):
|
41 |
-
"""str : The user-defined name of this object.
|
42 |
-
"""
|
43 |
-
return self._name
|
44 |
-
|
45 |
-
@name.setter
|
46 |
-
def name(self, value):
|
47 |
-
if value is not None:
|
48 |
-
value = str(value)
|
49 |
-
self._name = value
|
50 |
-
|
51 |
-
@property
|
52 |
-
def primitives(self):
|
53 |
-
"""list of :class:`Primitive` : The primitives associated
|
54 |
-
with this mesh.
|
55 |
-
"""
|
56 |
-
return self._primitives
|
57 |
-
|
58 |
-
@primitives.setter
|
59 |
-
def primitives(self, value):
|
60 |
-
self._primitives = value
|
61 |
-
|
62 |
-
@property
|
63 |
-
def weights(self):
|
64 |
-
"""(k,) float : Weights to be applied to morph targets.
|
65 |
-
"""
|
66 |
-
return self._weights
|
67 |
-
|
68 |
-
@weights.setter
|
69 |
-
def weights(self, value):
|
70 |
-
self._weights = value
|
71 |
-
|
72 |
-
@property
|
73 |
-
def is_visible(self):
|
74 |
-
"""bool : Whether the mesh is visible.
|
75 |
-
"""
|
76 |
-
return self._is_visible
|
77 |
-
|
78 |
-
@is_visible.setter
|
79 |
-
def is_visible(self, value):
|
80 |
-
self._is_visible = value
|
81 |
-
|
82 |
-
@property
|
83 |
-
def bounds(self):
|
84 |
-
"""(2,3) float : The axis-aligned bounds of the mesh.
|
85 |
-
"""
|
86 |
-
if self._bounds is None:
|
87 |
-
bounds = np.array([[np.infty, np.infty, np.infty],
|
88 |
-
[-np.infty, -np.infty, -np.infty]])
|
89 |
-
for p in self.primitives:
|
90 |
-
bounds[0] = np.minimum(bounds[0], p.bounds[0])
|
91 |
-
bounds[1] = np.maximum(bounds[1], p.bounds[1])
|
92 |
-
self._bounds = bounds
|
93 |
-
return self._bounds
|
94 |
-
|
95 |
-
@property
|
96 |
-
def centroid(self):
|
97 |
-
"""(3,) float : The centroid of the mesh's axis-aligned bounding box
|
98 |
-
(AABB).
|
99 |
-
"""
|
100 |
-
return np.mean(self.bounds, axis=0)
|
101 |
-
|
102 |
-
@property
|
103 |
-
def extents(self):
|
104 |
-
"""(3,) float : The lengths of the axes of the mesh's AABB.
|
105 |
-
"""
|
106 |
-
return np.diff(self.bounds, axis=0).reshape(-1)
|
107 |
-
|
108 |
-
@property
|
109 |
-
def scale(self):
|
110 |
-
"""(3,) float : The length of the diagonal of the mesh's AABB.
|
111 |
-
"""
|
112 |
-
return np.linalg.norm(self.extents)
|
113 |
-
|
114 |
-
@property
|
115 |
-
def is_transparent(self):
|
116 |
-
"""bool : If True, the mesh is partially-transparent.
|
117 |
-
"""
|
118 |
-
for p in self.primitives:
|
119 |
-
if p.is_transparent:
|
120 |
-
return True
|
121 |
-
return False
|
122 |
-
|
123 |
-
@staticmethod
|
124 |
-
def from_points(points, colors=None, normals=None,
|
125 |
-
is_visible=True, poses=None):
|
126 |
-
"""Create a Mesh from a set of points.
|
127 |
-
|
128 |
-
Parameters
|
129 |
-
----------
|
130 |
-
points : (n,3) float
|
131 |
-
The point positions.
|
132 |
-
colors : (n,3) or (n,4) float, optional
|
133 |
-
RGB or RGBA colors for each point.
|
134 |
-
normals : (n,3) float, optionals
|
135 |
-
The normal vectors for each point.
|
136 |
-
is_visible : bool
|
137 |
-
If False, the points will not be rendered.
|
138 |
-
poses : (x,4,4)
|
139 |
-
Array of 4x4 transformation matrices for instancing this object.
|
140 |
-
|
141 |
-
Returns
|
142 |
-
-------
|
143 |
-
mesh : :class:`Mesh`
|
144 |
-
The created mesh.
|
145 |
-
"""
|
146 |
-
primitive = Primitive(
|
147 |
-
positions=points,
|
148 |
-
normals=normals,
|
149 |
-
color_0=colors,
|
150 |
-
mode=GLTF.POINTS,
|
151 |
-
poses=poses
|
152 |
-
)
|
153 |
-
mesh = Mesh(primitives=[primitive], is_visible=is_visible)
|
154 |
-
return mesh
|
155 |
-
|
156 |
-
    @staticmethod
    def from_trimesh(mesh, material=None, is_visible=True,
                     poses=None, wireframe=False, smooth=True):
        """Create a Mesh from a :class:`~trimesh.base.Trimesh`.

        Parameters
        ----------
        mesh : :class:`~trimesh.base.Trimesh` or list of them
            A triangular mesh or a list of meshes.
        material : :class:`Material`
            The material of the object. Overrides any mesh material.
            If not specified and the mesh has no material, a default material
            will be used.
        is_visible : bool
            If False, the mesh will not be rendered.
        poses : (n,4,4) float
            Array of 4x4 transformation matrices for instancing this object.
        wireframe : bool
            If `True`, the mesh will be rendered as a wireframe object
        smooth : bool
            If `True`, the mesh will be rendered with interpolated vertex
            normals. Otherwise, the mesh edges will stay sharp.

        Returns
        -------
        mesh : :class:`Mesh`
            The created mesh.
        """

        # Accept either a single Trimesh or any common container of them;
        # everything downstream operates on a flat list.
        if isinstance(mesh, (list, tuple, set, np.ndarray)):
            meshes = list(mesh)
        elif isinstance(mesh, trimesh.Trimesh):
            meshes = [mesh]
        else:
            raise TypeError('Expected a Trimesh or a list, got a {}'
                            .format(type(mesh)))

        # One Primitive is created per input mesh.
        primitives = []
        for m in meshes:
            positions = None
            normals = None
            indices = None

            # Compute positions, normals, and indices.
            if smooth:
                # Smooth shading: share vertices via an index buffer and use
                # per-vertex normals so normals interpolate across faces.
                positions = m.vertices.copy()
                normals = m.vertex_normals.copy()
                indices = m.faces.copy()
            else:
                # Flat shading: expand vertices per-face (3 per triangle) and
                # repeat each face normal for its 3 corners; no index buffer
                # (``indices`` stays None).
                positions = m.vertices[m.faces].reshape((3 * len(m.faces), 3))
                normals = np.repeat(m.face_normals, 3, axis=0)

            # Compute colors, texture coords, and material properties from
            # the trimesh visuals.
            color_0, texcoord_0, primitive_material = Mesh._get_trimesh_props(m, smooth=smooth, material=material)

            # Override if material is given. Deep copy so per-primitive
            # mutation below (e.g. the wireframe flag) cannot leak into the
            # caller's material object.
            if material is not None:
                #primitive_material = copy.copy(material)
                primitive_material = copy.deepcopy(material) # TODO
            
            if primitive_material is None:
                # Replace material with default if needed (grey, slightly
                # metallic, blended).
                primitive_material = MetallicRoughnessMaterial(
                    alphaMode='BLEND',
                    baseColorFactor=[0.3, 0.3, 0.3, 1.0],
                    metallicFactor=0.2,
                    roughnessFactor=0.8
                )

            # Wireframe is a per-primitive render flag, applied regardless of
            # where the material came from.
            primitive_material.wireframe = wireframe

            # Create the primitive
            primitives.append(Primitive(
                positions=positions,
                normals=normals,
                texcoord_0=texcoord_0,
                color_0=color_0,
                indices=indices,
                material=primitive_material,
                mode=GLTF.TRIANGLES,
                poses=poses
            ))

        return Mesh(primitives=primitives, is_visible=is_visible)
|
240 |
-
|
241 |
-
@staticmethod
|
242 |
-
def _get_trimesh_props(mesh, smooth=False, material=None):
|
243 |
-
"""Gets the vertex colors, texture coordinates, and material properties
|
244 |
-
from a :class:`~trimesh.base.Trimesh`.
|
245 |
-
"""
|
246 |
-
colors = None
|
247 |
-
texcoords = None
|
248 |
-
|
249 |
-
# If the trimesh visual is undefined, return none for both
|
250 |
-
if not mesh.visual.defined:
|
251 |
-
return colors, texcoords, material
|
252 |
-
|
253 |
-
# Process vertex colors
|
254 |
-
if material is None:
|
255 |
-
if mesh.visual.kind == 'vertex':
|
256 |
-
vc = mesh.visual.vertex_colors.copy()
|
257 |
-
if smooth:
|
258 |
-
colors = vc
|
259 |
-
else:
|
260 |
-
colors = vc[mesh.faces].reshape(
|
261 |
-
(3 * len(mesh.faces), vc.shape[1])
|
262 |
-
)
|
263 |
-
material = MetallicRoughnessMaterial(
|
264 |
-
alphaMode='BLEND',
|
265 |
-
baseColorFactor=[1.0, 1.0, 1.0, 1.0],
|
266 |
-
metallicFactor=0.2,
|
267 |
-
roughnessFactor=0.8
|
268 |
-
)
|
269 |
-
# Process face colors
|
270 |
-
elif mesh.visual.kind == 'face':
|
271 |
-
if smooth:
|
272 |
-
raise ValueError('Cannot use face colors with a smooth mesh')
|
273 |
-
else:
|
274 |
-
colors = np.repeat(mesh.visual.face_colors, 3, axis=0)
|
275 |
-
|
276 |
-
material = MetallicRoughnessMaterial(
|
277 |
-
alphaMode='BLEND',
|
278 |
-
baseColorFactor=[1.0, 1.0, 1.0, 1.0],
|
279 |
-
metallicFactor=0.2,
|
280 |
-
roughnessFactor=0.8
|
281 |
-
)
|
282 |
-
|
283 |
-
# Process texture colors
|
284 |
-
if mesh.visual.kind == 'texture':
|
285 |
-
# Configure UV coordinates
|
286 |
-
if mesh.visual.uv is not None and len(mesh.visual.uv) != 0:
|
287 |
-
uv = mesh.visual.uv.copy()
|
288 |
-
if smooth:
|
289 |
-
texcoords = uv
|
290 |
-
else:
|
291 |
-
texcoords = uv[mesh.faces].reshape(
|
292 |
-
(3 * len(mesh.faces), uv.shape[1])
|
293 |
-
)
|
294 |
-
|
295 |
-
if material is None:
|
296 |
-
# Configure mesh material
|
297 |
-
mat = mesh.visual.material
|
298 |
-
|
299 |
-
if isinstance(mat, trimesh.visual.texture.PBRMaterial):
|
300 |
-
material = MetallicRoughnessMaterial(
|
301 |
-
normalTexture=mat.normalTexture,
|
302 |
-
occlusionTexture=mat.occlusionTexture,
|
303 |
-
emissiveTexture=mat.emissiveTexture,
|
304 |
-
emissiveFactor=mat.emissiveFactor,
|
305 |
-
alphaMode='BLEND',
|
306 |
-
baseColorFactor=mat.baseColorFactor,
|
307 |
-
baseColorTexture=mat.baseColorTexture,
|
308 |
-
metallicFactor=mat.metallicFactor,
|
309 |
-
roughnessFactor=mat.roughnessFactor,
|
310 |
-
metallicRoughnessTexture=mat.metallicRoughnessTexture,
|
311 |
-
doubleSided=mat.doubleSided,
|
312 |
-
alphaCutoff=mat.alphaCutoff
|
313 |
-
)
|
314 |
-
elif isinstance(mat, trimesh.visual.texture.SimpleMaterial):
|
315 |
-
glossiness = mat.kwargs.get('Ns', 1.0)
|
316 |
-
if isinstance(glossiness, list):
|
317 |
-
glossiness = float(glossiness[0])
|
318 |
-
roughness = (2 / (glossiness + 2)) ** (1.0 / 4.0)
|
319 |
-
material = MetallicRoughnessMaterial(
|
320 |
-
alphaMode='BLEND',
|
321 |
-
roughnessFactor=roughness,
|
322 |
-
baseColorFactor=mat.diffuse,
|
323 |
-
baseColorTexture=mat.image,
|
324 |
-
)
|
325 |
-
elif isinstance(mat, MetallicRoughnessMaterial):
|
326 |
-
material = mat
|
327 |
-
|
328 |
-
return colors, texcoords, material
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/inference/svs/opencpop/map.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
def cpop_pinyin2ph_func():
|
2 |
-
# In the README file of opencpop dataset, they defined a "pinyin to phoneme mapping table"
|
3 |
-
pinyin2phs = {'AP': 'AP', 'SP': 'SP'}
|
4 |
-
with open('NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt') as rf:
|
5 |
-
for line in rf.readlines():
|
6 |
-
elements = [x.strip() for x in line.split('|') if x.strip() != '']
|
7 |
-
pinyin2phs[elements[0]] = elements[1]
|
8 |
-
return pinyin2phs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/sound_extraction/model/resunet_film.py
DELETED
@@ -1,110 +0,0 @@
|
|
1 |
-
from .modules import *
|
2 |
-
import numpy as np
|
3 |
-
|
4 |
-
class UNetRes_FiLM(nn.Module):
|
5 |
-
def __init__(self, channels, cond_embedding_dim, nsrc=1):
|
6 |
-
super(UNetRes_FiLM, self).__init__()
|
7 |
-
activation = 'relu'
|
8 |
-
momentum = 0.01
|
9 |
-
|
10 |
-
self.nsrc = nsrc
|
11 |
-
self.channels = channels
|
12 |
-
self.downsample_ratio = 2 ** 6 # This number equals 2^{#encoder_blocks}
|
13 |
-
|
14 |
-
self.encoder_block1 = EncoderBlockRes2BCond(in_channels=channels * nsrc, out_channels=32,
|
15 |
-
downsample=(2, 2), activation=activation, momentum=momentum,
|
16 |
-
cond_embedding_dim=cond_embedding_dim)
|
17 |
-
self.encoder_block2 = EncoderBlockRes2BCond(in_channels=32, out_channels=64,
|
18 |
-
downsample=(2, 2), activation=activation, momentum=momentum,
|
19 |
-
cond_embedding_dim=cond_embedding_dim)
|
20 |
-
self.encoder_block3 = EncoderBlockRes2BCond(in_channels=64, out_channels=128,
|
21 |
-
downsample=(2, 2), activation=activation, momentum=momentum,
|
22 |
-
cond_embedding_dim=cond_embedding_dim)
|
23 |
-
self.encoder_block4 = EncoderBlockRes2BCond(in_channels=128, out_channels=256,
|
24 |
-
downsample=(2, 2), activation=activation, momentum=momentum,
|
25 |
-
cond_embedding_dim=cond_embedding_dim)
|
26 |
-
self.encoder_block5 = EncoderBlockRes2BCond(in_channels=256, out_channels=384,
|
27 |
-
downsample=(2, 2), activation=activation, momentum=momentum,
|
28 |
-
cond_embedding_dim=cond_embedding_dim)
|
29 |
-
self.encoder_block6 = EncoderBlockRes2BCond(in_channels=384, out_channels=384,
|
30 |
-
downsample=(2, 2), activation=activation, momentum=momentum,
|
31 |
-
cond_embedding_dim=cond_embedding_dim)
|
32 |
-
self.conv_block7 = ConvBlockResCond(in_channels=384, out_channels=384,
|
33 |
-
kernel_size=(3, 3), activation=activation, momentum=momentum,
|
34 |
-
cond_embedding_dim=cond_embedding_dim)
|
35 |
-
self.decoder_block1 = DecoderBlockRes2BCond(in_channels=384, out_channels=384,
|
36 |
-
stride=(2, 2), activation=activation, momentum=momentum,
|
37 |
-
cond_embedding_dim=cond_embedding_dim)
|
38 |
-
self.decoder_block2 = DecoderBlockRes2BCond(in_channels=384, out_channels=384,
|
39 |
-
stride=(2, 2), activation=activation, momentum=momentum,
|
40 |
-
cond_embedding_dim=cond_embedding_dim)
|
41 |
-
self.decoder_block3 = DecoderBlockRes2BCond(in_channels=384, out_channels=256,
|
42 |
-
stride=(2, 2), activation=activation, momentum=momentum,
|
43 |
-
cond_embedding_dim=cond_embedding_dim)
|
44 |
-
self.decoder_block4 = DecoderBlockRes2BCond(in_channels=256, out_channels=128,
|
45 |
-
stride=(2, 2), activation=activation, momentum=momentum,
|
46 |
-
cond_embedding_dim=cond_embedding_dim)
|
47 |
-
self.decoder_block5 = DecoderBlockRes2BCond(in_channels=128, out_channels=64,
|
48 |
-
stride=(2, 2), activation=activation, momentum=momentum,
|
49 |
-
cond_embedding_dim=cond_embedding_dim)
|
50 |
-
self.decoder_block6 = DecoderBlockRes2BCond(in_channels=64, out_channels=32,
|
51 |
-
stride=(2, 2), activation=activation, momentum=momentum,
|
52 |
-
cond_embedding_dim=cond_embedding_dim)
|
53 |
-
|
54 |
-
self.after_conv_block1 = ConvBlockResCond(in_channels=32, out_channels=32,
|
55 |
-
kernel_size=(3, 3), activation=activation, momentum=momentum,
|
56 |
-
cond_embedding_dim=cond_embedding_dim)
|
57 |
-
|
58 |
-
self.after_conv2 = nn.Conv2d(in_channels=32, out_channels=1,
|
59 |
-
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
|
60 |
-
|
61 |
-
self.init_weights()
|
62 |
-
|
63 |
-
def init_weights(self):
|
64 |
-
init_layer(self.after_conv2)
|
65 |
-
|
66 |
-
def forward(self, sp, cond_vec, dec_cond_vec):
|
67 |
-
"""
|
68 |
-
Args:
|
69 |
-
input: sp: (batch_size, channels_num, segment_samples)
|
70 |
-
Outputs:
|
71 |
-
output_dict: {
|
72 |
-
'wav': (batch_size, channels_num, segment_samples),
|
73 |
-
'sp': (batch_size, channels_num, time_steps, freq_bins)}
|
74 |
-
"""
|
75 |
-
|
76 |
-
x = sp
|
77 |
-
# Pad spectrogram to be evenly divided by downsample ratio.
|
78 |
-
origin_len = x.shape[2] # time_steps
|
79 |
-
pad_len = int(np.ceil(x.shape[2] / self.downsample_ratio)) * self.downsample_ratio - origin_len
|
80 |
-
x = F.pad(x, pad=(0, 0, 0, pad_len))
|
81 |
-
x = x[..., 0: x.shape[-1] - 2] # (bs, channels, T, F)
|
82 |
-
|
83 |
-
# UNet
|
84 |
-
(x1_pool, x1) = self.encoder_block1(x, cond_vec) # x1_pool: (bs, 32, T / 2, F / 2)
|
85 |
-
(x2_pool, x2) = self.encoder_block2(x1_pool, cond_vec) # x2_pool: (bs, 64, T / 4, F / 4)
|
86 |
-
(x3_pool, x3) = self.encoder_block3(x2_pool, cond_vec) # x3_pool: (bs, 128, T / 8, F / 8)
|
87 |
-
(x4_pool, x4) = self.encoder_block4(x3_pool, dec_cond_vec) # x4_pool: (bs, 256, T / 16, F / 16)
|
88 |
-
(x5_pool, x5) = self.encoder_block5(x4_pool, dec_cond_vec) # x5_pool: (bs, 512, T / 32, F / 32)
|
89 |
-
(x6_pool, x6) = self.encoder_block6(x5_pool, dec_cond_vec) # x6_pool: (bs, 1024, T / 64, F / 64)
|
90 |
-
x_center = self.conv_block7(x6_pool, dec_cond_vec) # (bs, 2048, T / 64, F / 64)
|
91 |
-
x7 = self.decoder_block1(x_center, x6, dec_cond_vec) # (bs, 1024, T / 32, F / 32)
|
92 |
-
x8 = self.decoder_block2(x7, x5, dec_cond_vec) # (bs, 512, T / 16, F / 16)
|
93 |
-
x9 = self.decoder_block3(x8, x4, cond_vec) # (bs, 256, T / 8, F / 8)
|
94 |
-
x10 = self.decoder_block4(x9, x3, cond_vec) # (bs, 128, T / 4, F / 4)
|
95 |
-
x11 = self.decoder_block5(x10, x2, cond_vec) # (bs, 64, T / 2, F / 2)
|
96 |
-
x12 = self.decoder_block6(x11, x1, cond_vec) # (bs, 32, T, F)
|
97 |
-
x = self.after_conv_block1(x12, cond_vec) # (bs, 32, T, F)
|
98 |
-
x = self.after_conv2(x) # (bs, channels, T, F)
|
99 |
-
|
100 |
-
# Recover shape
|
101 |
-
x = F.pad(x, pad=(0, 2))
|
102 |
-
x = x[:, :, 0: origin_len, :]
|
103 |
-
return x
|
104 |
-
|
105 |
-
|
106 |
-
if __name__ == "__main__":
|
107 |
-
model = UNetRes_FiLM(channels=1, cond_embedding_dim=16)
|
108 |
-
cond_vec = torch.randn((1, 16))
|
109 |
-
dec_vec = cond_vec
|
110 |
-
print(model(torch.randn((1, 1, 1001, 513)), cond_vec, dec_vec).size())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/train_vggishish.py
DELETED
@@ -1,199 +0,0 @@
|
|
1 |
-
from loss import WeightedCrossEntropy
|
2 |
-
import random
|
3 |
-
|
4 |
-
import numpy as np
|
5 |
-
import torch
|
6 |
-
import torchvision
|
7 |
-
from omegaconf import OmegaConf
|
8 |
-
from torch.utils.data.dataloader import DataLoader
|
9 |
-
from tqdm import tqdm
|
10 |
-
|
11 |
-
from dataset import VGGSound
|
12 |
-
from transforms import Crop, StandardNormalizeAudio, ToTensor
|
13 |
-
from logger import LoggerWithTBoard
|
14 |
-
from metrics import metrics
|
15 |
-
from model import VGGishish
|
16 |
-
|
17 |
-
if __name__ == "__main__":
|
18 |
-
cfg_cli = OmegaConf.from_cli()
|
19 |
-
cfg_yml = OmegaConf.load(cfg_cli.config)
|
20 |
-
# the latter arguments are prioritized
|
21 |
-
cfg = OmegaConf.merge(cfg_yml, cfg_cli)
|
22 |
-
OmegaConf.set_readonly(cfg, True)
|
23 |
-
print(OmegaConf.to_yaml(cfg))
|
24 |
-
|
25 |
-
logger = LoggerWithTBoard(cfg)
|
26 |
-
|
27 |
-
random.seed(cfg.seed)
|
28 |
-
np.random.seed(cfg.seed)
|
29 |
-
torch.manual_seed(cfg.seed)
|
30 |
-
torch.cuda.manual_seed_all(cfg.seed)
|
31 |
-
# makes iterations faster (in this case 30%) if your inputs are of a fixed size
|
32 |
-
# https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
|
33 |
-
torch.backends.cudnn.benchmark = True
|
34 |
-
|
35 |
-
transforms = [
|
36 |
-
StandardNormalizeAudio(cfg.mels_path),
|
37 |
-
]
|
38 |
-
if cfg.cropped_size not in [None, 'None', 'none']:
|
39 |
-
logger.print_logger.info(f'Using cropping {cfg.cropped_size}')
|
40 |
-
transforms.append(Crop(cfg.cropped_size))
|
41 |
-
transforms.append(ToTensor())
|
42 |
-
transforms = torchvision.transforms.transforms.Compose(transforms)
|
43 |
-
|
44 |
-
datasets = {
|
45 |
-
'train': VGGSound('train', cfg.mels_path, transforms),
|
46 |
-
'valid': VGGSound('valid', cfg.mels_path, transforms),
|
47 |
-
'test': VGGSound('test', cfg.mels_path, transforms),
|
48 |
-
}
|
49 |
-
|
50 |
-
loaders = {
|
51 |
-
'train': DataLoader(datasets['train'], batch_size=cfg.batch_size, shuffle=True, drop_last=True,
|
52 |
-
num_workers=cfg.num_workers, pin_memory=True),
|
53 |
-
'valid': DataLoader(datasets['valid'], batch_size=cfg.batch_size,
|
54 |
-
num_workers=cfg.num_workers, pin_memory=True),
|
55 |
-
'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
|
56 |
-
num_workers=cfg.num_workers, pin_memory=True),
|
57 |
-
}
|
58 |
-
|
59 |
-
device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
|
60 |
-
|
61 |
-
model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['train'].target2label))
|
62 |
-
model = model.to(device)
|
63 |
-
param_num = logger.log_param_num(model)
|
64 |
-
|
65 |
-
if cfg.optimizer == 'adam':
|
66 |
-
optimizer = torch.optim.Adam(
|
67 |
-
model.parameters(), lr=cfg.learning_rate, betas=cfg.betas, weight_decay=cfg.weight_decay)
|
68 |
-
elif cfg.optimizer == 'sgd':
|
69 |
-
optimizer = torch.optim.SGD(
|
70 |
-
model.parameters(), lr=cfg.learning_rate, momentum=cfg.momentum, weight_decay=cfg.weight_decay)
|
71 |
-
else:
|
72 |
-
raise NotImplementedError
|
73 |
-
|
74 |
-
if cfg.cls_weights_in_loss:
|
75 |
-
weights = 1 / datasets['train'].class_counts
|
76 |
-
else:
|
77 |
-
weights = torch.ones(len(datasets['train'].target2label))
|
78 |
-
criterion = WeightedCrossEntropy(weights.to(device))
|
79 |
-
|
80 |
-
# loop over the train and validation multiple times (typical PT boilerplate)
|
81 |
-
no_change_epochs = 0
|
82 |
-
best_valid_loss = float('inf')
|
83 |
-
early_stop_triggered = False
|
84 |
-
|
85 |
-
for epoch in range(cfg.num_epochs):
|
86 |
-
|
87 |
-
for phase in ['train', 'valid']:
|
88 |
-
if phase == 'train':
|
89 |
-
model.train()
|
90 |
-
else:
|
91 |
-
model.eval()
|
92 |
-
|
93 |
-
running_loss = 0
|
94 |
-
preds_from_each_batch = []
|
95 |
-
targets_from_each_batch = []
|
96 |
-
|
97 |
-
prog_bar = tqdm(loaders[phase], f'{phase} ({epoch})', ncols=0)
|
98 |
-
for i, batch in enumerate(prog_bar):
|
99 |
-
inputs = batch['input'].to(device)
|
100 |
-
targets = batch['target'].to(device)
|
101 |
-
|
102 |
-
# zero the parameter gradients
|
103 |
-
optimizer.zero_grad()
|
104 |
-
|
105 |
-
# forward + backward + optimize
|
106 |
-
with torch.set_grad_enabled(phase == 'train'):
|
107 |
-
outputs = model(inputs)
|
108 |
-
loss = criterion(outputs, targets, to_weight=phase == 'train')
|
109 |
-
|
110 |
-
if phase == 'train':
|
111 |
-
loss.backward()
|
112 |
-
optimizer.step()
|
113 |
-
|
114 |
-
# loss
|
115 |
-
running_loss += loss.item()
|
116 |
-
|
117 |
-
# for metrics calculation later on
|
118 |
-
preds_from_each_batch += [outputs.detach().cpu()]
|
119 |
-
targets_from_each_batch += [targets.cpu()]
|
120 |
-
|
121 |
-
# iter logging
|
122 |
-
if i % 50 == 0:
|
123 |
-
logger.log_iter_loss(loss.item(), epoch*len(loaders[phase])+i, phase)
|
124 |
-
# tracks loss in the tqdm progress bar
|
125 |
-
prog_bar.set_postfix(loss=loss.item())
|
126 |
-
|
127 |
-
# logging loss
|
128 |
-
epoch_loss = running_loss / len(loaders[phase])
|
129 |
-
logger.log_epoch_loss(epoch_loss, epoch, phase)
|
130 |
-
|
131 |
-
# logging metrics
|
132 |
-
preds_from_each_batch = torch.cat(preds_from_each_batch)
|
133 |
-
targets_from_each_batch = torch.cat(targets_from_each_batch)
|
134 |
-
metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
|
135 |
-
logger.log_epoch_metrics(metrics_dict, epoch, phase)
|
136 |
-
|
137 |
-
# Early stopping
|
138 |
-
if phase == 'valid':
|
139 |
-
if epoch_loss < best_valid_loss:
|
140 |
-
no_change_epochs = 0
|
141 |
-
best_valid_loss = epoch_loss
|
142 |
-
logger.log_best_model(model, epoch_loss, epoch, optimizer, metrics_dict)
|
143 |
-
else:
|
144 |
-
no_change_epochs += 1
|
145 |
-
logger.print_logger.info(
|
146 |
-
f'Valid loss hasnt changed for {no_change_epochs} patience: {cfg.patience}'
|
147 |
-
)
|
148 |
-
if no_change_epochs >= cfg.patience:
|
149 |
-
early_stop_triggered = True
|
150 |
-
|
151 |
-
if early_stop_triggered:
|
152 |
-
logger.print_logger.info(f'Training is early stopped @ {epoch}')
|
153 |
-
break
|
154 |
-
|
155 |
-
logger.print_logger.info('Finished Training')
|
156 |
-
|
157 |
-
# loading the best model
|
158 |
-
ckpt = torch.load(logger.best_model_path)
|
159 |
-
model.load_state_dict(ckpt['model'])
|
160 |
-
logger.print_logger.info(f'Loading the best model from {logger.best_model_path}')
|
161 |
-
logger.print_logger.info((f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}'))
|
162 |
-
|
163 |
-
# Testing the model
|
164 |
-
model.eval()
|
165 |
-
running_loss = 0
|
166 |
-
preds_from_each_batch = []
|
167 |
-
targets_from_each_batch = []
|
168 |
-
|
169 |
-
for i, batch in enumerate(loaders['test']):
|
170 |
-
inputs = batch['input'].to(device)
|
171 |
-
targets = batch['target'].to(device)
|
172 |
-
|
173 |
-
# zero the parameter gradients
|
174 |
-
optimizer.zero_grad()
|
175 |
-
|
176 |
-
# forward + backward + optimize
|
177 |
-
with torch.set_grad_enabled(False):
|
178 |
-
outputs = model(inputs)
|
179 |
-
loss = criterion(outputs, targets, to_weight=False)
|
180 |
-
|
181 |
-
# loss
|
182 |
-
running_loss += loss.item()
|
183 |
-
|
184 |
-
# for metrics calculation later on
|
185 |
-
preds_from_each_batch += [outputs.detach().cpu()]
|
186 |
-
targets_from_each_batch += [targets.cpu()]
|
187 |
-
|
188 |
-
# logging metrics
|
189 |
-
preds_from_each_batch = torch.cat(preds_from_each_batch)
|
190 |
-
targets_from_each_batch = torch.cat(targets_from_each_batch)
|
191 |
-
test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
|
192 |
-
test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
|
193 |
-
test_metrics_dict['param_num'] = param_num
|
194 |
-
# TODO: I have no idea why tboard doesn't keep metrics (hparams) when
|
195 |
-
# I run this experiment from cli: `python train_vggishish.py config=./configs/vggish.yaml`
|
196 |
-
# while when I run it in vscode debugger the metrics are logger (wtf)
|
197 |
-
logger.log_test_metrics(test_metrics_dict, dict(cfg), ckpt['epoch'])
|
198 |
-
|
199 |
-
logger.print_logger.info('Finished the experiment')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGuardians/SummarizeWikipediaDocument/summarize_train.py
DELETED
@@ -1,109 +0,0 @@
|
|
1 |
-
import transformers
|
2 |
-
from datasets import load_dataset, load_metric
|
3 |
-
import datasets
|
4 |
-
import random
|
5 |
-
import pandas as pd
|
6 |
-
from IPython.display import display, HTML
|
7 |
-
from transformers import AutoTokenizer
|
8 |
-
from transformers import AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer
|
9 |
-
|
10 |
-
|
11 |
-
model_checkpoint = "t5-small"
|
12 |
-
|
13 |
-
raw_datasets = load_dataset("xsum")
|
14 |
-
metric = load_metric("rouge")
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
def show_random_elements(dataset, num_examples=5):
|
19 |
-
assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
|
20 |
-
picks = []
|
21 |
-
for _ in range(num_examples):
|
22 |
-
pick = random.randint(0, len(dataset) - 1)
|
23 |
-
while pick in picks:
|
24 |
-
pick = random.randint(0, len(dataset) - 1)
|
25 |
-
picks.append(pick)
|
26 |
-
|
27 |
-
df = pd.DataFrame(dataset[picks])
|
28 |
-
for column, typ in dataset.features.items():
|
29 |
-
if isinstance(typ, datasets.ClassLabel):
|
30 |
-
df[column] = df[column].transform(lambda i: typ.names[i])
|
31 |
-
display(HTML(df.to_html()))
|
32 |
-
|
33 |
-
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
|
34 |
-
print(transformers.__version__)
|
35 |
-
|
36 |
-
if model_checkpoint in ["t5-small", "t5-base", "t5-larg", "t5-3b", "t5-11b"]:
|
37 |
-
prefix = "summarize: "
|
38 |
-
else:
|
39 |
-
prefix = ""
|
40 |
-
|
41 |
-
max_input_length = 1024
|
42 |
-
max_target_length = 128
|
43 |
-
|
44 |
-
def preprocess_function(examples):
|
45 |
-
inputs = [prefix + doc for doc in examples["document"]]
|
46 |
-
model_inputs = tokenizer(inputs, max_length=max_input_length, truncation=True)
|
47 |
-
|
48 |
-
# Setup the tokenizer for targets
|
49 |
-
with tokenizer.as_target_tokenizer():
|
50 |
-
labels = tokenizer(examples["summary"], max_length=max_target_length, truncation=True)
|
51 |
-
|
52 |
-
model_inputs["labels"] = labels["input_ids"]
|
53 |
-
return model_inputs
|
54 |
-
|
55 |
-
|
56 |
-
model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
|
57 |
-
|
58 |
-
batch_size = 16
|
59 |
-
model_name = model_checkpoint.split("/")[-1]
|
60 |
-
args = Seq2SeqTrainingArguments(
|
61 |
-
f"{model_name}-finetuned-xsum",
|
62 |
-
evaluation_strategy = "epoch",
|
63 |
-
learning_rate=2e-5,
|
64 |
-
per_device_train_batch_size=batch_size,
|
65 |
-
per_device_eval_batch_size=batch_size,
|
66 |
-
weight_decay=0.01,
|
67 |
-
save_total_limit=3,
|
68 |
-
num_train_epochs=1,
|
69 |
-
predict_with_generate=True,
|
70 |
-
fp16=True,
|
71 |
-
push_to_hub=True,
|
72 |
-
)
|
73 |
-
|
74 |
-
import nltk
|
75 |
-
import numpy as np
|
76 |
-
|
77 |
-
|
78 |
-
def compute_metrics(eval_pred):
|
79 |
-
predictions, labels = eval_pred
|
80 |
-
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
|
81 |
-
# Replace -100 in the labels as we can't decode them.
|
82 |
-
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
|
83 |
-
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
|
84 |
-
|
85 |
-
# Rouge expects a newline after each sentence
|
86 |
-
decoded_preds = ["\n".join(nltk.sent_tokenize(pred.strip())) for pred in decoded_preds]
|
87 |
-
decoded_labels = ["\n".join(nltk.sent_tokenize(label.strip())) for label in decoded_labels]
|
88 |
-
|
89 |
-
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
|
90 |
-
# Extract a few results
|
91 |
-
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
|
92 |
-
|
93 |
-
# Add mean generated length
|
94 |
-
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in predictions]
|
95 |
-
result["gen_len"] = np.mean(prediction_lens)
|
96 |
-
|
97 |
-
return {k: round(v, 4) for k, v in result.items()}
|
98 |
-
|
99 |
-
trainer = Seq2SeqTrainer(
|
100 |
-
model,
|
101 |
-
args,
|
102 |
-
train_dataset=tokenized_datasets["train"],
|
103 |
-
eval_dataset=tokenized_datasets["validation"],
|
104 |
-
data_collator=data_collator,
|
105 |
-
tokenizer=tokenizer,
|
106 |
-
compute_metrics=compute_metrics
|
107 |
-
)
|
108 |
-
|
109 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ARTeLab/DTM_Estimation_SRandD/copy_and_transform_imgs.py
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
from osgeo import gdal
|
2 |
-
import os
|
3 |
-
from PIL import Image
|
4 |
-
import numpy as np
|
5 |
-
|
6 |
-
path = '/home/super/datasets-nas/hirise_oxia_planum_test_tiles_thruth/'
|
7 |
-
|
8 |
-
for i, file_name in enumerate(os.listdir(path)[40:90]):
|
9 |
-
file_path = os.path.join(path, file_name)
|
10 |
-
x = gdal.Open(file_path)
|
11 |
-
x_array = x.ReadAsArray()
|
12 |
-
# print(x_array.shape)
|
13 |
-
pil_img = Image.fromarray(np.uint8(x_array), 'L')
|
14 |
-
pil_img.save(f'demo_imgs/{i}.png')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Abhilashvj/planogram-compliance/val.py
DELETED
@@ -1,593 +0,0 @@
|
|
1 |
-
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
|
2 |
-
"""
|
3 |
-
Validate a trained YOLOv5 model accuracy on a custom dataset
|
4 |
-
|
5 |
-
Usage:
|
6 |
-
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
|
7 |
-
"""
|
8 |
-
|
9 |
-
import argparse
|
10 |
-
import json
|
11 |
-
import os
|
12 |
-
import sys
|
13 |
-
from pathlib import Path
|
14 |
-
from threading import Thread
|
15 |
-
|
16 |
-
import numpy as np
|
17 |
-
import torch
|
18 |
-
from tqdm import tqdm
|
19 |
-
|
20 |
-
FILE = Path(__file__).absolute()
|
21 |
-
sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path
|
22 |
-
|
23 |
-
from models.experimental import attempt_load
|
24 |
-
from utils.callbacks import Callbacks
|
25 |
-
from utils.datasets import create_dataloader
|
26 |
-
from utils.general import (
|
27 |
-
box_iou,
|
28 |
-
check_dataset,
|
29 |
-
check_img_size,
|
30 |
-
check_requirements,
|
31 |
-
check_suffix,
|
32 |
-
check_yaml,
|
33 |
-
coco80_to_coco91_class,
|
34 |
-
colorstr,
|
35 |
-
increment_path,
|
36 |
-
non_max_suppression,
|
37 |
-
scale_coords,
|
38 |
-
set_logging,
|
39 |
-
xywh2xyxy,
|
40 |
-
xyxy2xywh,
|
41 |
-
)
|
42 |
-
from utils.metrics import ConfusionMatrix, ap_per_class
|
43 |
-
from utils.plots import output_to_target, plot_images, plot_study_txt
|
44 |
-
from utils.torch_utils import select_device, time_sync
|
45 |
-
|
46 |
-
|
47 |
-
def save_one_txt(predn, save_conf, shape, file):
|
48 |
-
# Save one txt result
|
49 |
-
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
|
50 |
-
for *xyxy, conf, cls in predn.tolist():
|
51 |
-
xywh = (
|
52 |
-
(xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
|
53 |
-
) # normalized xywh
|
54 |
-
line = (
|
55 |
-
(cls, *xywh, conf) if save_conf else (cls, *xywh)
|
56 |
-
) # label format
|
57 |
-
with open(file, "a") as f:
|
58 |
-
f.write(("%g " * len(line)).rstrip() % line + "\n")
|
59 |
-
|
60 |
-
|
61 |
-
def save_one_json(predn, jdict, path, class_map):
|
62 |
-
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
|
63 |
-
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
|
64 |
-
box = xyxy2xywh(predn[:, :4]) # xywh
|
65 |
-
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
|
66 |
-
for p, b in zip(predn.tolist(), box.tolist()):
|
67 |
-
jdict.append(
|
68 |
-
{
|
69 |
-
"image_id": image_id,
|
70 |
-
"category_id": class_map[int(p[5])],
|
71 |
-
"bbox": [round(x, 3) for x in b],
|
72 |
-
"score": round(p[4], 5),
|
73 |
-
}
|
74 |
-
)
|
75 |
-
|
76 |
-
|
77 |
-
def process_batch(detections, labels, iouv):
|
78 |
-
"""
|
79 |
-
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
|
80 |
-
Arguments:
|
81 |
-
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
|
82 |
-
labels (Array[M, 5]), class, x1, y1, x2, y2
|
83 |
-
Returns:
|
84 |
-
correct (Array[N, 10]), for 10 IoU levels
|
85 |
-
"""
|
86 |
-
correct = torch.zeros(
|
87 |
-
detections.shape[0],
|
88 |
-
iouv.shape[0],
|
89 |
-
dtype=torch.bool,
|
90 |
-
device=iouv.device,
|
91 |
-
)
|
92 |
-
iou = box_iou(labels[:, 1:], detections[:, :4])
|
93 |
-
x = torch.where(
|
94 |
-
(iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])
|
95 |
-
) # IoU above threshold and classes match
|
96 |
-
if x[0].shape[0]:
|
97 |
-
matches = (
|
98 |
-
torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1)
|
99 |
-
.cpu()
|
100 |
-
.numpy()
|
101 |
-
) # [label, detection, iou]
|
102 |
-
if x[0].shape[0] > 1:
|
103 |
-
matches = matches[matches[:, 2].argsort()[::-1]]
|
104 |
-
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
|
105 |
-
# matches = matches[matches[:, 2].argsort()[::-1]]
|
106 |
-
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
|
107 |
-
matches = torch.Tensor(matches).to(iouv.device)
|
108 |
-
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
|
109 |
-
return correct
|
110 |
-
|
111 |
-
|
112 |
-
@torch.no_grad()
|
113 |
-
def run(
|
114 |
-
data,
|
115 |
-
weights=None, # model.pt path(s)
|
116 |
-
batch_size=32, # batch size
|
117 |
-
imgsz=640, # inference size (pixels)
|
118 |
-
conf_thres=0.001, # confidence threshold
|
119 |
-
iou_thres=0.6, # NMS IoU threshold
|
120 |
-
task="val", # train, val, test, speed or study
|
121 |
-
device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
|
122 |
-
single_cls=False, # treat as single-class dataset
|
123 |
-
augment=False, # augmented inference
|
124 |
-
verbose=False, # verbose output
|
125 |
-
save_txt=False, # save results to *.txt
|
126 |
-
save_hybrid=False, # save label+prediction hybrid results to *.txt
|
127 |
-
save_conf=False, # save confidences in --save-txt labels
|
128 |
-
save_json=False, # save a COCO-JSON results file
|
129 |
-
project="runs/val", # save to project/name
|
130 |
-
name="exp", # save to project/name
|
131 |
-
exist_ok=False, # existing project/name ok, do not increment
|
132 |
-
half=True, # use FP16 half-precision inference
|
133 |
-
model=None,
|
134 |
-
dataloader=None,
|
135 |
-
save_dir=Path(""),
|
136 |
-
plots=True,
|
137 |
-
callbacks=Callbacks(),
|
138 |
-
compute_loss=None,
|
139 |
-
):
|
140 |
-
# Initialize/load model and set device
|
141 |
-
training = model is not None
|
142 |
-
if training: # called by train.py
|
143 |
-
device = next(model.parameters()).device # get model device
|
144 |
-
|
145 |
-
else: # called directly
|
146 |
-
device = select_device(device, batch_size=batch_size)
|
147 |
-
|
148 |
-
# Directories
|
149 |
-
save_dir = increment_path(
|
150 |
-
Path(project) / name, exist_ok=exist_ok
|
151 |
-
) # increment run
|
152 |
-
(save_dir / "labels" if save_txt else save_dir).mkdir(
|
153 |
-
parents=True, exist_ok=True
|
154 |
-
) # make dir
|
155 |
-
|
156 |
-
# Load model
|
157 |
-
check_suffix(weights, ".pt")
|
158 |
-
model = attempt_load(weights, map_location=device) # load FP32 model
|
159 |
-
gs = max(int(model.stride.max()), 32) # grid size (max stride)
|
160 |
-
imgsz = check_img_size(imgsz, s=gs) # check image size
|
161 |
-
|
162 |
-
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
|
163 |
-
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
|
164 |
-
# model = nn.DataParallel(model)
|
165 |
-
|
166 |
-
# Data
|
167 |
-
data = check_dataset(data) # check
|
168 |
-
|
169 |
-
# Half
|
170 |
-
half &= device.type != "cpu" # half precision only supported on CUDA
|
171 |
-
if half:
|
172 |
-
model.half()
|
173 |
-
|
174 |
-
# Configure
|
175 |
-
model.eval()
|
176 |
-
is_coco = isinstance(data.get("val"), str) and data["val"].endswith(
|
177 |
-
"coco/val2017.txt"
|
178 |
-
) # COCO dataset
|
179 |
-
nc = 1 if single_cls else int(data["nc"]) # number of classes
|
180 |
-
iouv = torch.linspace(0.5, 0.95, 10).to(
|
181 |
-
device
|
182 |
-
) # iou vector for [email protected]:0.95
|
183 |
-
niou = iouv.numel()
|
184 |
-
|
185 |
-
# Dataloader
|
186 |
-
if not training:
|
187 |
-
if device.type != "cpu":
|
188 |
-
model(
|
189 |
-
torch.zeros(1, 3, imgsz, imgsz)
|
190 |
-
.to(device)
|
191 |
-
.type_as(next(model.parameters()))
|
192 |
-
) # run once
|
193 |
-
task = (
|
194 |
-
task if task in ("train", "val", "test") else "val"
|
195 |
-
) # path to train/val/test images
|
196 |
-
dataloader = create_dataloader(
|
197 |
-
data[task],
|
198 |
-
imgsz,
|
199 |
-
batch_size,
|
200 |
-
gs,
|
201 |
-
single_cls,
|
202 |
-
pad=0.5,
|
203 |
-
rect=True,
|
204 |
-
prefix=colorstr(f"{task}: "),
|
205 |
-
)[0]
|
206 |
-
|
207 |
-
seen = 0
|
208 |
-
confusion_matrix = ConfusionMatrix(nc=nc)
|
209 |
-
names = {
|
210 |
-
k: v
|
211 |
-
for k, v in enumerate(
|
212 |
-
model.names if hasattr(model, "names") else model.module.names
|
213 |
-
)
|
214 |
-
}
|
215 |
-
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
|
216 |
-
s = ("%20s" + "%11s" * 6) % (
|
217 |
-
"Class",
|
218 |
-
"Images",
|
219 |
-
"Labels",
|
220 |
-
"P",
|
221 |
-
"R",
|
222 |
-
"[email protected]",
|
223 |
-
"[email protected]:.95",
|
224 |
-
)
|
225 |
-
dt, p, r, f1, mp, mr, map50, map = (
|
226 |
-
[0.0, 0.0, 0.0],
|
227 |
-
0.0,
|
228 |
-
0.0,
|
229 |
-
0.0,
|
230 |
-
0.0,
|
231 |
-
0.0,
|
232 |
-
0.0,
|
233 |
-
0.0,
|
234 |
-
)
|
235 |
-
loss = torch.zeros(3, device=device)
|
236 |
-
jdict, stats, ap, ap_class = [], [], [], []
|
237 |
-
for batch_i, (img, targets, paths, shapes) in enumerate(
|
238 |
-
tqdm(dataloader, desc=s)
|
239 |
-
):
|
240 |
-
t1 = time_sync()
|
241 |
-
img = img.to(device, non_blocking=True)
|
242 |
-
img = img.half() if half else img.float() # uint8 to fp16/32
|
243 |
-
img /= 255.0 # 0 - 255 to 0.0 - 1.0
|
244 |
-
targets = targets.to(device)
|
245 |
-
nb, _, height, width = img.shape # batch size, channels, height, width
|
246 |
-
t2 = time_sync()
|
247 |
-
dt[0] += t2 - t1
|
248 |
-
|
249 |
-
# Run model
|
250 |
-
out, train_out = model(
|
251 |
-
img, augment=augment
|
252 |
-
) # inference and training outputs
|
253 |
-
dt[1] += time_sync() - t2
|
254 |
-
|
255 |
-
# Compute loss
|
256 |
-
if compute_loss:
|
257 |
-
loss += compute_loss([x.float() for x in train_out], targets)[
|
258 |
-
1
|
259 |
-
] # box, obj, cls
|
260 |
-
|
261 |
-
# Run NMS
|
262 |
-
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(
|
263 |
-
device
|
264 |
-
) # to pixels
|
265 |
-
lb = (
|
266 |
-
[targets[targets[:, 0] == i, 1:] for i in range(nb)]
|
267 |
-
if save_hybrid
|
268 |
-
else []
|
269 |
-
) # for autolabelling
|
270 |
-
t3 = time_sync()
|
271 |
-
out = non_max_suppression(
|
272 |
-
out,
|
273 |
-
conf_thres,
|
274 |
-
iou_thres,
|
275 |
-
labels=lb,
|
276 |
-
multi_label=True,
|
277 |
-
agnostic=single_cls,
|
278 |
-
)
|
279 |
-
dt[2] += time_sync() - t3
|
280 |
-
|
281 |
-
# Statistics per image
|
282 |
-
for si, pred in enumerate(out):
|
283 |
-
labels = targets[targets[:, 0] == si, 1:]
|
284 |
-
nl = len(labels)
|
285 |
-
tcls = labels[:, 0].tolist() if nl else [] # target class
|
286 |
-
path, shape = Path(paths[si]), shapes[si][0]
|
287 |
-
seen += 1
|
288 |
-
|
289 |
-
if len(pred) == 0:
|
290 |
-
if nl:
|
291 |
-
stats.append(
|
292 |
-
(
|
293 |
-
torch.zeros(0, niou, dtype=torch.bool),
|
294 |
-
torch.Tensor(),
|
295 |
-
torch.Tensor(),
|
296 |
-
tcls,
|
297 |
-
)
|
298 |
-
)
|
299 |
-
continue
|
300 |
-
|
301 |
-
# Predictions
|
302 |
-
if single_cls:
|
303 |
-
pred[:, 5] = 0
|
304 |
-
predn = pred.clone()
|
305 |
-
scale_coords(
|
306 |
-
img[si].shape[1:], predn[:, :4], shape, shapes[si][1]
|
307 |
-
) # native-space pred
|
308 |
-
|
309 |
-
# Evaluate
|
310 |
-
if nl:
|
311 |
-
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
|
312 |
-
scale_coords(
|
313 |
-
img[si].shape[1:], tbox, shape, shapes[si][1]
|
314 |
-
) # native-space labels
|
315 |
-
labelsn = torch.cat(
|
316 |
-
(labels[:, 0:1], tbox), 1
|
317 |
-
) # native-space labels
|
318 |
-
correct = process_batch(predn, labelsn, iouv)
|
319 |
-
if plots:
|
320 |
-
confusion_matrix.process_batch(predn, labelsn)
|
321 |
-
else:
|
322 |
-
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
|
323 |
-
stats.append(
|
324 |
-
(correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)
|
325 |
-
) # (correct, conf, pcls, tcls)
|
326 |
-
|
327 |
-
# Save/log
|
328 |
-
if save_txt:
|
329 |
-
save_one_txt(
|
330 |
-
predn,
|
331 |
-
save_conf,
|
332 |
-
shape,
|
333 |
-
file=save_dir / "labels" / (path.stem + ".txt"),
|
334 |
-
)
|
335 |
-
if save_json:
|
336 |
-
save_one_json(
|
337 |
-
predn, jdict, path, class_map
|
338 |
-
) # append to COCO-JSON dictionary
|
339 |
-
callbacks.run(
|
340 |
-
"on_val_image_end", pred, predn, path, names, img[si]
|
341 |
-
)
|
342 |
-
|
343 |
-
# Plot images
|
344 |
-
if plots and batch_i < 3:
|
345 |
-
f = save_dir / f"val_batch{batch_i}_labels.jpg" # labels
|
346 |
-
Thread(
|
347 |
-
target=plot_images,
|
348 |
-
args=(img, targets, paths, f, names),
|
349 |
-
daemon=True,
|
350 |
-
).start()
|
351 |
-
f = save_dir / f"val_batch{batch_i}_pred.jpg" # predictions
|
352 |
-
Thread(
|
353 |
-
target=plot_images,
|
354 |
-
args=(img, output_to_target(out), paths, f, names),
|
355 |
-
daemon=True,
|
356 |
-
).start()
|
357 |
-
|
358 |
-
# Compute statistics
|
359 |
-
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
|
360 |
-
if len(stats) and stats[0].any():
|
361 |
-
p, r, ap, f1, ap_class = ap_per_class(
|
362 |
-
*stats, plot=plots, save_dir=save_dir, names=names
|
363 |
-
)
|
364 |
-
ap50, ap = ap[:, 0], ap.mean(1) # [email protected], [email protected]:0.95
|
365 |
-
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
|
366 |
-
nt = np.bincount(
|
367 |
-
stats[3].astype(np.int64), minlength=nc
|
368 |
-
) # number of targets per class
|
369 |
-
else:
|
370 |
-
nt = torch.zeros(1)
|
371 |
-
|
372 |
-
# Print results
|
373 |
-
pf = "%20s" + "%11i" * 2 + "%11.3g" * 4 # print format
|
374 |
-
print(pf % ("all", seen, nt.sum(), mp, mr, map50, map))
|
375 |
-
|
376 |
-
# Print results per class
|
377 |
-
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
|
378 |
-
for i, c in enumerate(ap_class):
|
379 |
-
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
|
380 |
-
|
381 |
-
# Print speeds
|
382 |
-
t = tuple(x / seen * 1e3 for x in dt) # speeds per image
|
383 |
-
if not training:
|
384 |
-
shape = (batch_size, 3, imgsz, imgsz)
|
385 |
-
print(
|
386 |
-
f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}"
|
387 |
-
% t
|
388 |
-
)
|
389 |
-
|
390 |
-
# Plots
|
391 |
-
if plots:
|
392 |
-
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
|
393 |
-
callbacks.run("on_val_end")
|
394 |
-
|
395 |
-
# Save JSON
|
396 |
-
if save_json and len(jdict):
|
397 |
-
w = (
|
398 |
-
Path(weights[0] if isinstance(weights, list) else weights).stem
|
399 |
-
if weights is not None
|
400 |
-
else ""
|
401 |
-
) # weights
|
402 |
-
anno_json = str(
|
403 |
-
Path(data.get("path", "../coco"))
|
404 |
-
/ "annotations/instances_val2017.json"
|
405 |
-
) # annotations json
|
406 |
-
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
|
407 |
-
print(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
|
408 |
-
with open(pred_json, "w") as f:
|
409 |
-
json.dump(jdict, f)
|
410 |
-
|
411 |
-
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
|
412 |
-
check_requirements(["pycocotools"])
|
413 |
-
from pycocotools.coco import COCO
|
414 |
-
from pycocotools.cocoeval import COCOeval
|
415 |
-
|
416 |
-
anno = COCO(anno_json) # init annotations api
|
417 |
-
pred = anno.loadRes(pred_json) # init predictions api
|
418 |
-
eval = COCOeval(anno, pred, "bbox")
|
419 |
-
if is_coco:
|
420 |
-
eval.params.imgIds = [
|
421 |
-
int(Path(x).stem) for x in dataloader.dataset.img_files
|
422 |
-
] # image IDs to evaluate
|
423 |
-
eval.evaluate()
|
424 |
-
eval.accumulate()
|
425 |
-
eval.summarize()
|
426 |
-
map, map50 = eval.stats[
|
427 |
-
:2
|
428 |
-
] # update results ([email protected]:0.95, [email protected])
|
429 |
-
except Exception as e:
|
430 |
-
print(f"pycocotools unable to run: {e}")
|
431 |
-
|
432 |
-
# Return results
|
433 |
-
model.float() # for training
|
434 |
-
if not training:
|
435 |
-
s = (
|
436 |
-
f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}"
|
437 |
-
if save_txt
|
438 |
-
else ""
|
439 |
-
)
|
440 |
-
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
|
441 |
-
maps = np.zeros(nc) + map
|
442 |
-
for i, c in enumerate(ap_class):
|
443 |
-
maps[c] = ap[i]
|
444 |
-
return (
|
445 |
-
(mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()),
|
446 |
-
maps,
|
447 |
-
t,
|
448 |
-
)
|
449 |
-
|
450 |
-
|
451 |
-
def parse_opt():
|
452 |
-
parser = argparse.ArgumentParser(prog="val.py")
|
453 |
-
parser.add_argument(
|
454 |
-
"--data",
|
455 |
-
type=str,
|
456 |
-
default="data/coco128.yaml",
|
457 |
-
help="dataset.yaml path",
|
458 |
-
)
|
459 |
-
parser.add_argument(
|
460 |
-
"--weights",
|
461 |
-
nargs="+",
|
462 |
-
type=str,
|
463 |
-
default="yolov5s.pt",
|
464 |
-
help="model.pt path(s)",
|
465 |
-
)
|
466 |
-
parser.add_argument(
|
467 |
-
"--batch-size", type=int, default=32, help="batch size"
|
468 |
-
)
|
469 |
-
parser.add_argument(
|
470 |
-
"--imgsz",
|
471 |
-
"--img",
|
472 |
-
"--img-size",
|
473 |
-
type=int,
|
474 |
-
default=640,
|
475 |
-
help="inference size (pixels)",
|
476 |
-
)
|
477 |
-
parser.add_argument(
|
478 |
-
"--conf-thres", type=float, default=0.001, help="confidence threshold"
|
479 |
-
)
|
480 |
-
parser.add_argument(
|
481 |
-
"--iou-thres", type=float, default=0.6, help="NMS IoU threshold"
|
482 |
-
)
|
483 |
-
parser.add_argument(
|
484 |
-
"--task", default="val", help="train, val, test, speed or study"
|
485 |
-
)
|
486 |
-
parser.add_argument(
|
487 |
-
"--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
|
488 |
-
)
|
489 |
-
parser.add_argument(
|
490 |
-
"--single-cls",
|
491 |
-
action="store_true",
|
492 |
-
help="treat as single-class dataset",
|
493 |
-
)
|
494 |
-
parser.add_argument(
|
495 |
-
"--augment", action="store_true", help="augmented inference"
|
496 |
-
)
|
497 |
-
parser.add_argument(
|
498 |
-
"--verbose", action="store_true", help="report mAP by class"
|
499 |
-
)
|
500 |
-
parser.add_argument(
|
501 |
-
"--save-txt", action="store_true", help="save results to *.txt"
|
502 |
-
)
|
503 |
-
parser.add_argument(
|
504 |
-
"--save-hybrid",
|
505 |
-
action="store_true",
|
506 |
-
help="save label+prediction hybrid results to *.txt",
|
507 |
-
)
|
508 |
-
parser.add_argument(
|
509 |
-
"--save-conf",
|
510 |
-
action="store_true",
|
511 |
-
help="save confidences in --save-txt labels",
|
512 |
-
)
|
513 |
-
parser.add_argument(
|
514 |
-
"--save-json",
|
515 |
-
action="store_true",
|
516 |
-
help="save a COCO-JSON results file",
|
517 |
-
)
|
518 |
-
parser.add_argument(
|
519 |
-
"--project", default="runs/val", help="save to project/name"
|
520 |
-
)
|
521 |
-
parser.add_argument("--name", default="exp", help="save to project/name")
|
522 |
-
parser.add_argument(
|
523 |
-
"--exist-ok",
|
524 |
-
action="store_true",
|
525 |
-
help="existing project/name ok, do not increment",
|
526 |
-
)
|
527 |
-
parser.add_argument(
|
528 |
-
"--half", action="store_true", help="use FP16 half-precision inference"
|
529 |
-
)
|
530 |
-
opt = parser.parse_args()
|
531 |
-
opt.save_json |= opt.data.endswith("coco.yaml")
|
532 |
-
opt.save_txt |= opt.save_hybrid
|
533 |
-
opt.data = check_yaml(opt.data) # check YAML
|
534 |
-
return opt
|
535 |
-
|
536 |
-
|
537 |
-
def main(opt):
|
538 |
-
set_logging()
|
539 |
-
print(
|
540 |
-
colorstr("val: ") + ", ".join(f"{k}={v}" for k, v in vars(opt).items())
|
541 |
-
)
|
542 |
-
check_requirements(
|
543 |
-
requirements=FILE.parent / "requirements.txt",
|
544 |
-
exclude=("tensorboard", "thop"),
|
545 |
-
)
|
546 |
-
|
547 |
-
if opt.task in ("train", "val", "test"): # run normally
|
548 |
-
run(**vars(opt))
|
549 |
-
|
550 |
-
elif opt.task == "speed": # speed benchmarks
|
551 |
-
for w in (
|
552 |
-
opt.weights if isinstance(opt.weights, list) else [opt.weights]
|
553 |
-
):
|
554 |
-
run(
|
555 |
-
opt.data,
|
556 |
-
weights=w,
|
557 |
-
batch_size=opt.batch_size,
|
558 |
-
imgsz=opt.imgsz,
|
559 |
-
conf_thres=0.25,
|
560 |
-
iou_thres=0.45,
|
561 |
-
save_json=False,
|
562 |
-
plots=False,
|
563 |
-
)
|
564 |
-
|
565 |
-
elif opt.task == "study": # run over a range of settings and save/plot
|
566 |
-
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
|
567 |
-
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
|
568 |
-
for w in (
|
569 |
-
opt.weights if isinstance(opt.weights, list) else [opt.weights]
|
570 |
-
):
|
571 |
-
f = f"study_{Path(opt.data).stem}_{Path(w).stem}.txt" # filename to save to
|
572 |
-
y = [] # y axis
|
573 |
-
for i in x: # img-size
|
574 |
-
print(f"\nRunning {f} point {i}...")
|
575 |
-
r, _, t = run(
|
576 |
-
opt.data,
|
577 |
-
weights=w,
|
578 |
-
batch_size=opt.batch_size,
|
579 |
-
imgsz=i,
|
580 |
-
conf_thres=opt.conf_thres,
|
581 |
-
iou_thres=opt.iou_thres,
|
582 |
-
save_json=opt.save_json,
|
583 |
-
plots=False,
|
584 |
-
)
|
585 |
-
y.append(r + t) # results and times
|
586 |
-
np.savetxt(f, y, fmt="%10.4g") # save
|
587 |
-
os.system("zip -r study.zip study_*.txt")
|
588 |
-
plot_study_txt(x=x) # plot
|
589 |
-
|
590 |
-
|
591 |
-
if __name__ == "__main__":
|
592 |
-
opt = parse_opt()
|
593 |
-
main(opt)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/GetExpandedChildWidth.js
DELETED
@@ -1,22 +0,0 @@
|
|
1 |
-
var GetExpandedChildWidth = function (child, parentWidth) {
|
2 |
-
if (parentWidth === undefined) {
|
3 |
-
parentWidth = this.width;
|
4 |
-
}
|
5 |
-
|
6 |
-
var childWidth;
|
7 |
-
var childConfig = child.rexSizer;
|
8 |
-
var padding = childConfig.padding;
|
9 |
-
if (this.orientation === 0) { // x
|
10 |
-
if ((childConfig.proportion > 0) && (this.proportionLength > 0)) {
|
11 |
-
childWidth = (childConfig.proportion * this.proportionLength);
|
12 |
-
}
|
13 |
-
} else { // y
|
14 |
-
if (childConfig.expand) {
|
15 |
-
var innerWidth = parentWidth - this.space.left - this.space.right;
|
16 |
-
childWidth = innerWidth - padding.left - padding.right;
|
17 |
-
}
|
18 |
-
}
|
19 |
-
return childWidth;
|
20 |
-
}
|
21 |
-
|
22 |
-
export default GetExpandedChildWidth;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AiBototicus/BucksAI-3/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: AiBototicus Autotrain Birds 48829118237
|
3 |
-
emoji: 🌖
|
4 |
-
colorFrom: green
|
5 |
-
colorTo: gray
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.24.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: bigscience-openrail-m
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alfasign/dIFFU/README.md
DELETED
@@ -1,12 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: 411 Models Toy World
|
3 |
-
emoji: 🪅🌐
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.15.0
|
8 |
-
app_file: app.py
|
9 |
-
duplicated_from: Yntec/ToyWorld
|
10 |
-
---
|
11 |
-
|
12 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlphaGPT/PaperSummary/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: PaperSummary
|
3 |
-
emoji: 📚
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: blue
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.34.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: cc-by-nc-nd-4.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/README.md
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
# Distillation for quantization on Textual Inversion models to personalize text2image
|
2 |
-
|
3 |
-
[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images._By using just 3-5 images new concepts can be taught to Stable Diffusion and the model personalized on your own images_
|
4 |
-
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.
|
5 |
-
We have enabled distillation for quantization in `textual_inversion.py` to do quantization aware training as well as distillation on the model generated by Textual Inversion method.
|
6 |
-
|
7 |
-
## Installing the dependencies
|
8 |
-
|
9 |
-
Before running the scripts, make sure to install the library's training dependencies:
|
10 |
-
|
11 |
-
```bash
|
12 |
-
pip install -r requirements.txt
|
13 |
-
```
|
14 |
-
|
15 |
-
## Prepare Datasets
|
16 |
-
|
17 |
-
One picture which is from the huggingface datasets [sd-concepts-library/dicoo2](https://huggingface.co/sd-concepts-library/dicoo2) is needed, and save it to the `./dicoo` directory. The picture is shown below:
|
18 |
-
|
19 |
-
<a href="https://huggingface.co/sd-concepts-library/dicoo2/blob/main/concept_images/1.jpeg">
|
20 |
-
<img src="https://huggingface.co/sd-concepts-library/dicoo2/resolve/main/concept_images/1.jpeg" width = "300" height="300">
|
21 |
-
</a>
|
22 |
-
|
23 |
-
## Get a FP32 Textual Inversion model
|
24 |
-
|
25 |
-
Use the following command to fine-tune the Stable Diffusion model on the above dataset to obtain the FP32 Textual Inversion model.
|
26 |
-
|
27 |
-
```bash
|
28 |
-
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
|
29 |
-
export DATA_DIR="./dicoo"
|
30 |
-
|
31 |
-
accelerate launch textual_inversion.py \
|
32 |
-
--pretrained_model_name_or_path=$MODEL_NAME \
|
33 |
-
--train_data_dir=$DATA_DIR \
|
34 |
-
--learnable_property="object" \
|
35 |
-
--placeholder_token="<dicoo>" --initializer_token="toy" \
|
36 |
-
--resolution=512 \
|
37 |
-
--train_batch_size=1 \
|
38 |
-
--gradient_accumulation_steps=4 \
|
39 |
-
--max_train_steps=3000 \
|
40 |
-
--learning_rate=5.0e-04 --scale_lr \
|
41 |
-
--lr_scheduler="constant" \
|
42 |
-
--lr_warmup_steps=0 \
|
43 |
-
--output_dir="dicoo_model"
|
44 |
-
```
|
45 |
-
|
46 |
-
## Do distillation for quantization
|
47 |
-
|
48 |
-
Distillation for quantization is a method that combines [intermediate layer knowledge distillation](https://github.com/intel/neural-compressor/blob/master/docs/source/distillation.md#intermediate-layer-knowledge-distillation) and [quantization aware training](https://github.com/intel/neural-compressor/blob/master/docs/source/quantization.md#quantization-aware-training) in the same training process to improve the performance of the quantized model. Provided a FP32 model, the distillation for quantization approach will take this model itself as the teacher model and transfer the knowledges of the specified layers to the student model, i.e. quantized version of the FP32 model, during the quantization aware training process.
|
49 |
-
|
50 |
-
Once you have the FP32 Textual Inversion model, the following command will take the FP32 Textual Inversion model as input to do distillation for quantization and generate the INT8 Textual Inversion model.
|
51 |
-
|
52 |
-
```bash
|
53 |
-
export FP32_MODEL_NAME="./dicoo_model"
|
54 |
-
export DATA_DIR="./dicoo"
|
55 |
-
|
56 |
-
accelerate launch textual_inversion.py \
|
57 |
-
--pretrained_model_name_or_path=$FP32_MODEL_NAME \
|
58 |
-
--train_data_dir=$DATA_DIR \
|
59 |
-
--use_ema --learnable_property="object" \
|
60 |
-
--placeholder_token="<dicoo>" --initializer_token="toy" \
|
61 |
-
--resolution=512 \
|
62 |
-
--train_batch_size=1 \
|
63 |
-
--gradient_accumulation_steps=4 \
|
64 |
-
--max_train_steps=300 \
|
65 |
-
--learning_rate=5.0e-04 --max_grad_norm=3 \
|
66 |
-
--lr_scheduler="constant" \
|
67 |
-
--lr_warmup_steps=0 \
|
68 |
-
--output_dir="int8_model" \
|
69 |
-
--do_quantization --do_distillation --verify_loading
|
70 |
-
```
|
71 |
-
|
72 |
-
After the distillation for quantization process, the quantized UNet would be 4 times smaller (3279MB -> 827MB).
|
73 |
-
|
74 |
-
## Inference
|
75 |
-
|
76 |
-
Once you have trained a INT8 model with the above command, the inference can be done simply using the `text2images.py` script. Make sure to include the `placeholder_token` in your prompt.
|
77 |
-
|
78 |
-
```bash
|
79 |
-
export INT8_MODEL_NAME="./int8_model"
|
80 |
-
|
81 |
-
python text2images.py \
|
82 |
-
--pretrained_model_name_or_path=$INT8_MODEL_NAME \
|
83 |
-
--caption "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings." \
|
84 |
-
--images_num 4
|
85 |
-
```
|
86 |
-
|
87 |
-
Here is the comparison of images generated by the FP32 model (left) and INT8 model (right) respectively:
|
88 |
-
|
89 |
-
<p float="left">
|
90 |
-
<img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/FP32.png" width = "300" height = "300" alt="FP32" align=center />
|
91 |
-
<img src="https://huggingface.co/datasets/Intel/textual_inversion_dicoo_dfq/resolve/main/INT8.png" width = "300" height = "300" alt="INT8" align=center />
|
92 |
-
</p>
|
93 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/vq_diffusion/__init__.py
DELETED
File without changes
|
spaces/Andy1621/uniformer_image_demo/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Uniformer_image_demo
|
3 |
-
emoji: 📷
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: green
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.0.3
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/albu_example/README.md
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
# Albu Example
|
2 |
-
|
3 |
-
[OTHERS]
|
4 |
-
|
5 |
-
```
|
6 |
-
@article{2018arXiv180906839B,
|
7 |
-
author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin},
|
8 |
-
title = "{Albumentations: fast and flexible image augmentations}",
|
9 |
-
journal = {ArXiv e-prints},
|
10 |
-
eprint = {1809.06839},
|
11 |
-
year = 2018
|
12 |
-
}
|
13 |
-
```
|
14 |
-
|
15 |
-
## Results and Models
|
16 |
-
|
17 |
-
| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
|
18 |
-
|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
|
19 |
-
| R-50 | pytorch | 1x | 4.4 | 16.6 | 38.0 | 34.5 |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
DELETED
@@ -1,44 +0,0 @@
|
|
1 |
-
_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://detectron/resnet101_caffe',
|
4 |
-
backbone=dict(depth=101))
|
5 |
-
img_norm_cfg = dict(
|
6 |
-
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
|
7 |
-
train_pipeline = [
|
8 |
-
dict(type='LoadImageFromFile'),
|
9 |
-
dict(type='LoadAnnotations', with_bbox=True),
|
10 |
-
dict(
|
11 |
-
type='Resize',
|
12 |
-
img_scale=[(1333, 640), (1333, 800)],
|
13 |
-
multiscale_mode='value',
|
14 |
-
keep_ratio=True),
|
15 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
16 |
-
dict(type='Normalize', **img_norm_cfg),
|
17 |
-
dict(type='Pad', size_divisor=32),
|
18 |
-
dict(type='DefaultFormatBundle'),
|
19 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
|
20 |
-
]
|
21 |
-
test_pipeline = [
|
22 |
-
dict(type='LoadImageFromFile'),
|
23 |
-
dict(
|
24 |
-
type='MultiScaleFlipAug',
|
25 |
-
img_scale=(1333, 800),
|
26 |
-
flip=False,
|
27 |
-
transforms=[
|
28 |
-
dict(type='Resize', keep_ratio=True),
|
29 |
-
dict(type='RandomFlip'),
|
30 |
-
dict(type='Normalize', **img_norm_cfg),
|
31 |
-
dict(type='Pad', size_divisor=32),
|
32 |
-
dict(type='ImageToTensor', keys=['img']),
|
33 |
-
dict(type='Collect', keys=['img']),
|
34 |
-
])
|
35 |
-
]
|
36 |
-
data = dict(
|
37 |
-
samples_per_gpu=2,
|
38 |
-
workers_per_gpu=2,
|
39 |
-
train=dict(pipeline=train_pipeline),
|
40 |
-
val=dict(pipeline=test_pipeline),
|
41 |
-
test=dict(pipeline=test_pipeline))
|
42 |
-
# learning policy
|
43 |
-
lr_config = dict(step=[16, 22])
|
44 |
-
runner = dict(type='EpochBasedRunner', max_epochs=24)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
_base_ = [
|
2 |
-
'../_base_/models/deeplabv3_r50-d8.py',
|
3 |
-
'../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
|
4 |
-
'../_base_/schedules/schedule_20k.py'
|
5 |
-
]
|
6 |
-
model = dict(
|
7 |
-
decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x512_20k_voc12aug.py
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
_base_ = './pspnet_r50-d8_512x512_20k_voc12aug.py'
|
2 |
-
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
|
|
|
|
|
|
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/visualization/__init__.py
DELETED
@@ -1,9 +0,0 @@
|
|
1 |
-
# Copyright (c) OpenMMLab. All rights reserved.
|
2 |
-
from .color import Color, color_val
|
3 |
-
from .image import imshow, imshow_bboxes, imshow_det_bboxes
|
4 |
-
from .optflow import flow2rgb, flowshow, make_color_wheel
|
5 |
-
|
6 |
-
__all__ = [
|
7 |
-
'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes',
|
8 |
-
'flowshow', 'flow2rgb', 'make_color_wheel'
|
9 |
-
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/ArkanDash/rvc-models-new/app.py
DELETED
@@ -1,735 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import glob
|
3 |
-
import json
|
4 |
-
import traceback
|
5 |
-
import logging
|
6 |
-
import gradio as gr
|
7 |
-
import numpy as np
|
8 |
-
import librosa
|
9 |
-
import torch
|
10 |
-
import asyncio
|
11 |
-
import edge_tts
|
12 |
-
import yt_dlp
|
13 |
-
import ffmpeg
|
14 |
-
import subprocess
|
15 |
-
import sys
|
16 |
-
import io
|
17 |
-
import wave
|
18 |
-
from datetime import datetime
|
19 |
-
from fairseq import checkpoint_utils
|
20 |
-
from lib.infer_pack.models import (
|
21 |
-
SynthesizerTrnMs256NSFsid,
|
22 |
-
SynthesizerTrnMs256NSFsid_nono,
|
23 |
-
SynthesizerTrnMs768NSFsid,
|
24 |
-
SynthesizerTrnMs768NSFsid_nono,
|
25 |
-
)
|
26 |
-
from vc_infer_pipeline import VC
|
27 |
-
from config import Config
|
28 |
-
config = Config()
|
29 |
-
logging.getLogger("numba").setLevel(logging.WARNING)
|
30 |
-
spaces = os.getenv("SYSTEM") == "spaces"
|
31 |
-
force_support = None
|
32 |
-
if config.unsupported is False:
|
33 |
-
if config.device == "mps" or config.device == "cpu":
|
34 |
-
force_support = False
|
35 |
-
else:
|
36 |
-
force_support = True
|
37 |
-
|
38 |
-
audio_mode = []
|
39 |
-
f0method_mode = []
|
40 |
-
f0method_info = ""
|
41 |
-
|
42 |
-
if force_support is False or spaces is True:
|
43 |
-
if spaces is True:
|
44 |
-
audio_mode = ["Upload audio", "TTS Audio"]
|
45 |
-
else:
|
46 |
-
audio_mode = ["Input path", "Upload audio", "TTS Audio"]
|
47 |
-
f0method_mode = ["pm", "harvest"]
|
48 |
-
f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better). (Default: PM)"
|
49 |
-
else:
|
50 |
-
audio_mode = ["Input path", "Upload audio", "Youtube", "TTS Audio"]
|
51 |
-
f0method_mode = ["pm", "harvest", "crepe"]
|
52 |
-
f0method_info = "PM is fast, Harvest is good but extremely slow, Rvmpe is alternative to harvest (might be better), and Crepe effect is good but requires GPU (Default: PM)"
|
53 |
-
|
54 |
-
if os.path.isfile("rmvpe.pt"):
|
55 |
-
f0method_mode.insert(2, "rmvpe")
|
56 |
-
|
57 |
-
def create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, file_index):
|
58 |
-
def vc_fn(
|
59 |
-
vc_audio_mode,
|
60 |
-
vc_input,
|
61 |
-
vc_upload,
|
62 |
-
tts_text,
|
63 |
-
tts_voice,
|
64 |
-
f0_up_key,
|
65 |
-
f0_method,
|
66 |
-
index_rate,
|
67 |
-
filter_radius,
|
68 |
-
resample_sr,
|
69 |
-
rms_mix_rate,
|
70 |
-
protect,
|
71 |
-
):
|
72 |
-
try:
|
73 |
-
logs = []
|
74 |
-
print(f"Converting using {model_name}...")
|
75 |
-
logs.append(f"Converting using {model_name}...")
|
76 |
-
yield "\n".join(logs), None
|
77 |
-
if vc_audio_mode == "Input path" or "Youtube" and vc_input != "":
|
78 |
-
audio, sr = librosa.load(vc_input, sr=16000, mono=True)
|
79 |
-
elif vc_audio_mode == "Upload audio":
|
80 |
-
if vc_upload is None:
|
81 |
-
return "You need to upload an audio", None
|
82 |
-
sampling_rate, audio = vc_upload
|
83 |
-
duration = audio.shape[0] / sampling_rate
|
84 |
-
if duration > 20 and spaces:
|
85 |
-
return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
|
86 |
-
audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
|
87 |
-
if len(audio.shape) > 1:
|
88 |
-
audio = librosa.to_mono(audio.transpose(1, 0))
|
89 |
-
if sampling_rate != 16000:
|
90 |
-
audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
|
91 |
-
elif vc_audio_mode == "TTS Audio":
|
92 |
-
if len(tts_text) > 100 and spaces:
|
93 |
-
return "Text is too long", None
|
94 |
-
if tts_text is None or tts_voice is None:
|
95 |
-
return "You need to enter text and select a voice", None
|
96 |
-
asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
|
97 |
-
audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
|
98 |
-
vc_input = "tts.mp3"
|
99 |
-
times = [0, 0, 0]
|
100 |
-
f0_up_key = int(f0_up_key)
|
101 |
-
audio_opt = vc.pipeline(
|
102 |
-
hubert_model,
|
103 |
-
net_g,
|
104 |
-
0,
|
105 |
-
audio,
|
106 |
-
vc_input,
|
107 |
-
times,
|
108 |
-
f0_up_key,
|
109 |
-
f0_method,
|
110 |
-
file_index,
|
111 |
-
# file_big_npy,
|
112 |
-
index_rate,
|
113 |
-
if_f0,
|
114 |
-
filter_radius,
|
115 |
-
tgt_sr,
|
116 |
-
resample_sr,
|
117 |
-
rms_mix_rate,
|
118 |
-
version,
|
119 |
-
protect,
|
120 |
-
f0_file=None,
|
121 |
-
)
|
122 |
-
info = f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
|
123 |
-
print(f"{model_name} | {info}")
|
124 |
-
logs.append(f"Successfully Convert {model_name}\n{info}")
|
125 |
-
yield "\n".join(logs), (tgt_sr, audio_opt)
|
126 |
-
except Exception as err:
|
127 |
-
info = traceback.format_exc()
|
128 |
-
print(info)
|
129 |
-
primt(f"Error when using {model_name}.\n{str(err)}")
|
130 |
-
yield info, None
|
131 |
-
return vc_fn
|
132 |
-
|
133 |
-
def load_model():
|
134 |
-
categories = []
|
135 |
-
if os.path.isfile("weights/folder_info.json"):
|
136 |
-
for _, w_dirs, _ in os.walk(f"weights"):
|
137 |
-
category_count_total = len(w_dirs)
|
138 |
-
category_count = 1
|
139 |
-
with open("weights/folder_info.json", "r", encoding="utf-8") as f:
|
140 |
-
folder_info = json.load(f)
|
141 |
-
for category_name, category_info in folder_info.items():
|
142 |
-
if not category_info['enable']:
|
143 |
-
continue
|
144 |
-
category_title = category_info['title']
|
145 |
-
category_folder = category_info['folder_path']
|
146 |
-
description = category_info['description']
|
147 |
-
print(f"Load {category_title} [{category_count}/{category_count_total}]")
|
148 |
-
models = []
|
149 |
-
for _, m_dirs, _ in os.walk(f"weights/{category_folder}"):
|
150 |
-
model_count_total = len(m_dirs)
|
151 |
-
model_count = 1
|
152 |
-
with open(f"weights/{category_folder}/model_info.json", "r", encoding="utf-8") as f:
|
153 |
-
models_info = json.load(f)
|
154 |
-
for character_name, info in models_info.items():
|
155 |
-
if not info['enable']:
|
156 |
-
continue
|
157 |
-
model_title = info['title']
|
158 |
-
model_name = info['model_path']
|
159 |
-
model_author = info.get("author", None)
|
160 |
-
model_cover = f"weights/{category_folder}/{character_name}/{info['cover']}"
|
161 |
-
model_index = f"weights/{category_folder}/{character_name}/{info['feature_retrieval_library']}"
|
162 |
-
cpt = torch.load(f"weights/{category_folder}/{character_name}/{model_name}", map_location="cpu")
|
163 |
-
tgt_sr = cpt["config"][-1]
|
164 |
-
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
|
165 |
-
if_f0 = cpt.get("f0", 1)
|
166 |
-
version = cpt.get("version", "v1")
|
167 |
-
if version == "v1":
|
168 |
-
if if_f0 == 1:
|
169 |
-
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
|
170 |
-
else:
|
171 |
-
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
|
172 |
-
model_version = "V1"
|
173 |
-
elif version == "v2":
|
174 |
-
if if_f0 == 1:
|
175 |
-
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
|
176 |
-
else:
|
177 |
-
net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
|
178 |
-
model_version = "V2"
|
179 |
-
del net_g.enc_q
|
180 |
-
print(net_g.load_state_dict(cpt["weight"], strict=False))
|
181 |
-
net_g.eval().to(config.device)
|
182 |
-
if config.is_half:
|
183 |
-
net_g = net_g.half()
|
184 |
-
else:
|
185 |
-
net_g = net_g.float()
|
186 |
-
vc = VC(tgt_sr, config)
|
187 |
-
print(f"Model loaded [{model_count}/{model_count_total}]: {character_name} / {info['feature_retrieval_library']} | ({model_version})")
|
188 |
-
model_count += 1
|
189 |
-
models.append((character_name, model_title, model_author, model_cover, model_version, create_vc_fn(model_name, tgt_sr, net_g, vc, if_f0, version, model_index)))
|
190 |
-
category_count += 1
|
191 |
-
categories.append([category_title, description, models])
|
192 |
-
elif os.path.exists("weights"):
|
193 |
-
models = []
|
194 |
-
for w_root, w_dirs, _ in os.walk("weights"):
|
195 |
-
model_count = 1
|
196 |
-
for sub_dir in w_dirs:
|
197 |
-
pth_files = glob.glob(f"weights/{sub_dir}/*.pth")
|
198 |
-
index_files = glob.glob(f"weights/{sub_dir}/*.index")
|
199 |
-
if pth_files == []:
|
200 |
-
print(f"Model [{model_count}/{len(w_dirs)}]: No Model file detected, skipping...")
|
201 |
-
continue
|
202 |
-
cpt = torch.load(pth_files[0])
|
203 |
-
tgt_sr = cpt["config"][-1]
|
204 |
-
cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
|
205 |
-
if_f0 = cpt.get("f0", 1)
|
206 |
-
version = cpt.get("version", "v1")
|
207 |
-
if version == "v1":
|
208 |
-
if if_f0 == 1:
|
209 |
-
net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
|
210 |
-
else:
|
211 |
-
net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
|
212 |
-
model_version = "V1"
|
213 |
-
elif version == "v2":
|
214 |
-
if if_f0 == 1:
|
215 |
-
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
|
216 |
-
else:
|
217 |
-
net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
|
218 |
-
model_version = "V2"
|
219 |
-
del net_g.enc_q
|
220 |
-
print(net_g.load_state_dict(cpt["weight"], strict=False))
|
221 |
-
net_g.eval().to(config.device)
|
222 |
-
if config.is_half:
|
223 |
-
net_g = net_g.half()
|
224 |
-
else:
|
225 |
-
net_g = net_g.float()
|
226 |
-
vc = VC(tgt_sr, config)
|
227 |
-
if index_files == []:
|
228 |
-
print("Warning: No Index file detected!")
|
229 |
-
index_info = "None"
|
230 |
-
model_index = ""
|
231 |
-
else:
|
232 |
-
index_info = index_files[0]
|
233 |
-
model_index = index_files[0]
|
234 |
-
print(f"Model loaded [{model_count}/{len(w_dirs)}]: {index_files[0]} / {index_info} | ({model_version})")
|
235 |
-
model_count += 1
|
236 |
-
models.append((index_files[0][:-4], index_files[0][:-4], "", "", model_version, create_vc_fn(index_files[0], tgt_sr, net_g, vc, if_f0, version, model_index)))
|
237 |
-
categories.append(["Models", "", models])
|
238 |
-
else:
|
239 |
-
categories = []
|
240 |
-
return categories
|
241 |
-
|
242 |
-
def download_audio(url, audio_provider):
|
243 |
-
logs = []
|
244 |
-
if url == "":
|
245 |
-
logs.append("URL required!")
|
246 |
-
yield None, "\n".join(logs)
|
247 |
-
return None, "\n".join(logs)
|
248 |
-
if not os.path.exists("dl_audio"):
|
249 |
-
os.mkdir("dl_audio")
|
250 |
-
if audio_provider == "Youtube":
|
251 |
-
logs.append("Downloading the audio...")
|
252 |
-
yield None, "\n".join(logs)
|
253 |
-
ydl_opts = {
|
254 |
-
'noplaylist': True,
|
255 |
-
'format': 'bestaudio/best',
|
256 |
-
'postprocessors': [{
|
257 |
-
'key': 'FFmpegExtractAudio',
|
258 |
-
'preferredcodec': 'wav',
|
259 |
-
}],
|
260 |
-
"outtmpl": 'dl_audio/audio',
|
261 |
-
}
|
262 |
-
audio_path = "dl_audio/audio.wav"
|
263 |
-
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
|
264 |
-
ydl.download([url])
|
265 |
-
logs.append("Download Complete.")
|
266 |
-
yield audio_path, "\n".join(logs)
|
267 |
-
|
268 |
-
def cut_vocal_and_inst(split_model):
|
269 |
-
logs = []
|
270 |
-
logs.append("Starting the audio splitting process...")
|
271 |
-
yield "\n".join(logs), None, None, None
|
272 |
-
command = f"demucs --two-stems=vocals -n {split_model} dl_audio/audio.wav -o output"
|
273 |
-
result = subprocess.Popen(command.split(), stdout=subprocess.PIPE, text=True)
|
274 |
-
for line in result.stdout:
|
275 |
-
logs.append(line)
|
276 |
-
yield "\n".join(logs), None, None, None
|
277 |
-
print(result.stdout)
|
278 |
-
vocal = f"output/{split_model}/audio/vocals.wav"
|
279 |
-
inst = f"output/{split_model}/audio/no_vocals.wav"
|
280 |
-
logs.append("Audio splitting complete.")
|
281 |
-
yield "\n".join(logs), vocal, inst, vocal
|
282 |
-
|
283 |
-
def combine_vocal_and_inst(audio_data, vocal_volume, inst_volume, split_model):
|
284 |
-
if not os.path.exists("output/result"):
|
285 |
-
os.mkdir("output/result")
|
286 |
-
vocal_path = "output/result/output.wav"
|
287 |
-
output_path = "output/result/combine.mp3"
|
288 |
-
inst_path = f"output/{split_model}/audio/no_vocals.wav"
|
289 |
-
with wave.open(vocal_path, "w") as wave_file:
|
290 |
-
wave_file.setnchannels(1)
|
291 |
-
wave_file.setsampwidth(2)
|
292 |
-
wave_file.setframerate(audio_data[0])
|
293 |
-
wave_file.writeframes(audio_data[1].tobytes())
|
294 |
-
command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [0:a]volume={inst_volume}[i];[1:a]volume={vocal_volume}[v];[i][v]amix=inputs=2:duration=longest[a] -map [a] -b:a 320k -c:a libmp3lame {output_path}'
|
295 |
-
result = subprocess.run(command.split(), stdout=subprocess.PIPE)
|
296 |
-
print(result.stdout.decode())
|
297 |
-
return output_path
|
298 |
-
|
299 |
-
def load_hubert():
|
300 |
-
global hubert_model
|
301 |
-
models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
|
302 |
-
["hubert_base.pt"],
|
303 |
-
suffix="",
|
304 |
-
)
|
305 |
-
hubert_model = models[0]
|
306 |
-
hubert_model = hubert_model.to(config.device)
|
307 |
-
if config.is_half:
|
308 |
-
hubert_model = hubert_model.half()
|
309 |
-
else:
|
310 |
-
hubert_model = hubert_model.float()
|
311 |
-
hubert_model.eval()
|
312 |
-
|
313 |
-
def change_audio_mode(vc_audio_mode):
|
314 |
-
if vc_audio_mode == "Input path":
|
315 |
-
return (
|
316 |
-
# Input & Upload
|
317 |
-
gr.Textbox.update(visible=True),
|
318 |
-
gr.Checkbox.update(visible=False),
|
319 |
-
gr.Audio.update(visible=False),
|
320 |
-
# Youtube
|
321 |
-
gr.Dropdown.update(visible=False),
|
322 |
-
gr.Textbox.update(visible=False),
|
323 |
-
gr.Textbox.update(visible=False),
|
324 |
-
gr.Button.update(visible=False),
|
325 |
-
# Splitter
|
326 |
-
gr.Dropdown.update(visible=False),
|
327 |
-
gr.Textbox.update(visible=False),
|
328 |
-
gr.Button.update(visible=False),
|
329 |
-
gr.Audio.update(visible=False),
|
330 |
-
gr.Audio.update(visible=False),
|
331 |
-
gr.Audio.update(visible=False),
|
332 |
-
gr.Slider.update(visible=False),
|
333 |
-
gr.Slider.update(visible=False),
|
334 |
-
gr.Audio.update(visible=False),
|
335 |
-
gr.Button.update(visible=False),
|
336 |
-
# TTS
|
337 |
-
gr.Textbox.update(visible=False),
|
338 |
-
gr.Dropdown.update(visible=False)
|
339 |
-
)
|
340 |
-
elif vc_audio_mode == "Upload audio":
|
341 |
-
return (
|
342 |
-
# Input & Upload
|
343 |
-
gr.Textbox.update(visible=False),
|
344 |
-
gr.Checkbox.update(visible=True),
|
345 |
-
gr.Audio.update(visible=True),
|
346 |
-
# Youtube
|
347 |
-
gr.Dropdown.update(visible=False),
|
348 |
-
gr.Textbox.update(visible=False),
|
349 |
-
gr.Textbox.update(visible=False),
|
350 |
-
gr.Button.update(visible=False),
|
351 |
-
# Splitter
|
352 |
-
gr.Dropdown.update(visible=False),
|
353 |
-
gr.Textbox.update(visible=False),
|
354 |
-
gr.Button.update(visible=False),
|
355 |
-
gr.Audio.update(visible=False),
|
356 |
-
gr.Audio.update(visible=False),
|
357 |
-
gr.Audio.update(visible=False),
|
358 |
-
gr.Slider.update(visible=False),
|
359 |
-
gr.Slider.update(visible=False),
|
360 |
-
gr.Audio.update(visible=False),
|
361 |
-
gr.Button.update(visible=False),
|
362 |
-
# TTS
|
363 |
-
gr.Textbox.update(visible=False),
|
364 |
-
gr.Dropdown.update(visible=False)
|
365 |
-
)
|
366 |
-
elif vc_audio_mode == "Youtube":
|
367 |
-
return (
|
368 |
-
# Input & Upload
|
369 |
-
gr.Textbox.update(visible=False),
|
370 |
-
gr.Checkbox.update(visible=False),
|
371 |
-
gr.Audio.update(visible=False),
|
372 |
-
# Youtube
|
373 |
-
gr.Dropdown.update(visible=True),
|
374 |
-
gr.Textbox.update(visible=True),
|
375 |
-
gr.Textbox.update(visible=True),
|
376 |
-
gr.Button.update(visible=True),
|
377 |
-
# Splitter
|
378 |
-
gr.Dropdown.update(visible=True),
|
379 |
-
gr.Textbox.update(visible=True),
|
380 |
-
gr.Button.update(visible=True),
|
381 |
-
gr.Audio.update(visible=True),
|
382 |
-
gr.Audio.update(visible=True),
|
383 |
-
gr.Audio.update(visible=True),
|
384 |
-
gr.Slider.update(visible=True),
|
385 |
-
gr.Slider.update(visible=True),
|
386 |
-
gr.Audio.update(visible=True),
|
387 |
-
gr.Button.update(visible=True),
|
388 |
-
# TTS
|
389 |
-
gr.Textbox.update(visible=False),
|
390 |
-
gr.Dropdown.update(visible=False)
|
391 |
-
)
|
392 |
-
elif vc_audio_mode == "TTS Audio":
|
393 |
-
return (
|
394 |
-
# Input & Upload
|
395 |
-
gr.Textbox.update(visible=False),
|
396 |
-
gr.Checkbox.update(visible=False),
|
397 |
-
gr.Audio.update(visible=False),
|
398 |
-
# Youtube
|
399 |
-
gr.Dropdown.update(visible=False),
|
400 |
-
gr.Textbox.update(visible=False),
|
401 |
-
gr.Textbox.update(visible=False),
|
402 |
-
gr.Button.update(visible=False),
|
403 |
-
# Splitter
|
404 |
-
gr.Dropdown.update(visible=False),
|
405 |
-
gr.Textbox.update(visible=False),
|
406 |
-
gr.Button.update(visible=False),
|
407 |
-
gr.Audio.update(visible=False),
|
408 |
-
gr.Audio.update(visible=False),
|
409 |
-
gr.Audio.update(visible=False),
|
410 |
-
gr.Slider.update(visible=False),
|
411 |
-
gr.Slider.update(visible=False),
|
412 |
-
gr.Audio.update(visible=False),
|
413 |
-
gr.Button.update(visible=False),
|
414 |
-
# TTS
|
415 |
-
gr.Textbox.update(visible=True),
|
416 |
-
gr.Dropdown.update(visible=True)
|
417 |
-
)
|
418 |
-
|
419 |
-
def use_microphone(microphone):
|
420 |
-
if microphone == True:
|
421 |
-
return gr.Audio.update(source="microphone")
|
422 |
-
else:
|
423 |
-
return gr.Audio.update(source="upload")
|
424 |
-
|
425 |
-
if __name__ == '__main__':
|
426 |
-
load_hubert()
|
427 |
-
categories = load_model()
|
428 |
-
tts_voice_list = asyncio.new_event_loop().run_until_complete(edge_tts.list_voices())
|
429 |
-
voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
|
430 |
-
with gr.Blocks() as app:
|
431 |
-
gr.Markdown(
|
432 |
-
"<div align='center'>\n\n"+
|
433 |
-
"# RVC Genshin Impact\n\n"+
|
434 |
-
"### Recommended to use Google Colab to use other character and feature.\n\n"+
|
435 |
-
"[](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n"+
|
436 |
-
"</div>\n\n"+
|
437 |
-
"[](https://github.com/ArkanDash/Multi-Model-RVC-Inference)"
|
438 |
-
)
|
439 |
-
if categories == []:
|
440 |
-
gr.Markdown(
|
441 |
-
"<div align='center'>\n\n"+
|
442 |
-
"## No model found, please add the model into weights folder\n\n"+
|
443 |
-
"</div>"
|
444 |
-
)
|
445 |
-
for (folder_title, description, models) in categories:
|
446 |
-
with gr.TabItem(folder_title):
|
447 |
-
if description:
|
448 |
-
gr.Markdown(f"### <center> {description}")
|
449 |
-
with gr.Tabs():
|
450 |
-
if not models:
|
451 |
-
gr.Markdown("# <center> No Model Loaded.")
|
452 |
-
gr.Markdown("## <center> Please add the model or fix your model path.")
|
453 |
-
continue
|
454 |
-
for (name, title, author, cover, model_version, vc_fn) in models:
|
455 |
-
with gr.TabItem(name):
|
456 |
-
with gr.Row():
|
457 |
-
gr.Markdown(
|
458 |
-
'<div align="center">'
|
459 |
-
f'<div>{title}</div>\n'+
|
460 |
-
f'<div>RVC {model_version} Model</div>\n'+
|
461 |
-
(f'<div>Model author: {author}</div>' if author else "")+
|
462 |
-
(f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
|
463 |
-
'</div>'
|
464 |
-
)
|
465 |
-
with gr.Row():
|
466 |
-
if spaces is False:
|
467 |
-
with gr.TabItem("Input"):
|
468 |
-
with gr.Row():
|
469 |
-
with gr.Column():
|
470 |
-
vc_audio_mode = gr.Dropdown(label="Input voice", choices=audio_mode, allow_custom_value=False, value="Upload audio")
|
471 |
-
# Input
|
472 |
-
vc_input = gr.Textbox(label="Input audio path", visible=False)
|
473 |
-
# Upload
|
474 |
-
vc_microphone_mode = gr.Checkbox(label="Use Microphone", value=False, visible=True, interactive=True)
|
475 |
-
vc_upload = gr.Audio(label="Upload audio file", source="upload", visible=True, interactive=True)
|
476 |
-
# Youtube
|
477 |
-
vc_download_audio = gr.Dropdown(label="Provider", choices=["Youtube"], allow_custom_value=False, visible=False, value="Youtube", info="Select provider (Default: Youtube)")
|
478 |
-
vc_link = gr.Textbox(label="Youtube URL", visible=False, info="Example: https://www.youtube.com/watch?v=Nc0sB1Bmf-A", placeholder="https://www.youtube.com/watch?v=...")
|
479 |
-
vc_log_yt = gr.Textbox(label="Output Information", visible=False, interactive=False)
|
480 |
-
vc_download_button = gr.Button("Download Audio", variant="primary", visible=False)
|
481 |
-
vc_audio_preview = gr.Audio(label="Audio Preview", visible=False)
|
482 |
-
# TTS
|
483 |
-
tts_text = gr.Textbox(label="TTS text", info="Text to speech input", visible=False)
|
484 |
-
tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
|
485 |
-
with gr.Column():
|
486 |
-
vc_split_model = gr.Dropdown(label="Splitter Model", choices=["hdemucs_mmi", "htdemucs", "htdemucs_ft", "mdx", "mdx_q", "mdx_extra_q"], allow_custom_value=False, visible=False, value="htdemucs", info="Select the splitter model (Default: htdemucs)")
|
487 |
-
vc_split_log = gr.Textbox(label="Output Information", visible=False, interactive=False)
|
488 |
-
vc_split = gr.Button("Split Audio", variant="primary", visible=False)
|
489 |
-
vc_vocal_preview = gr.Audio(label="Vocal Preview", visible=False)
|
490 |
-
vc_inst_preview = gr.Audio(label="Instrumental Preview", visible=False)
|
491 |
-
with gr.TabItem("Convert"):
|
492 |
-
with gr.Row():
|
493 |
-
with gr.Column():
|
494 |
-
vc_transform0 = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change female to male voice')
|
495 |
-
f0method0 = gr.Radio(
|
496 |
-
label="Pitch extraction algorithm",
|
497 |
-
info=f0method_info,
|
498 |
-
choices=f0method_mode,
|
499 |
-
value="pm",
|
500 |
-
interactive=True
|
501 |
-
)
|
502 |
-
index_rate1 = gr.Slider(
|
503 |
-
minimum=0,
|
504 |
-
maximum=1,
|
505 |
-
label="Retrieval feature ratio",
|
506 |
-
info="(Default: 0.7)",
|
507 |
-
value=0.7,
|
508 |
-
interactive=True,
|
509 |
-
)
|
510 |
-
filter_radius0 = gr.Slider(
|
511 |
-
minimum=0,
|
512 |
-
maximum=7,
|
513 |
-
label="Apply Median Filtering",
|
514 |
-
info="The value represents the filter radius and can reduce breathiness.",
|
515 |
-
value=3,
|
516 |
-
step=1,
|
517 |
-
interactive=True,
|
518 |
-
)
|
519 |
-
resample_sr0 = gr.Slider(
|
520 |
-
minimum=0,
|
521 |
-
maximum=48000,
|
522 |
-
label="Resample the output audio",
|
523 |
-
info="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling",
|
524 |
-
value=0,
|
525 |
-
step=1,
|
526 |
-
interactive=True,
|
527 |
-
)
|
528 |
-
rms_mix_rate0 = gr.Slider(
|
529 |
-
minimum=0,
|
530 |
-
maximum=1,
|
531 |
-
label="Volume Envelope",
|
532 |
-
info="Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used",
|
533 |
-
value=1,
|
534 |
-
interactive=True,
|
535 |
-
)
|
536 |
-
protect0 = gr.Slider(
|
537 |
-
minimum=0,
|
538 |
-
maximum=0.5,
|
539 |
-
label="Voice Protection",
|
540 |
-
info="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy",
|
541 |
-
value=0.5,
|
542 |
-
step=0.01,
|
543 |
-
interactive=True,
|
544 |
-
)
|
545 |
-
with gr.Column():
|
546 |
-
vc_log = gr.Textbox(label="Output Information", interactive=False)
|
547 |
-
vc_output = gr.Audio(label="Output Audio", interactive=False)
|
548 |
-
vc_convert = gr.Button("Convert", variant="primary")
|
549 |
-
vc_vocal_volume = gr.Slider(
|
550 |
-
minimum=0,
|
551 |
-
maximum=10,
|
552 |
-
label="Vocal volume",
|
553 |
-
value=1,
|
554 |
-
interactive=True,
|
555 |
-
step=1,
|
556 |
-
info="Adjust vocal volume (Default: 1}",
|
557 |
-
visible=False
|
558 |
-
)
|
559 |
-
vc_inst_volume = gr.Slider(
|
560 |
-
minimum=0,
|
561 |
-
maximum=10,
|
562 |
-
label="Instrument volume",
|
563 |
-
value=1,
|
564 |
-
interactive=True,
|
565 |
-
step=1,
|
566 |
-
info="Adjust instrument volume (Default: 1}",
|
567 |
-
visible=False
|
568 |
-
)
|
569 |
-
vc_combined_output = gr.Audio(label="Output Combined Audio", visible=False)
|
570 |
-
vc_combine = gr.Button("Combine",variant="primary", visible=False)
|
571 |
-
else:
|
572 |
-
with gr.Column():
|
573 |
-
vc_audio_mode = gr.Dropdown(label="Input voice", choices=audio_mode, allow_custom_value=False, value="Upload audio")
|
574 |
-
# Input
|
575 |
-
vc_input = gr.Textbox(label="Input audio path", visible=False)
|
576 |
-
# Upload
|
577 |
-
vc_microphone_mode = gr.Checkbox(label="Use Microphone", value=False, visible=True, interactive=True)
|
578 |
-
vc_upload = gr.Audio(label="Upload audio file", source="upload", visible=True, interactive=True)
|
579 |
-
# Youtube
|
580 |
-
vc_download_audio = gr.Dropdown(label="Provider", choices=["Youtube"], allow_custom_value=False, visible=False, value="Youtube", info="Select provider (Default: Youtube)")
|
581 |
-
vc_link = gr.Textbox(label="Youtube URL", visible=False, info="Example: https://www.youtube.com/watch?v=Nc0sB1Bmf-A", placeholder="https://www.youtube.com/watch?v=...")
|
582 |
-
vc_log_yt = gr.Textbox(label="Output Information", visible=False, interactive=False)
|
583 |
-
vc_download_button = gr.Button("Download Audio", variant="primary", visible=False)
|
584 |
-
vc_audio_preview = gr.Audio(label="Audio Preview", visible=False)
|
585 |
-
# Splitter
|
586 |
-
vc_split_model = gr.Dropdown(label="Splitter Model", choices=["hdemucs_mmi", "htdemucs", "htdemucs_ft", "mdx", "mdx_q", "mdx_extra_q"], allow_custom_value=False, visible=False, value="htdemucs", info="Select the splitter model (Default: htdemucs)")
|
587 |
-
vc_split_log = gr.Textbox(label="Output Information", visible=False, interactive=False)
|
588 |
-
vc_split = gr.Button("Split Audio", variant="primary", visible=False)
|
589 |
-
vc_vocal_preview = gr.Audio(label="Vocal Preview", visible=False)
|
590 |
-
vc_inst_preview = gr.Audio(label="Instrumental Preview", visible=False)
|
591 |
-
# TTS
|
592 |
-
tts_text = gr.Textbox(label="TTS text", info="Text to speech input", visible=False)
|
593 |
-
tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
|
594 |
-
with gr.Column():
|
595 |
-
vc_transform0 = gr.Number(label="Transpose", value=0, info='Type "12" to change from male to female voice. Type "-12" to change female to male voice')
|
596 |
-
f0method0 = gr.Radio(
|
597 |
-
label="Pitch extraction algorithm",
|
598 |
-
info=f0method_info,
|
599 |
-
choices=f0method_mode,
|
600 |
-
value="pm",
|
601 |
-
interactive=True
|
602 |
-
)
|
603 |
-
index_rate1 = gr.Slider(
|
604 |
-
minimum=0,
|
605 |
-
maximum=1,
|
606 |
-
label="Retrieval feature ratio",
|
607 |
-
info="(Default: 0.7)",
|
608 |
-
value=0.7,
|
609 |
-
interactive=True,
|
610 |
-
)
|
611 |
-
filter_radius0 = gr.Slider(
|
612 |
-
minimum=0,
|
613 |
-
maximum=7,
|
614 |
-
label="Apply Median Filtering",
|
615 |
-
info="The value represents the filter radius and can reduce breathiness.",
|
616 |
-
value=3,
|
617 |
-
step=1,
|
618 |
-
interactive=True,
|
619 |
-
)
|
620 |
-
resample_sr0 = gr.Slider(
|
621 |
-
minimum=0,
|
622 |
-
maximum=48000,
|
623 |
-
label="Resample the output audio",
|
624 |
-
info="Resample the output audio in post-processing to the final sample rate. Set to 0 for no resampling",
|
625 |
-
value=0,
|
626 |
-
step=1,
|
627 |
-
interactive=True,
|
628 |
-
)
|
629 |
-
rms_mix_rate0 = gr.Slider(
|
630 |
-
minimum=0,
|
631 |
-
maximum=1,
|
632 |
-
label="Volume Envelope",
|
633 |
-
info="Use the volume envelope of the input to replace or mix with the volume envelope of the output. The closer the ratio is to 1, the more the output envelope is used",
|
634 |
-
value=1,
|
635 |
-
interactive=True,
|
636 |
-
)
|
637 |
-
protect0 = gr.Slider(
|
638 |
-
minimum=0,
|
639 |
-
maximum=0.5,
|
640 |
-
label="Voice Protection",
|
641 |
-
info="Protect voiceless consonants and breath sounds to prevent artifacts such as tearing in electronic music. Set to 0.5 to disable. Decrease the value to increase protection, but it may reduce indexing accuracy",
|
642 |
-
value=0.5,
|
643 |
-
step=0.01,
|
644 |
-
interactive=True,
|
645 |
-
)
|
646 |
-
with gr.Column():
|
647 |
-
vc_log = gr.Textbox(label="Output Information", interactive=False)
|
648 |
-
vc_output = gr.Audio(label="Output Audio", interactive=False)
|
649 |
-
vc_convert = gr.Button("Convert", variant="primary")
|
650 |
-
vc_vocal_volume = gr.Slider(
|
651 |
-
minimum=0,
|
652 |
-
maximum=10,
|
653 |
-
label="Vocal volume",
|
654 |
-
value=1,
|
655 |
-
interactive=True,
|
656 |
-
step=1,
|
657 |
-
info="Adjust vocal volume (Default: 1}",
|
658 |
-
visible=False
|
659 |
-
)
|
660 |
-
vc_inst_volume = gr.Slider(
|
661 |
-
minimum=0,
|
662 |
-
maximum=10,
|
663 |
-
label="Instrument volume",
|
664 |
-
value=1,
|
665 |
-
interactive=True,
|
666 |
-
step=1,
|
667 |
-
info="Adjust instrument volume (Default: 1}",
|
668 |
-
visible=False
|
669 |
-
)
|
670 |
-
vc_combined_output = gr.Audio(label="Output Combined Audio", visible=False)
|
671 |
-
vc_combine = gr.Button("Combine",variant="primary", visible=False)
|
672 |
-
vc_convert.click(
|
673 |
-
fn=vc_fn,
|
674 |
-
inputs=[
|
675 |
-
vc_audio_mode,
|
676 |
-
vc_input,
|
677 |
-
vc_upload,
|
678 |
-
tts_text,
|
679 |
-
tts_voice,
|
680 |
-
vc_transform0,
|
681 |
-
f0method0,
|
682 |
-
index_rate1,
|
683 |
-
filter_radius0,
|
684 |
-
resample_sr0,
|
685 |
-
rms_mix_rate0,
|
686 |
-
protect0,
|
687 |
-
],
|
688 |
-
outputs=[vc_log ,vc_output]
|
689 |
-
)
|
690 |
-
vc_download_button.click(
|
691 |
-
fn=download_audio,
|
692 |
-
inputs=[vc_link, vc_download_audio],
|
693 |
-
outputs=[vc_audio_preview, vc_log_yt]
|
694 |
-
)
|
695 |
-
vc_split.click(
|
696 |
-
fn=cut_vocal_and_inst,
|
697 |
-
inputs=[vc_split_model],
|
698 |
-
outputs=[vc_split_log, vc_vocal_preview, vc_inst_preview, vc_input]
|
699 |
-
)
|
700 |
-
vc_combine.click(
|
701 |
-
fn=combine_vocal_and_inst,
|
702 |
-
inputs=[vc_output, vc_vocal_volume, vc_inst_volume, vc_split_model],
|
703 |
-
outputs=[vc_combined_output]
|
704 |
-
)
|
705 |
-
vc_microphone_mode.change(
|
706 |
-
fn=use_microphone,
|
707 |
-
inputs=vc_microphone_mode,
|
708 |
-
outputs=vc_upload
|
709 |
-
)
|
710 |
-
vc_audio_mode.change(
|
711 |
-
fn=change_audio_mode,
|
712 |
-
inputs=[vc_audio_mode],
|
713 |
-
outputs=[
|
714 |
-
vc_input,
|
715 |
-
vc_microphone_mode,
|
716 |
-
vc_upload,
|
717 |
-
vc_download_audio,
|
718 |
-
vc_link,
|
719 |
-
vc_log_yt,
|
720 |
-
vc_download_button,
|
721 |
-
vc_split_model,
|
722 |
-
vc_split_log,
|
723 |
-
vc_split,
|
724 |
-
vc_audio_preview,
|
725 |
-
vc_vocal_preview,
|
726 |
-
vc_inst_preview,
|
727 |
-
vc_vocal_volume,
|
728 |
-
vc_inst_volume,
|
729 |
-
vc_combined_output,
|
730 |
-
vc_combine,
|
731 |
-
tts_text,
|
732 |
-
tts_voice
|
733 |
-
]
|
734 |
-
)
|
735 |
-
app.queue(concurrency_count=5, max_size=50, api_open=config.api).launch(share=config.share)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/req/req_uninstall.py
DELETED
@@ -1,650 +0,0 @@
|
|
1 |
-
import functools
|
2 |
-
import os
|
3 |
-
import sys
|
4 |
-
import sysconfig
|
5 |
-
from importlib.util import cache_from_source
|
6 |
-
from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple
|
7 |
-
|
8 |
-
from pip._internal.exceptions import UninstallationError
|
9 |
-
from pip._internal.locations import get_bin_prefix, get_bin_user
|
10 |
-
from pip._internal.metadata import BaseDistribution
|
11 |
-
from pip._internal.utils.compat import WINDOWS
|
12 |
-
from pip._internal.utils.egg_link import egg_link_path_from_location
|
13 |
-
from pip._internal.utils.logging import getLogger, indent_log
|
14 |
-
from pip._internal.utils.misc import ask, normalize_path, renames, rmtree
|
15 |
-
from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory
|
16 |
-
from pip._internal.utils.virtualenv import running_under_virtualenv
|
17 |
-
|
18 |
-
logger = getLogger(__name__)
|
19 |
-
|
20 |
-
|
21 |
-
def _script_names(
|
22 |
-
bin_dir: str, script_name: str, is_gui: bool
|
23 |
-
) -> Generator[str, None, None]:
|
24 |
-
"""Create the fully qualified name of the files created by
|
25 |
-
{console,gui}_scripts for the given ``dist``.
|
26 |
-
Returns the list of file names
|
27 |
-
"""
|
28 |
-
exe_name = os.path.join(bin_dir, script_name)
|
29 |
-
yield exe_name
|
30 |
-
if not WINDOWS:
|
31 |
-
return
|
32 |
-
yield f"{exe_name}.exe"
|
33 |
-
yield f"{exe_name}.exe.manifest"
|
34 |
-
if is_gui:
|
35 |
-
yield f"{exe_name}-script.pyw"
|
36 |
-
else:
|
37 |
-
yield f"{exe_name}-script.py"
|
38 |
-
|
39 |
-
|
40 |
-
def _unique(
|
41 |
-
fn: Callable[..., Generator[Any, None, None]]
|
42 |
-
) -> Callable[..., Generator[Any, None, None]]:
|
43 |
-
@functools.wraps(fn)
|
44 |
-
def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]:
|
45 |
-
seen: Set[Any] = set()
|
46 |
-
for item in fn(*args, **kw):
|
47 |
-
if item not in seen:
|
48 |
-
seen.add(item)
|
49 |
-
yield item
|
50 |
-
|
51 |
-
return unique
|
52 |
-
|
53 |
-
|
54 |
-
@_unique
|
55 |
-
def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]:
|
56 |
-
"""
|
57 |
-
Yield all the uninstallation paths for dist based on RECORD-without-.py[co]
|
58 |
-
|
59 |
-
Yield paths to all the files in RECORD. For each .py file in RECORD, add
|
60 |
-
the .pyc and .pyo in the same directory.
|
61 |
-
|
62 |
-
UninstallPathSet.add() takes care of the __pycache__ .py[co].
|
63 |
-
|
64 |
-
If RECORD is not found, raises UninstallationError,
|
65 |
-
with possible information from the INSTALLER file.
|
66 |
-
|
67 |
-
https://packaging.python.org/specifications/recording-installed-packages/
|
68 |
-
"""
|
69 |
-
location = dist.location
|
70 |
-
assert location is not None, "not installed"
|
71 |
-
|
72 |
-
entries = dist.iter_declared_entries()
|
73 |
-
if entries is None:
|
74 |
-
msg = "Cannot uninstall {dist}, RECORD file not found.".format(dist=dist)
|
75 |
-
installer = dist.installer
|
76 |
-
if not installer or installer == "pip":
|
77 |
-
dep = "{}=={}".format(dist.raw_name, dist.version)
|
78 |
-
msg += (
|
79 |
-
" You might be able to recover from this via: "
|
80 |
-
"'pip install --force-reinstall --no-deps {}'.".format(dep)
|
81 |
-
)
|
82 |
-
else:
|
83 |
-
msg += " Hint: The package was installed by {}.".format(installer)
|
84 |
-
raise UninstallationError(msg)
|
85 |
-
|
86 |
-
for entry in entries:
|
87 |
-
path = os.path.join(location, entry)
|
88 |
-
yield path
|
89 |
-
if path.endswith(".py"):
|
90 |
-
dn, fn = os.path.split(path)
|
91 |
-
base = fn[:-3]
|
92 |
-
path = os.path.join(dn, base + ".pyc")
|
93 |
-
yield path
|
94 |
-
path = os.path.join(dn, base + ".pyo")
|
95 |
-
yield path
|
96 |
-
|
97 |
-
|
98 |
-
def compact(paths: Iterable[str]) -> Set[str]:
|
99 |
-
"""Compact a path set to contain the minimal number of paths
|
100 |
-
necessary to contain all paths in the set. If /a/path/ and
|
101 |
-
/a/path/to/a/file.txt are both in the set, leave only the
|
102 |
-
shorter path."""
|
103 |
-
|
104 |
-
sep = os.path.sep
|
105 |
-
short_paths: Set[str] = set()
|
106 |
-
for path in sorted(paths, key=len):
|
107 |
-
should_skip = any(
|
108 |
-
path.startswith(shortpath.rstrip("*"))
|
109 |
-
and path[len(shortpath.rstrip("*").rstrip(sep))] == sep
|
110 |
-
for shortpath in short_paths
|
111 |
-
)
|
112 |
-
if not should_skip:
|
113 |
-
short_paths.add(path)
|
114 |
-
return short_paths
|
115 |
-
|
116 |
-
|
117 |
-
def compress_for_rename(paths: Iterable[str]) -> Set[str]:
|
118 |
-
"""Returns a set containing the paths that need to be renamed.
|
119 |
-
|
120 |
-
This set may include directories when the original sequence of paths
|
121 |
-
included every file on disk.
|
122 |
-
"""
|
123 |
-
case_map = {os.path.normcase(p): p for p in paths}
|
124 |
-
remaining = set(case_map)
|
125 |
-
unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len)
|
126 |
-
wildcards: Set[str] = set()
|
127 |
-
|
128 |
-
def norm_join(*a: str) -> str:
|
129 |
-
return os.path.normcase(os.path.join(*a))
|
130 |
-
|
131 |
-
for root in unchecked:
|
132 |
-
if any(os.path.normcase(root).startswith(w) for w in wildcards):
|
133 |
-
# This directory has already been handled.
|
134 |
-
continue
|
135 |
-
|
136 |
-
all_files: Set[str] = set()
|
137 |
-
all_subdirs: Set[str] = set()
|
138 |
-
for dirname, subdirs, files in os.walk(root):
|
139 |
-
all_subdirs.update(norm_join(root, dirname, d) for d in subdirs)
|
140 |
-
all_files.update(norm_join(root, dirname, f) for f in files)
|
141 |
-
# If all the files we found are in our remaining set of files to
|
142 |
-
# remove, then remove them from the latter set and add a wildcard
|
143 |
-
# for the directory.
|
144 |
-
if not (all_files - remaining):
|
145 |
-
remaining.difference_update(all_files)
|
146 |
-
wildcards.add(root + os.sep)
|
147 |
-
|
148 |
-
return set(map(case_map.__getitem__, remaining)) | wildcards
|
149 |
-
|
150 |
-
|
151 |
-
def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str]]:
|
152 |
-
"""Returns a tuple of 2 sets of which paths to display to user
|
153 |
-
|
154 |
-
The first set contains paths that would be deleted. Files of a package
|
155 |
-
are not added and the top-level directory of the package has a '*' added
|
156 |
-
at the end - to signify that all it's contents are removed.
|
157 |
-
|
158 |
-
The second set contains files that would have been skipped in the above
|
159 |
-
folders.
|
160 |
-
"""
|
161 |
-
|
162 |
-
will_remove = set(paths)
|
163 |
-
will_skip = set()
|
164 |
-
|
165 |
-
# Determine folders and files
|
166 |
-
folders = set()
|
167 |
-
files = set()
|
168 |
-
for path in will_remove:
|
169 |
-
if path.endswith(".pyc"):
|
170 |
-
continue
|
171 |
-
if path.endswith("__init__.py") or ".dist-info" in path:
|
172 |
-
folders.add(os.path.dirname(path))
|
173 |
-
files.add(path)
|
174 |
-
|
175 |
-
# probably this one https://github.com/python/mypy/issues/390
|
176 |
-
_normcased_files = set(map(os.path.normcase, files)) # type: ignore
|
177 |
-
|
178 |
-
folders = compact(folders)
|
179 |
-
|
180 |
-
# This walks the tree using os.walk to not miss extra folders
|
181 |
-
# that might get added.
|
182 |
-
for folder in folders:
|
183 |
-
for dirpath, _, dirfiles in os.walk(folder):
|
184 |
-
for fname in dirfiles:
|
185 |
-
if fname.endswith(".pyc"):
|
186 |
-
continue
|
187 |
-
|
188 |
-
file_ = os.path.join(dirpath, fname)
|
189 |
-
if (
|
190 |
-
os.path.isfile(file_)
|
191 |
-
and os.path.normcase(file_) not in _normcased_files
|
192 |
-
):
|
193 |
-
# We are skipping this file. Add it to the set.
|
194 |
-
will_skip.add(file_)
|
195 |
-
|
196 |
-
will_remove = files | {os.path.join(folder, "*") for folder in folders}
|
197 |
-
|
198 |
-
return will_remove, will_skip
|
199 |
-
|
200 |
-
|
201 |
-
class StashedUninstallPathSet:
|
202 |
-
"""A set of file rename operations to stash files while
|
203 |
-
tentatively uninstalling them."""
|
204 |
-
|
205 |
-
def __init__(self) -> None:
|
206 |
-
# Mapping from source file root to [Adjacent]TempDirectory
|
207 |
-
# for files under that directory.
|
208 |
-
self._save_dirs: Dict[str, TempDirectory] = {}
|
209 |
-
# (old path, new path) tuples for each move that may need
|
210 |
-
# to be undone.
|
211 |
-
self._moves: List[Tuple[str, str]] = []
|
212 |
-
|
213 |
-
def _get_directory_stash(self, path: str) -> str:
|
214 |
-
"""Stashes a directory.
|
215 |
-
|
216 |
-
Directories are stashed adjacent to their original location if
|
217 |
-
possible, or else moved/copied into the user's temp dir."""
|
218 |
-
|
219 |
-
try:
|
220 |
-
save_dir: TempDirectory = AdjacentTempDirectory(path)
|
221 |
-
except OSError:
|
222 |
-
save_dir = TempDirectory(kind="uninstall")
|
223 |
-
self._save_dirs[os.path.normcase(path)] = save_dir
|
224 |
-
|
225 |
-
return save_dir.path
|
226 |
-
|
227 |
-
def _get_file_stash(self, path: str) -> str:
|
228 |
-
"""Stashes a file.
|
229 |
-
|
230 |
-
If no root has been provided, one will be created for the directory
|
231 |
-
in the user's temp directory."""
|
232 |
-
path = os.path.normcase(path)
|
233 |
-
head, old_head = os.path.dirname(path), None
|
234 |
-
save_dir = None
|
235 |
-
|
236 |
-
while head != old_head:
|
237 |
-
try:
|
238 |
-
save_dir = self._save_dirs[head]
|
239 |
-
break
|
240 |
-
except KeyError:
|
241 |
-
pass
|
242 |
-
head, old_head = os.path.dirname(head), head
|
243 |
-
else:
|
244 |
-
# Did not find any suitable root
|
245 |
-
head = os.path.dirname(path)
|
246 |
-
save_dir = TempDirectory(kind="uninstall")
|
247 |
-
self._save_dirs[head] = save_dir
|
248 |
-
|
249 |
-
relpath = os.path.relpath(path, head)
|
250 |
-
if relpath and relpath != os.path.curdir:
|
251 |
-
return os.path.join(save_dir.path, relpath)
|
252 |
-
return save_dir.path
|
253 |
-
|
254 |
-
def stash(self, path: str) -> str:
|
255 |
-
"""Stashes the directory or file and returns its new location.
|
256 |
-
Handle symlinks as files to avoid modifying the symlink targets.
|
257 |
-
"""
|
258 |
-
path_is_dir = os.path.isdir(path) and not os.path.islink(path)
|
259 |
-
if path_is_dir:
|
260 |
-
new_path = self._get_directory_stash(path)
|
261 |
-
else:
|
262 |
-
new_path = self._get_file_stash(path)
|
263 |
-
|
264 |
-
self._moves.append((path, new_path))
|
265 |
-
if path_is_dir and os.path.isdir(new_path):
|
266 |
-
# If we're moving a directory, we need to
|
267 |
-
# remove the destination first or else it will be
|
268 |
-
# moved to inside the existing directory.
|
269 |
-
# We just created new_path ourselves, so it will
|
270 |
-
# be removable.
|
271 |
-
os.rmdir(new_path)
|
272 |
-
renames(path, new_path)
|
273 |
-
return new_path
|
274 |
-
|
275 |
-
def commit(self) -> None:
|
276 |
-
"""Commits the uninstall by removing stashed files."""
|
277 |
-
for _, save_dir in self._save_dirs.items():
|
278 |
-
save_dir.cleanup()
|
279 |
-
self._moves = []
|
280 |
-
self._save_dirs = {}
|
281 |
-
|
282 |
-
def rollback(self) -> None:
|
283 |
-
"""Undoes the uninstall by moving stashed files back."""
|
284 |
-
for p in self._moves:
|
285 |
-
logger.info("Moving to %s\n from %s", *p)
|
286 |
-
|
287 |
-
for new_path, path in self._moves:
|
288 |
-
try:
|
289 |
-
logger.debug("Replacing %s from %s", new_path, path)
|
290 |
-
if os.path.isfile(new_path) or os.path.islink(new_path):
|
291 |
-
os.unlink(new_path)
|
292 |
-
elif os.path.isdir(new_path):
|
293 |
-
rmtree(new_path)
|
294 |
-
renames(path, new_path)
|
295 |
-
except OSError as ex:
|
296 |
-
logger.error("Failed to restore %s", new_path)
|
297 |
-
logger.debug("Exception: %s", ex)
|
298 |
-
|
299 |
-
self.commit()
|
300 |
-
|
301 |
-
@property
|
302 |
-
def can_rollback(self) -> bool:
|
303 |
-
return bool(self._moves)
|
304 |
-
|
305 |
-
|
306 |
-
class UninstallPathSet:
|
307 |
-
"""A set of file paths to be removed in the uninstallation of a
|
308 |
-
requirement."""
|
309 |
-
|
310 |
-
def __init__(self, dist: BaseDistribution) -> None:
|
311 |
-
self._paths: Set[str] = set()
|
312 |
-
self._refuse: Set[str] = set()
|
313 |
-
self._pth: Dict[str, UninstallPthEntries] = {}
|
314 |
-
self._dist = dist
|
315 |
-
self._moved_paths = StashedUninstallPathSet()
|
316 |
-
# Create local cache of normalize_path results. Creating an UninstallPathSet
|
317 |
-
# can result in hundreds/thousands of redundant calls to normalize_path with
|
318 |
-
# the same args, which hurts performance.
|
319 |
-
self._normalize_path_cached = functools.lru_cache()(normalize_path)
|
320 |
-
|
321 |
-
def _permitted(self, path: str) -> bool:
|
322 |
-
"""
|
323 |
-
Return True if the given path is one we are permitted to
|
324 |
-
remove/modify, False otherwise.
|
325 |
-
|
326 |
-
"""
|
327 |
-
# aka is_local, but caching normalized sys.prefix
|
328 |
-
if not running_under_virtualenv():
|
329 |
-
return True
|
330 |
-
return path.startswith(self._normalize_path_cached(sys.prefix))
|
331 |
-
|
332 |
-
def add(self, path: str) -> None:
|
333 |
-
head, tail = os.path.split(path)
|
334 |
-
|
335 |
-
# we normalize the head to resolve parent directory symlinks, but not
|
336 |
-
# the tail, since we only want to uninstall symlinks, not their targets
|
337 |
-
path = os.path.join(self._normalize_path_cached(head), os.path.normcase(tail))
|
338 |
-
|
339 |
-
if not os.path.exists(path):
|
340 |
-
return
|
341 |
-
if self._permitted(path):
|
342 |
-
self._paths.add(path)
|
343 |
-
else:
|
344 |
-
self._refuse.add(path)
|
345 |
-
|
346 |
-
# __pycache__ files can show up after 'installed-files.txt' is created,
|
347 |
-
# due to imports
|
348 |
-
if os.path.splitext(path)[1] == ".py":
|
349 |
-
self.add(cache_from_source(path))
|
350 |
-
|
351 |
-
def add_pth(self, pth_file: str, entry: str) -> None:
|
352 |
-
pth_file = self._normalize_path_cached(pth_file)
|
353 |
-
if self._permitted(pth_file):
|
354 |
-
if pth_file not in self._pth:
|
355 |
-
self._pth[pth_file] = UninstallPthEntries(pth_file)
|
356 |
-
self._pth[pth_file].add(entry)
|
357 |
-
else:
|
358 |
-
self._refuse.add(pth_file)
|
359 |
-
|
360 |
-
def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None:
|
361 |
-
"""Remove paths in ``self._paths`` with confirmation (unless
|
362 |
-
``auto_confirm`` is True)."""
|
363 |
-
|
364 |
-
if not self._paths:
|
365 |
-
logger.info(
|
366 |
-
"Can't uninstall '%s'. No files were found to uninstall.",
|
367 |
-
self._dist.raw_name,
|
368 |
-
)
|
369 |
-
return
|
370 |
-
|
371 |
-
dist_name_version = f"{self._dist.raw_name}-{self._dist.version}"
|
372 |
-
logger.info("Uninstalling %s:", dist_name_version)
|
373 |
-
|
374 |
-
with indent_log():
|
375 |
-
if auto_confirm or self._allowed_to_proceed(verbose):
|
376 |
-
moved = self._moved_paths
|
377 |
-
|
378 |
-
for_rename = compress_for_rename(self._paths)
|
379 |
-
|
380 |
-
for path in sorted(compact(for_rename)):
|
381 |
-
moved.stash(path)
|
382 |
-
logger.verbose("Removing file or directory %s", path)
|
383 |
-
|
384 |
-
for pth in self._pth.values():
|
385 |
-
pth.remove()
|
386 |
-
|
387 |
-
logger.info("Successfully uninstalled %s", dist_name_version)
|
388 |
-
|
389 |
-
def _allowed_to_proceed(self, verbose: bool) -> bool:
|
390 |
-
"""Display which files would be deleted and prompt for confirmation"""
|
391 |
-
|
392 |
-
def _display(msg: str, paths: Iterable[str]) -> None:
|
393 |
-
if not paths:
|
394 |
-
return
|
395 |
-
|
396 |
-
logger.info(msg)
|
397 |
-
with indent_log():
|
398 |
-
for path in sorted(compact(paths)):
|
399 |
-
logger.info(path)
|
400 |
-
|
401 |
-
if not verbose:
|
402 |
-
will_remove, will_skip = compress_for_output_listing(self._paths)
|
403 |
-
else:
|
404 |
-
# In verbose mode, display all the files that are going to be
|
405 |
-
# deleted.
|
406 |
-
will_remove = set(self._paths)
|
407 |
-
will_skip = set()
|
408 |
-
|
409 |
-
_display("Would remove:", will_remove)
|
410 |
-
_display("Would not remove (might be manually added):", will_skip)
|
411 |
-
_display("Would not remove (outside of prefix):", self._refuse)
|
412 |
-
if verbose:
|
413 |
-
_display("Will actually move:", compress_for_rename(self._paths))
|
414 |
-
|
415 |
-
return ask("Proceed (Y/n)? ", ("y", "n", "")) != "n"
|
416 |
-
|
417 |
-
def rollback(self) -> None:
|
418 |
-
"""Rollback the changes previously made by remove()."""
|
419 |
-
if not self._moved_paths.can_rollback:
|
420 |
-
logger.error(
|
421 |
-
"Can't roll back %s; was not uninstalled",
|
422 |
-
self._dist.raw_name,
|
423 |
-
)
|
424 |
-
return
|
425 |
-
logger.info("Rolling back uninstall of %s", self._dist.raw_name)
|
426 |
-
self._moved_paths.rollback()
|
427 |
-
for pth in self._pth.values():
|
428 |
-
pth.rollback()
|
429 |
-
|
430 |
-
def commit(self) -> None:
|
431 |
-
"""Remove temporary save dir: rollback will no longer be possible."""
|
432 |
-
self._moved_paths.commit()
|
433 |
-
|
434 |
-
@classmethod
|
435 |
-
def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":
|
436 |
-
dist_location = dist.location
|
437 |
-
info_location = dist.info_location
|
438 |
-
if dist_location is None:
|
439 |
-
logger.info(
|
440 |
-
"Not uninstalling %s since it is not installed",
|
441 |
-
dist.canonical_name,
|
442 |
-
)
|
443 |
-
return cls(dist)
|
444 |
-
|
445 |
-
normalized_dist_location = normalize_path(dist_location)
|
446 |
-
if not dist.local:
|
447 |
-
logger.info(
|
448 |
-
"Not uninstalling %s at %s, outside environment %s",
|
449 |
-
dist.canonical_name,
|
450 |
-
normalized_dist_location,
|
451 |
-
sys.prefix,
|
452 |
-
)
|
453 |
-
return cls(dist)
|
454 |
-
|
455 |
-
if normalized_dist_location in {
|
456 |
-
p
|
457 |
-
for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")}
|
458 |
-
if p
|
459 |
-
}:
|
460 |
-
logger.info(
|
461 |
-
"Not uninstalling %s at %s, as it is in the standard library.",
|
462 |
-
dist.canonical_name,
|
463 |
-
normalized_dist_location,
|
464 |
-
)
|
465 |
-
return cls(dist)
|
466 |
-
|
467 |
-
paths_to_remove = cls(dist)
|
468 |
-
develop_egg_link = egg_link_path_from_location(dist.raw_name)
|
469 |
-
|
470 |
-
# Distribution is installed with metadata in a "flat" .egg-info
|
471 |
-
# directory. This means it is not a modern .dist-info installation, an
|
472 |
-
# egg, or legacy editable.
|
473 |
-
setuptools_flat_installation = (
|
474 |
-
dist.installed_with_setuptools_egg_info
|
475 |
-
and info_location is not None
|
476 |
-
and os.path.exists(info_location)
|
477 |
-
# If dist is editable and the location points to a ``.egg-info``,
|
478 |
-
# we are in fact in the legacy editable case.
|
479 |
-
and not info_location.endswith(f"{dist.setuptools_filename}.egg-info")
|
480 |
-
)
|
481 |
-
|
482 |
-
# Uninstall cases order do matter as in the case of 2 installs of the
|
483 |
-
# same package, pip needs to uninstall the currently detected version
|
484 |
-
if setuptools_flat_installation:
|
485 |
-
if info_location is not None:
|
486 |
-
paths_to_remove.add(info_location)
|
487 |
-
installed_files = dist.iter_declared_entries()
|
488 |
-
if installed_files is not None:
|
489 |
-
for installed_file in installed_files:
|
490 |
-
paths_to_remove.add(os.path.join(dist_location, installed_file))
|
491 |
-
# FIXME: need a test for this elif block
|
492 |
-
# occurs with --single-version-externally-managed/--record outside
|
493 |
-
# of pip
|
494 |
-
elif dist.is_file("top_level.txt"):
|
495 |
-
try:
|
496 |
-
namespace_packages = dist.read_text("namespace_packages.txt")
|
497 |
-
except FileNotFoundError:
|
498 |
-
namespaces = []
|
499 |
-
else:
|
500 |
-
namespaces = namespace_packages.splitlines(keepends=False)
|
501 |
-
for top_level_pkg in [
|
502 |
-
p
|
503 |
-
for p in dist.read_text("top_level.txt").splitlines()
|
504 |
-
if p and p not in namespaces
|
505 |
-
]:
|
506 |
-
path = os.path.join(dist_location, top_level_pkg)
|
507 |
-
paths_to_remove.add(path)
|
508 |
-
paths_to_remove.add(f"{path}.py")
|
509 |
-
paths_to_remove.add(f"{path}.pyc")
|
510 |
-
paths_to_remove.add(f"{path}.pyo")
|
511 |
-
|
512 |
-
elif dist.installed_by_distutils:
|
513 |
-
raise UninstallationError(
|
514 |
-
"Cannot uninstall {!r}. It is a distutils installed project "
|
515 |
-
"and thus we cannot accurately determine which files belong "
|
516 |
-
"to it which would lead to only a partial uninstall.".format(
|
517 |
-
dist.raw_name,
|
518 |
-
)
|
519 |
-
)
|
520 |
-
|
521 |
-
elif dist.installed_as_egg:
|
522 |
-
# package installed by easy_install
|
523 |
-
# We cannot match on dist.egg_name because it can slightly vary
|
524 |
-
# i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
|
525 |
-
paths_to_remove.add(dist_location)
|
526 |
-
easy_install_egg = os.path.split(dist_location)[1]
|
527 |
-
easy_install_pth = os.path.join(
|
528 |
-
os.path.dirname(dist_location),
|
529 |
-
"easy-install.pth",
|
530 |
-
)
|
531 |
-
paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg)
|
532 |
-
|
533 |
-
elif dist.installed_with_dist_info:
|
534 |
-
for path in uninstallation_paths(dist):
|
535 |
-
paths_to_remove.add(path)
|
536 |
-
|
537 |
-
elif develop_egg_link:
|
538 |
-
# PEP 660 modern editable is handled in the ``.dist-info`` case
|
539 |
-
# above, so this only covers the setuptools-style editable.
|
540 |
-
with open(develop_egg_link) as fh:
|
541 |
-
link_pointer = os.path.normcase(fh.readline().strip())
|
542 |
-
normalized_link_pointer = paths_to_remove._normalize_path_cached(
|
543 |
-
link_pointer
|
544 |
-
)
|
545 |
-
assert os.path.samefile(
|
546 |
-
normalized_link_pointer, normalized_dist_location
|
547 |
-
), (
|
548 |
-
f"Egg-link {develop_egg_link} (to {link_pointer}) does not match "
|
549 |
-
f"installed location of {dist.raw_name} (at {dist_location})"
|
550 |
-
)
|
551 |
-
paths_to_remove.add(develop_egg_link)
|
552 |
-
easy_install_pth = os.path.join(
|
553 |
-
os.path.dirname(develop_egg_link), "easy-install.pth"
|
554 |
-
)
|
555 |
-
paths_to_remove.add_pth(easy_install_pth, dist_location)
|
556 |
-
|
557 |
-
else:
|
558 |
-
logger.debug(
|
559 |
-
"Not sure how to uninstall: %s - Check: %s",
|
560 |
-
dist,
|
561 |
-
dist_location,
|
562 |
-
)
|
563 |
-
|
564 |
-
if dist.in_usersite:
|
565 |
-
bin_dir = get_bin_user()
|
566 |
-
else:
|
567 |
-
bin_dir = get_bin_prefix()
|
568 |
-
|
569 |
-
# find distutils scripts= scripts
|
570 |
-
try:
|
571 |
-
for script in dist.iter_distutils_script_names():
|
572 |
-
paths_to_remove.add(os.path.join(bin_dir, script))
|
573 |
-
if WINDOWS:
|
574 |
-
paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat"))
|
575 |
-
except (FileNotFoundError, NotADirectoryError):
|
576 |
-
pass
|
577 |
-
|
578 |
-
# find console_scripts and gui_scripts
|
579 |
-
def iter_scripts_to_remove(
|
580 |
-
dist: BaseDistribution,
|
581 |
-
bin_dir: str,
|
582 |
-
) -> Generator[str, None, None]:
|
583 |
-
for entry_point in dist.iter_entry_points():
|
584 |
-
if entry_point.group == "console_scripts":
|
585 |
-
yield from _script_names(bin_dir, entry_point.name, False)
|
586 |
-
elif entry_point.group == "gui_scripts":
|
587 |
-
yield from _script_names(bin_dir, entry_point.name, True)
|
588 |
-
|
589 |
-
for s in iter_scripts_to_remove(dist, bin_dir):
|
590 |
-
paths_to_remove.add(s)
|
591 |
-
|
592 |
-
return paths_to_remove
|
593 |
-
|
594 |
-
|
595 |
-
class UninstallPthEntries:
|
596 |
-
def __init__(self, pth_file: str) -> None:
|
597 |
-
self.file = pth_file
|
598 |
-
self.entries: Set[str] = set()
|
599 |
-
self._saved_lines: Optional[List[bytes]] = None
|
600 |
-
|
601 |
-
def add(self, entry: str) -> None:
|
602 |
-
entry = os.path.normcase(entry)
|
603 |
-
# On Windows, os.path.normcase converts the entry to use
|
604 |
-
# backslashes. This is correct for entries that describe absolute
|
605 |
-
# paths outside of site-packages, but all the others use forward
|
606 |
-
# slashes.
|
607 |
-
# os.path.splitdrive is used instead of os.path.isabs because isabs
|
608 |
-
# treats non-absolute paths with drive letter markings like c:foo\bar
|
609 |
-
# as absolute paths. It also does not recognize UNC paths if they don't
|
610 |
-
# have more than "\\sever\share". Valid examples: "\\server\share\" or
|
611 |
-
# "\\server\share\folder".
|
612 |
-
if WINDOWS and not os.path.splitdrive(entry)[0]:
|
613 |
-
entry = entry.replace("\\", "/")
|
614 |
-
self.entries.add(entry)
|
615 |
-
|
616 |
-
def remove(self) -> None:
|
617 |
-
logger.verbose("Removing pth entries from %s:", self.file)
|
618 |
-
|
619 |
-
# If the file doesn't exist, log a warning and return
|
620 |
-
if not os.path.isfile(self.file):
|
621 |
-
logger.warning("Cannot remove entries from nonexistent file %s", self.file)
|
622 |
-
return
|
623 |
-
with open(self.file, "rb") as fh:
|
624 |
-
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
|
625 |
-
lines = fh.readlines()
|
626 |
-
self._saved_lines = lines
|
627 |
-
if any(b"\r\n" in line for line in lines):
|
628 |
-
endline = "\r\n"
|
629 |
-
else:
|
630 |
-
endline = "\n"
|
631 |
-
# handle missing trailing newline
|
632 |
-
if lines and not lines[-1].endswith(endline.encode("utf-8")):
|
633 |
-
lines[-1] = lines[-1] + endline.encode("utf-8")
|
634 |
-
for entry in self.entries:
|
635 |
-
try:
|
636 |
-
logger.verbose("Removing entry: %s", entry)
|
637 |
-
lines.remove((entry + endline).encode("utf-8"))
|
638 |
-
except ValueError:
|
639 |
-
pass
|
640 |
-
with open(self.file, "wb") as fh:
|
641 |
-
fh.writelines(lines)
|
642 |
-
|
643 |
-
def rollback(self) -> bool:
|
644 |
-
if self._saved_lines is None:
|
645 |
-
logger.error("Cannot roll back changes to %s, none were made", self.file)
|
646 |
-
return False
|
647 |
-
logger.debug("Rolling %s back to previous state", self.file)
|
648 |
-
with open(self.file, "wb") as fh:
|
649 |
-
fh.writelines(self._saved_lines)
|
650 |
-
return True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/escprober.py
DELETED
@@ -1,102 +0,0 @@
|
|
1 |
-
######################## BEGIN LICENSE BLOCK ########################
|
2 |
-
# The Original Code is mozilla.org code.
|
3 |
-
#
|
4 |
-
# The Initial Developer of the Original Code is
|
5 |
-
# Netscape Communications Corporation.
|
6 |
-
# Portions created by the Initial Developer are Copyright (C) 1998
|
7 |
-
# the Initial Developer. All Rights Reserved.
|
8 |
-
#
|
9 |
-
# Contributor(s):
|
10 |
-
# Mark Pilgrim - port to Python
|
11 |
-
#
|
12 |
-
# This library is free software; you can redistribute it and/or
|
13 |
-
# modify it under the terms of the GNU Lesser General Public
|
14 |
-
# License as published by the Free Software Foundation; either
|
15 |
-
# version 2.1 of the License, or (at your option) any later version.
|
16 |
-
#
|
17 |
-
# This library is distributed in the hope that it will be useful,
|
18 |
-
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
19 |
-
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
20 |
-
# Lesser General Public License for more details.
|
21 |
-
#
|
22 |
-
# You should have received a copy of the GNU Lesser General Public
|
23 |
-
# License along with this library; if not, write to the Free Software
|
24 |
-
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
|
25 |
-
# 02110-1301 USA
|
26 |
-
######################### END LICENSE BLOCK #########################
|
27 |
-
|
28 |
-
from typing import Optional, Union
|
29 |
-
|
30 |
-
from .charsetprober import CharSetProber
|
31 |
-
from .codingstatemachine import CodingStateMachine
|
32 |
-
from .enums import LanguageFilter, MachineState, ProbingState
|
33 |
-
from .escsm import (
|
34 |
-
HZ_SM_MODEL,
|
35 |
-
ISO2022CN_SM_MODEL,
|
36 |
-
ISO2022JP_SM_MODEL,
|
37 |
-
ISO2022KR_SM_MODEL,
|
38 |
-
)
|
39 |
-
|
40 |
-
|
41 |
-
class EscCharSetProber(CharSetProber):
|
42 |
-
"""
|
43 |
-
This CharSetProber uses a "code scheme" approach for detecting encodings,
|
44 |
-
whereby easily recognizable escape or shift sequences are relied on to
|
45 |
-
identify these encodings.
|
46 |
-
"""
|
47 |
-
|
48 |
-
def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
|
49 |
-
super().__init__(lang_filter=lang_filter)
|
50 |
-
self.coding_sm = []
|
51 |
-
if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
|
52 |
-
self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
|
53 |
-
self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
|
54 |
-
if self.lang_filter & LanguageFilter.JAPANESE:
|
55 |
-
self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
|
56 |
-
if self.lang_filter & LanguageFilter.KOREAN:
|
57 |
-
self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
|
58 |
-
self.active_sm_count = 0
|
59 |
-
self._detected_charset: Optional[str] = None
|
60 |
-
self._detected_language: Optional[str] = None
|
61 |
-
self._state = ProbingState.DETECTING
|
62 |
-
self.reset()
|
63 |
-
|
64 |
-
def reset(self) -> None:
|
65 |
-
super().reset()
|
66 |
-
for coding_sm in self.coding_sm:
|
67 |
-
coding_sm.active = True
|
68 |
-
coding_sm.reset()
|
69 |
-
self.active_sm_count = len(self.coding_sm)
|
70 |
-
self._detected_charset = None
|
71 |
-
self._detected_language = None
|
72 |
-
|
73 |
-
@property
|
74 |
-
def charset_name(self) -> Optional[str]:
|
75 |
-
return self._detected_charset
|
76 |
-
|
77 |
-
@property
|
78 |
-
def language(self) -> Optional[str]:
|
79 |
-
return self._detected_language
|
80 |
-
|
81 |
-
def get_confidence(self) -> float:
|
82 |
-
return 0.99 if self._detected_charset else 0.00
|
83 |
-
|
84 |
-
def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
|
85 |
-
for c in byte_str:
|
86 |
-
for coding_sm in self.coding_sm:
|
87 |
-
if not coding_sm.active:
|
88 |
-
continue
|
89 |
-
coding_state = coding_sm.next_state(c)
|
90 |
-
if coding_state == MachineState.ERROR:
|
91 |
-
coding_sm.active = False
|
92 |
-
self.active_sm_count -= 1
|
93 |
-
if self.active_sm_count <= 0:
|
94 |
-
self._state = ProbingState.NOT_ME
|
95 |
-
return self.state
|
96 |
-
elif coding_state == MachineState.ITS_ME:
|
97 |
-
self._state = ProbingState.FOUND_IT
|
98 |
-
self._detected_charset = coding_sm.get_coding_state_machine()
|
99 |
-
self._detected_language = coding_sm.language
|
100 |
-
return self.state
|
101 |
-
|
102 |
-
return self.state
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Aveygo/AstroSleuth/README.md
DELETED
@@ -1,86 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: AstroSleuth
|
3 |
-
emoji: 🌖
|
4 |
-
colorFrom: pink
|
5 |
-
colorTo: yellow
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.17.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: gpl-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
# AstroSleuth
|
14 |
-
|
15 |
-
<p align="center">
|
16 |
-
<img src="https://media.githubusercontent.com/media/Aveygo/AstroSleuth/master/sample.png">
|
17 |
-
</p>
|
18 |
-
|
19 |
-
[](https://github.com/Aveygo/AstroSleuth "Go to GitHub repo")
|
20 |
-
[](https://github.com/Aveygo/AstroSleuth)[](https://www.python.org/downloads/release/python-399/)
|
21 |
-
|
22 |
-
The (only?) free, zero bulls**t, 200 line, open source astrophotgraphy upscaler.
|
23 |
-
|
24 |
-
Sick of the commercialisation of deep space tools, I wanted a solution that can run on almost any hardware with epic results.
|
25 |
-
|
26 |
-
I started this project a regrettably long time ago. A lot has changed since then. I tried to share my work, got burned, removed it, perfected it, and fell into a well of "is it good enough".
|
27 |
-
|
28 |
-
I present my original idea, a finetuned realesr-gan model trained on 15k images of astrophotography. It is behind my works on [reddit](https://www.reddit.com/user/CodingCoda), my [youtube](https://www.youtube.com/channel/UCHode4WV0hteze-ZDEG5atQ) attempt
|
29 |
-
and my [cloudy nights post](https://www.cloudynights.com/topic/816869-astrosleuth-image-denoiser-upscaler/), and I hope it will suit you well.
|
30 |
-
|
31 |
-
## Running
|
32 |
-
|
33 |
-
### Hugging face - Good for testing/playing around
|
34 |
-
1. Go [here](https://huggingface.co/spaces/Aveygo/AstroSleuth). Please note that hugging face servers use 2 core cpus and you'll likely be sharing, so large images may take a very long time, even timing out.
|
35 |
-
|
36 |
-
### Colab - Best method if you don't have a GPU
|
37 |
-
1. Visit [colab](https://colab.research.google.com/drive/1LxiNsnokF-6OmICSxWNvTeFEEZvRM2Lp?usp=sharing)
|
38 |
-
2. Enjoy!
|
39 |
-
|
40 |
-
### Locally (Binaries) - Recommended method
|
41 |
-
1. Go to the [releases](https://github.com/Aveygo/AstroSleuth/releases) page
|
42 |
-
2. Download the latest zip for your platform, eg: astrosleuth-v0.1.0-windows.zip
|
43 |
-
3. Unzip and enter the folder
|
44 |
-
4. Right click -> open in terminal
|
45 |
-
5. ```astrosleuth.exe -n astrosleuth -i [input source] -o [output destination]```
|
46 |
-
|
47 |
-
### Locally (Python) - Fairly complicated, is the "proper" way to self-host
|
48 |
-
1. Install [python](https://www.python.org/downloads/) (and [pip](https://phoenixnap.com/kb/install-pip-windows))
|
49 |
-
2. Download and unzip the latest [release](https://github.com/Aveygo/AstroSleuth/archive/refs/heads/master.zip) of AstroSleuth
|
50 |
-
3. Open the terminal (right-click -> terminal) and run ```pip install -r requirements.txt```
|
51 |
-
4. Run the streamlit interface with ```streamlit run app.py```
|
52 |
-
|
53 |
-
### Local (Python - Pytorch) - GPU Acceleration
|
54 |
-
1. Follow the instructions on the [pytorch](https://pytorch.org/get-started/locally/) website to install pytorch.
|
55 |
-
2. Follow the "Locally (Python)" instructions, but run with ```streamlit run app.py -- --gpu --torch``` for step 4
|
56 |
-
|
57 |
-
### Local (Python - ONNX) - GPU Acceleration
|
58 |
-
Please note, this method only works if you have cuda version 11, check your drivers first!
|
59 |
-
|
60 |
-
1. Run ```pip3 uninstall onnxruntime```
|
61 |
-
2. and then ```pip3 install onnxruntime-gpu```
|
62 |
-
|
63 |
-
## Extra information
|
64 |
-
|
65 |
-
Please see [details](https://github.com/Aveygo/AstroSleuth/blob/master/results/details.md) for image samples and potential workflow improvements and [training](https://github.com/Aveygo/AstroSleuth/blob/master/training.md) for details on how the models are trained.
|
66 |
-
|
67 |
-
## Known issues
|
68 |
-
|
69 |
-
Results are now more comparable with BlurXterminator after training improvements (see [training](https://github.com/Aveygo/AstroSleuth/blob/master/training.md)). AstroSleuthV2 weights will be on the hugging face repo, but not automatically downloaded for the time being.
|
70 |
-
|
71 |
-
~~Currently investigating a "zero-knowledge" solution.~~
|
72 |
-
No "real" zero-knowledge solution seems very practical. Still on the lookout for the time being.
|
73 |
-
|
74 |
-
The biggest concern currently is the discriminator failing to detect real from fakes, regardless of it's weight on the generator. This results in AstroSleuthV2 adding a lot more stars than it should (supposably also due to the new feature model having some effect), and overall not performing to my standards. A fix is currently underway but will take a while to train/find best training parameters, and maybe needs a new discriminator altogether.
|
75 |
-
|
76 |
-
Another issue is star diffraction spikes being wavy or "spotty". A better disscriminator will help, but a dataset more focused on diffraction spikes is much more optimal. Possible synthetic dataset in the works currently.
|
77 |
-
|
78 |
-
## Concerns and Personal Notes
|
79 |
-
|
80 |
-
Its not a understatement that this tool has changed my life. It was my first machine learning project. I even built full-stack applications searching for the perfect way to share my work.
|
81 |
-
I will continue to do so. Ask for any improvements and I will likely impliment them. I am begging for an excuse to work on it so any feedback is appreciated. I am interested in creating a Photoshop/Pixinsight plugin if thats what even a single person wants, just open a git issue [here](https://github.com/Aveygo/AstroSleuth/issues) and I'll see to it.
|
82 |
-
|
83 |
-
For the redditors, this tool is presented as is, free as long as it stays free, I cannot convey though words how much I dont care that its not "scientifically accurate".
|
84 |
-
|
85 |
-
<!---If it wasnt for https://www.rc-astro.com/ I wouldnt have built up the effort though spite to go though redeveloping this project. "Does BlurXTerminator fabricate detail? No" is full of s**t, when I got s**t for being honest and saying my model does-->
|
86 |
-
<!--git push hf HEAD:main-->
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AxelBell/EasyOCR_text_recognition/app.py
DELETED
@@ -1,136 +0,0 @@
|
|
1 |
-
from pprint import pprint
|
2 |
-
import gradio as gr
|
3 |
-
from data import Data
|
4 |
-
|
5 |
-
data = Data("./demo_data.toml")
|
6 |
-
|
7 |
-
with gr.Blocks(theme="freddyaboulton/dracula_revamped", css=data.assets["css"]) as demo:
|
8 |
-
with gr.Column():
|
9 |
-
gr.HTML(data.assets["header"])
|
10 |
-
with gr.Row():
|
11 |
-
with gr.Column(variant="panel"):
|
12 |
-
data.render("image")
|
13 |
-
with gr.Accordion("Advanced Settings", open=False):
|
14 |
-
with gr.Tabs():
|
15 |
-
with gr.Tab("General"):
|
16 |
-
with gr.Group():
|
17 |
-
lang_shadow_api = gr.Dropdown(
|
18 |
-
[
|
19 |
-
l.split(" ")[-1][1:-1]
|
20 |
-
for l in data.inputs["lang"].choices
|
21 |
-
],
|
22 |
-
value=[
|
23 |
-
l.split(" ")[-1][1:-1]
|
24 |
-
for l in data.inputs["lang"].value
|
25 |
-
],
|
26 |
-
visible=False,
|
27 |
-
)
|
28 |
-
data.render("lang")
|
29 |
-
with gr.Row():
|
30 |
-
data.render("decoder", "beamWidth")
|
31 |
-
data.render("allowlist", "blocklist")
|
32 |
-
with gr.Row():
|
33 |
-
data.render("paragraph", "detail")
|
34 |
-
data.render(
|
35 |
-
"min_size", "rotation_info", "output_format"
|
36 |
-
)
|
37 |
-
with gr.Tab("Contrast"):
|
38 |
-
with gr.Group():
|
39 |
-
data.render(
|
40 |
-
"contrast_ths",
|
41 |
-
"adjust_contrast",
|
42 |
-
)
|
43 |
-
with gr.Tab("Text Detection"):
|
44 |
-
with gr.Group():
|
45 |
-
data.render(
|
46 |
-
"text_threshold",
|
47 |
-
"low_text",
|
48 |
-
"link_threshold",
|
49 |
-
"mag_ratio",
|
50 |
-
"threshold",
|
51 |
-
"bbox_min_score",
|
52 |
-
"bbox_min_size",
|
53 |
-
"max_candidates",
|
54 |
-
)
|
55 |
-
with gr.Tab("Bounding Box Merging"):
|
56 |
-
with gr.Group():
|
57 |
-
gr.HTML(
|
58 |
-
"<p style='margin: var(--block-padding);text-align: center;'>This set of parameter controls when adjacent bounding boxes merge with each other. Every parameters except 'Slope threshold' is in the unit of box height"
|
59 |
-
)
|
60 |
-
data.render(
|
61 |
-
"slope_ths",
|
62 |
-
"ycenter_ths",
|
63 |
-
"height_ths",
|
64 |
-
"width_ths",
|
65 |
-
"add_margin",
|
66 |
-
"y_ths",
|
67 |
-
"x_ths",
|
68 |
-
)
|
69 |
-
with gr.Row():
|
70 |
-
btn_clear = gr.ClearButton(
|
71 |
-
[data.inputs["image"], *data.outputs_list], value="Reset"
|
72 |
-
)
|
73 |
-
btn_run = gr.Button("Run!", variant="primary")
|
74 |
-
gr.Examples(
|
75 |
-
examples=data.examples,
|
76 |
-
elem_id="examples",
|
77 |
-
inputs=data.inputs_list,
|
78 |
-
outputs=data.outputs_list,
|
79 |
-
fn=Data.process_image,
|
80 |
-
cache_examples=False,
|
81 |
-
)
|
82 |
-
with gr.Column(variant="panel"):
|
83 |
-
data.render("image_out")
|
84 |
-
with gr.Tabs():
|
85 |
-
with gr.Tab("Data"):
|
86 |
-
data.render("data_out")
|
87 |
-
with gr.Tab("Raw"):
|
88 |
-
data.render("raw_out")
|
89 |
-
gr.HTML(data.assets["footer"])
|
90 |
-
|
91 |
-
data.inputs["lang"].change(
|
92 |
-
fn=lambda v: [l.split(" ")[-1][1:-1] for l in v],
|
93 |
-
inputs=data.inputs["lang"],
|
94 |
-
outputs=lang_shadow_api,
|
95 |
-
api_name=False,
|
96 |
-
)
|
97 |
-
|
98 |
-
btn_run.click(
|
99 |
-
fn=data.process_image,
|
100 |
-
inputs=[lang_shadow_api, *data.inputs_list[1:]],
|
101 |
-
outputs=data.outputs_list,
|
102 |
-
scroll_to_output=True,
|
103 |
-
)
|
104 |
-
data.inputs["decoder"].select(
|
105 |
-
lambda d: data.inputs["beamWidth"].update(
|
106 |
-
interactive=True if d != "greedy" else False
|
107 |
-
),
|
108 |
-
data.inputs["decoder"],
|
109 |
-
data.inputs["beamWidth"],
|
110 |
-
api_name=False,
|
111 |
-
)
|
112 |
-
data.inputs["paragraph"].select(
|
113 |
-
lambda p: [
|
114 |
-
data.inputs["x_ths"].update(interactive=p),
|
115 |
-
data.inputs["y_ths"].update(interactive=p),
|
116 |
-
],
|
117 |
-
data.inputs["paragraph"],
|
118 |
-
[data.inputs["x_ths"], data.inputs["y_ths"]],
|
119 |
-
api_name=False,
|
120 |
-
)
|
121 |
-
data.inputs["detail"].select(
|
122 |
-
lambda p: data.inputs["output_format"].update(value=data.default[-1]),
|
123 |
-
data.inputs["detail"],
|
124 |
-
data.inputs["output_format"],
|
125 |
-
api_name=False,
|
126 |
-
)
|
127 |
-
data.inputs["output_format"].select(
|
128 |
-
lambda p: data.inputs["detail"].update(value=True),
|
129 |
-
data.inputs["output_format"],
|
130 |
-
data.inputs["detail"],
|
131 |
-
api_name=False,
|
132 |
-
)
|
133 |
-
btn_clear.click(lambda: data.default, [], data.inputs_list, api_name=False)
|
134 |
-
|
135 |
-
|
136 |
-
demo.queue().launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Classic Apk.md
DELETED
@@ -1,83 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Cómo Descargar y Jugar UNO! ! en tu Dispositivo Móvil</h1>
|
3 |
-
<p>¿Te encanta jugar UNO, el clásico juego de cartas que trae diversión y emoción a cualquier ocasión? ¿Te gustaría poder jugar en cualquier momento, en cualquier lugar, con cualquier persona? Si es así, estás de suerte! UNO! Por Nuria Capdevila UNO es el juego móvil oficial de UNO que te permite disfrutar del juego en tu smartphone o tablet. En este artículo, le mostraremos cómo descargar y jugar UNO! Algunos consejos y trucos para aprovechar al máximo tu experiencia. </p>
|
4 |
-
<h2>¿Qué es el UNO! ?? </h2>
|
5 |
-
<h3>El clásico juego de cartas con un toque</h3>
|
6 |
-
<P>UNO! Se basa en el juego de cartas original que fue creado en 1971 por Merle Robbins. El objetivo del juego es deshacerse de todas tus cartas antes que tus oponentes, haciendo coincidir el color o el número de la carta en la parte superior de la pila de descartes. También puedes usar cartas especiales, como Saltar, Revertir, Dibujar dos, Comodín y Comodín cuatro, para cambiar la dirección del juego, forzar a tus oponentes a robar más cartas o cambiar el color de la carta. Y no te olvides de gritar "UNO" cuando solo te queda una tarjeta! </p>
|
7 |
-
<h2>classic apk</h2><br /><p><b><b>Download</b> ===== <a href="https://bltlly.com/2v6Ml6">https://bltlly.com/2v6Ml6</a></b></p><br /><br />
|
8 |
-
<h3>¡Las características y modos de UNO! </h3>
|
9 |
-
<p>¡UNO! es más que una versión digital del juego clásico. También ofrece nuevas características y modos que lo hacen más divertido y desafiante. Estos son algunos de ellos:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Puedes jugar con diferentes reglas de la casa, como apilar, saltar, 7-0, intercambiar manos, empatar y más. </li>
|
12 |
-
<li>Puedes jugar en diferentes modos, como Quick Play, Modo Clásico, Go Wild Mode, 2v2 Mode, Room Mode y Tournament Mode.</li>
|
13 |
-
<li>Puedes competir en torneos de la serie mundial y eventos especiales para ganar recompensas gratis y encabezar las tablas de clasificación. </li>
|
14 |
-
<li>Puedes asociarte con amigos o familiares en modo 2v2 y colaborar para ganar. </li>
|
15 |
-
<li>Puedes conectar con tus amigos en UNO! con los clubes y enviarse regalos. </li>
|
16 |
-
<li> Puedes chatear y gritar UNO con tu pareja o oponente durante el juego. </li>
|
17 |
-
</ul>
|
18 |
-
|
19 |
-
<h3>Descargar de Google Play Store o App Store</h3>
|
20 |
-
<p>Para descargar UNO! ǐ en su dispositivo móvil, necesita tener un dispositivo Android o iOS compatible. La aplicación requiere Android 4.4 o superior, o iOS 9.0 o superior. También necesitas tener suficiente espacio de almacenamiento en tu dispositivo. El tamaño de la aplicación es de unos 200 MB para dispositivos Android, y unos 300 MB para dispositivos iOS. </p>
|
21 |
-
<p>Para descargar la aplicación, siga estos pasos:</p>
|
22 |
-
<ol>
|
23 |
-
<li>Abra la aplicación Google Play Store en su dispositivo Android, o la aplicación App Store en su dispositivo iOS. </li>
|
24 |
-
<li>Buscar "UNO" o "UNO! !" en la barra de búsqueda. </li>
|
25 |
-
<li>Toque en el icono de la aplicación que dice "UNO! El" por Mattel163 Limited.</li>
|
26 |
-
<li>Toque en "Instalar" (para dispositivos Android) o "Obtener" (para dispositivos iOS) para iniciar la descarga de la aplicación. </li>
|
27 |
-
</ol>
|
28 |
-
<h3>Instalar y ejecutar la aplicación</h3>
|
29 |
-
<p>Después de descargar la aplicación, debe instalarla y lanzarla en su dispositivo. Para hacerlo, siga estos pasos:</p>
|
30 |
-
<ol>
|
31 |
-
<li>Toque en "Abrir" (para dispositivos Android) o el icono de la aplicación en la pantalla de inicio (para dispositivos iOS) para iniciar la aplicación. </li>
|
32 |
-
<li> Esperar a que la aplicación se cargue y mostrar el menú principal. </li>
|
33 |
-
<li>Toque en "Aceptar" para aceptar los términos del servicio y la política de privacidad. </li>
|
34 |
-
<li>Toque en "Permitir" para conceder acceso a la aplicación de almacenamiento de su dispositivo, micrófono y cámara. </li>
|
35 |
-
</ol>
|
36 |
-
<h3>Iniciar sesión o crear una cuenta</h3>
|
37 |
-
<p>Para jugar al UNO! se necesita iniciar sesión o crear una cuenta. Puede usar su cuenta de Facebook, Google, Apple o correo electrónico para hacerlo. Para iniciar sesión o crear una cuenta, siga estos pasos:</p>
|
38 |
-
<ol>
|
39 |
-
<li>Toque en el botón que corresponde a su tipo de cuenta preferido. </li>
|
40 |
-
<li>Siga las instrucciones en la pantalla para iniciar sesión o crear una cuenta. </li>
|
41 |
-
<li>Elige un nombre de usuario y un avatar para tu perfil de UNO! = . </li>
|
42 |
-
<li>Toque en "Confirmar" para completar el proceso. </li>
|
43 |
-
</ol>
|
44 |
-
<h2>¿Cómo se juega UNO! ! con amigos y familiares</h2>
|
45 |
-
<h3>Elige un modo de juego y personaliza tus reglas</h3>
|
46 |
-
|
47 |
-
<h3>Invitar o unirse a un amigo o familiar</h3>
|
48 |
-
<p>Para invitar o unirse a un amigo o familiar, debe pulsar en el icono "Amigos" en la esquina inferior izquierda de la pantalla. Puedes ver a tus amigos en línea y su estado en esta pantalla. También puedes añadir nuevos amigos tocando el icono "+" en la esquina superior derecha de la pantalla. Puedes buscar amigos por su nombre de usuario, ID o código QR. Para invitar a un amigo o familiar, sigue estos pasos:</p>
|
49 |
-
<p></p>
|
50 |
-
<ol>
|
51 |
-
<li>Toque en el amigo o familiar que desea invitar. </li>
|
52 |
-
<li>Toque en "Invitar" para enviarles una invitación. </li>
|
53 |
-
<li>Espera a que acepten tu invitación y únete a tu juego. </li>
|
54 |
-
</ol>
|
55 |
-
<p>Para unirte a un amigo o familiar, sigue estos pasos:</p>
|
56 |
-
<ol>
|
57 |
-
<li>Toque en la notificación que dice "Su amigo le ha invitado a jugar UNO! .". </li>
|
58 |
-
<li>Toque en "Unirse" para aceptar su invitación y unirse a su juego. </li>
|
59 |
-
</ol>
|
60 |
-
<h3>Juega y chatea con tu pareja o oponente</h3>
|
61 |
-
<p>Para jugar a UNO! ạ con tu pareja u oponente, debes seguir las reglas del juego e intentar deshacerte de todas tus cartas antes de que lo hagan. También puede utilizar tarjetas especiales y estrategias para obtener una ventaja sobre ellos. Para chatear con su pareja u oponente, debe tocar el icono de chat en la esquina inferior derecha de la pantalla. Puede enviar mensajes de texto, emojis, pegatinas, mensajes de voz y mensajes de vídeo para comunicarse con ellos. También puede gritar UNO tocando el botón UNO cuando solo le queda una tarjeta. </p>
|
62 |
-
<h2>Cómo competir en torneos y eventos</h2>
|
63 |
-
<h3>Únete a los torneos de la serie mundial y eventos especiales</h3>
|
64 |
-
|
65 |
-
<h3>Gana recompensas gratis y encabeza las tablas de clasificación</h3>
|
66 |
-
<p>Para ganar recompensas gratis y encabezar las tablas de clasificación, necesitas jugar bien y anotar alto en los torneos y eventos. Puedes ganar puntos, monedas, diamantes, fichas y otras recompensas al ganar partidas, completar desafíos y posicionarte en las tablas de clasificación. Puedes usar estas recompensas para desbloquear nuevas cartas, avatares, marcos, fondos y más. También puedes comparar tu rendimiento y logros con otros jugadores de todo el mundo. </p>
|
67 |
-
<h3>Participar en la UNO! Mobile Community Cup 2023 Estados Unidos y Canadá</h3>
|
68 |
-
<p>Uno de los torneos más emocionantes en los que puedes participar es el UNO! . Este es un torneo regional que está abierto a todos los jugadores de los Estados Unidos y Canadá. El torneo se desarrollará del 19 de junio al 2 de julio de 2023. El torneo tendrá cuatro etapas: Clasificatorias, Ronda de 64, Ronda de 16 y Finales. Los 64 mejores jugadores de las eliminatorias avanzarán a la ronda de 64, donde competirán en un grupo de eliminación simple. Los ganadores de cada partido avanzarán a la siguiente ronda hasta que solo queden cuatro jugadores en la final. La final será una serie al mejor de cinco, donde el jugador que gane tres partidos será coronado como el campeón. El campeón recibirá un gran premio de $10,000 USD, así como un trofeo y un avatar especial. El subcampeón recibirá $5,000 USD, y el tercer y cuarto lugar recibirán $2,500 USD cada uno. </p>
|
69 |
-
<h2>Conclusión</h2>
|
70 |
-
|
71 |
-
<h2>Preguntas frecuentes</h2>
|
72 |
-
<h4>Q: ¿Es UNO! Free to play? </h4>
|
73 |
-
<p>A: Sí, UNO! es gratis para descargar y jugar en su dispositivo móvil. Sin embargo, algunas características y elementos pueden requerir compras en la aplicación o ver anuncios. </p>
|
74 |
-
<h4>Q: ¿Cómo puedo contactar con el servicio de atención al cliente de UNO! =? </h4>
|
75 |
-
<p>A: ¡Puede ponerse en contacto con el servicio al cliente de UNO! "Configuración" en la esquina superior izquierda de la pantalla, luego tocando "Ayuda" y "Contáctenos". También puede visitar su sitio web oficial en https://www.letsplayuno.com/ o su página de Facebook en https://www.facebook.com/UNOnow/.</p>
|
76 |
-
<h4>Q: ¿Cómo puedo reportar un error o un tramposo en UNO! .</h4>
|
77 |
-
<p>A: Puede reportar un error o un tramposo en UNO! Presionando el botón "Informe" en la esquina superior derecha de la pantalla durante o después de un partido. También puede ponerse en contacto con el servicio de atención al cliente de UNO! = siguiendo los pasos anteriores. </p>
|
78 |
-
<h4>P: ¿Cómo puedo unirme o crear un club en el UNO! ï? </h4>
|
79 |
-
<p>A: Puedes unirte o crear un club en UNO! . Puede buscar clubes existentes por su nombre o ID, o crear su propio club tocando el icono "+" en la esquina superior derecha de la pantalla. </p>
|
80 |
-
<h4>Q: ¿Cómo puedo obtener más tarjetas, monedas, diamantes, fichas y otras recompensas en UNO! .
|
81 |
-
<p>A: ¡Usted puede conseguir más tarjetas, monedas, diamantes, símbolos, y otras recompensas en UNO! jugando partidos, completando desafíos, ocupando posiciones altas en tablas de clasificación, participando en torneos y eventos, uniéndose o creando clubes, enviando o recibiendo regalos, viendo anuncios o haciendo compras en la aplicación. </p> 64aa2da5cf<br />
|
82 |
-
<br />
|
83 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Descargar Bola De Dragn Explosin Furiosa 2 Apk.md
DELETED
@@ -1,55 +0,0 @@
|
|
1 |
-
|
2 |
-
<br> - Beneficios de escuchar Happiness por Rex Orange County | Esta sección debe destacar las ventajas de descargar la canción en lugar de streaming en línea. También debe mencionar algunos de los efectos positivos de escuchar la canción, como la mejora del estado de ánimo, alivio del estrés, etc. | | H2: Cómo descargar Happiness by Rex Orange County legal y safe? | - Plataformas que ofrecen descargas legales y seguras <br> - Pasos para descargar Happiness by Rex Orange County desde cada plataforma <br> - Consejos para evitar malware y virus al descargar música | Esta sección debe proporcionar una lista de plataformas que permiten a los usuarios descargar la canción de forma legal y segura, como Spotify, Apple Music, YouTube Music, etc. También debe proporcionar un pasoguía paso a paso sobre cómo descargar la canción de cada plataforma. También debe dar algunos consejos sobre cómo evitar el malware y los virus al descargar música, como el uso de software antivirus, comprobar la extensión del archivo, etc. | | | H2: Cómo disfrutar de la felicidad por Rex Orange County después de descargarlo? | - Formas de reproducir la canción sin conexión <br> - Formas de compartir la canción con otros <br> - Formas de apoyar al artista | Esta sección debe sugerir algunas maneras de disfrutar de la canción después de descargarla, como reproducirla sin conexión en varios dispositivos, compartirla con amigos y familiares, crear listas de reproducción, etc. También debe alentar a los usuarios a apoyar al artista siguiéndolo en las redes sociales, comprando su mercancía, asistiendo a sus conciertos, etc. | | H2: Conclusión | N/A | Esta sección debe resumir los puntos principales del artículo y terminar con un llamado a la acción, como invitar a los usuarios a comentar, compartir o suscribirse. | Tabla 2: Artículo con formato HTML <h1>Cómo descargar felicidad por Rex Orange County</h1>
|
3 |
-
|
4 |
-
<p>Si estás buscando una forma de descargar Happiness by Rex Orange County y disfrutarlo offline, has venido al lugar correcto. En este artículo, le mostraremos cómo descargar la canción de forma legal y segura desde varias plataformas, y cómo aprovechar al máximo después de descargarla. </p>
|
5 |
-
<h2>¿Qué es la felicidad por Rex Orange County? </h2>
|
6 |
-
<p>Happiness by Rex Orange County es una canción escrita e interpretada por Alexander James O'Connor, más conocido por su nombre artístico Rex Orange County. Él es un cantautor británico y multi-instrumentista que saltó a la fama después de colaborar con Tyler, El Creador en su álbum Flower Boy.</p>
|
7 |
-
<h2>descargar bola de dragón explosión furiosa 2 apk</h2><br /><p><b><b>Download</b> ——— <a href="https://bltlly.com/2v6Kif">https://bltlly.com/2v6Kif</a></b></p><br /><br />
|
8 |
-
<p>Happiness es la décima y última canción de Apricot Princess, su segundo álbum de estudio. La canción es una balada de piano con cuerdas radiantes que muestra su voz conmovedora y entrega emocional. Las letras son sobre su relación con su novia Thea Morgan-Murrell, que también aparece en el video musical de la canción. </p>
|
9 |
-
<p>La canción explora los temas de amor, compromiso, inseguridad y esperanza. Expresa sus dudas sobre si ella todavía lo amará cuando él envejezca y olvide las cosas, pero también su gratitud por tenerla en su vida. También le desea felicidad y la anima a ser ella misma. </p>
|
10 |
-
<h3>¿Por qué descargar Happiness by Rex Orange County? </h3>
|
11 |
-
<h4>Beneficios de descargar música</h4>
|
12 |
-
<p>Descargar música tiene muchos beneficios en comparación con la transmisión en línea. Algunos de ellos son:</p>
|
13 |
-
<ul>
|
14 |
-
<li>Puede escuchar sus canciones favoritas en cualquier momento y en cualquier lugar sin depender de una conexión a Internet o un plan de datos. </li>
|
15 |
-
<li> Puede ahorrar dinero en suscripciones de streaming o cargos de datos. </li>
|
16 |
-
<li>Puedes evitar anuncios molestos o interrupciones que pueden arruinar tu experiencia auditiva. </li>
|
17 |
-
<li>Puede tener más control sobre su biblioteca de música y listas de reproducción. </li>
|
18 |
-
<li>Puedes apoyar a tus artistas favoritos comprando su música en lugar de transmitirla gratis. </li>
|
19 |
-
</ul>
|
20 |
-
|
21 |
-
<p>Escuchando Happiness de Rex Orange County puedes disfrutar de todos estos beneficios. </p> <h4>Consejos para evitar el malware y los virus al descargar música</h4>
|
22 |
-
<p>Descargar música también puede exponerlo a algunos riesgos, como malware y virus que pueden dañar su dispositivo o robar su información personal. Estos son algunos consejos para evitar malware y virus al descargar música:</p>
|
23 |
-
<ul>
|
24 |
-
<li> Utilice una plataforma confiable y de confianza que ofrece descargas legales y seguras, como las que mencionamos anteriormente. </li>
|
25 |
-
<li> Utilice un software antivirus fiable y actualizado que puede escanear y eliminar cualquier archivo o programa malicioso de su dispositivo. </li>
|
26 |
-
<li>Compruebe la extensión de archivo y el tamaño de la descarga antes de abrirla. Evite abrir archivos que tengan extensiones inusuales o que sean demasiado grandes o demasiado pequeños para la canción. </li>
|
27 |
-
<li>Lee las reseñas y valoraciones de la canción y la plataforma antes de descargarla. Evite descargar canciones que tengan comentarios negativos o sospechosos de otros usuarios. </li>
|
28 |
-
<li>No haga clic en ningún pop-ups, anuncios o enlaces que aparecen durante la descarga de música. Pueden redirigirle a sitios web maliciosos o descargar programas o archivos no deseados en su dispositivo. </li>
|
29 |
-
</ul>
|
30 |
-
<h2>Cómo disfrutar de la felicidad por Rex Orange County después de descargarlo? </h2>
|
31 |
-
<p>Una vez hayas descargado Happiness by Rex Orange County, puedes disfrutarlo de muchas maneras. Aquí hay algunas sugerencias:</p>
|
32 |
-
<p></p>
|
33 |
-
<h4>Maneras de reproducir la canción sin conexión</h4>
|
34 |
-
<p>Puede reproducir la canción sin conexión en varios dispositivos, como su teléfono inteligente, tableta, computadora portátil, escritorio o reproductor de mp3. También puede utilizar auriculares, altavoces o auriculares para mejorar la calidad del sonido y la experiencia. También puede ajustar el volumen, la velocidad o la configuración del ecualizador para adaptarse a sus preferencias. </p>
|
35 |
-
<h4>Maneras de compartir la canción con otros</h4>
|
36 |
-
|
37 |
-
<h4>Formas de apoyar al artista</h4>
|
38 |
-
<p>Puedes apoyar a Rex Orange County siguiéndolo en sus cuentas de redes sociales, como <a href="">Instagram</a>, <a href="">Twitter</a>, <a href=">Facebook</a>, o <a href=">YouTube</a>. También puede visitar su sitio web oficial <a href=">here</a> y comprar su mercancía, como camisetas, sudaderas, carteles o vinilos. También puede transmitir sus otras canciones en varias plataformas, como Spotify, Apple Music, YouTube Music o Amazon Music. También puedes asistir a sus conciertos o eventos si está actuando cerca de ti. </p>
|
39 |
-
: https://www.instagram.com/rexorangecounty/ : https://twitter.com/rexorangecounty : https://www.facebook.com/rexorangecounty/ : https://www.youtube.com/channel/UC5XQ1nG7lwvOj0fZW6-DKEw : https://www.rexorangecounty.com/
|
40 |
-
<p>Happiness by Rex Orange County es una canción maravillosa que puede hacerte sentir feliz, relajado y agradecido. También es una canción que puedes descargar de forma legal y segura desde varias plataformas, y disfrutar sin conexión en varios dispositivos. También puedes compartir la canción con otros y apoyar al artista siguiéndolo en las redes sociales, comprando su mercancía, transmitiendo sus otras canciones o asistiendo a sus conciertos. </p>
|
41 |
-
<p>Esperamos que este artículo te haya ayudado a aprender a descargar Happiness by Rex Orange County, y por qué deberías hacerlo. Si tiene alguna pregunta, comentario o comentario, no dude en dejarlos a continuación. Nos encantaría saber de usted. </p>
|
42 |
-
<p>Gracias por leer, y que tenga un día feliz! </p>
|
43 |
-
<h2>Preguntas frecuentes</h2>
|
44 |
-
<h4>Q: ¿Qué género es la felicidad por Rex Orange County? </h4>
|
45 |
-
<p>A: Happiness by Rex Orange County es una canción que pertenece al género indie pop, que es un subgénero de música pop que cuenta con artistas independientes o alternativos que producen su música fuera de la industria de la música convencional. </p>
|
46 |
-
<h4>Q: ¿Cuánto tiempo es la felicidad por Rex Orange County? </h4>
|
47 |
-
|
48 |
-
<h4>Q: ¿Quién es Thea Morgan-Murrell? </h4>
|
49 |
-
<p>A: Thea Morgan-Murrell es la novia de Rex Orange County, y la inspiración para la canción Happiness. También es cantautora y músico que ha colaborado con él en algunas de sus canciones, como Loving Is Easy y Pluto Projector.</p>
|
50 |
-
<h4>P: ¿Cómo puedo descargar Happiness by Rex Orange County gratis? </h4>
|
51 |
-
<p>A: Puedes descargar Happiness by Rex Orange County gratis usando algunas de las plataformas que ofrecen pruebas gratuitas o planes gratuitos, como Spotify, Apple Music, YouTube Music o Amazon Music. Sin embargo, es posible que necesite crear una cuenta, proporcionar su información de pago o lidiar con algunas limitaciones o restricciones al usar estas plataformas. También es posible que tenga que cancelar su suscripción antes de que finalice el período de prueba para evitar ser cargado. </p>
|
52 |
-
<h4>Q: ¿Cómo puedo descargar Happiness by Rex Orange County en formato MP3? </h4>
|
53 |
-
<p>A: Puedes descargar Happiness by Rex Orange County en formato MP3 utilizando algunas de las plataformas que te permiten elegir el formato de la descarga, como Bandcamp o Amazon Music. Sin embargo, es posible que tenga que pagar una cuota o suscribirse a un plan antes de descargar la canción en formato MP3. </p> 64aa2da5cf<br />
|
54 |
-
<br />
|
55 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_internal/utils/models.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
"""Utilities for defining models
|
2 |
-
"""
|
3 |
-
|
4 |
-
import operator
|
5 |
-
from typing import Any, Callable, Type
|
6 |
-
|
7 |
-
|
8 |
-
class KeyBasedCompareMixin:
    """Mixin that drives all rich comparisons from a single key.

    Subclasses supply a comparison key and the class that "owns" the
    comparison; objects that are not instances of that class yield
    ``NotImplemented`` so Python can fall back to the reflected operation.
    """

    __slots__ = ["_compare_key", "_defining_class"]

    def __init__(self, key: Any, defining_class: Type["KeyBasedCompareMixin"]) -> None:
        # The key is what gets compared and hashed; the defining class
        # bounds which objects are considered comparable at all.
        self._compare_key = key
        self._defining_class = defining_class

    def __hash__(self) -> int:
        return hash(self._compare_key)

    def __lt__(self, other: Any) -> bool:
        return self._compare(other, operator.__lt__)

    def __le__(self, other: Any) -> bool:
        return self._compare(other, operator.__le__)

    def __gt__(self, other: Any) -> bool:
        return self._compare(other, operator.__gt__)

    def __ge__(self, other: Any) -> bool:
        return self._compare(other, operator.__ge__)

    def __eq__(self, other: Any) -> bool:
        return self._compare(other, operator.__eq__)

    def _compare(self, other: Any, method: Callable[[Any, Any], bool]) -> bool:
        # Guard clause: only compare against instances of the defining
        # class; anything else defers to the other operand.
        if isinstance(other, self._defining_class):
            return method(self._compare_key, other._compare_key)
        return NotImplemented
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/version.py
DELETED
@@ -1,739 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
#
|
3 |
-
# Copyright (C) 2012-2017 The Python Software Foundation.
|
4 |
-
# See LICENSE.txt and CONTRIBUTORS.txt.
|
5 |
-
#
|
6 |
-
"""
|
7 |
-
Implementation of a flexible versioning scheme providing support for PEP-440,
|
8 |
-
setuptools-compatible and semantic versioning.
|
9 |
-
"""
|
10 |
-
|
11 |
-
import logging
|
12 |
-
import re
|
13 |
-
|
14 |
-
from .compat import string_types
|
15 |
-
from .util import parse_requirement
|
16 |
-
|
17 |
-
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
|
18 |
-
'LegacyVersion', 'LegacyMatcher',
|
19 |
-
'SemanticVersion', 'SemanticMatcher',
|
20 |
-
'UnsupportedVersionError', 'get_scheme']
|
21 |
-
|
22 |
-
logger = logging.getLogger(__name__)
|
23 |
-
|
24 |
-
|
25 |
-
class UnsupportedVersionError(ValueError):
    """Raised when a version string cannot be handled by the scheme."""
|
28 |
-
|
29 |
-
|
30 |
-
class Version(object):
    """Abstract base class for version objects.

    Subclasses implement :meth:`parse` to turn the version string into a
    non-empty tuple that encodes the sort order; all rich comparisons and
    hashing are then driven by that tuple.
    """
    def __init__(self, s):
        self._string = s = s.strip()
        self._parts = parts = self.parse(s)
        assert isinstance(parts, tuple)
        assert len(parts) > 0

    def parse(self, s):
        """Parse *s* into a comparison tuple (subclass responsibility)."""
        raise NotImplementedError('please implement in a subclass')

    def _check_compatible(self, other):
        # Comparing versions from different schemes is an error, not just
        # "unequal".
        if type(self) != type(other):
            raise TypeError('cannot compare %r and %r' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self._parts == other._parts

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        self._check_compatible(other)
        return self._parts < other._parts

    def __gt__(self, other):
        # Derived from __lt__/__eq__ so only those touch the key tuples.
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self._parts)

    def __repr__(self):
        return "%s('%s')" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string

    @property
    def is_prerelease(self):
        """True if this denotes a pre-release (subclass-defined)."""
        raise NotImplementedError('Please implement in subclasses.')
|
77 |
-
|
78 |
-
|
79 |
-
class Matcher(object):
    """Base class that matches a requirement string (a name plus version
    constraints) against candidate versions.

    Subclasses set :attr:`version_class` to the :class:`Version` subclass
    used to parse both constraint strings and candidate versions.
    """
    version_class = None

    # value is either a callable or the name of a method
    _operators = {
        '<': lambda v, c, p: v < c,
        '>': lambda v, c, p: v > c,
        '<=': lambda v, c, p: v == c or v < c,
        '>=': lambda v, c, p: v == c or v > c,
        '==': lambda v, c, p: v == c,
        '===': lambda v, c, p: v == c,
        # by default, compatible => >=.
        '~=': lambda v, c, p: v == c or v > c,
        '!=': lambda v, c, p: v != c,
    }

    # this is a method only to support alternative implementations
    # via overriding
    def parse_requirement(self, s):
        return parse_requirement(s)

    def __init__(self, s):
        """Parse requirement string *s* into a name, a case-folded key,
        and a tuple of (operator, constraint, is_prefix) triples.

        :raises ValueError: if no version class is configured, or *s* is
            not a valid requirement, or '.*' is used with an operator
            other than '==' / '!='.
        """
        if self.version_class is None:
            raise ValueError('Please specify a version class')
        self._string = s = s.strip()
        r = self.parse_requirement(s)
        if not r:
            raise ValueError('Not valid: %r' % s)
        self.name = r.name
        self.key = self.name.lower()    # for case-insensitive comparisons
        clist = []
        if r.constraints:
            for op, s in r.constraints:
                if s.endswith('.*'):
                    # PEP 440 prefix match (e.g. '== 2.*'); only valid
                    # with the (in)equality operators.
                    if op not in ('==', '!='):
                        raise ValueError('\'.*\' not allowed for '
                                         '%r constraints' % op)
                    # Could be a partial version (e.g. for '2.*') which
                    # won't parse as a version, so keep it as a string
                    vn, prefix = s[:-2], True
                    # Just to check that vn is a valid version
                    self.version_class(vn)
                else:
                    # Should parse as a version, so we can create an
                    # instance for the comparison
                    vn, prefix = self.version_class(s), False
                clist.append((op, vn, prefix))
        self._parts = tuple(clist)

    def match(self, version):
        """
        Check if the provided version matches the constraints.

        :param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
        :return: True only if every constraint admits *version*.
        """
        if isinstance(version, string_types):
            version = self.version_class(version)
        for operator, constraint, prefix in self._parts:
            f = self._operators.get(operator)
            if isinstance(f, string_types):
                # Subclasses may register method names instead of callables.
                f = getattr(self, f)
            if not f:
                msg = ('%r not implemented '
                       'for %s' % (operator, self.__class__.__name__))
                raise NotImplementedError(msg)
            if not f(version, constraint, prefix):
                return False
        return True

    @property
    def exact_version(self):
        # The pinned version if this matcher is a single '=='/'==='
        # constraint, else None.
        result = None
        if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
            result = self._parts[0][1]
        return result

    def _check_compatible(self, other):
        # Matchers for different schemes or different project names are
        # not comparable.
        if type(self) != type(other) or self.name != other.name:
            raise TypeError('cannot compare %s and %s' % (self, other))

    def __eq__(self, other):
        self._check_compatible(other)
        return self.key == other.key and self._parts == other._parts

    def __ne__(self, other):
        return not self.__eq__(other)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    def __hash__(self):
        return hash(self.key) + hash(self._parts)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self._string)

    def __str__(self):
        return self._string
|
177 |
-
|
178 |
-
|
179 |
-
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
|
180 |
-
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
|
181 |
-
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
|
182 |
-
|
183 |
-
|
184 |
-
def _pep_440_key(s):
    """Compute a sort key (epoch, nums, pre, post, dev, local) for a
    PEP 440 version string.

    Sentinel values are substituted for absent segments so that plain
    tuple comparison yields PEP 440 ordering.

    :raises UnsupportedVersionError: if *s* does not match the grammar.
    """
    s = s.strip()
    m = PEP440_VERSION_RE.match(s)
    if not m:
        raise UnsupportedVersionError('Not a valid version: %s' % s)
    groups = m.groups()
    nums = tuple(int(v) for v in groups[1].split('.'))
    # Trailing zeroes are insignificant for ordering: 1.2 == 1.2.0.
    while len(nums) > 1 and nums[-1] == 0:
        nums = nums[:-1]

    if not groups[0]:
        epoch = 0
    else:
        epoch = int(groups[0][:-1])    # strip the trailing '!'
    pre = groups[4:6]
    post = groups[7:9]
    dev = groups[10:12]
    local = groups[13]
    if pre == (None, None):
        pre = ()
    else:
        pre = pre[0], int(pre[1])
    if post == (None, None):
        post = ()
    else:
        post = post[0], int(post[1])
    if dev == (None, None):
        dev = ()
    else:
        dev = dev[0], int(dev[1])
    if local is None:
        local = ()
    else:
        parts = []
        for part in local.split('.'):
            # to ensure that numeric compares as > lexicographic, avoid
            # comparing them directly, but encode a tuple which ensures
            # correct sorting
            if part.isdigit():
                part = (1, int(part))
            else:
                part = (0, part)
            parts.append(part)
        local = tuple(parts)
    if not pre:
        # either before pre-release, or final release and after
        if not post and dev:
            # before pre-release
            pre = ('a', -1)    # to sort before a0
        else:
            pre = ('z',)    # to sort after all pre-releases
    # now look at the state of post and dev.
    if not post:
        post = ('_',)    # sort before 'a'
    if not dev:
        dev = ('final',)

    return epoch, nums, pre, post, dev, local


# Canonical key function for the "normalized" (PEP 440) scheme.
_normalized_key = _pep_440_key
|
246 |
-
|
247 |
-
|
248 |
-
class NormalizedVersion(Version):
    """A rational version.

    Good:
        1.2         # equivalent to "1.2.0"
        1.2.0
        1.2a1
        1.2.3a2
        1.2.3b1
        1.2.3c1
        1.2.3.4
        TODO: fill this out

    Bad:
        1           # minimum two numbers
        1.2a        # release level must have a release serial
        1.2.3b
    """
    def parse(self, s):
        result = _normalized_key(s)
        # _normalized_key loses trailing zeroes in the release
        # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
        # However, PEP 440 prefix matching needs it: for example,
        # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
        m = PEP440_VERSION_RE.match(s)      # must succeed
        groups = m.groups()
        self._release_clause = tuple(int(v) for v in groups[1].split('.'))
        return result

    # Tags in the key tuple that mark a pre-release segment.
    PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])

    @property
    def is_prerelease(self):
        return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
|
282 |
-
|
283 |
-
|
284 |
-
def _match_prefix(x, y):
|
285 |
-
x = str(x)
|
286 |
-
y = str(y)
|
287 |
-
if x == y:
|
288 |
-
return True
|
289 |
-
if not x.startswith(y):
|
290 |
-
return False
|
291 |
-
n = len(y)
|
292 |
-
return x[n] == '.'
|
293 |
-
|
294 |
-
|
295 |
-
class NormalizedMatcher(Matcher):
    """Matcher implementing PEP 440-style operator semantics for
    :class:`NormalizedVersion` instances."""
    version_class = NormalizedVersion

    # value is either a callable or the name of a method
    _operators = {
        '~=': '_match_compatible',
        '<': '_match_lt',
        '>': '_match_gt',
        '<=': '_match_le',
        '>=': '_match_ge',
        '==': '_match_eq',
        '===': '_match_arbitrary',
        '!=': '_match_ne',
    }

    def _adjust_local(self, version, constraint, prefix):
        # The candidate's local segment ('+local') is ignored unless the
        # constraint itself mentions one.
        if prefix:
            # constraint is still a plain string here (prefix match)
            strip_local = '+' not in constraint and version._parts[-1]
        else:
            # both constraint and version are
            # NormalizedVersion instances.
            # If constraint does not have a local component,
            # ensure the version doesn't, either.
            strip_local = not constraint._parts[-1] and version._parts[-1]
        if strip_local:
            s = version._string.split('+', 1)[0]
            version = self.version_class(s)
        return version, constraint

    def _match_lt(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version >= constraint:
            return False
        # Exclude candidates whose release clause matches the
        # constraint's (e.g. '< 1.4' must not admit '1.4.dev1').
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)

    def _match_gt(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version <= constraint:
            return False
        # Exclude candidates whose release clause matches the
        # constraint's (e.g. '> 1.4' must not admit '1.4.post1').
        release_clause = constraint._release_clause
        pfx = '.'.join([str(i) for i in release_clause])
        return not _match_prefix(version, pfx)

    def _match_le(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version <= constraint

    def _match_ge(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        return version >= constraint

    def _match_eq(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version == constraint)
        else:
            # '== X.*' — textual prefix match against the constraint.
            result = _match_prefix(version, constraint)
        return result

    def _match_arbitrary(self, version, constraint, prefix):
        # '===' — exact string comparison, no version semantics at all.
        return str(version) == str(constraint)

    def _match_ne(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if not prefix:
            result = (version != constraint)
        else:
            result = not _match_prefix(version, constraint)
        return result

    def _match_compatible(self, version, constraint, prefix):
        version, constraint = self._adjust_local(version, constraint, prefix)
        if version == constraint:
            return True
        if version < constraint:
            return False
        # '~= X.Y.Z' behaves like '>= X.Y.Z, == X.Y.*': drop the last
        # release component and prefix-match against the remainder.
        release_clause = constraint._release_clause
        if len(release_clause) > 1:
            release_clause = release_clause[:-1]
        pfx = '.'.join([str(i) for i in release_clause])
        return _match_prefix(version, pfx)
|
380 |
-
|
381 |
-
# Pattern/replacement pairs applied to the whole string when coercing an
# arbitrary version into semantic-version form.
_REPLACEMENTS = (
    (re.compile('[.+-]$'), ''),                     # remove trailing puncts
    (re.compile(r'^[.](\d)'), r'0.\1'),             # .N -> 0.N at start
    (re.compile('^[.-]'), ''),                      # remove leading puncts
    (re.compile(r'^\((.*)\)$'), r'\1'),             # remove parentheses
    (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'),    # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'),        # remove leading r(ev)
    (re.compile('[.]{2,}'), '.'),                   # multiple runs of '.'
    (re.compile(r'\b(alfa|apha)\b'), 'alpha'),      # misspelt alpha
    (re.compile(r'\b(pre-alpha|prealpha)\b'),
     'pre.alpha'),                                  # standardise
    (re.compile(r'\(beta\)$'), 'beta'),             # remove parentheses
)

# Pattern/replacement pairs applied to the non-numeric suffix only.
_SUFFIX_REPLACEMENTS = (
    (re.compile('^[:~._+-]+'), ''),     # remove leading puncts
    (re.compile('[,*")([\\]]'), ''),    # remove unwanted chars
    (re.compile('[~:+_ -]'), '.'),      # replace illegal chars
    (re.compile('[.]{2,}'), '.'),       # multiple runs of '.'
    (re.compile(r'\.$'), ''),           # trailing '.'
)

# Leading dotted-number run, e.g. the '1.2.3' in '1.2.3-beta'.
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
|
404 |
-
|
405 |
-
|
406 |
-
def _suggest_semantic_version(s):
    """
    Try to suggest a semantic form for a version for which
    _suggest_normalized_version couldn't come up with anything.
    """
    result = s.strip().lower()
    # Normalise punctuation and common misspellings first.
    for pat, repl in _REPLACEMENTS:
        result = pat.sub(repl, result)
    if not result:
        result = '0.0.0'

    # Now look for numeric prefix, and separate it out from
    # the rest.
    m = _NUMERIC_PREFIX.match(result)
    if not m:
        prefix = '0.0.0'
        suffix = result
    else:
        prefix = m.groups()[0].split('.')
        prefix = [int(i) for i in prefix]
        # Semver requires exactly three numeric components: pad short
        # prefixes with zeros, fold extras into the suffix.
        while len(prefix) < 3:
            prefix.append(0)
        if len(prefix) == 3:
            suffix = result[m.end():]
        else:
            suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
            prefix = prefix[:3]
    prefix = '.'.join([str(i) for i in prefix])
    suffix = suffix.strip()
    if suffix:
        # massage the suffix.
        for pat, repl in _SUFFIX_REPLACEMENTS:
            suffix = pat.sub(repl, suffix)

    if not suffix:
        result = prefix
    else:
        # In semver '-' introduces a pre-release and '+' build metadata.
        sep = '-' if 'dev' in suffix else '+'
        result = prefix + sep + suffix
    if not is_semver(result):
        # NOTE(review): is_semver is not visible in this excerpt —
        # presumably a module-level validator defined elsewhere in the file.
        result = None
    return result
|
450 |
-
|
451 |
-
|
452 |
-
def _suggest_normalized_version(s):
    """Suggest a normalized version close to the given version string.

    If you have a version string that isn't rational (i.e. NormalizedVersion
    doesn't like it) then you might be able to get an equivalent (or close)
    rational version from this function.

    This does a number of simple normalizations to the given string, based
    on observation of versions currently in use on PyPI. Given a dump of
    those version during PyCon 2009, 4287 of them:
    - 2312 (53.93%) match NormalizedVersion without change
      with the automatic suggestion
    - 3474 (81.04%) match when using this suggestion method

    @param s {str} An irrational version string.
    @returns A rational version string, or None, if couldn't determine one.
    """
    try:
        _normalized_key(s)
        return s   # already rational
    except UnsupportedVersionError:
        pass

    rs = s.lower()

    # part of this could use maketrans
    for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
                       ('beta', 'b'), ('rc', 'c'), ('-final', ''),
                       ('-pre', 'c'),
                       ('-release', ''), ('.release', ''), ('-stable', ''),
                       ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
                       ('final', '')):
        rs = rs.replace(orig, repl)

    # if something ends with dev or pre, we add a 0
    rs = re.sub(r"pre$", r"pre0", rs)
    rs = re.sub(r"dev$", r"dev0", rs)

    # if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
    # let's remove the dash or dot
    rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)

    # 1.0-dev-r371 -> 1.0.dev371
    # 0.1-dev-r79 -> 0.1.dev79
    rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)

    # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
    rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)

    # Clean: v0.3, v1.0
    if rs.startswith('v'):
        rs = rs[1:]

    # Clean leading '0's on numbers.
    #TODO: unintended side-effect on, e.g., "2003.05.09"
    # PyPI stats: 77 (~2%) better
    rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)

    # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
    # zero.
    # PyPI stats: 245 (7.56%) better
    rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)

    # the 'dev-rNNN' tag is a dev tag
    rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)

    # clean the - when used as a pre delimiter
    rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)

    # a terminal "dev" or "devel" can be changed into ".dev0"
    rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)

    # a terminal "dev" can be changed into ".dev0"
    rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)

    # a terminal "final" or "stable" can be removed
    rs = re.sub(r"(final|stable)$", "", rs)

    # The 'r' and the '-' tags are post release tags
    # 0.4a1.r10 -> 0.4a1.post10
    # 0.9.33-17222 -> 0.9.33.post17222
    # 0.9.33-r17222 -> 0.9.33.post17222
    rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)

    # Clean 'r' instead of 'dev' usage:
    # 0.9.33+r17222 -> 0.9.33.dev17222
    # 1.0dev123 -> 1.0.dev123
    # 1.0.git123 -> 1.0.dev123
    # 1.0.bzr123 -> 1.0.dev123
    # 0.1a0dev.123 -> 0.1a0.dev123
    # PyPI stats: ~150 (~4%) better
    rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)

    # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
    # 0.2.pre1 -> 0.2c1
    # 0.2-c1 -> 0.2c1
    # 1.0preview123 -> 1.0c123
    # PyPI stats: ~21 (0.62%) better
    rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)

    # Tcl/Tk uses "px" for their post release markers
    rs = re.sub(r"p(\d+)$", r".post\1", rs)

    # Final sanity check: give up (return None) if the result is still
    # not a valid normalized version.
    try:
        _normalized_key(rs)
    except UnsupportedVersionError:
        rs = None
    return rs
|
561 |
-
|
562 |
-
#
|
563 |
-
# Legacy version processing (distribute-compatible)
|
564 |
-
#
|
565 |
-
|
566 |
-
# Tokenisation tables for distribute/setuptools-style "legacy" versions.
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
    'pre': 'c',
    'preview': 'c',
    '-': 'final-',
    'rc': 'c',
    'dev': '@',     # '@' sorts before alphabetic tags
    '': None,
    '.': None,
}


def _legacy_key(s):
    """Return a sort-key tuple for a setuptools-compatible version string."""
    def get_parts(s):
        result = []
        for p in _VERSION_PART.split(s.lower()):
            p = _VERSION_REPLACE.get(p, p)
            if p:
                if '0' <= p[:1] <= '9':
                    # zero-pad numbers so string comparison sorts numerically
                    p = p.zfill(8)
                else:
                    # '*' makes alphabetic tags sort before padded numbers
                    p = '*' + p
                result.append(p)
        result.append('*final')
        return result

    result = []
    for p in get_parts(s):
        if p.startswith('*'):
            if p < '*final':
                # a pre-release tag cancels a preceding '-' marker
                while result and result[-1] == '*final-':
                    result.pop()
            # drop trailing zeros from each run of numeric parts
            while result and result[-1] == '00000000':
                result.pop()
        result.append(p)
    return tuple(result)
|
602 |
-
|
603 |
-
|
604 |
-
class LegacyVersion(Version):
|
605 |
-
def parse(self, s):
|
606 |
-
return _legacy_key(s)
|
607 |
-
|
608 |
-
@property
|
609 |
-
def is_prerelease(self):
|
610 |
-
result = False
|
611 |
-
for x in self._parts:
|
612 |
-
if (isinstance(x, string_types) and x.startswith('*') and
|
613 |
-
x < '*final'):
|
614 |
-
result = True
|
615 |
-
break
|
616 |
-
return result
|
617 |
-
|
618 |
-
|
619 |
-
class LegacyMatcher(Matcher):
|
620 |
-
version_class = LegacyVersion
|
621 |
-
|
622 |
-
_operators = dict(Matcher._operators)
|
623 |
-
_operators['~='] = '_match_compatible'
|
624 |
-
|
625 |
-
numeric_re = re.compile(r'^(\d+(\.\d+)*)')
|
626 |
-
|
627 |
-
def _match_compatible(self, version, constraint, prefix):
|
628 |
-
if version < constraint:
|
629 |
-
return False
|
630 |
-
m = self.numeric_re.match(str(constraint))
|
631 |
-
if not m:
|
632 |
-
logger.warning('Cannot compute compatible match for version %s '
|
633 |
-
' and constraint %s', version, constraint)
|
634 |
-
return True
|
635 |
-
s = m.groups()[0]
|
636 |
-
if '.' in s:
|
637 |
-
s = s.rsplit('.', 1)[0]
|
638 |
-
return _match_prefix(version, s)
|
639 |
-
|
640 |
-
#
|
641 |
-
# Semantic versioning
|
642 |
-
#
|
643 |
-
|
644 |
-
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
|
645 |
-
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
|
646 |
-
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
|
647 |
-
|
648 |
-
|
649 |
-
def is_semver(s):
|
650 |
-
return _SEMVER_RE.match(s)
|
651 |
-
|
652 |
-
|
653 |
-
def _semantic_key(s):
|
654 |
-
def make_tuple(s, absent):
|
655 |
-
if s is None:
|
656 |
-
result = (absent,)
|
657 |
-
else:
|
658 |
-
parts = s[1:].split('.')
|
659 |
-
# We can't compare ints and strings on Python 3, so fudge it
|
660 |
-
# by zero-filling numeric values so simulate a numeric comparison
|
661 |
-
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
|
662 |
-
return result
|
663 |
-
|
664 |
-
m = is_semver(s)
|
665 |
-
if not m:
|
666 |
-
raise UnsupportedVersionError(s)
|
667 |
-
groups = m.groups()
|
668 |
-
major, minor, patch = [int(i) for i in groups[:3]]
|
669 |
-
# choose the '|' and '*' so that versions sort correctly
|
670 |
-
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
|
671 |
-
return (major, minor, patch), pre, build
|
672 |
-
|
673 |
-
|
674 |
-
class SemanticVersion(Version):
|
675 |
-
def parse(self, s):
|
676 |
-
return _semantic_key(s)
|
677 |
-
|
678 |
-
@property
|
679 |
-
def is_prerelease(self):
|
680 |
-
return self._parts[1][0] != '|'
|
681 |
-
|
682 |
-
|
683 |
-
class SemanticMatcher(Matcher):
|
684 |
-
version_class = SemanticVersion
|
685 |
-
|
686 |
-
|
687 |
-
class VersionScheme(object):
|
688 |
-
def __init__(self, key, matcher, suggester=None):
|
689 |
-
self.key = key
|
690 |
-
self.matcher = matcher
|
691 |
-
self.suggester = suggester
|
692 |
-
|
693 |
-
def is_valid_version(self, s):
|
694 |
-
try:
|
695 |
-
self.matcher.version_class(s)
|
696 |
-
result = True
|
697 |
-
except UnsupportedVersionError:
|
698 |
-
result = False
|
699 |
-
return result
|
700 |
-
|
701 |
-
def is_valid_matcher(self, s):
|
702 |
-
try:
|
703 |
-
self.matcher(s)
|
704 |
-
result = True
|
705 |
-
except UnsupportedVersionError:
|
706 |
-
result = False
|
707 |
-
return result
|
708 |
-
|
709 |
-
def is_valid_constraint_list(self, s):
|
710 |
-
"""
|
711 |
-
Used for processing some metadata fields
|
712 |
-
"""
|
713 |
-
# See issue #140. Be tolerant of a single trailing comma.
|
714 |
-
if s.endswith(','):
|
715 |
-
s = s[:-1]
|
716 |
-
return self.is_valid_matcher('dummy_name (%s)' % s)
|
717 |
-
|
718 |
-
def suggest(self, s):
|
719 |
-
if self.suggester is None:
|
720 |
-
result = None
|
721 |
-
else:
|
722 |
-
result = self.suggester(s)
|
723 |
-
return result
|
724 |
-
|
725 |
-
_SCHEMES = {
|
726 |
-
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
|
727 |
-
_suggest_normalized_version),
|
728 |
-
'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
|
729 |
-
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
|
730 |
-
_suggest_semantic_version),
|
731 |
-
}
|
732 |
-
|
733 |
-
_SCHEMES['default'] = _SCHEMES['normalized']
|
734 |
-
|
735 |
-
|
736 |
-
def get_scheme(name):
|
737 |
-
if name not in _SCHEMES:
|
738 |
-
raise ValueError('unknown scheme name: %r' % name)
|
739 |
-
return _SCHEMES[name]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/progress_bar.py
DELETED
@@ -1,224 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
from functools import lru_cache
|
3 |
-
from time import monotonic
|
4 |
-
from typing import Iterable, List, Optional
|
5 |
-
|
6 |
-
from .color import Color, blend_rgb
|
7 |
-
from .color_triplet import ColorTriplet
|
8 |
-
from .console import Console, ConsoleOptions, RenderResult
|
9 |
-
from .jupyter import JupyterMixin
|
10 |
-
from .measure import Measurement
|
11 |
-
from .segment import Segment
|
12 |
-
from .style import Style, StyleType
|
13 |
-
|
14 |
-
# Number of characters before 'pulse' animation repeats
|
15 |
-
PULSE_SIZE = 20
|
16 |
-
|
17 |
-
|
18 |
-
class ProgressBar(JupyterMixin):
|
19 |
-
"""Renders a (progress) bar. Used by rich.progress.
|
20 |
-
|
21 |
-
Args:
|
22 |
-
total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
|
23 |
-
completed (float, optional): Number of steps completed. Defaults to 0.
|
24 |
-
width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
|
25 |
-
pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
|
26 |
-
style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
|
27 |
-
complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
|
28 |
-
finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
|
29 |
-
pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
|
30 |
-
animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
|
31 |
-
"""
|
32 |
-
|
33 |
-
def __init__(
|
34 |
-
self,
|
35 |
-
total: Optional[float] = 100.0,
|
36 |
-
completed: float = 0,
|
37 |
-
width: Optional[int] = None,
|
38 |
-
pulse: bool = False,
|
39 |
-
style: StyleType = "bar.back",
|
40 |
-
complete_style: StyleType = "bar.complete",
|
41 |
-
finished_style: StyleType = "bar.finished",
|
42 |
-
pulse_style: StyleType = "bar.pulse",
|
43 |
-
animation_time: Optional[float] = None,
|
44 |
-
):
|
45 |
-
self.total = total
|
46 |
-
self.completed = completed
|
47 |
-
self.width = width
|
48 |
-
self.pulse = pulse
|
49 |
-
self.style = style
|
50 |
-
self.complete_style = complete_style
|
51 |
-
self.finished_style = finished_style
|
52 |
-
self.pulse_style = pulse_style
|
53 |
-
self.animation_time = animation_time
|
54 |
-
|
55 |
-
self._pulse_segments: Optional[List[Segment]] = None
|
56 |
-
|
57 |
-
def __repr__(self) -> str:
|
58 |
-
return f"<Bar {self.completed!r} of {self.total!r}>"
|
59 |
-
|
60 |
-
@property
|
61 |
-
def percentage_completed(self) -> Optional[float]:
|
62 |
-
"""Calculate percentage complete."""
|
63 |
-
if self.total is None:
|
64 |
-
return None
|
65 |
-
completed = (self.completed / self.total) * 100.0
|
66 |
-
completed = min(100, max(0.0, completed))
|
67 |
-
return completed
|
68 |
-
|
69 |
-
@lru_cache(maxsize=16)
|
70 |
-
def _get_pulse_segments(
|
71 |
-
self,
|
72 |
-
fore_style: Style,
|
73 |
-
back_style: Style,
|
74 |
-
color_system: str,
|
75 |
-
no_color: bool,
|
76 |
-
ascii: bool = False,
|
77 |
-
) -> List[Segment]:
|
78 |
-
"""Get a list of segments to render a pulse animation.
|
79 |
-
|
80 |
-
Returns:
|
81 |
-
List[Segment]: A list of segments, one segment per character.
|
82 |
-
"""
|
83 |
-
bar = "-" if ascii else "━"
|
84 |
-
segments: List[Segment] = []
|
85 |
-
if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
|
86 |
-
segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
|
87 |
-
segments += [Segment(" " if no_color else bar, back_style)] * (
|
88 |
-
PULSE_SIZE - (PULSE_SIZE // 2)
|
89 |
-
)
|
90 |
-
return segments
|
91 |
-
|
92 |
-
append = segments.append
|
93 |
-
fore_color = (
|
94 |
-
fore_style.color.get_truecolor()
|
95 |
-
if fore_style.color
|
96 |
-
else ColorTriplet(255, 0, 255)
|
97 |
-
)
|
98 |
-
back_color = (
|
99 |
-
back_style.color.get_truecolor()
|
100 |
-
if back_style.color
|
101 |
-
else ColorTriplet(0, 0, 0)
|
102 |
-
)
|
103 |
-
cos = math.cos
|
104 |
-
pi = math.pi
|
105 |
-
_Segment = Segment
|
106 |
-
_Style = Style
|
107 |
-
from_triplet = Color.from_triplet
|
108 |
-
|
109 |
-
for index in range(PULSE_SIZE):
|
110 |
-
position = index / PULSE_SIZE
|
111 |
-
fade = 0.5 + cos((position * pi * 2)) / 2.0
|
112 |
-
color = blend_rgb(fore_color, back_color, cross_fade=fade)
|
113 |
-
append(_Segment(bar, _Style(color=from_triplet(color))))
|
114 |
-
return segments
|
115 |
-
|
116 |
-
def update(self, completed: float, total: Optional[float] = None) -> None:
|
117 |
-
"""Update progress with new values.
|
118 |
-
|
119 |
-
Args:
|
120 |
-
completed (float): Number of steps completed.
|
121 |
-
total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
|
122 |
-
"""
|
123 |
-
self.completed = completed
|
124 |
-
self.total = total if total is not None else self.total
|
125 |
-
|
126 |
-
def _render_pulse(
|
127 |
-
self, console: Console, width: int, ascii: bool = False
|
128 |
-
) -> Iterable[Segment]:
|
129 |
-
"""Renders the pulse animation.
|
130 |
-
|
131 |
-
Args:
|
132 |
-
console (Console): Console instance.
|
133 |
-
width (int): Width in characters of pulse animation.
|
134 |
-
|
135 |
-
Returns:
|
136 |
-
RenderResult: [description]
|
137 |
-
|
138 |
-
Yields:
|
139 |
-
Iterator[Segment]: Segments to render pulse
|
140 |
-
"""
|
141 |
-
fore_style = console.get_style(self.pulse_style, default="white")
|
142 |
-
back_style = console.get_style(self.style, default="black")
|
143 |
-
|
144 |
-
pulse_segments = self._get_pulse_segments(
|
145 |
-
fore_style, back_style, console.color_system, console.no_color, ascii=ascii
|
146 |
-
)
|
147 |
-
segment_count = len(pulse_segments)
|
148 |
-
current_time = (
|
149 |
-
monotonic() if self.animation_time is None else self.animation_time
|
150 |
-
)
|
151 |
-
segments = pulse_segments * (int(width / segment_count) + 2)
|
152 |
-
offset = int(-current_time * 15) % segment_count
|
153 |
-
segments = segments[offset : offset + width]
|
154 |
-
yield from segments
|
155 |
-
|
156 |
-
def __rich_console__(
|
157 |
-
self, console: Console, options: ConsoleOptions
|
158 |
-
) -> RenderResult:
|
159 |
-
|
160 |
-
width = min(self.width or options.max_width, options.max_width)
|
161 |
-
ascii = options.legacy_windows or options.ascii_only
|
162 |
-
should_pulse = self.pulse or self.total is None
|
163 |
-
if should_pulse:
|
164 |
-
yield from self._render_pulse(console, width, ascii=ascii)
|
165 |
-
return
|
166 |
-
|
167 |
-
completed: Optional[float] = (
|
168 |
-
min(self.total, max(0, self.completed)) if self.total is not None else None
|
169 |
-
)
|
170 |
-
|
171 |
-
bar = "-" if ascii else "━"
|
172 |
-
half_bar_right = " " if ascii else "╸"
|
173 |
-
half_bar_left = " " if ascii else "╺"
|
174 |
-
complete_halves = (
|
175 |
-
int(width * 2 * completed / self.total)
|
176 |
-
if self.total and completed is not None
|
177 |
-
else width * 2
|
178 |
-
)
|
179 |
-
bar_count = complete_halves // 2
|
180 |
-
half_bar_count = complete_halves % 2
|
181 |
-
style = console.get_style(self.style)
|
182 |
-
is_finished = self.total is None or self.completed >= self.total
|
183 |
-
complete_style = console.get_style(
|
184 |
-
self.finished_style if is_finished else self.complete_style
|
185 |
-
)
|
186 |
-
_Segment = Segment
|
187 |
-
if bar_count:
|
188 |
-
yield _Segment(bar * bar_count, complete_style)
|
189 |
-
if half_bar_count:
|
190 |
-
yield _Segment(half_bar_right * half_bar_count, complete_style)
|
191 |
-
|
192 |
-
if not console.no_color:
|
193 |
-
remaining_bars = width - bar_count - half_bar_count
|
194 |
-
if remaining_bars and console.color_system is not None:
|
195 |
-
if not half_bar_count and bar_count:
|
196 |
-
yield _Segment(half_bar_left, style)
|
197 |
-
remaining_bars -= 1
|
198 |
-
if remaining_bars:
|
199 |
-
yield _Segment(bar * remaining_bars, style)
|
200 |
-
|
201 |
-
def __rich_measure__(
|
202 |
-
self, console: Console, options: ConsoleOptions
|
203 |
-
) -> Measurement:
|
204 |
-
return (
|
205 |
-
Measurement(self.width, self.width)
|
206 |
-
if self.width is not None
|
207 |
-
else Measurement(4, options.max_width)
|
208 |
-
)
|
209 |
-
|
210 |
-
|
211 |
-
if __name__ == "__main__": # pragma: no cover
|
212 |
-
console = Console()
|
213 |
-
bar = ProgressBar(width=50, total=100)
|
214 |
-
|
215 |
-
import time
|
216 |
-
|
217 |
-
console.show_cursor(False)
|
218 |
-
for n in range(0, 101, 1):
|
219 |
-
bar.update(n)
|
220 |
-
console.print(bar)
|
221 |
-
console.file.write("\r")
|
222 |
-
time.sleep(0.05)
|
223 |
-
console.show_cursor(True)
|
224 |
-
console.print()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/futures.py
DELETED
@@ -1,606 +0,0 @@
|
|
1 |
-
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License"). You
|
4 |
-
# may not use this file except in compliance with the License. A copy of
|
5 |
-
# the License is located at
|
6 |
-
#
|
7 |
-
# http://aws.amazon.com/apache2.0/
|
8 |
-
#
|
9 |
-
# or in the "license" file accompanying this file. This file is
|
10 |
-
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
|
11 |
-
# ANY KIND, either express or implied. See the License for the specific
|
12 |
-
# language governing permissions and limitations under the License.
|
13 |
-
import copy
|
14 |
-
import logging
|
15 |
-
import sys
|
16 |
-
import threading
|
17 |
-
from collections import namedtuple
|
18 |
-
from concurrent import futures
|
19 |
-
|
20 |
-
from s3transfer.compat import MAXINT
|
21 |
-
from s3transfer.exceptions import CancelledError, TransferNotDoneError
|
22 |
-
from s3transfer.utils import FunctionContainer, TaskSemaphore
|
23 |
-
|
24 |
-
logger = logging.getLogger(__name__)
|
25 |
-
|
26 |
-
|
27 |
-
class BaseTransferFuture:
|
28 |
-
@property
|
29 |
-
def meta(self):
|
30 |
-
"""The metadata associated to the TransferFuture"""
|
31 |
-
raise NotImplementedError('meta')
|
32 |
-
|
33 |
-
def done(self):
|
34 |
-
"""Determines if a TransferFuture has completed
|
35 |
-
|
36 |
-
:returns: True if completed. False, otherwise.
|
37 |
-
"""
|
38 |
-
raise NotImplementedError('done()')
|
39 |
-
|
40 |
-
def result(self):
|
41 |
-
"""Waits until TransferFuture is done and returns the result
|
42 |
-
|
43 |
-
If the TransferFuture succeeded, it will return the result. If the
|
44 |
-
TransferFuture failed, it will raise the exception associated to the
|
45 |
-
failure.
|
46 |
-
"""
|
47 |
-
raise NotImplementedError('result()')
|
48 |
-
|
49 |
-
def cancel(self):
|
50 |
-
"""Cancels the request associated with the TransferFuture"""
|
51 |
-
raise NotImplementedError('cancel()')
|
52 |
-
|
53 |
-
|
54 |
-
class BaseTransferMeta:
|
55 |
-
@property
|
56 |
-
def call_args(self):
|
57 |
-
"""The call args used in the transfer request"""
|
58 |
-
raise NotImplementedError('call_args')
|
59 |
-
|
60 |
-
@property
|
61 |
-
def transfer_id(self):
|
62 |
-
"""The unique id of the transfer"""
|
63 |
-
raise NotImplementedError('transfer_id')
|
64 |
-
|
65 |
-
@property
|
66 |
-
def user_context(self):
|
67 |
-
"""A dictionary that requesters can store data in"""
|
68 |
-
raise NotImplementedError('user_context')
|
69 |
-
|
70 |
-
|
71 |
-
class TransferFuture(BaseTransferFuture):
|
72 |
-
def __init__(self, meta=None, coordinator=None):
|
73 |
-
"""The future associated to a submitted transfer request
|
74 |
-
|
75 |
-
:type meta: TransferMeta
|
76 |
-
:param meta: The metadata associated to the request. This object
|
77 |
-
is visible to the requester.
|
78 |
-
|
79 |
-
:type coordinator: TransferCoordinator
|
80 |
-
:param coordinator: The coordinator associated to the request. This
|
81 |
-
object is not visible to the requester.
|
82 |
-
"""
|
83 |
-
self._meta = meta
|
84 |
-
if meta is None:
|
85 |
-
self._meta = TransferMeta()
|
86 |
-
|
87 |
-
self._coordinator = coordinator
|
88 |
-
if coordinator is None:
|
89 |
-
self._coordinator = TransferCoordinator()
|
90 |
-
|
91 |
-
@property
|
92 |
-
def meta(self):
|
93 |
-
return self._meta
|
94 |
-
|
95 |
-
def done(self):
|
96 |
-
return self._coordinator.done()
|
97 |
-
|
98 |
-
def result(self):
|
99 |
-
try:
|
100 |
-
# Usually the result() method blocks until the transfer is done,
|
101 |
-
# however if a KeyboardInterrupt is raised we want want to exit
|
102 |
-
# out of this and propagate the exception.
|
103 |
-
return self._coordinator.result()
|
104 |
-
except KeyboardInterrupt as e:
|
105 |
-
self.cancel()
|
106 |
-
raise e
|
107 |
-
|
108 |
-
def cancel(self):
|
109 |
-
self._coordinator.cancel()
|
110 |
-
|
111 |
-
def set_exception(self, exception):
|
112 |
-
"""Sets the exception on the future."""
|
113 |
-
if not self.done():
|
114 |
-
raise TransferNotDoneError(
|
115 |
-
'set_exception can only be called once the transfer is '
|
116 |
-
'complete.'
|
117 |
-
)
|
118 |
-
self._coordinator.set_exception(exception, override=True)
|
119 |
-
|
120 |
-
|
121 |
-
class TransferMeta(BaseTransferMeta):
|
122 |
-
"""Holds metadata about the TransferFuture"""
|
123 |
-
|
124 |
-
def __init__(self, call_args=None, transfer_id=None):
|
125 |
-
self._call_args = call_args
|
126 |
-
self._transfer_id = transfer_id
|
127 |
-
self._size = None
|
128 |
-
self._user_context = {}
|
129 |
-
|
130 |
-
@property
|
131 |
-
def call_args(self):
|
132 |
-
"""The call args used in the transfer request"""
|
133 |
-
return self._call_args
|
134 |
-
|
135 |
-
@property
|
136 |
-
def transfer_id(self):
|
137 |
-
"""The unique id of the transfer"""
|
138 |
-
return self._transfer_id
|
139 |
-
|
140 |
-
@property
|
141 |
-
def size(self):
|
142 |
-
"""The size of the transfer request if known"""
|
143 |
-
return self._size
|
144 |
-
|
145 |
-
@property
|
146 |
-
def user_context(self):
|
147 |
-
"""A dictionary that requesters can store data in"""
|
148 |
-
return self._user_context
|
149 |
-
|
150 |
-
def provide_transfer_size(self, size):
|
151 |
-
"""A method to provide the size of a transfer request
|
152 |
-
|
153 |
-
By providing this value, the TransferManager will not try to
|
154 |
-
call HeadObject or use the use OS to determine the size of the
|
155 |
-
transfer.
|
156 |
-
"""
|
157 |
-
self._size = size
|
158 |
-
|
159 |
-
|
160 |
-
class TransferCoordinator:
|
161 |
-
"""A helper class for managing TransferFuture"""
|
162 |
-
|
163 |
-
def __init__(self, transfer_id=None):
|
164 |
-
self.transfer_id = transfer_id
|
165 |
-
self._status = 'not-started'
|
166 |
-
self._result = None
|
167 |
-
self._exception = None
|
168 |
-
self._associated_futures = set()
|
169 |
-
self._failure_cleanups = []
|
170 |
-
self._done_callbacks = []
|
171 |
-
self._done_event = threading.Event()
|
172 |
-
self._lock = threading.Lock()
|
173 |
-
self._associated_futures_lock = threading.Lock()
|
174 |
-
self._done_callbacks_lock = threading.Lock()
|
175 |
-
self._failure_cleanups_lock = threading.Lock()
|
176 |
-
|
177 |
-
def __repr__(self):
|
178 |
-
return '{}(transfer_id={})'.format(
|
179 |
-
self.__class__.__name__, self.transfer_id
|
180 |
-
)
|
181 |
-
|
182 |
-
@property
|
183 |
-
def exception(self):
|
184 |
-
return self._exception
|
185 |
-
|
186 |
-
@property
|
187 |
-
def associated_futures(self):
|
188 |
-
"""The list of futures associated to the inprogress TransferFuture
|
189 |
-
|
190 |
-
Once the transfer finishes this list becomes empty as the transfer
|
191 |
-
is considered done and there should be no running futures left.
|
192 |
-
"""
|
193 |
-
with self._associated_futures_lock:
|
194 |
-
# We return a copy of the list because we do not want to
|
195 |
-
# processing the returned list while another thread is adding
|
196 |
-
# more futures to the actual list.
|
197 |
-
return copy.copy(self._associated_futures)
|
198 |
-
|
199 |
-
@property
|
200 |
-
def failure_cleanups(self):
|
201 |
-
"""The list of callbacks to call when the TransferFuture fails"""
|
202 |
-
return self._failure_cleanups
|
203 |
-
|
204 |
-
@property
|
205 |
-
def status(self):
|
206 |
-
"""The status of the TransferFuture
|
207 |
-
|
208 |
-
The currently supported states are:
|
209 |
-
* not-started - Has yet to start. If in this state, a transfer
|
210 |
-
can be canceled immediately and nothing will happen.
|
211 |
-
* queued - SubmissionTask is about to submit tasks
|
212 |
-
* running - Is inprogress. In-progress as of now means that
|
213 |
-
the SubmissionTask that runs the transfer is being executed. So
|
214 |
-
there is no guarantee any transfer requests had been made to
|
215 |
-
S3 if this state is reached.
|
216 |
-
* cancelled - Was cancelled
|
217 |
-
* failed - An exception other than CancelledError was thrown
|
218 |
-
* success - No exceptions were thrown and is done.
|
219 |
-
"""
|
220 |
-
return self._status
|
221 |
-
|
222 |
-
def set_result(self, result):
|
223 |
-
"""Set a result for the TransferFuture
|
224 |
-
|
225 |
-
Implies that the TransferFuture succeeded. This will always set a
|
226 |
-
result because it is invoked on the final task where there is only
|
227 |
-
ever one final task and it is ran at the very end of a transfer
|
228 |
-
process. So if a result is being set for this final task, the transfer
|
229 |
-
succeeded even if something came a long and canceled the transfer
|
230 |
-
on the final task.
|
231 |
-
"""
|
232 |
-
with self._lock:
|
233 |
-
self._exception = None
|
234 |
-
self._result = result
|
235 |
-
self._status = 'success'
|
236 |
-
|
237 |
-
def set_exception(self, exception, override=False):
|
238 |
-
"""Set an exception for the TransferFuture
|
239 |
-
|
240 |
-
Implies the TransferFuture failed.
|
241 |
-
|
242 |
-
:param exception: The exception that cause the transfer to fail.
|
243 |
-
:param override: If True, override any existing state.
|
244 |
-
"""
|
245 |
-
with self._lock:
|
246 |
-
if not self.done() or override:
|
247 |
-
self._exception = exception
|
248 |
-
self._status = 'failed'
|
249 |
-
|
250 |
-
def result(self):
|
251 |
-
"""Waits until TransferFuture is done and returns the result
|
252 |
-
|
253 |
-
If the TransferFuture succeeded, it will return the result. If the
|
254 |
-
TransferFuture failed, it will raise the exception associated to the
|
255 |
-
failure.
|
256 |
-
"""
|
257 |
-
# Doing a wait() with no timeout cannot be interrupted in python2 but
|
258 |
-
# can be interrupted in python3 so we just wait with the largest
|
259 |
-
# possible value integer value, which is on the scale of billions of
|
260 |
-
# years...
|
261 |
-
self._done_event.wait(MAXINT)
|
262 |
-
|
263 |
-
# Once done waiting, raise an exception if present or return the
|
264 |
-
# final result.
|
265 |
-
if self._exception:
|
266 |
-
raise self._exception
|
267 |
-
return self._result
|
268 |
-
|
269 |
-
def cancel(self, msg='', exc_type=CancelledError):
|
270 |
-
"""Cancels the TransferFuture
|
271 |
-
|
272 |
-
:param msg: The message to attach to the cancellation
|
273 |
-
:param exc_type: The type of exception to set for the cancellation
|
274 |
-
"""
|
275 |
-
with self._lock:
|
276 |
-
if not self.done():
|
277 |
-
should_announce_done = False
|
278 |
-
logger.debug('%s cancel(%s) called', self, msg)
|
279 |
-
self._exception = exc_type(msg)
|
280 |
-
if self._status == 'not-started':
|
281 |
-
should_announce_done = True
|
282 |
-
self._status = 'cancelled'
|
283 |
-
if should_announce_done:
|
284 |
-
self.announce_done()
|
285 |
-
|
286 |
-
def set_status_to_queued(self):
|
287 |
-
"""Sets the TransferFutrue's status to running"""
|
288 |
-
self._transition_to_non_done_state('queued')
|
289 |
-
|
290 |
-
def set_status_to_running(self):
|
291 |
-
"""Sets the TransferFuture's status to running"""
|
292 |
-
self._transition_to_non_done_state('running')
|
293 |
-
|
294 |
-
def _transition_to_non_done_state(self, desired_state):
|
295 |
-
with self._lock:
|
296 |
-
if self.done():
|
297 |
-
raise RuntimeError(
|
298 |
-
'Unable to transition from done state %s to non-done '
|
299 |
-
'state %s.' % (self.status, desired_state)
|
300 |
-
)
|
301 |
-
self._status = desired_state
|
302 |
-
|
303 |
-
def submit(self, executor, task, tag=None):
|
304 |
-
"""Submits a task to a provided executor
|
305 |
-
|
306 |
-
:type executor: s3transfer.futures.BoundedExecutor
|
307 |
-
:param executor: The executor to submit the callable to
|
308 |
-
|
309 |
-
:type task: s3transfer.tasks.Task
|
310 |
-
:param task: The task to submit to the executor
|
311 |
-
|
312 |
-
:type tag: s3transfer.futures.TaskTag
|
313 |
-
:param tag: A tag to associate to the submitted task
|
314 |
-
|
315 |
-
:rtype: concurrent.futures.Future
|
316 |
-
:returns: A future representing the submitted task
|
317 |
-
"""
|
318 |
-
logger.debug(
|
319 |
-
"Submitting task {} to executor {} for transfer request: {}.".format(
|
320 |
-
task, executor, self.transfer_id
|
321 |
-
)
|
322 |
-
)
|
323 |
-
future = executor.submit(task, tag=tag)
|
324 |
-
# Add this created future to the list of associated future just
|
325 |
-
# in case it is needed during cleanups.
|
326 |
-
self.add_associated_future(future)
|
327 |
-
future.add_done_callback(
|
328 |
-
FunctionContainer(self.remove_associated_future, future)
|
329 |
-
)
|
330 |
-
return future
|
331 |
-
|
332 |
-
def done(self):
|
333 |
-
"""Determines if a TransferFuture has completed
|
334 |
-
|
335 |
-
:returns: False if status is equal to 'failed', 'cancelled', or
|
336 |
-
'success'. True, otherwise
|
337 |
-
"""
|
338 |
-
return self.status in ['failed', 'cancelled', 'success']
|
339 |
-
|
340 |
-
def add_associated_future(self, future):
|
341 |
-
"""Adds a future to be associated with the TransferFuture"""
|
342 |
-
with self._associated_futures_lock:
|
343 |
-
self._associated_futures.add(future)
|
344 |
-
|
345 |
-
def remove_associated_future(self, future):
|
346 |
-
"""Removes a future's association to the TransferFuture"""
|
347 |
-
with self._associated_futures_lock:
|
348 |
-
self._associated_futures.remove(future)
|
349 |
-
|
350 |
-
def add_done_callback(self, function, *args, **kwargs):
|
351 |
-
"""Add a done callback to be invoked when transfer is done"""
|
352 |
-
with self._done_callbacks_lock:
|
353 |
-
self._done_callbacks.append(
|
354 |
-
FunctionContainer(function, *args, **kwargs)
|
355 |
-
)
|
356 |
-
|
357 |
-
def add_failure_cleanup(self, function, *args, **kwargs):
|
358 |
-
"""Adds a callback to call upon failure"""
|
359 |
-
with self._failure_cleanups_lock:
|
360 |
-
self._failure_cleanups.append(
|
361 |
-
FunctionContainer(function, *args, **kwargs)
|
362 |
-
)
|
363 |
-
|
364 |
-
def announce_done(self):
    """Signal completion and fire the associated callbacks.

    Runs the failure cleanups when the transfer did not succeed (if they
    have not already run), unblocks any result() waiters, and then
    invokes any done callbacks that have not already been invoked.
    """
    succeeded = self.status == 'success'
    if not succeeded:
        self._run_failure_cleanups()
    self._done_event.set()
    self._run_done_callbacks()
|
376 |
-
|
377 |
-
def _run_done_callbacks(self):
|
378 |
-
# Run the callbacks and remove the callbacks from the internal
|
379 |
-
# list so they do not get ran again if done is announced more than
|
380 |
-
# once.
|
381 |
-
with self._done_callbacks_lock:
|
382 |
-
self._run_callbacks(self._done_callbacks)
|
383 |
-
self._done_callbacks = []
|
384 |
-
|
385 |
-
def _run_failure_cleanups(self):
|
386 |
-
# Run the cleanup callbacks and remove the callbacks from the internal
|
387 |
-
# list so they do not get ran again if done is announced more than
|
388 |
-
# once.
|
389 |
-
with self._failure_cleanups_lock:
|
390 |
-
self._run_callbacks(self.failure_cleanups)
|
391 |
-
self._failure_cleanups = []
|
392 |
-
|
393 |
-
def _run_callbacks(self, callbacks):
|
394 |
-
for callback in callbacks:
|
395 |
-
self._run_callback(callback)
|
396 |
-
|
397 |
-
def _run_callback(self, callback):
    """Invoke a single callback, suppressing any exception it raises.

    A misbehaving callback must not interrupt the transfer machinery —
    especially during failure cleanups — so the exception is logged at
    debug level and swallowed.
    """
    try:
        callback()
    except Exception:
        # Use lazy %-style arguments so the message is only formatted
        # when debug logging is actually enabled (the original eagerly
        # formatted with "%" even when the record was discarded).
        logger.debug("Exception raised in %s.", callback, exc_info=True)
|
404 |
-
|
405 |
-
|
406 |
-
class BoundedExecutor:
    """An executor that limits the number of in-flight tasks.

    ``submit()`` blocks (or raises, when ``block=False``) once the number
    of tasks that are queued up or currently executing reaches the
    configured maximum.

    :params max_size: The maximum number of inflight futures (queued or
        executing). ``None`` or ``0`` means no bound.

    :params max_num_threads: The maximum number of threads the executor
        uses.

    :type tag_semaphores: dict
    :params tag_semaphores: Maps a tag name to the semaphore used to
        limit tasks submitted with that tag.

    :type executor_cls: BaseExecutor
    :param executor_cls: The executor class that gets bounded by this
        executor. Defaults to concurrent.futures.ThreadPoolExecutor.
    """

    EXECUTOR_CLS = futures.ThreadPoolExecutor

    def __init__(
        self, max_size, max_num_threads, tag_semaphores=None, executor_cls=None
    ):
        self._max_num_threads = max_num_threads
        cls = executor_cls if executor_cls is not None else self.EXECUTOR_CLS
        self._executor = cls(max_workers=self._max_num_threads)
        self._semaphore = TaskSemaphore(max_size)
        self._tag_semaphores = tag_semaphores

    def submit(self, task, tag=None, block=True):
        """Submit a task to complete.

        :type task: s3transfer.tasks.Task
        :param task: The task to run __call__ on

        :type tag: s3transfer.futures.TaskTag
        :param tag: Optional tag selecting which semaphore bounds the
            task instead of the default one.

        :type block: boolean
        :param block: True to wait until it is possible to submit a
            task; False to raise an error instead of waiting.

        :returns: The future associated to the submitted task
        """
        # Tagged tasks are bounded by their tag-specific semaphore.
        semaphore = self._tag_semaphores[tag] if tag else self._semaphore
        acquire_token = semaphore.acquire(task.transfer_id, block)
        # Ensure the semaphore slot is released once the task finishes.
        release_callback = FunctionContainer(
            semaphore.release, task.transfer_id, acquire_token
        )
        future = ExecutorFuture(self._executor.submit(task))
        future.add_done_callback(release_callback)
        return future

    def shutdown(self, wait=True):
        """Shut down the underlying executor."""
        self._executor.shutdown(wait)
|
482 |
-
|
483 |
-
|
484 |
-
class ExecutorFuture:
    """Wrapper around a future returned from an executor.

    Currently just delegates to a concurrent.futures.Future, but keeps
    the rest of the codebase decoupled from that library so the
    implementation can be swapped out later without wider changes.

    :type future: concurrent.futures.Future
    :param future: The underlying future
    """

    def __init__(self, future):
        self._future = future

    def result(self):
        """Block until the task completes and return its result."""
        return self._future.result()

    def add_done_callback(self, fn):
        """Register ``fn`` (a zero-argument callable) to run on completion.

        Unlike concurrent.futures.Future.add_done_callback, ``fn`` takes
        no arguments, so an adapter discards the future argument the
        underlying implementation always passes to its callbacks.
        """
        self._future.add_done_callback(lambda _passed_future: fn())

    def done(self):
        """Return True if the underlying future has completed."""
        return self._future.done()
|
518 |
-
|
519 |
-
|
520 |
-
class BaseExecutor:
    """Base Executor class implementation needed to work with s3transfer

    Subclasses must implement submit() and shutdown(); this base class
    only fixes the interface (mirroring concurrent.futures executors).
    """

    def __init__(self, max_workers=None):
        # max_workers is accepted for signature compatibility with
        # concurrent.futures.ThreadPoolExecutor; this base class ignores it.
        pass

    def submit(self, fn, *args, **kwargs):
        # Must schedule fn(*args, **kwargs) and return a future-like object.
        raise NotImplementedError('submit()')

    def shutdown(self, wait=True):
        # Must release any resources held by the executor.
        raise NotImplementedError('shutdown()')
|
531 |
-
|
532 |
-
|
533 |
-
class NonThreadedExecutor(BaseExecutor):
    """A drop-in replacement non-threaded version of ThreadPoolExecutor

    Tasks run synchronously inside submit(); the returned future is
    already resolved (with a result or an exception) by the time it is
    handed back to the caller.
    """

    def submit(self, fn, *args, **kwargs):
        future = NonThreadedExecutorFuture()
        try:
            future.set_result(fn(*args, **kwargs))
        except Exception as exc:
            tb = exc.__traceback__
            logger.debug(
                'Setting exception for %s to %s with traceback %s',
                future,
                exc,
                tb,
            )
            future.set_exception_info(exc, tb)
        return future

    def shutdown(self, wait=True):
        # Nothing to tear down; execution is synchronous.
        pass
|
554 |
-
|
555 |
-
|
556 |
-
class NonThreadedExecutorFuture:
    """The Future returned from NonThreadedExecutor

    Note that this future is **not** thread-safe as it is being used
    from the context of a non-threaded environment.
    """

    def __init__(self):
        self._result = None        # value recorded by set_result()
        self._exception = None     # exception recorded by set_exception_info()
        self._traceback = None     # traceback paired with the exception
        self._done = False         # terminal-state flag
        self._done_callbacks = []  # callbacks deferred until completion

    def set_result(self, result):
        """Record a successful result and mark the future done."""
        self._result = result
        self._set_done()

    def set_exception_info(self, exception, traceback):
        """Record a failure and mark the future done."""
        self._exception = exception
        self._traceback = traceback
        self._set_done()

    def result(self, timeout=None):
        # timeout is accepted for interface compatibility only —
        # execution is synchronous so there is never anything to wait on.
        if self._exception:
            raise self._exception.with_traceback(self._traceback)
        return self._result

    def _set_done(self):
        self._done = True
        for done_callback in self._done_callbacks:
            self._invoke_done_callback(done_callback)
        self._done_callbacks = []

    def _invoke_done_callback(self, done_callback):
        # Callbacks receive the future itself, matching the
        # concurrent.futures callback signature.
        return done_callback(self)

    def done(self):
        return self._done

    def add_done_callback(self, fn):
        # Invoke immediately if already completed; otherwise defer.
        if self._done:
            self._invoke_done_callback(fn)
        else:
            self._done_callbacks.append(fn)
|
601 |
-
|
602 |
-
|
603 |
-
# A tag that can be associated with a submitted task; BoundedExecutor.submit()
# uses it to select a tag-specific semaphore.
TaskTag = namedtuple('TaskTag', ['name'])

# Tags for tasks that buffer transfer data in memory, so their in-flight
# count can be bounded separately from other tasks.
IN_MEMORY_UPLOAD_TAG = TaskTag('in_memory_upload')
IN_MEMORY_DOWNLOAD_TAG = TaskTag('in_memory_download')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BigData-KSU/VQA-in-Medical-Imagery/CLIP/clip_old.py
DELETED
@@ -1,140 +0,0 @@
|
|
1 |
-
import hashlib
|
2 |
-
import os
|
3 |
-
import urllib
|
4 |
-
import warnings
|
5 |
-
from typing import Union, List
|
6 |
-
|
7 |
-
import torch
|
8 |
-
from PIL import Image
|
9 |
-
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
|
10 |
-
from tqdm import tqdm
|
11 |
-
|
12 |
-
from CLIP.model import build_model
|
13 |
-
from CLIP.simple_tokenizer import SimpleTokenizer as _Tokenizer
|
14 |
-
|
15 |
-
# Public API of this module.
__all__ = ["available_models", "load", "tokenize"]
# Shared BPE tokenizer instance used by tokenize().
_tokenizer = _Tokenizer()

# Model name -> download URL. The expected SHA256 digest of each file is
# embedded as the second-to-last path component of its URL (see _download).
_MODELS = {
    "RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
}
|
22 |
-
|
23 |
-
|
24 |
-
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    """Download ``url`` into ``root`` and return the local file path.

    The expected SHA256 digest is taken from the second-to-last URL path
    component; an existing file whose digest matches is reused without
    re-downloading.

    :raises RuntimeError: if the target path exists but is not a regular
        file, or if the downloaded file fails the checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        if _file_sha256(download_target) == expected_sha256:
            return download_target
        warnings.warn(
            f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
        )

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    if _file_sha256(download_target) != expected_sha256:
        # NOTE: original message contained a duplicated "not" ("does not
        # not match"); fixed here.
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target


def _file_sha256(path: str) -> str:
    """Return the hex SHA256 of ``path``, hashing in chunks.

    The original read the whole multi-hundred-MB model into memory and
    leaked the file handle; this streams the file and closes it.
    """
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()
|
54 |
-
|
55 |
-
|
56 |
-
def available_models():
    """Return the names of the CLIP models this module can load."""
    return [model_name for model_name in _MODELS]
|
58 |
-
|
59 |
-
|
60 |
-
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
    """Load a named CLIP model plus its image-preprocessing transform.

    Returns ``(model, transform)``. With ``jit=True`` the TorchScript
    archive is returned after its hard-coded device (and, on CPU, dtype)
    constants are patched; with ``jit=False`` a regular nn.Module is
    rebuilt from the archive's state dict.
    """
    if name not in _MODELS:
        raise RuntimeError(f"Model {name} not found; available models = {available_models()}")

    model_path = _download(_MODELS[name])
    # Load the TorchScript archive. For the non-JIT path, load on CPU
    # first; build_model() moves the rebuilt module to `device` below.
    model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
    # Input resolution is stored inside the archive as a scalar tensor.
    n_px = model.input_resolution.item()

    # Preprocessing matching CLIP's training-time normalization.
    # NOTE(review): Image.BICUBIC is deprecated in newer Pillow in favor
    # of Image.Resampling.BICUBIC — confirm the pinned Pillow version.
    transform = Compose([
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        lambda image: image.convert("RGB"),
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ])

    if not jit:
        print("get Model.....")
        model = build_model(model.state_dict()).to(device)
        return model, transform

    # patch the device names: trace a tiny graph that contains the target
    # device as a constant, then copy that constant over every "cuda"
    # device constant baked into the archived graphs.
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        # Patch both the main graph and, when present, the secondary
        # `forward1` overload graph.
        graphs = [module.graph] if hasattr(module, "graph") else []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)
    patch_device(model.encode_image)
    patch_device(model.encode_text)

    # patch dtype to float32 on CPU: the archive was saved in fp16, which
    # CPU kernels do not generally support.
    if device == "cpu":
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            graphs = [module.graph] if hasattr(module, "graph") else []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]:  # dtype can be the second or third argument to aten::to()
                        # 5 is the TorchScript enum value for float16;
                        # replace it with the float32 constant.
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, transform
|
124 |
-
|
125 |
-
|
126 |
-
def tokenize(texts: Union[str, List[str]], context_length: int = 77):
    """Tokenize text(s) into a ``(len(texts), context_length)`` LongTensor.

    Each row is ``<|startoftext|> tokens <|endoftext|>`` followed by zero
    padding. Raises RuntimeError when an input does not fit.
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token, *_tokenizer.encode(text), eot_token] for text in texts]

    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/internal/decompose.h
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
|
21 |
-
namespace thrust
|
22 |
-
{
|
23 |
-
namespace system
|
24 |
-
{
|
25 |
-
namespace detail
|
26 |
-
{
|
27 |
-
namespace internal
|
28 |
-
{
|
29 |
-
|
30 |
-
// A half-open interval [begin, end) of indices, usable from both host
// and device code.
template <typename IndexType>
class index_range
{
  public:
    typedef IndexType index_type;

    __host__ __device__
    index_range(index_type begin, index_type end) : m_begin(begin), m_end(end) {}

    // First index contained in the range.
    __host__ __device__
    index_type begin(void) const { return m_begin; }

    // One past the last index contained in the range.
    __host__ __device__
    index_type end(void) const { return m_end; }

    // Number of indices in the range.
    __host__ __device__
    index_type size(void) const { return m_end - m_begin; }

  private:
    index_type m_begin;
    index_type m_end;
};
|
52 |
-
|
53 |
-
// Splits [0, N) into at most max_intervals contiguous ranges whose sizes
// are multiples of `granularity` (the final range may be shorter). When
// the natural ceil(N / granularity) interval count exceeds max_intervals,
// the first m_threshold intervals get the larger size and the remainder
// the smaller one, keeping the decomposition balanced.
template <typename IndexType>
class uniform_decomposition
{
  public:
    typedef IndexType index_type;
    typedef index_range<index_type> range_type;

    __host__ __device__
    uniform_decomposition(index_type N, index_type granularity, index_type max_intervals)
      : m_N(N),
        // ceil(N / granularity): interval count before capping.
        m_intervals((N + granularity - 1) / granularity),
        m_threshold(0),
        m_small_interval(granularity),
        m_large_interval(0)
    {
      if(m_intervals > max_intervals)
      {
        // Cap the count: distribute the surplus by giving the first
        // (m_intervals % max_intervals) ranges one extra granule.
        m_small_interval = granularity * (m_intervals / max_intervals);
        m_large_interval = m_small_interval + granularity;
        m_threshold = m_intervals % max_intervals;
        m_intervals = max_intervals;
      }
    }

    // Return the i-th index range of the decomposition.
    __host__ __device__
    index_range<index_type> operator[](const index_type& i) const
    {
      if (i < m_threshold)
      {
        // One of the large leading intervals.
        index_type begin = m_large_interval * i;
        index_type end = begin + m_large_interval;
        return range_type(begin, end);
      }
      else
      {
        // A small interval; the last one is clamped to N.
        index_type begin = m_large_interval * m_threshold + m_small_interval * (i - m_threshold);
        index_type end = (begin + m_small_interval < m_N) ? begin + m_small_interval : m_N;
        return range_type(begin, end);
      }
    }

    // Number of intervals in the decomposition.
    __host__ __device__
    index_type size(void) const
    {
      return m_intervals;
    }

  private:

    index_type m_N;               // total number of indices decomposed
    index_type m_intervals;       // number of intervals produced
    index_type m_threshold;       // count of leading large intervals
    index_type m_small_interval;  // size of the small intervals
    index_type m_large_interval;  // size of the large intervals (0 if uncapped)
};
|
108 |
-
|
109 |
-
|
110 |
-
} // end namespace internal
|
111 |
-
} // end namespace detail
|
112 |
-
} // end namespace system
|
113 |
-
} // end namespace thrust
|
114 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/evaluation/testing.py
DELETED
@@ -1,85 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import logging
|
3 |
-
import numpy as np
|
4 |
-
import pprint
|
5 |
-
import sys
|
6 |
-
from collections.abc import Mapping
|
7 |
-
|
8 |
-
|
9 |
-
def print_csv_format(results):
    """
    Log main metrics in a Detectron-style format that is easy to
    copy-paste into a spreadsheet.

    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score};
            an unordered dict is also accepted (printed in arbitrary order)
    """
    assert isinstance(results, Mapping) or not len(results), results
    logger = logging.getLogger(__name__)
    for task, res in results.items():
        if not isinstance(res, Mapping):
            logger.info(f"copypaste: {task}={res}")
            continue
        # Skip per-category "AP-<category>" metrics; they are usually
        # not tracked.
        important_res = [(k, v) for k, v in res.items() if "-" not in k]
        logger.info("copypaste: Task: {}".format(task))
        logger.info("copypaste: " + ",".join(k for k, _ in important_res))
        logger.info("copypaste: " + ",".join("{0:.4f}".format(v) for _, v in important_res))
|
29 |
-
|
30 |
-
|
31 |
-
def verify_results(cfg, results):
    """
    Verify evaluation results against ``cfg.TEST.EXPECTED_RESULTS``.

    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score}

    Returns:
        bool: whether the verification succeeds or not

    Exits the process with status 1 when verification fails.
    """
    expected_results = cfg.TEST.EXPECTED_RESULTS
    if not len(expected_results):
        return True

    ok = True
    for task, metric, expected, tolerance in expected_results:
        actual = results[task].get(metric, None)
        # A missing or non-finite metric always fails verification.
        if actual is None or not np.isfinite(actual):
            ok = False
            continue
        if abs(actual - expected) > tolerance:
            ok = False

    logger = logging.getLogger(__name__)
    if ok:
        logger.info("Results verification passed.")
        return ok
    logger.error("Result verification failed!")
    logger.error("Expected Results: " + str(expected_results))
    logger.error("Actual Results: " + pprint.pformat(results))
    sys.exit(1)
|
66 |
-
|
67 |
-
|
68 |
-
def flatten_results_dict(results):
    """
    Expand a hierarchical dict of scalars into a flat dict of scalars.
    If results[k1][k2][k3] = v, the returned dict will have the entry
    {"k1/k2/k3": v}.

    Args:
        results (dict):
    """
    flat = {}
    for key, value in results.items():
        if not isinstance(value, Mapping):
            flat[key] = value
            continue
        # Recurse, then prefix each nested key with this level's key.
        for sub_key, sub_value in flatten_results_dict(value).items():
            flat[key + "/" + sub_key] = sub_value
    return flat
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/modeling/text_encoder/registry.py
DELETED
@@ -1,18 +0,0 @@
|
|
1 |
-
# Global registry mapping a model name to its language-encoder factory.
_lang_encoders = {}


def register_lang_encoder(fn):
    """Register ``fn`` under the last component of its module name.

    Intended for use as a decorator; returns ``fn`` unchanged.
    """
    model_name = fn.__module__.split('.')[-1]
    _lang_encoders[model_name] = fn
    return fn


def lang_encoders(model_name):
    """Look up a registered language-encoder factory by name."""
    return _lang_encoders[model_name]


def is_lang_encoder(model_name):
    """Return True if ``model_name`` has been registered."""
    return model_name in _lang_encoders
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/longcode/prod_cons.h
DELETED
@@ -1,433 +0,0 @@
|
|
1 |
-
#pragma once
|
2 |
-
|
3 |
-
#include <atomic>
|
4 |
-
#include <utility>
|
5 |
-
#include <cstring>
|
6 |
-
#include <type_traits>
|
7 |
-
#include <cstdint>
|
8 |
-
|
9 |
-
#include "libipc/def.h"
|
10 |
-
|
11 |
-
#include "libipc/platform/detail.h"
|
12 |
-
#include "libipc/circ/elem_def.h"
|
13 |
-
#include "libipc/utility/log.h"
|
14 |
-
#include "libipc/utility/utility.h"
|
15 |
-
|
16 |
-
namespace ipc {
|
17 |
-
|
18 |
-
////////////////////////////////////////////////////////////////
|
19 |
-
/// producer-consumer implementation
|
20 |
-
////////////////////////////////////////////////////////////////
|
21 |
-
|
22 |
-
template <typename Flag>
|
23 |
-
struct prod_cons_impl;
|
24 |
-
|
25 |
-
// Single-producer / single-consumer, unicast ring-buffer policy.
// One slot of capacity is sacrificed to distinguish "full" from "empty".
template <>
struct prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    // Raw storage for one queue element.
    template <std::size_t DataSize, std::size_t AlignSize>
    struct elem_t {
        std::aligned_storage_t<DataSize, AlignSize> data_ {};
    };

    // Indices are placed on separate cache lines to avoid false sharing
    // between the producer and the consumer.
    alignas(cache_line_size) std::atomic<circ::u2_t> rd_; // read index
    alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index

    // No per-consumer cursor is needed in unicast mode.
    constexpr circ::u2_t cursor() const noexcept {
        return 0;
    }

    // Write one element via f; returns false when the ring is full.
    template <typename W, typename F, typename E>
    bool push(W* /*wrapper*/, F&& f, E* elems) {
        auto cur_wt = circ::index_of(wt_.load(std::memory_order_relaxed));
        if (cur_wt == circ::index_of(rd_.load(std::memory_order_acquire) - 1)) {
            return false; // full
        }
        // Fill the slot before publishing it with the release increment.
        std::forward<F>(f)(&(elems[cur_wt].data_));
        wt_.fetch_add(1, std::memory_order_release);
        return true;
    }

    /**
     * In single-single-unicast, 'force_push' means 'no reader' or 'the only one reader is dead'.
     * So we could just disconnect all connections of receiver, and return false.
     */
    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        wrapper->elems()->disconnect_receiver(~static_cast<circ::cc_t>(0u));
        return false;
    }

    // Read one element via f, report success through out; returns false
    // when the ring is empty.
    template <typename W, typename F, typename R, typename E>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E* elems) {
        auto cur_rd = circ::index_of(rd_.load(std::memory_order_relaxed));
        if (cur_rd == circ::index_of(wt_.load(std::memory_order_acquire))) {
            return false; // empty
        }
        std::forward<F>(f)(&(elems[cur_rd].data_));
        std::forward<R>(out)(true);
        // Release the slot back to the producer.
        rd_.fetch_add(1, std::memory_order_release);
        return true;
    }
};
|
73 |
-
|
74 |
-
// Single-producer / multi-consumer, unicast policy. Reuses the SPSC push;
// pop becomes a CAS loop because several consumers race for each element.
template <>
struct prod_cons_impl<wr<relat::single, relat::multi , trans::unicast>>
    : prod_cons_impl<wr<relat::single, relat::single, trans::unicast>> {

    template <typename W, typename F, typename E>
    bool force_push(W* wrapper, F&&, E*) {
        // Disconnect a single receiver and report failure to the caller.
        wrapper->elems()->disconnect_receiver(1);
        return false;
    }

    template <typename W, typename F, typename R,
              template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
    bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
        // The element is copied into a local buffer *before* the CAS so
        // the slot can be handed to a competing consumer safely; f only
        // sees the copy after this consumer has won the race.
        byte_t buff[DS];
        for (unsigned k = 0;;) {
            auto cur_rd = rd_.load(std::memory_order_relaxed);
            if (circ::index_of(cur_rd) ==
                circ::index_of(wt_.load(std::memory_order_acquire))) {
                return false; // empty
            }
            std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
            if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
                std::forward<F>(f)(buff);
                std::forward<R>(out)(true);
                return true;
            }
            // Lost the race; back off progressively and retry.
            ipc::yield(k);
        }
    }
};
|
104 |
-
|
105 |
-
template <>
|
106 |
-
struct prod_cons_impl<wr<relat::multi , relat::multi, trans::unicast>>
|
107 |
-
: prod_cons_impl<wr<relat::single, relat::multi, trans::unicast>> {
|
108 |
-
|
109 |
-
using flag_t = std::uint64_t;
|
110 |
-
|
111 |
-
template <std::size_t DataSize, std::size_t AlignSize>
|
112 |
-
struct elem_t {
|
113 |
-
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
114 |
-
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
115 |
-
};
|
116 |
-
|
117 |
-
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
118 |
-
|
119 |
-
template <typename W, typename F, typename E>
|
120 |
-
bool push(W* /*wrapper*/, F&& f, E* elems) {
|
121 |
-
circ::u2_t cur_ct, nxt_ct;
|
122 |
-
for (unsigned k = 0;;) {
|
123 |
-
cur_ct = ct_.load(std::memory_order_relaxed);
|
124 |
-
if (circ::index_of(nxt_ct = cur_ct + 1) ==
|
125 |
-
circ::index_of(rd_.load(std::memory_order_acquire))) {
|
126 |
-
return false; // full
|
127 |
-
}
|
128 |
-
if (ct_.compare_exchange_weak(cur_ct, nxt_ct, std::memory_order_acq_rel)) {
|
129 |
-
break;
|
130 |
-
}
|
131 |
-
ipc::yield(k);
|
132 |
-
}
|
133 |
-
auto* el = elems + circ::index_of(cur_ct);
|
134 |
-
std::forward<F>(f)(&(el->data_));
|
135 |
-
// set flag & try update wt
|
136 |
-
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
137 |
-
while (1) {
|
138 |
-
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
139 |
-
if (cur_ct != wt_.load(std::memory_order_relaxed)) {
|
140 |
-
return true;
|
141 |
-
}
|
142 |
-
if ((~cac_ct) != cur_ct) {
|
143 |
-
return true;
|
144 |
-
}
|
145 |
-
if (!el->f_ct_.compare_exchange_strong(cac_ct, 0, std::memory_order_relaxed)) {
|
146 |
-
return true;
|
147 |
-
}
|
148 |
-
wt_.store(nxt_ct, std::memory_order_release);
|
149 |
-
cur_ct = nxt_ct;
|
150 |
-
nxt_ct = cur_ct + 1;
|
151 |
-
el = elems + circ::index_of(cur_ct);
|
152 |
-
}
|
153 |
-
return true;
|
154 |
-
}
|
155 |
-
|
156 |
-
template <typename W, typename F, typename E>
|
157 |
-
bool force_push(W* wrapper, F&&, E*) {
|
158 |
-
wrapper->elems()->disconnect_receiver(1);
|
159 |
-
return false;
|
160 |
-
}
|
161 |
-
|
162 |
-
template <typename W, typename F, typename R,
|
163 |
-
template <std::size_t, std::size_t> class E, std::size_t DS, std::size_t AS>
|
164 |
-
bool pop(W* /*wrapper*/, circ::u2_t& /*cur*/, F&& f, R&& out, E<DS, AS>* elems) {
|
165 |
-
byte_t buff[DS];
|
166 |
-
for (unsigned k = 0;;) {
|
167 |
-
auto cur_rd = rd_.load(std::memory_order_relaxed);
|
168 |
-
auto cur_wt = wt_.load(std::memory_order_acquire);
|
169 |
-
auto id_rd = circ::index_of(cur_rd);
|
170 |
-
auto id_wt = circ::index_of(cur_wt);
|
171 |
-
if (id_rd == id_wt) {
|
172 |
-
auto* el = elems + id_wt;
|
173 |
-
auto cac_ct = el->f_ct_.load(std::memory_order_acquire);
|
174 |
-
if ((~cac_ct) != cur_wt) {
|
175 |
-
return false; // empty
|
176 |
-
}
|
177 |
-
if (el->f_ct_.compare_exchange_weak(cac_ct, 0, std::memory_order_relaxed)) {
|
178 |
-
wt_.store(cur_wt + 1, std::memory_order_release);
|
179 |
-
}
|
180 |
-
k = 0;
|
181 |
-
}
|
182 |
-
else {
|
183 |
-
std::memcpy(buff, &(elems[circ::index_of(cur_rd)].data_), sizeof(buff));
|
184 |
-
if (rd_.compare_exchange_weak(cur_rd, cur_rd + 1, std::memory_order_release)) {
|
185 |
-
std::forward<F>(f)(buff);
|
186 |
-
std::forward<R>(out)(true);
|
187 |
-
return true;
|
188 |
-
}
|
189 |
-
ipc::yield(k);
|
190 |
-
}
|
191 |
-
}
|
192 |
-
}
|
193 |
-
};
|
194 |
-
|
195 |
-
template <>
|
196 |
-
struct prod_cons_impl<wr<relat::single, relat::multi, trans::broadcast>> {
|
197 |
-
|
198 |
-
using rc_t = std::uint64_t;
|
199 |
-
|
200 |
-
enum : rc_t {
|
201 |
-
ep_mask = 0x00000000ffffffffull,
|
202 |
-
ep_incr = 0x0000000100000000ull
|
203 |
-
};
|
204 |
-
|
205 |
-
template <std::size_t DataSize, std::size_t AlignSize>
|
206 |
-
struct elem_t {
|
207 |
-
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
208 |
-
std::atomic<rc_t> rc_ { 0 }; // read-counter
|
209 |
-
};
|
210 |
-
|
211 |
-
alignas(cache_line_size) std::atomic<circ::u2_t> wt_; // write index
|
212 |
-
alignas(cache_line_size) rc_t epoch_ { 0 }; // only one writer
|
213 |
-
|
214 |
-
circ::u2_t cursor() const noexcept {
|
215 |
-
return wt_.load(std::memory_order_acquire);
|
216 |
-
}
|
217 |
-
|
218 |
-
template <typename W, typename F, typename E>
|
219 |
-
bool push(W* wrapper, F&& f, E* elems) {
|
220 |
-
E* el;
|
221 |
-
for (unsigned k = 0;;) {
|
222 |
-
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
223 |
-
if (cc == 0) return false; // no reader
|
224 |
-
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
225 |
-
// check all consumers have finished reading this element
|
226 |
-
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
227 |
-
circ::cc_t rem_cc = cur_rc & ep_mask;
|
228 |
-
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch_)) {
|
229 |
-
return false; // has not finished yet
|
230 |
-
}
|
231 |
-
// consider rem_cc to be 0 here
|
232 |
-
if (el->rc_.compare_exchange_weak(
|
233 |
-
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
234 |
-
break;
|
235 |
-
}
|
236 |
-
ipc::yield(k);
|
237 |
-
}
|
238 |
-
std::forward<F>(f)(&(el->data_));
|
239 |
-
wt_.fetch_add(1, std::memory_order_release);
|
240 |
-
return true;
|
241 |
-
}
|
242 |
-
|
243 |
-
template <typename W, typename F, typename E>
|
244 |
-
bool force_push(W* wrapper, F&& f, E* elems) {
|
245 |
-
E* el;
|
246 |
-
epoch_ += ep_incr;
|
247 |
-
for (unsigned k = 0;;) {
|
248 |
-
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
249 |
-
if (cc == 0) return false; // no reader
|
250 |
-
el = elems + circ::index_of(wt_.load(std::memory_order_relaxed));
|
251 |
-
// check all consumers have finished reading this element
|
252 |
-
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
253 |
-
circ::cc_t rem_cc = cur_rc & ep_mask;
|
254 |
-
if (cc & rem_cc) {
|
255 |
-
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
256 |
-
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
257 |
-
if (cc == 0) return false; // no reader
|
258 |
-
}
|
259 |
-
// just compare & exchange
|
260 |
-
if (el->rc_.compare_exchange_weak(
|
261 |
-
cur_rc, epoch_ | static_cast<rc_t>(cc), std::memory_order_release)) {
|
262 |
-
break;
|
263 |
-
}
|
264 |
-
ipc::yield(k);
|
265 |
-
}
|
266 |
-
std::forward<F>(f)(&(el->data_));
|
267 |
-
wt_.fetch_add(1, std::memory_order_release);
|
268 |
-
return true;
|
269 |
-
}
|
270 |
-
|
271 |
-
template <typename W, typename F, typename R, typename E>
|
272 |
-
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E* elems) {
|
273 |
-
if (cur == cursor()) return false; // acquire
|
274 |
-
auto* el = elems + circ::index_of(cur++);
|
275 |
-
std::forward<F>(f)(&(el->data_));
|
276 |
-
for (unsigned k = 0;;) {
|
277 |
-
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
278 |
-
if ((cur_rc & ep_mask) == 0) {
|
279 |
-
std::forward<R>(out)(true);
|
280 |
-
return true;
|
281 |
-
}
|
282 |
-
auto nxt_rc = cur_rc & ~static_cast<rc_t>(wrapper->connected_id());
|
283 |
-
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
284 |
-
std::forward<R>(out)((nxt_rc & ep_mask) == 0);
|
285 |
-
return true;
|
286 |
-
}
|
287 |
-
ipc::yield(k);
|
288 |
-
}
|
289 |
-
}
|
290 |
-
};
|
291 |
-
|
292 |
-
template <>
|
293 |
-
struct prod_cons_impl<wr<relat::multi, relat::multi, trans::broadcast>> {
|
294 |
-
|
295 |
-
using rc_t = std::uint64_t;
|
296 |
-
using flag_t = std::uint64_t;
|
297 |
-
|
298 |
-
enum : rc_t {
|
299 |
-
rc_mask = 0x00000000ffffffffull,
|
300 |
-
ep_mask = 0x00ffffffffffffffull,
|
301 |
-
ep_incr = 0x0100000000000000ull,
|
302 |
-
ic_mask = 0xff000000ffffffffull,
|
303 |
-
ic_incr = 0x0000000100000000ull
|
304 |
-
};
|
305 |
-
|
306 |
-
template <std::size_t DataSize, std::size_t AlignSize>
|
307 |
-
struct elem_t {
|
308 |
-
std::aligned_storage_t<DataSize, AlignSize> data_ {};
|
309 |
-
std::atomic<rc_t > rc_ { 0 }; // read-counter
|
310 |
-
std::atomic<flag_t> f_ct_ { 0 }; // commit flag
|
311 |
-
};
|
312 |
-
|
313 |
-
alignas(cache_line_size) std::atomic<circ::u2_t> ct_; // commit index
|
314 |
-
alignas(cache_line_size) std::atomic<rc_t> epoch_ { 0 };
|
315 |
-
|
316 |
-
circ::u2_t cursor() const noexcept {
|
317 |
-
return ct_.load(std::memory_order_acquire);
|
318 |
-
}
|
319 |
-
|
320 |
-
constexpr static rc_t inc_rc(rc_t rc) noexcept {
|
321 |
-
return (rc & ic_mask) | ((rc + ic_incr) & ~ic_mask);
|
322 |
-
}
|
323 |
-
|
324 |
-
constexpr static rc_t inc_mask(rc_t rc) noexcept {
|
325 |
-
return inc_rc(rc) & ~rc_mask;
|
326 |
-
}
|
327 |
-
|
328 |
-
template <typename W, typename F, typename E>
|
329 |
-
bool push(W* wrapper, F&& f, E* elems) {
|
330 |
-
E* el;
|
331 |
-
circ::u2_t cur_ct;
|
332 |
-
rc_t epoch = epoch_.load(std::memory_order_acquire);
|
333 |
-
for (unsigned k = 0;;) {
|
334 |
-
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
335 |
-
if (cc == 0) return false; // no reader
|
336 |
-
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
|
337 |
-
// check all consumers have finished reading this element
|
338 |
-
auto cur_rc = el->rc_.load(std::memory_order_relaxed);
|
339 |
-
circ::cc_t rem_cc = cur_rc & rc_mask;
|
340 |
-
if ((cc & rem_cc) && ((cur_rc & ~ep_mask) == epoch)) {
|
341 |
-
return false; // has not finished yet
|
342 |
-
}
|
343 |
-
else if (!rem_cc) {
|
344 |
-
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
|
345 |
-
if ((cur_fl != cur_ct) && cur_fl) {
|
346 |
-
return false; // full
|
347 |
-
}
|
348 |
-
}
|
349 |
-
// consider rem_cc to be 0 here
|
350 |
-
if (el->rc_.compare_exchange_weak(
|
351 |
-
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed) &&
|
352 |
-
epoch_.compare_exchange_weak(epoch, epoch, std::memory_order_acq_rel)) {
|
353 |
-
break;
|
354 |
-
}
|
355 |
-
ipc::yield(k);
|
356 |
-
}
|
357 |
-
// only one thread/process would touch here at one time
|
358 |
-
ct_.store(cur_ct + 1, std::memory_order_release);
|
359 |
-
std::forward<F>(f)(&(el->data_));
|
360 |
-
// set flag & try update wt
|
361 |
-
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
362 |
-
return true;
|
363 |
-
}
|
364 |
-
|
365 |
-
template <typename W, typename F, typename E>
|
366 |
-
bool force_push(W* wrapper, F&& f, E* elems) {
|
367 |
-
E* el;
|
368 |
-
circ::u2_t cur_ct;
|
369 |
-
rc_t epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
|
370 |
-
for (unsigned k = 0;;) {
|
371 |
-
circ::cc_t cc = wrapper->elems()->connections(std::memory_order_relaxed);
|
372 |
-
if (cc == 0) return false; // no reader
|
373 |
-
el = elems + circ::index_of(cur_ct = ct_.load(std::memory_order_relaxed));
|
374 |
-
// check all consumers have finished reading this element
|
375 |
-
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
376 |
-
circ::cc_t rem_cc = cur_rc & rc_mask;
|
377 |
-
if (cc & rem_cc) {
|
378 |
-
ipc::log("force_push: k = %u, cc = %u, rem_cc = %u\n", k, cc, rem_cc);
|
379 |
-
cc = wrapper->elems()->disconnect_receiver(rem_cc); // disconnect all invalid readers
|
380 |
-
if (cc == 0) return false; // no reader
|
381 |
-
}
|
382 |
-
// just compare & exchange
|
383 |
-
if (el->rc_.compare_exchange_weak(
|
384 |
-
cur_rc, inc_mask(epoch | (cur_rc & ep_mask)) | static_cast<rc_t>(cc), std::memory_order_relaxed)) {
|
385 |
-
if (epoch == epoch_.load(std::memory_order_acquire)) {
|
386 |
-
break;
|
387 |
-
}
|
388 |
-
else if (push(wrapper, std::forward<F>(f), elems)) {
|
389 |
-
return true;
|
390 |
-
}
|
391 |
-
epoch = epoch_.fetch_add(ep_incr, std::memory_order_release) + ep_incr;
|
392 |
-
}
|
393 |
-
ipc::yield(k);
|
394 |
-
}
|
395 |
-
// only one thread/process would touch here at one time
|
396 |
-
ct_.store(cur_ct + 1, std::memory_order_release);
|
397 |
-
std::forward<F>(f)(&(el->data_));
|
398 |
-
// set flag & try update wt
|
399 |
-
el->f_ct_.store(~static_cast<flag_t>(cur_ct), std::memory_order_release);
|
400 |
-
return true;
|
401 |
-
}
|
402 |
-
|
403 |
-
template <typename W, typename F, typename R, typename E, std::size_t N>
|
404 |
-
bool pop(W* wrapper, circ::u2_t& cur, F&& f, R&& out, E(& elems)[N]) {
|
405 |
-
auto* el = elems + circ::index_of(cur);
|
406 |
-
auto cur_fl = el->f_ct_.load(std::memory_order_acquire);
|
407 |
-
if (cur_fl != ~static_cast<flag_t>(cur)) {
|
408 |
-
return false; // empty
|
409 |
-
}
|
410 |
-
++cur;
|
411 |
-
std::forward<F>(f)(&(el->data_));
|
412 |
-
for (unsigned k = 0;;) {
|
413 |
-
auto cur_rc = el->rc_.load(std::memory_order_acquire);
|
414 |
-
if ((cur_rc & rc_mask) == 0) {
|
415 |
-
std::forward<R>(out)(true);
|
416 |
-
el->f_ct_.store(cur + N - 1, std::memory_order_release);
|
417 |
-
return true;
|
418 |
-
}
|
419 |
-
auto nxt_rc = inc_rc(cur_rc) & ~static_cast<rc_t>(wrapper->connected_id());
|
420 |
-
bool last_one = false;
|
421 |
-
if ((last_one = (nxt_rc & rc_mask) == 0)) {
|
422 |
-
el->f_ct_.store(cur + N - 1, std::memory_order_release);
|
423 |
-
}
|
424 |
-
if (el->rc_.compare_exchange_weak(cur_rc, nxt_rc, std::memory_order_release)) {
|
425 |
-
std::forward<R>(out)(last_one);
|
426 |
-
return true;
|
427 |
-
}
|
428 |
-
ipc::yield(k);
|
429 |
-
}
|
430 |
-
}
|
431 |
-
};
|
432 |
-
|
433 |
-
} // namespace ipc
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/encoders/__init__.py
DELETED
File without changes
|
spaces/DEBO-PROJECT/DEBO-V1/bots/debate_bot.py
DELETED
@@ -1,27 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
import random
|
3 |
-
from langchain.prompts import PromptTemplate
|
4 |
-
from modules.gpt_modules import gpt_call
|
5 |
-
|
6 |
-
from .normal_debate import nomal_debator
|
7 |
-
from .one_to_one_debate import one_to_one_debator
|
8 |
-
|
9 |
-
|
10 |
-
#############################################
|
11 |
-
# Debate bot setting
|
12 |
-
#############################################
|
13 |
-
def debate_bot(prompt, history="", debate_subject="", bot_role="", history_num=0):
|
14 |
-
|
15 |
-
if bot_role == "토론":
|
16 |
-
#bot_response = nomal_debator(prompt, history, debate_subject, bot_role, history_num)
|
17 |
-
bot_response = one_to_one_debator(prompt, history, debate_subject, bot_role, history_num)
|
18 |
-
elif bot_role == "주제 정의":
|
19 |
-
pass
|
20 |
-
elif bot_role == "POI 연습":
|
21 |
-
pass
|
22 |
-
elif bot_role == "역할 추천":
|
23 |
-
pass
|
24 |
-
elif bot_role == "주장 비판":
|
25 |
-
pass
|
26 |
-
else:
|
27 |
-
print("bot_role error")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/evaluation/iou.py
DELETED
@@ -1,148 +0,0 @@
|
|
1 |
-
"""
|
2 |
-
@date: 2021/6/29
|
3 |
-
@description:
|
4 |
-
The method with "_floorplan" suffix is only for comparison, which is used for calculation in LED2-net.
|
5 |
-
However, the floorplan is affected by show_radius. Setting too large will result in the decrease of accuracy,
|
6 |
-
and setting too small will result in the failure of calculation beyond the range.
|
7 |
-
"""
|
8 |
-
import numpy as np
|
9 |
-
from shapely.geometry import Polygon
|
10 |
-
|
11 |
-
|
12 |
-
def calc_inter_area(dt_xz, gt_xz):
|
13 |
-
"""
|
14 |
-
:param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
15 |
-
:param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
16 |
-
:return:
|
17 |
-
"""
|
18 |
-
dt_polygon = Polygon(dt_xz)
|
19 |
-
gt_polygon = Polygon(gt_xz)
|
20 |
-
|
21 |
-
dt_area = dt_polygon.area
|
22 |
-
gt_area = gt_polygon.area
|
23 |
-
inter_area = dt_polygon.intersection(gt_polygon).area
|
24 |
-
return dt_area, gt_area, inter_area
|
25 |
-
|
26 |
-
|
27 |
-
def calc_IoU_2D(dt_xz, gt_xz):
|
28 |
-
"""
|
29 |
-
:param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
30 |
-
:param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
31 |
-
:return:
|
32 |
-
"""
|
33 |
-
dt_area, gt_area, inter_area = calc_inter_area(dt_xz, gt_xz)
|
34 |
-
iou_2d = inter_area / (gt_area + dt_area - inter_area)
|
35 |
-
return iou_2d
|
36 |
-
|
37 |
-
|
38 |
-
def calc_IoU_3D(dt_xz, gt_xz, dt_height, gt_height):
|
39 |
-
"""
|
40 |
-
:param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
41 |
-
:param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
42 |
-
:param dt_height:
|
43 |
-
:param gt_height:
|
44 |
-
:return:
|
45 |
-
"""
|
46 |
-
dt_area, gt_area, inter_area = calc_inter_area(dt_xz, gt_xz)
|
47 |
-
dt_volume = dt_area * dt_height
|
48 |
-
gt_volume = gt_area * gt_height
|
49 |
-
inter_volume = inter_area * min(dt_height, gt_height)
|
50 |
-
iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume)
|
51 |
-
return iou_3d
|
52 |
-
|
53 |
-
|
54 |
-
def calc_IoU(dt_xz, gt_xz, dt_height, gt_height):
|
55 |
-
"""
|
56 |
-
:param dt_xz: Prediction boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
57 |
-
:param gt_xz: Ground truth boundaries can also be corners, format: [[x1, z1], [x2, z2], ...]
|
58 |
-
:param dt_height:
|
59 |
-
:param gt_height:
|
60 |
-
:return:
|
61 |
-
"""
|
62 |
-
dt_area, gt_area, inter_area = calc_inter_area(dt_xz, gt_xz)
|
63 |
-
iou_2d = inter_area / (gt_area + dt_area - inter_area)
|
64 |
-
|
65 |
-
dt_volume = dt_area * dt_height
|
66 |
-
gt_volume = gt_area * gt_height
|
67 |
-
inter_volume = inter_area * min(dt_height, gt_height)
|
68 |
-
iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume)
|
69 |
-
|
70 |
-
return iou_2d, iou_3d
|
71 |
-
|
72 |
-
|
73 |
-
def calc_Iou_height(dt_height, gt_height):
|
74 |
-
return min(dt_height, gt_height) / max(dt_height, gt_height)
|
75 |
-
|
76 |
-
|
77 |
-
# the following is for testing only
|
78 |
-
def calc_inter_area_floorplan(dt_floorplan, gt_floorplan):
|
79 |
-
intersect = np.sum(np.logical_and(dt_floorplan, gt_floorplan))
|
80 |
-
dt_area = np.sum(dt_floorplan)
|
81 |
-
gt_area = np.sum(gt_floorplan)
|
82 |
-
return dt_area, gt_area, intersect
|
83 |
-
|
84 |
-
|
85 |
-
def calc_IoU_2D_floorplan(dt_floorplan, gt_floorplan):
|
86 |
-
dt_area, gt_area, inter_area = calc_inter_area_floorplan(dt_floorplan, gt_floorplan)
|
87 |
-
iou_2d = inter_area / (gt_area + dt_area - inter_area)
|
88 |
-
return iou_2d
|
89 |
-
|
90 |
-
|
91 |
-
def calc_IoU_3D_floorplan(dt_floorplan, gt_floorplan, dt_height, gt_height):
|
92 |
-
dt_area, gt_area, inter_area = calc_inter_area_floorplan(dt_floorplan, gt_floorplan)
|
93 |
-
dt_volume = dt_area * dt_height
|
94 |
-
gt_volume = gt_area * gt_height
|
95 |
-
inter_volume = inter_area * min(dt_height, gt_height)
|
96 |
-
iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume)
|
97 |
-
return iou_3d
|
98 |
-
|
99 |
-
|
100 |
-
def calc_IoU_floorplan(dt_floorplan, gt_floorplan, dt_height, gt_height):
|
101 |
-
dt_area, gt_area, inter_area = calc_inter_area_floorplan(dt_floorplan, gt_floorplan)
|
102 |
-
iou_2d = inter_area / (gt_area + dt_area - inter_area)
|
103 |
-
|
104 |
-
dt_volume = dt_area * dt_height
|
105 |
-
gt_volume = gt_area * gt_height
|
106 |
-
inter_volume = inter_area * min(dt_height, gt_height)
|
107 |
-
iou_3d = inter_volume / (dt_volume + gt_volume - inter_volume)
|
108 |
-
return iou_2d, iou_3d
|
109 |
-
|
110 |
-
|
111 |
-
if __name__ == '__main__':
|
112 |
-
from visualization.floorplan import draw_floorplan, draw_iou_floorplan
|
113 |
-
from visualization.boundary import draw_boundaries, corners2boundaries
|
114 |
-
from utils.conversion import uv2xyz
|
115 |
-
from utils.height import height2ratio
|
116 |
-
|
117 |
-
# dummy data
|
118 |
-
dt_floor_corners = np.array([[0.2, 0.7],
|
119 |
-
[0.4, 0.7],
|
120 |
-
[0.6, 0.7],
|
121 |
-
[0.8, 0.7]])
|
122 |
-
dt_height = 2.8
|
123 |
-
|
124 |
-
gt_floor_corners = np.array([[0.3, 0.7],
|
125 |
-
[0.5, 0.7],
|
126 |
-
[0.7, 0.7],
|
127 |
-
[0.9, 0.7]])
|
128 |
-
gt_height = 3.2
|
129 |
-
|
130 |
-
dt_xz = uv2xyz(dt_floor_corners)[..., ::2]
|
131 |
-
gt_xz = uv2xyz(gt_floor_corners)[..., ::2]
|
132 |
-
|
133 |
-
dt_floorplan = draw_floorplan(dt_xz, show=False, show_radius=1)
|
134 |
-
gt_floorplan = draw_floorplan(gt_xz, show=False, show_radius=1)
|
135 |
-
# dt_floorplan = draw_floorplan(dt_xz, show=False, show_radius=2)
|
136 |
-
# gt_floorplan = draw_floorplan(gt_xz, show=False, show_radius=2)
|
137 |
-
|
138 |
-
iou_2d, iou_3d = calc_IoU_floorplan(dt_floorplan, gt_floorplan, dt_height, gt_height)
|
139 |
-
print('use floor plan image:', iou_2d, iou_3d)
|
140 |
-
|
141 |
-
iou_2d, iou_3d = calc_IoU(dt_xz, gt_xz, dt_height, gt_height)
|
142 |
-
print('use floor plan polygon:', iou_2d, iou_3d)
|
143 |
-
|
144 |
-
draw_iou_floorplan(dt_xz, gt_xz, show=True, iou_2d=iou_2d, iou_3d=iou_3d)
|
145 |
-
pano_bd = draw_boundaries(np.zeros([512, 1024, 3]), corners_list=[dt_floor_corners],
|
146 |
-
boundary_color=[0, 0, 1], ratio=height2ratio(dt_height), draw_corners=False)
|
147 |
-
pano_bd = draw_boundaries(pano_bd, corners_list=[gt_floor_corners],
|
148 |
-
boundary_color=[0, 1, 0], ratio=height2ratio(gt_height), show=True, draw_corners=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Datasculptor/MusicGen/audiocraft/models/builders.py
DELETED
@@ -1,218 +0,0 @@
|
|
1 |
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
2 |
-
# All rights reserved.
|
3 |
-
#
|
4 |
-
# This source code is licensed under the license found in the
|
5 |
-
# LICENSE file in the root directory of this source tree.
|
6 |
-
|
7 |
-
"""
|
8 |
-
All the functions to build the relevant models and modules
|
9 |
-
from the Hydra config.
|
10 |
-
"""
|
11 |
-
|
12 |
-
import typing as tp
|
13 |
-
import warnings
|
14 |
-
|
15 |
-
import audiocraft
|
16 |
-
import omegaconf
|
17 |
-
import torch
|
18 |
-
|
19 |
-
from .encodec import CompressionModel, EncodecModel, FlattenedCompressionModel # noqa
|
20 |
-
from .lm import LMModel
|
21 |
-
from ..modules.codebooks_patterns import (
|
22 |
-
CodebooksPatternProvider,
|
23 |
-
DelayedPatternProvider,
|
24 |
-
ParallelPatternProvider,
|
25 |
-
UnrolledPatternProvider,
|
26 |
-
VALLEPattern,
|
27 |
-
MusicLMPattern,
|
28 |
-
)
|
29 |
-
from ..modules.conditioners import (
|
30 |
-
BaseConditioner,
|
31 |
-
ConditioningProvider,
|
32 |
-
LUTConditioner,
|
33 |
-
T5Conditioner,
|
34 |
-
ConditionFuser,
|
35 |
-
ChromaStemConditioner,
|
36 |
-
)
|
37 |
-
from .. import quantization as qt
|
38 |
-
from ..utils.utils import dict_from_config
|
39 |
-
|
40 |
-
|
41 |
-
def get_quantizer(quantizer: str, cfg: omegaconf.DictConfig, dimension: int) -> qt.BaseQuantizer:
|
42 |
-
klass = {
|
43 |
-
'no_quant': qt.DummyQuantizer,
|
44 |
-
'rvq': qt.ResidualVectorQuantizer
|
45 |
-
}[quantizer]
|
46 |
-
kwargs = dict_from_config(getattr(cfg, quantizer))
|
47 |
-
if quantizer != 'no_quant':
|
48 |
-
kwargs['dimension'] = dimension
|
49 |
-
return klass(**kwargs)
|
50 |
-
|
51 |
-
|
52 |
-
def get_encodec_autoencoder(encoder_name: str, cfg: omegaconf.DictConfig):
|
53 |
-
if encoder_name == 'seanet':
|
54 |
-
kwargs = dict_from_config(getattr(cfg, 'seanet'))
|
55 |
-
encoder_override_kwargs = kwargs.pop('encoder')
|
56 |
-
decoder_override_kwargs = kwargs.pop('decoder')
|
57 |
-
encoder_kwargs = {**kwargs, **encoder_override_kwargs}
|
58 |
-
decoder_kwargs = {**kwargs, **decoder_override_kwargs}
|
59 |
-
encoder = audiocraft.modules.SEANetEncoder(**encoder_kwargs)
|
60 |
-
decoder = audiocraft.modules.SEANetDecoder(**decoder_kwargs)
|
61 |
-
return encoder, decoder
|
62 |
-
else:
|
63 |
-
raise KeyError(f'Unexpected compression model {cfg.compression_model}')
|
64 |
-
|
65 |
-
|
66 |
-
def get_compression_model(cfg: omegaconf.DictConfig) -> CompressionModel:
|
67 |
-
"""Instantiate a compression model.
|
68 |
-
"""
|
69 |
-
if cfg.compression_model == 'encodec':
|
70 |
-
kwargs = dict_from_config(getattr(cfg, 'encodec'))
|
71 |
-
encoder_name = kwargs.pop('autoencoder')
|
72 |
-
quantizer_name = kwargs.pop('quantizer')
|
73 |
-
encoder, decoder = get_encodec_autoencoder(encoder_name, cfg)
|
74 |
-
quantizer = get_quantizer(quantizer_name, cfg, encoder.dimension)
|
75 |
-
frame_rate = kwargs['sample_rate'] // encoder.hop_length
|
76 |
-
renormalize = kwargs.pop('renormalize', None)
|
77 |
-
renorm = kwargs.pop('renorm')
|
78 |
-
if renormalize is None:
|
79 |
-
renormalize = renorm is not None
|
80 |
-
warnings.warn("You are using a deprecated EnCodec model. Please migrate to new renormalization.")
|
81 |
-
return EncodecModel(encoder, decoder, quantizer,
|
82 |
-
frame_rate=frame_rate, renormalize=renormalize, **kwargs).to(cfg.device)
|
83 |
-
else:
|
84 |
-
raise KeyError(f'Unexpected compression model {cfg.compression_model}')
|
85 |
-
|
86 |
-
|
87 |
-
def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:
|
88 |
-
"""Instantiate a transformer LM.
|
89 |
-
"""
|
90 |
-
if cfg.lm_model == 'transformer_lm':
|
91 |
-
kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))
|
92 |
-
n_q = kwargs['n_q']
|
93 |
-
q_modeling = kwargs.pop('q_modeling', None)
|
94 |
-
codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')
|
95 |
-
attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))
|
96 |
-
cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))
|
97 |
-
cfg_prob, cfg_coef = cls_free_guidance["training_dropout"], cls_free_guidance["inference_coef"]
|
98 |
-
fuser = get_condition_fuser(cfg)
|
99 |
-
condition_provider = get_conditioner_provider(kwargs["dim"], cfg).to(cfg.device)
|
100 |
-
if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programatically
|
101 |
-
kwargs['cross_attention'] = True
|
102 |
-
if codebooks_pattern_cfg.modeling is None:
|
103 |
-
assert q_modeling is not None, \
|
104 |
-
'LM model should either have a codebook pattern defined or transformer_lm.q_modeling'
|
105 |
-
codebooks_pattern_cfg = omegaconf.OmegaConf.create(
|
106 |
-
{'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}
|
107 |
-
)
|
108 |
-
pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)
|
109 |
-
return LMModel(
|
110 |
-
pattern_provider=pattern_provider,
|
111 |
-
condition_provider=condition_provider,
|
112 |
-
fuser=fuser,
|
113 |
-
cfg_dropout=cfg_prob,
|
114 |
-
cfg_coef=cfg_coef,
|
115 |
-
attribute_dropout=attribute_dropout,
|
116 |
-
dtype=getattr(torch, cfg.dtype),
|
117 |
-
device=cfg.device,
|
118 |
-
**kwargs
|
119 |
-
).to(cfg.device)
|
120 |
-
else:
|
121 |
-
raise KeyError(f'Unexpected LM model {cfg.lm_model}')
|
122 |
-
|
123 |
-
|
124 |
-
def get_conditioner_provider(output_dim: int, cfg: omegaconf.DictConfig) -> ConditioningProvider:
|
125 |
-
"""Instantiate a conditioning model.
|
126 |
-
"""
|
127 |
-
device = cfg.device
|
128 |
-
duration = cfg.dataset.segment_duration
|
129 |
-
cfg = getattr(cfg, "conditioners")
|
130 |
-
cfg = omegaconf.OmegaConf.create({}) if cfg is None else cfg
|
131 |
-
conditioners: tp.Dict[str, BaseConditioner] = {}
|
132 |
-
with omegaconf.open_dict(cfg):
|
133 |
-
condition_provider_args = cfg.pop('args', {})
|
134 |
-
for cond, cond_cfg in cfg.items():
|
135 |
-
model_type = cond_cfg["model"]
|
136 |
-
model_args = cond_cfg[model_type]
|
137 |
-
if model_type == "t5":
|
138 |
-
conditioners[str(cond)] = T5Conditioner(output_dim=output_dim, device=device, **model_args)
|
139 |
-
elif model_type == "lut":
|
140 |
-
conditioners[str(cond)] = LUTConditioner(output_dim=output_dim, **model_args)
|
141 |
-
elif model_type == "chroma_stem":
|
142 |
-
model_args.pop('cache_path', None)
|
143 |
-
conditioners[str(cond)] = ChromaStemConditioner(
|
144 |
-
output_dim=output_dim,
|
145 |
-
duration=duration,
|
146 |
-
device=device,
|
147 |
-
**model_args
|
148 |
-
)
|
149 |
-
else:
|
150 |
-
raise ValueError(f"unrecognized conditioning model: {model_type}")
|
151 |
-
conditioner = ConditioningProvider(conditioners, device=device, **condition_provider_args)
|
152 |
-
return conditioner
|
153 |
-
|
154 |
-
|
155 |
-
def get_condition_fuser(cfg: omegaconf.DictConfig) -> ConditionFuser:
|
156 |
-
"""Instantiate a condition fuser object.
|
157 |
-
"""
|
158 |
-
fuser_cfg = getattr(cfg, "fuser")
|
159 |
-
fuser_methods = ["sum", "cross", "prepend", "input_interpolate"]
|
160 |
-
fuse2cond = {k: fuser_cfg[k] for k in fuser_methods}
|
161 |
-
kwargs = {k: v for k, v in fuser_cfg.items() if k not in fuser_methods}
|
162 |
-
fuser = ConditionFuser(fuse2cond=fuse2cond, **kwargs)
|
163 |
-
return fuser
|
164 |
-
|
165 |
-
|
166 |
-
def get_codebooks_pattern_provider(n_q: int, cfg: omegaconf.DictConfig) -> CodebooksPatternProvider:
|
167 |
-
"""Instantiate a codebooks pattern provider object.
|
168 |
-
"""
|
169 |
-
pattern_providers = {
|
170 |
-
'parallel': ParallelPatternProvider,
|
171 |
-
'delay': DelayedPatternProvider,
|
172 |
-
'unroll': UnrolledPatternProvider,
|
173 |
-
'valle': VALLEPattern,
|
174 |
-
'musiclm': MusicLMPattern,
|
175 |
-
}
|
176 |
-
name = cfg.modeling
|
177 |
-
kwargs = dict_from_config(cfg.get(name)) if hasattr(cfg, name) else {}
|
178 |
-
klass = pattern_providers[name]
|
179 |
-
return klass(n_q, **kwargs)
|
180 |
-
|
181 |
-
|
182 |
-
def get_debug_compression_model(device='cpu'):
|
183 |
-
"""Instantiate a debug compression model to be used for unit tests.
|
184 |
-
"""
|
185 |
-
seanet_kwargs = {
|
186 |
-
'n_filters': 4,
|
187 |
-
'n_residual_layers': 1,
|
188 |
-
'dimension': 32,
|
189 |
-
'ratios': [10, 8, 16] # 25 Hz at 32kHz
|
190 |
-
}
|
191 |
-
encoder = audiocraft.modules.SEANetEncoder(**seanet_kwargs)
|
192 |
-
decoder = audiocraft.modules.SEANetDecoder(**seanet_kwargs)
|
193 |
-
quantizer = qt.ResidualVectorQuantizer(dimension=32, bins=400, n_q=4)
|
194 |
-
init_x = torch.randn(8, 32, 128)
|
195 |
-
quantizer(init_x, 1) # initialize kmeans etc.
|
196 |
-
compression_model = EncodecModel(
|
197 |
-
encoder, decoder, quantizer,
|
198 |
-
frame_rate=25, sample_rate=32000, channels=1).to(device)
|
199 |
-
return compression_model.eval()
|
200 |
-
|
201 |
-
|
202 |
-
def get_debug_lm_model(device='cpu'):
|
203 |
-
"""Instantiate a debug LM to be used for unit tests.
|
204 |
-
"""
|
205 |
-
pattern = DelayedPatternProvider(n_q=4)
|
206 |
-
dim = 16
|
207 |
-
providers = {
|
208 |
-
'description': LUTConditioner(n_bins=128, dim=dim, output_dim=dim, tokenizer="whitespace"),
|
209 |
-
}
|
210 |
-
condition_provider = ConditioningProvider(providers)
|
211 |
-
fuser = ConditionFuser(
|
212 |
-
{'cross': ['description'], 'prepend': [],
|
213 |
-
'sum': [], 'input_interpolate': []})
|
214 |
-
lm = LMModel(
|
215 |
-
pattern, condition_provider, fuser,
|
216 |
-
n_q=4, card=400, dim=dim, num_heads=4, custom=True, num_layers=2,
|
217 |
-
cross_attention=True, causal=True)
|
218 |
-
return lm.to(device).eval()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|