Commit 3bcd6df
Parent(s): d074614
Update parquet files (step 71 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/17TheWord/RealESRGAN/scripts/extract_subimages.py +0 -135
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares Autotune Evo VST RTAS v6.0.9.rar.rar The Ultimate Guide to the Most Popular Vocal Processing Tool.md +0 -191
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDFab 8.1.5.9 Qt Final Multilang Download Pcl A Powerful and Customizable DVD Copy Software.md +0 -217
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Danfe View Keygen [NEW].md +0 -22
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discografia Evaldo Freire Torrent Download A Guide to the Musical Career of the Sertanejo Icon.md +0 -91
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ApkOnline A Web-Based Android Emulator and APK Installer.md +0 -108
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse - The Ultimate Open World Survival RPG.md +0 -97
- spaces/1phancelerku/anime-remove-background/Download YouTube Shorts in HD Quality No Watermark Free Fast.md +0 -128
- spaces/1phancelerku/anime-remove-background/Download youtube-dlg 0.4 A cross platform GUI for youtube-dl.md +0 -158
- spaces/1phancelerku/anime-remove-background/FS 12 Mod APK How to Unlock Unlimited Money in Farming Simulator 22.md +0 -104
- spaces/232labs/VToonify/vtoonify/model/raft/core/utils/flow_viz.py +0 -132
- spaces/44brabal/valentinafeve-yolos-fashionpedia/app.py +0 -7
- spaces/A00001/bingothoo/src/components/user-menu.tsx +0 -113
- spaces/AEUPH/AethericGPT/README.md +0 -13
- spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py +0 -150
- spaces/AIGText/GlyphControl/ldm/modules/midas/__init__.py +0 -0
- spaces/AISuperheroes/README/README.md +0 -15
- spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/__init__.py +0 -0
- spaces/Abhilashvj/planogram-compliance/utils/segment/__init__.py +0 -0
- spaces/Adapter/T2I-Adapter/configs/mm/faster_rcnn_r50_fpn_coco.py +0 -182
- spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/app.py +0 -23
- spaces/AdityaVishwakarma/LiveChecker/README.md +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/PreTest.js +0 -47
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/LayoutChildren.js +0 -68
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.js +0 -2
- spaces/Alycer/VITS-Umamusume-voice-synthesizer/mel_processing.py +0 -101
- spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/custom_ops.py +0 -126
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/import_utils.py +0 -655
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/custom_init_isort.py +0 -252
- spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py +0 -8
- spaces/Andy1621/uniformer_image_detection/configs/gfl/README.md +0 -32
- spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py +0 -13
- spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py +0 -9
- spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py +0 -2
- spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py +0 -9
- spaces/AnnonSubmission/xai-cl/ssl_models/simsiam.py +0 -91
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py +0 -204
- spaces/Apex-X/GODROOP/roop/processors/frame/face_enhancer.py +0 -81
- spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_utils.py +0 -110
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/diagram/__init__.py +0 -642
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/json.py +0 -140
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/losses.py +0 -133
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py +0 -175
- spaces/Bart92/RVC_HF/train/utils.py +0 -500
- spaces/Benson/text-generation/Examples/Descargar Dummy Mp4 Video.md +0 -79
- spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/cells.py +0 -154
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_scripts.py +0 -173
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_compat.py +0 -71
- spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_musllinux.py +0 -136
spaces/17TheWord/RealESRGAN/scripts/extract_subimages.py
DELETED
@@ -1,135 +0,0 @@
-import argparse
-import cv2
-import numpy as np
-import os
-import sys
-from basicsr.utils import scandir
-from multiprocessing import Pool
-from os import path as osp
-from tqdm import tqdm
-
-
-def main(args):
-    """A multi-thread tool to crop large images to sub-images for faster IO.
-
-    opt (dict): Configuration dict. It contains:
-        n_thread (int): Thread number.
-        compression_level (int): CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size
-            and longer compression time. Use 0 for faster CPU decompression. Default: 3, same in cv2.
-        input_folder (str): Path to the input folder.
-        save_folder (str): Path to save folder.
-        crop_size (int): Crop size.
-        step (int): Step for overlapped sliding window.
-        thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.
-
-    Usage:
-        For each folder, run this script.
-        Typically, there are GT folder and LQ folder to be processed for DIV2K dataset.
-        After process, each sub_folder should have the same number of subimages.
-        Remember to modify opt configurations according to your settings.
-    """
-
-    opt = {}
-    opt['n_thread'] = args.n_thread
-    opt['compression_level'] = args.compression_level
-    opt['input_folder'] = args.input
-    opt['save_folder'] = args.output
-    opt['crop_size'] = args.crop_size
-    opt['step'] = args.step
-    opt['thresh_size'] = args.thresh_size
-    extract_subimages(opt)
-
-
-def extract_subimages(opt):
-    """Crop images to subimages.
-
-    Args:
-        opt (dict): Configuration dict. It contains:
-            input_folder (str): Path to the input folder.
-            save_folder (str): Path to save folder.
-            n_thread (int): Thread number.
-    """
-    input_folder = opt['input_folder']
-    save_folder = opt['save_folder']
-    if not osp.exists(save_folder):
-        os.makedirs(save_folder)
-        print(f'mkdir {save_folder} ...')
-    else:
-        print(f'Folder {save_folder} already exists. Exit.')
-        sys.exit(1)
-
-    # scan all images
-    img_list = list(scandir(input_folder, full_path=True))
-
-    pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
-    pool = Pool(opt['n_thread'])
-    for path in img_list:
-        pool.apply_async(worker, args=(path, opt), callback=lambda arg: pbar.update(1))
-    pool.close()
-    pool.join()
-    pbar.close()
-    print('All processes done.')
-
-
-def worker(path, opt):
-    """Worker for each process.
-
-    Args:
-        path (str): Image path.
-        opt (dict): Configuration dict. It contains:
-            crop_size (int): Crop size.
-            step (int): Step for overlapped sliding window.
-            thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.
-            save_folder (str): Path to save folder.
-            compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.
-
-    Returns:
-        process_info (str): Process information displayed in progress bar.
-    """
-    crop_size = opt['crop_size']
-    step = opt['step']
-    thresh_size = opt['thresh_size']
-    img_name, extension = osp.splitext(osp.basename(path))
-
-    # remove the x2, x3, x4 and x8 in the filename for DIV2K
-    img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')
-
-    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
-
-    h, w = img.shape[0:2]
-    h_space = np.arange(0, h - crop_size + 1, step)
-    if h - (h_space[-1] + crop_size) > thresh_size:
-        h_space = np.append(h_space, h - crop_size)
-    w_space = np.arange(0, w - crop_size + 1, step)
-    if w - (w_space[-1] + crop_size) > thresh_size:
-        w_space = np.append(w_space, w - crop_size)
-
-    index = 0
-    for x in h_space:
-        for y in w_space:
-            index += 1
-            cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
-            cropped_img = np.ascontiguousarray(cropped_img)
-            cv2.imwrite(
-                osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img,
-                [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
-    process_info = f'Processing {img_name} ...'
-    return process_info
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
-    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder')
-    parser.add_argument('--crop_size', type=int, default=480, help='Crop size')
-    parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window')
-    parser.add_argument(
-        '--thresh_size',
-        type=int,
-        default=0,
-        help='Threshold size. Patches whose size is lower than thresh_size will be dropped.')
-    parser.add_argument('--n_thread', type=int, default=20, help='Thread number.')
-    parser.add_argument('--compression_level', type=int, default=3, help='Compression level')
-    args = parser.parse_args()
-
-    main(args)
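For context on the script removed above: it splits each high-resolution image into overlapping fixed-size patches using a sliding-window grid, then writes each patch from parallel worker processes. Below is a minimal standalone sketch of just the grid computation from worker(), using the script's default crop_size=480, step=240 and thresh_size=0; the 2048x1536 image size is a hypothetical value chosen for illustration, not something taken from this commit.

import numpy as np

def crop_grid(length, crop_size=480, step=240, thresh_size=0):
    # Start offsets spaced by `step`; one extra offset is appended so the
    # right/bottom border is still covered whenever the leftover strip is
    # larger than thresh_size (mirrors h_space/w_space in worker()).
    space = np.arange(0, length - crop_size + 1, step)
    if length - (space[-1] + crop_size) > thresh_size:
        space = np.append(space, length - crop_size)
    return space

h_space = crop_grid(1536)  # [0, 240, 480, 720, 960, 1056]
w_space = crop_grid(2048)  # [0, 240, 480, 720, 960, 1200, 1440, 1568]
print(len(h_space) * len(w_space))  # 48 sub-images for this one image

Each (row, column) offset pair becomes one 480x480 crop, written to the save folder as '{img_name}_s{index:03d}{extension}' with the configured PNG compression level.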
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares Autotune Evo VST RTAS v6.0.9.rar.rar The Ultimate Guide to the Most Popular Vocal Processing Tool.md
DELETED
@@ -1,191 +0,0 @@
-
-<h1>Antares Autotune Evo VST RTAS v6.0.9.rar.rar: What is it and how to use it?</h1>
-<p>If you are looking for a professional and easy-to-use tool for correcting and enhancing the pitch of your vocals or instruments, you might have come across a file called <strong>Antares Autotune Evo VST RTAS v6.0.9.rar.rar</strong>. But what is this file and how can you use it in your audio projects? In this article, we will explain what Antares Autotune Evo is, what VST RTAS means, what a .rar file is, and how to download, install and use Antares Autotune Evo in your digital audio workstation (DAW).</p>
-<h2>Introduction</h2>
-<h3>What is Antares Autotune Evo?</h3>
-<p>Antares Autotune Evo is a multi-platform plug-in that corrects intonation and timing problems in vocals or solo instruments, in real time, without distortion or artifacts, while preserving all of the expressive nuance of the original performance. It is one of the most popular and widely used pitch correction tools in the music industry, used by thousands of audio professionals around the world.</p>
-<h2>Antares Autotune Evo VST RTAS v6.0.9.rar.rar</h2><br /><p><b><b>Download File</b> ✸✸✸ <a href="https://byltly.com/2uKvHC">https://byltly.com/2uKvHC</a></b></p><br /><br />
-<h3>What is VST RTAS?</h3>
-<p>VST stands for Virtual Studio Technology, which is a standard interface for integrating software audio synthesizers and effects plugins with audio editors and hard-disk recording systems. RTAS stands for Real-Time AudioSuite, which is a format of audio plug-in developed by Avid Technology for their Pro Tools software. Antares Autotune Evo VST RTAS v6.0.9.rar.rar is a file that contains both the VST and RTAS versions of the plug-in, which means you can use it with different DAWs that support either format.</p>
-<h3>What is a .rar file?</h3>
-<p>A .rar file is a compressed archive file that can contain one or more files or folders inside it. It is similar to a .zip file, but it uses a different compression algorithm that can achieve higher compression ratios. A .rar file can also be split into multiple parts, which can be useful for transferring large files over the internet or storing them on removable media. Antares Autotune Evo VST RTAS v6.0.9.rar.rar is actually a double-compressed archive file, which means it has been compressed twice with the .rar format. To extract the files inside it, you will need a software that can handle .rar files, such as WinRAR or 7-Zip.</p>
-<h2>Features and benefits of Antares Autotune Evo</h2>
-<h3>Automatic and graphical modes</h3>
-<p>Antares Autotune Evo has two main modes of operation: automatic and graphical. In automatic mode, the plug-in detects the pitch of the input signal, identifies the closest pitch in a user-specified scale (including minor, major, chromatic and 26 historical and microtonal scales), and corrects the input pitch to match the scale pitch. A retune speed control lets you match the retune rate to virtually any performance style. This mode is ideal for quick and easy pitch correction without much tweaking.</p>
-<p>In graphical mode, the plug-in displays the detected pitch envelope of the input signal and allows you to draw in the desired pitch using a variety of graphics tools. This mode gives you complete control over the correction or modification of the most elaborate expressive gestures. You can also zoom in and out, undo and redo edits, import and export pitch data, and more. This mode is ideal for meticulous and creative pitch manipulation.</p>
-<h3>Pitch correction and manipulation</h3>
-<p>Antares Autotune Evo can correct not only intonation problems but also timing problems in vocals or solo instruments. It can also create special effects such as robotic vocals, gender change, vibrato control, formant shifting, throat modeling, pitch shifting, transposition, doubling, harmonizing, etc.. You can use Antares Autotune Evo to fix subtle pitch errors or create dramatic vocal transformations.</p>
-<p>Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR download<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR free<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR crack<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR rar<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR audioz<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR google drive<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR 4shared<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR atualizado<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR modulador de voz<br />
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR site oficial<br />
-Antares Autotune Evo VST RTAS v6.0.9 full version<br />
-Antares Autotune Evo VST RTAS v6.0.9 serial key<br />
-Antares Autotune Evo VST RTAS v6.0.9 activation code<br />
-Antares Autotune Evo VST RTAS v6.0.9 license key<br />
-Antares Autotune Evo VST RTAS v6.0.9 registration key<br />
-Antares Autotune Evo VST RTAS v6.0.9 torrent download<br />
-Antares Autotune Evo VST RTAS v6.0.9 magnet link<br />
-Antares Autotune Evo VST RTAS v6.0.9 direct link<br />
-Antares Autotune Evo VST RTAS v6.0.9 mega download<br />
-Antares Autotune Evo VST RTAS v6.0.9 mediafire download<br />
-Antares Autotune Evo VST RTAS v6.0.9 windows 10 compatible<br />
-Antares Autotune Evo VST RTAS v6.0.9 windows 7 compatible<br />
-Antares Autotune Evo VST RTAS v6.0.9 mac os compatible<br />
-Antares Autotune Evo VST RTAS v6.0.9 linux compatible<br />
-Antares Autotune Evo VST RTAS v6.0.9 64 bit compatible<br />
-Antares Autotune Evo VST RTAS v6.0.9 32 bit compatible<br />
-Antares Autotune Evo VST RTAS v6.0.9 pitch correction software<br />
-Antares Autotune Evo VST RTAS v6.0.9 vocal effects software<br />
-Antares Autotune Evo VST RTAS v6.0.9 professional audio software<br />
-Antares Autotune Evo VST RTAS v6.0.9 music production software<br />
-Antares Autotune Evo VST RTAS v6.0.9 how to install guide<br />
-Antares Autotune Evo VST RTAS v6.0.9 how to use guide<br />
-Antares Autotune Evo VST RTAS v6.0.9 user manual pdf<br />
-Antares Autotune Evo VST RTAS v6.0.9 video tutorial youtube<br />
-Antares Autotune Evo VST RTAS v6.0.9 review and rating<br />
-Antares Autotune Evo VST RTAS v6</p>
-<h3>Compatibility and performance</h3>
-<p>Antares Autotune Evo is compatible with Windows XP/Vista/7/8/10 (32-bit or 64-bit) and Mac OS X 10.4 or later (Universal Binary). It supports sample rates up to 192 kHz. It can be used as a standalone application or as a plug-in with various DAWs that support VST or RTAS formats. It has a low CPU usage and a high-quality audio output. It also has an online manual and video tutorials to help you get started.</p>
-<h2>How to download and install Antares Autotune Evo</h2>
-<h3>Downloading the file</h3>
-<p>To download Antares Autotune Evo VST RTAS v6.0.9.rar.rar, you can use one of these links:</p>
-<ul>
-<li><a href="https://audioz.download/software/win/67524-download_antares-autotune-evo-vst-rtas-v609-proper-air.html">https://audioz.download/software/win/67524-download_antares-autotune-evo-vst-rtas-v609-proper-air.html</a></li>
-<li><a href="https://drive.google.com/file/d/18vbUbHloXX2s7oXXY1n9jePb1KxFkklO/view?usp=sharing">https://drive.google.com/file/d/18vbUbHloXX2s7oXXY1n9jePb1KxFkklO/view?usp=sharing</a></li>
-<li><a href="https://4download.net/368-auto-tune-evo-full.html">https://4download.net/368-auto-tune-evo-full.html</a></li>
-<li><a href="https://www.4shared.com/rar/EGjratnyee/Antares_Autotune_Evo_VST_RTAS_.html?locale=en">https://www.4shared.com/rar/EGjratnyee/Antares_Autotune_Evo_VST_RTAS_.html?locale=en</a></li>
-</ul>
-<p>The file size is about 5 MB. You may need to register or sign in to access some of these links.</p>
-<h3>Extracting the file</h3>
-<p>After downloading Antares Autotune Evo VST RTAS v6.0.9.rar.rar, you will need to extract it using a software that can handle .rar files, such as WinRAR or 7-Zip. To do this:</p>
-<ol>
-<li>Right-click on the file and choose "Extract here" or "Extract to Antares Autotune Evo VST RTAS v6.0.9" (depending on your software).</li>
-<li>You will be asked to enter a password for the file. The password is: Byd3Ri}9 (for audioz.download link) or www.4download.net (for 4download.net link).</li>
-<li>You will get another .rar file called Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR.rar inside the extracted folder.</li>
-<h3>Installing the plug-in</h3>
-<p>After extracting the file, you will get a folder called Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR that contains the VST and RTAS versions of the plug-in, as well as a text file with installation instructions. To install the plug-in:</p>
-<ol>
-<li>Run the file called Setup.exe inside the folder.</li>
-<li>Follow the on-screen instructions to complete the installation.</li>
-<li>Copy the file called Auto-Tune_Evo_VST.dll from the folder to your VST plug-in directory that you specified in your audio software.</li>
-<li>Copy the file called Auto-Tune_Evo_RTAS.dll from the folder to your RTAS plug-in directory that you specified in your audio software.</li>
-<li>Restart your audio software if it was running during the installation.</li>
-</ol>
-<p>You have now successfully installed Antares Autotune Evo VST RTAS v6.0.9.rar.rar on your computer.</p>
-<h2>How to use Antares Autotune Evo in your audio projects</h2>
-<h3>Loading the plug-in in your DAW</h3>
-<p>To use Antares Autotune Evo in your audio projects, you need to load it as a plug-in in your DAW. The exact steps may vary depending on your DAW, but here is a general guide:</p>
-<ol>
-<li>Create a new audio track or open an existing one that contains vocals or instruments that you want to correct or enhance.</li>
-<li>Go to the plug-in browser or menu in your DAW and look for Antares Autotune Evo under VST or RTAS categories.</li>
-<li>Drag and drop the plug-in onto the audio track or insert it as an effect.</li>
-<li>You should see a window with the Antares Autotune Evo interface and controls.</li>
-</ol>
-<h3>Choosing the scale and retune speed</h3>
-<p>The first thing you need to do is to choose the scale and retune speed for your input signal. The scale determines which pitches are considered correct and which are corrected by the plug-in. The retune speed determines how fast and how much the plug-in corrects the input pitch. To do this:</p>
-<ul>
-<li>Click on the Scale button at the top left corner of the window to open a drop-down menu with various scale options. You can choose from minor, major, chromatic, or 26 historical and microtonal scales. You can also create your own custom scale by clicking on Edit Scale.</li>
-<li>Click on the Key button next to the Scale button to choose the root note of your scale. For example, if you choose C major as your scale, you can choose C as your key.</li>
-<li>Click on the Input Type button next to the Key button to choose the type of input signal you are using. You can choose from Soprano, Alto/Tenor, Low Male, Instrument, or Bass Instrument.</li>
-<li>Adjust the Retune Speed knob at the bottom left corner of the window to set how fast and how much the plug-in corrects the input pitch. A lower value means faster and more aggressive correction, while a higher value means slower and more natural correction. You can also use different retune speeds for different parts of your signal by using automation or by switching between automatic and graphical modes.</li>
-</ul>
-<h3>Editing the pitch envelope</h3>
-the pitch envelope of your input signal. To do this:</p>
-<ul>
-<li>Click on the Graph button at the top right corner of the window to switch to graphical mode.</li>
-<li>Click on the Track Pitch button at the bottom right corner of the window to start tracking the pitch of your input signal. You will see a red line representing the detected pitch envelope.</li>
-<li>Click on the Make Curve button next to the Track Pitch button to create a blue line representing the corrected pitch envelope. The blue line will initially follow the red line exactly.</li>
-<li>Use the graphics tools at the top of the window to edit the blue line as you wish. You can select, move, delete, draw, line, curve, or vibrato segments of the blue line. You can also zoom in and out, undo and redo edits, import and export pitch data, and more.</li>
-<li>Click on the Play button at the bottom left corner of the window to hear how your edits sound. You can also click on the Bypass button next to it to compare with the original signal.</li>
-</ul>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-<p>In this article, we have learned what Antares Autotune Evo VST RTAS v6.0.9.rar.rar is and how to use it in our audio projects. We have covered:</p>
-<ul>
-<li>What Antares Autotune Evo is and what it can do for our vocals or instruments.</li>
-<li>What VST RTAS means and how it affects compatibility and performance.</li>
-<li>What a .rar file is and how to extract it using a software like WinRAR or 7-Zip.</li>
-<li>How to download and install Antares Autotune Evo using one of the links provided.</li>
-<li>How to use Antares Autotune Evo in automatic mode or graphical mode depending on our needs and preferences.</li>
-</ul>
-<h3>Call to action</h3>
-<p>If you are interested in trying out Antares Autotune Evo for yourself, you can download it from one of the links below and follow the steps in this article to get started. You will be amazed by how much you can improve or transform your vocals or instruments with this powerful plug-in. Whether you want to fix subtle pitch errors or create dramatic vocal effects, Antares Autotune Evo can help you achieve your creative goals.</p>
-<p>Download Antares Autotune Evo VST RTAS v6.0.9.rar.rar from one of these links:</p>
-<ul>
-<li><a href="https://audioz.download/software/win/67524-download_antares-autotune-evo-vst-rtas-v609-proper-air.html">https://audioz.download/software/win/67524-download_antares-autotune-evo-vst-rtas-v609-proper-air.html</a></li>
-<li><a href="https://drive.google.com/file/d/18vbUbHloXX2s7oXXY1n9jePb1KxFkklO/view?usp=sharing">https://drive.google.com/file/d/18vbUbHloXX2s7oXXY1n9jePb1KxFkklO/view?usp=sharing</a></li>
-<li><a href="https://4download.net/368-auto-tune-evo-full.html">https://4download.net/368-auto-tune-evo-full.html</a></li>
-<li><a href="https://www.4shared.com/rar/EGjratnyee/Antares_Autotune_Evo_VST_RTAS_.html?locale=en">https://www.4shared.com/rar/EGjratnyee/Antares_Autotune_Evo_VST_RTAS_.html?locale=en</a></li>
-</ul>
-<p>Thank you for reading this article and happy tuning!</p>
-<h2>FAQs</h2>
-<h3>What is the difference between Auto-Tune Evo and Auto-Tune Pro?</h3>
-<p>Auto-Tune Pro is the latest version of Auto-Tune software that offers more features and improvements than Auto-Tune Evo. Some of these features include:</p>
-<ul>
-<li>A redesigned interface that is more intuitive and user-friendly.</li>
-<li>A new Classic Mode that emulates the sound of Auto-Tune 5, which is popular for its characteristic vocal effect.</li>
-<li>A new Flex-Tune feature that allows more natural and expressive pitch correction.</li>
-<li>A new Low Latency Mode that enables real-time performance with minimal delay.</li>
-<li>A new Auto-Key plug-in that automatically detects the key and scale of your music and sends it to Auto-Tune Pro.</li>
-<li>A new MIDI Control feature that allows you to control Auto-Tune Pro with a MIDI controller or a MIDI track.</li>
-<li>A new ARA support that allows seamless integration with ARA-compatible DAWs.</li>
-</ul>
-<h3>How do I uninstall Antares Autotune Evo?</h3>
-<p>To uninstall Antares Autotune Evo from your computer, you need to do two things:</p>
-<ol>
-<li>Delete the files that you copied to your VST and RTAS plug-in directories (Auto-Tune_Evo_VST.dll and Auto-Tune_Evo_RTAS.dll).</li>
-<li>Run the file called Uninstall.exe inside the folder Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR that you extracted from Antares Autotune Evo VST RTAS v6.0.9.rar.rar.</li>
-</ol>
-<h3>How do I update Antares Autotune Evo?</h3>
-the latest version of Antares Autotune Evo from the official website or from one of the links provided in this article. You will need to uninstall the previous version of Antares Autotune Evo before installing the new one. To do this, follow these steps:</p>
-<ol>
-<li>Delete the files that you copied to your VST and RTAS plug-in directories (Auto-Tune_Evo_VST.dll and Auto-Tune_Evo_RTAS.dll).</li>
-<li>Run the file called Uninstall.exe inside the folder Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR that you extracted from Antares Autotune Evo VST RTAS v6.0.9.rar.rar.</li>
-<li>Download and install the latest version of Antares Autotune Evo using one of these links:</li>
-<ul>
-<li><a href="https://www.antarestech.com/software-download/">https://www.antarestech.com/software-download/</a></li>
-<li><a href="https://splice.com/plugins/141-auto-tune-evo-vst-au-by-antares">https://splice.com/plugins/141-auto-tune-evo-vst-au-by-antares</a></li>
-</ul>
-<li>Follow the installation instructions provided by the installer or by the website.</li>
-<li>Restart your audio software if it was running during the installation.</li>
-</ol>
-<h3>How do I get support for Antares Autotune Evo?</h3>
-<p>If you have any questions or issues with Antares Autotune Evo, you can contact the Antares support team through their website or through their social media channels. You can also check their online manual and video tutorials for more information and tips on how to use Antares Autotune Evo. Here are some links to help you:</p>
-<ul>
-<li><a href="https://www.antarestech.com/support/">https://www.antarestech.com/support/</a></li>
-<li><a href="https://www.antarestech.com/manuals/">https://www.antarestech.com/manuals/</a></li>
-<li><a href="https://www.youtube.com/user/AntaresAudioTech">https://www.youtube.com/user/AntaresAudioTech</a></li>
-<li><a href="https://www.facebook.com/AntaresAudioTechnologies">https://www.facebook.com/AntaresAudioTechnologies</a></li>
-<li><a href="https://twitter.com/AntaresAudio">https://twitter.com/AntaresAudio</a></li>
-</ul>
-<h3>What are some alternatives to Antares Autotune Evo?</h3>
-<p>If you are looking for some alternatives to Antares Autotune Evo, you can try some of these other pitch correction and manipulation software:</p>
-<ul>
-<li>Melodyne by Celemony: A powerful and versatile software that allows you to edit pitch, timing, formants, and more with a high level of accuracy and musicality.</li>
-<li>Waves Tune by Waves: A comprehensive and easy-to-use software that offers both automatic and graphical modes for pitch correction and creative effects.</li>
-<li>Nectar 3 by iZotope: A complete vocal production suite that includes pitch correction, harmony generation, vocal assistant, breath control, compression, EQ, reverb, delay, and more.</li>
-<li>MAutoPitch by MeldaProduction: A free and simple software that offers basic pitch correction and modulation effects.</li>
-</ul>
-<h3>What are some tips and tricks for using Antares Autotune Evo?</h3>
-<p>Here are some tips and tricks for using Antares Autotune Evo effectively and creatively:</p>
-<ul>
-<li>Use a good microphone and preamp to record your vocals or instruments with a clear and clean sound.</li>
-<li>Use a pop filter and a shock mount to reduce unwanted noises and vibrations.</li>
-<li>Record multiple takes of your performance and choose the best one for pitch correction.</li>
-<li>Use a tuner or a reference track to check your tuning before applying pitch correction.</li>
-<li>Use automatic mode for quick and easy pitch correction or graphical mode for precise and creative pitch manipulation.</li>
-<li>Experiment with different scales, keys, input types, retune speeds, graphics tools, and effects to achieve different results.</li>
-<li>Use automation or MIDI control to change parameters dynamically during playback.</li>
-<li>Use bypass or compare buttons to check your edits against the original signal.</li>
-<li>Mix your corrected or enhanced vocals or instruments with other tracks in your project to create a balanced and harmonious sound.</li>
-</ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDFab 8.1.5.9 Qt Final Multilang Download Pcl A Powerful and Customizable DVD Copy Software.md
DELETED
@@ -1,217 +0,0 @@
-<br />
-<h1>DVDFab 8.1.5.9 Qt Final Multilang: A Powerful DVD Copy Software</h1>
-<p>Do you have a collection of DVDs that you want to backup, rip, convert or create? If so, you need a reliable and versatile DVD copy software that can handle any task you throw at it.</p>
-<p>One such software is <strong>DVDFab 8.1.5.9 Qt Final Multilang</strong>, a powerful and comprehensive tool that can copy, rip, convert and create DVDs with high quality and speed.</p>
-<h2>DVDFab 8.1.5.9 Qt Final Multilang Download Pcl</h2><br /><p><b><b>Download File</b> ——— <a href="https://byltly.com/2uKAa8">https://byltly.com/2uKAa8</a></b></p><br /><br />
-<p>In this article, we will show you what DVDFab 8.1.5.9 Qt Final Multilang can do, why you should choose it over other DVD copy software, and how you can use it to perform various tasks with ease.</p>
-<h2>Introduction</h2>
-<p><strong>DVDFab 8.1.5.9 Qt Final Multilang</strong> is a software that can copy, rip, convert and create DVDs and Blu-rays with high quality and speed.</p>
-<p>It supports various formats and devices, such as MP4, MKV, AVI, iPhone, iPad, Android, etc.</p>
-<p>It also has many advanced features and options that allow you to customize your output according to your preferences.</p>
-<p>DVDFab Qt 8.1.5.9 Multilingual Full Version<br />
-Download DVDFab 8.1.5.9 Final with Crack<br />
-DVDFab Qt 8.1.5.9 Portable Free Download<br />
-How to Install DVDFab 8.1.5.9 Multilang on PC<br />
-DVDFab 8.1.5.9 Qt Final Patched Download<br />
-DVDFab Qt 8.1.5.9 Serial Key Generator<br />
-Download DVDFab 8.1.5.9 Final Multilanguage for Windows<br />
-DVDFab Qt 8.1.5.9 License Key Activation<br />
-DVDFab 8.1.5.9 Qt Final Multilang Torrent Download<br />
-DVDFab Qt 8.1.5.9 Crack Full Version<br />
-Download DVDFab 8.1.5.9 Final with Keygen<br />
-DVDFab Qt 8.1.5.9 Registration Code Free<br />
-DVDFab 8.1.5.9 Qt Final Multilang Review<br />
-DVDFab Qt 8.1.5.9 Features and Benefits<br />
-Download DVDFab 8.1.5.9 Final with License Key<br />
-DVDFab Qt 8.1.5.9 Product Key Finder<br />
-DVDFab 8.1.5.9 Qt Final Multilang Software Download<br />
-DVDFab Qt 8.1.5.9 System Requirements and Compatibility<br />
-Download DVDFab 8.1.5.9 Final with Serial Number<br />
-DVDFab Qt 8.1.5.9 Activation Code Generator<br />
-DVDFab 8.1.5.9 Qt Final Multilang Free Trial Download<br />
-DVDFab Qt 8.1.5.9 User Guide and Manual<br />
-Download DVDFab 8.1.5.9 Final with Patch<br />
-DVDFab Qt 8.1.5.9 Keygen Full Version<br />
-DVDFab 8.1.5.9 Qt Final Multilang Update Download<br />
-DVDFab Qt 8 Crack + Serial Key Free Download<br />
-Download DVDFab 8 Full Version with Crack<br />
-DVDFab Qt Latest Version Free Download for PC<br />
-How to Use DVDFab Qt to Copy and Rip DVDs and Blu-rays<br />
-DVDFab Qt Crack + Keygen Download for Windows<br />
-Download DVDFab Qt Portable Full Version for PC<br />
-DVDFab Qt Review: Best DVD and Blu-ray Copy Software<br />
-DVDFab Qt License Key + Patch Free Download<br />
-Download DVDFab Qt Multilingual Full Version for Windows<br />
-DVDFab Qt Serial Number + Activation Code Free Download<br />
-Download DVDFab Qt Crack + Keygen for PC<br />
-DVDFab Qt User Manual and Tutorial PDF Download<br />
-DVDFab Qt System Requirements and Installation Guide<br />
-Download DVDFab Qt Full Version with License Key for PC<br />
-DVDFab Qt Patch + Serial Key Free Download for Windows<br />
-Download DVDFab Qt Multilanguage Full Version with Crack for PC<br />
-DVDFab Qt Activation Code + Registration Code Free Download<br />
-Download DVDFab Qt Full Version with Serial Number for PC<br />
-DVDFab Qt Features and Functions Overview and Comparison<br />
-Download DVDFab Qt Full Version with Patch for PC <br />
-DVDFab Qt Product Key + Crack Free Download for Windows <br />
-Download DVDFab Qt Multilingual Full Version with Keygen for PC <br />
-How to Update and Upgrade to the Latest Version of DVDFab </p>
-<p>Here are some of the reasons why you should choose DVDFab 8.1.5.9 Qt Final Multilang over other DVD copy software:</p>
-<ul>
-<li>It has a user-friendly interface that is easy to navigate and operate.</li>
-<li>It has multiple modes that cater to different needs and scenarios.</li>
-<li>It has a fast processing speed that can save you time and energy.</li>
-<li>It has a high output quality that can preserve the original quality of your DVDs.</li>
-<li>It has a wide compatibility that can work with any DVD disc or drive.</li>
-<li>It has a free trial version that you can download and use without any limitations.</li>
-</ul>
-<p>Now that you know what DVDFab 8.1.5.9 Qt Final Multilang can do and why you should choose it over other DVD copy software, let's see how you can use it to perform various tasks.</p>
-<h2>How to Copy DVDs with DVDFab 8.1.5.Qt Final Multilang</h2>
-<p>If you want to make a backup copy of your DVDs for safekeeping or sharing, you can use DVDFab 8.QT Final Multilang's copy mode.</p>
-<p>This mode allows you to copy your DVDs in different ways, such as full disc, main movie, split, merge, clone/burn, customize or customize split.</p>
-<p>You can also choose between different output types, such as DVD disc (DVD+R/RW,DVD-R/RW,DVD+R DL,DVD-R DL), ISO file or folder.</p>
-<p>Here are the steps on how to copy DVDs with DVDFab 8.QT Final Multilang:</p>
-<ol>
-<li>Download and install DVDFab 8.QT Final Multilang on your PC from <a href="https://dvdfab-qt-final.software.informer.com/8.Q/">here</a>.</li>
-<li>Launch DVDFab 8.QT Final Multilang and select the copy mode from the top menu bar.</li>
-<li>Insert your source DVD disc into your PC's drive or load it from your hard drive as an ISO file or folder.</li>
-<li>Select your target output type from the drop-down menu at the bottom left corner.</li>
-<li>Select your desired copy mode from the left panel.</li>
-<li>Customize your output settings by clicking on the wrench icon at the top right corner.</li>
-<li>Start the copying process by clicking on the start button at the bottom right corner.</li>
-</ol>
-<h2>How to Rip DVDs with DVDFab 8.QT Final Multilang</h2>
-<p>If you want to convert your DVDs into digital formats that can be played on various devices or platforms, you can use DVDFab 8.QT Final Multilang's ripper mode.</p>
-<p>You can also customize the output settings by adjusting the video and audio parameters, such as resolution, bitrate, frame rate, codec, channel, etc.</p>
-<p>Here are the steps on how to rip DVDs with DVDFab 8.QT Final Multilang:</p>
-<ol>
-<li>Launch DVDFab 8.QT Final Multilang and select the ripper mode from the top menu bar.</li>
-<li>Insert your source DVD disc into your PC's drive or load it from your hard drive as an ISO file or folder.</li>
-<li>Select your target output format and profile from the left panel.</li>
-<li>Customize your output settings by clicking on the wrench icon at the top right corner.</li>
-<li>Start the ripping process by clicking on the start button at the bottom right corner.</li>
-</ol>
-<h2>How to Convert DVDs with DVDFab 8.QT Final Multilang</h2>
-<p>If you want to change the format of your DVDs without changing the content or quality, you can use DVDFab 8.QT Final Multilang's converter mode.</p>
-<p>This mode allows you to convert your DVDs into different formats and profiles, such as MP4,H264,H265,MKV,MPEG4,MPEG2,XVID,DIVX,AAC,DTS,DOLBY DIGITAL PLUS,DOLBY TRUEHD,DOLBY ATMOS,iPhone,iPad,iPod Touch,Samsung Galaxy,Huawei,Xiaomi,LG,Sony,Nokia,Motorola,ZTE,Vivo,Oppo,Nintendo Switch,Xbox One S,Xbox One X,Xbox Series S,Xbox Series X,Sony PS3,Sony PS4,Sony PS4 Pro,Sony PS5,PSP,Vita,Wii U,Wii,Nintendo DS,Nintendo DSi,Nintendo DSi XL,Nintendo DS Lite,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,Nintendo DSi XL,</p>
-<p>You can also customize the output settings by adjusting the video and audio parameters, such as resolution, bitrate, frame rate, codec, channel, etc.</p>
-<p>Here are the steps on how to convert DVDs with DVDFab 8.QT Final Multilang:</p>
-<ol>
-<li>Launch DVDFab 8.QT Final Multilang and select the converter mode from the top menu bar.</li>
-<li>Insert your source DVD disc into your PC's drive or load it from your hard drive as an ISO file or folder.</li>
-<li>Select your target output format and profile from the left panel.</li>
-<li>Customize your output settings by clicking on the wrench icon at the top right corner.</li>
-<li>Start the conversion process by clicking on the start button at the bottom right corner.</li>
-</ol>
-<h2>How to Create DVDs with DVDFab 8.QT Final Multilang</h2>
-<p>If you want to create your own DVDs from various sources, such as videos, photos, music, etc., you can use DVDFab 8.QT Final Multilang's creator mode.</p>
-<p>This mode allows you to create DVDs in different formats and profiles, such as DVD disc (DVD+R/RW,DVD-R/RW,DVD+R DL,DVD-R DL), ISO file or folder.</p>
-<p>You can also customize the menu, chapters, subtitles, audio tracks, etc. by using various templates and options.</p>
-<p>Here are the steps on how to create DVDs with DVDFab 8.QT Final Multilang:</p>
-<ol>
-<li>Launch DVDFab 8.QT Final Multilang and select the creator mode from the top menu bar.</li>
-<li>Add your source files by clicking on the add button at the top left corner or dragging and dropping them into the main interface.</li>
-<li>Select your target output type from the drop-down menu at the bottom left corner.</li>
-<li>Select your desired output format and profile from the left panel.</li>
-<li>Customize your menu, chapters, subtitles, audio tracks, etc. by clicking on the menu icon at the top right corner.</li>
-<li>Start the creation process by clicking on the start button at the bottom right corner.</li>
-<h2>Comparison Table of DVDFab 8.QT Final Multilang with Other DVD Copy Software</h2>
-<p>To help you make an informed decision, we have prepared a comparison table that shows how DVDFab 8.QT Final Multilang stacks up against other popular DVD copy software in terms of features, speed, quality, compatibility, etc.</p>
-<table>
-<tr>
-<th>DVD Copy Software</th>
-<th>Features</th>
-<th>Speed</th>
-<th>Quality</th>
-<th>Compatibility</th>
-</tr>
-<tr>
-<td><strong>DVDFab 8.QT Final Multilang</strong></td>
-<td>Copy, rip, convert and create DVDs and Blu-rays in various modes and formats.</td>
-<td>Fast and stable.</td>
-<td>High and lossless.</td>
-<td>Wide and flexible.</td>
-</tr>
-<tr>
-<td>Wondershare UniConverter</td>
-<td>Copy, rip and convert DVDs and videos in various formats.</td>
-<td>Fast but unstable.</td>
-<td>High but lossy.</td>
-<td>Wide but limited.</td>
-</tr>
-<tr>
-<td>WinX DVD Ripper Platinum</td>
-<td>Rip and convert DVDs in various formats.</td>
-<td>Fast but unstable.</td>
-<td>High but lossy.</td>
-<td>Narrow and rigid.</td>
-</tr>
-<tr>
-<td>DVD Shrink</td>
-<td>Copy and compress DVDs in various modes.</td>
-<td>Slow and unstable.</td>
-<td>Low and lossy.</td>
-<td>Narrow and rigid.</td>
-</tr>
-<tr>
-<td>DVD Decrypter</td>
-<td>Copy and decrypt DVDs in various modes.</td>
-<td>Slow and unstable.</td>
-<td>Low and lossy.</td>
-<td>Narrow and rigid.</td>
-</tr>
-</table>
-<p>As you can see from the table, DVDFab 8.QT Final Multilang is the best DVD copy software that can meet all your needs and expectations.</p>
-<h2>Conclusion</h2>
-<p>In conclusion, DVDFab 8.QT Final Multilang is a powerful and comprehensive DVD copy software that can copy, rip, convert and create DVDs with high quality and speed.</p>
-<p>It has a user-friendly interface, multiple modes, fast processing speed, high output quality, wide compatibility, and a free trial version that you can download and use without any limitations.</p>
-<p>If you are looking for a software that can handle any DVD task you throw at it, you should definitely give DVDFab 8.QT Final Multilang a try. You will not regret it!</p>
-<p>To download DVDFab 8.QT Final Multilang for free and enjoy its amazing features, click on the link below:</p>
-<a href="https://dvdfab-qt-final.software.informer.com/8.2/">Download DVDFab 8.QT Final Multilang for Free Now!</a>
-<h2>FAQs</h2>
-<p>Here are some of the frequently asked questions about DVDFab 8.QT Final Multilang:</p>
-<ol>
-<li><strong>What are the system requirements for DVDFab 8.QT Final Multilang?</strong></li>
-<p>The system requirements for DVDFab 8.QT Final Multilang are as follows:</p>
-<ul>
-<li>Windows XP/Vista/7/8/10 (32-bit/64-bit)</li>
-<li>Pentium II 500 MHz or above</li>
-<li>512 MB of RAM or above</li>
-<li>A DVD drive or a Blu-ray drive</li>
-<li>An internet connection for registration and updates</li>
-</ul>
-<li><strong>How to update DVDFab 8.QT Final Multilang to the latest version?</strong></li>
-<p>To update DVDFab 8.QT Final Multilang to the latest version, you can do one of the following:</p>
-<ul>
-<li>Click on the green check mark icon at the top right corner of the main interface and follow the instructions to download and install the latest version.</li>
-<li>Visit the official website of DVDFab <a href="https://www.dvdfab.cn/">here</a> and download the latest version manually.</li>
-<li>Contact the customer service of DVDFab <a href="https://www.dvdfab.cn/contact.htm">here</a> and ask for assistance.</li>
-</ul>
-<li><strong>How to contact the customer service of DVDFab?</strong></li>
-<p>If you have any questions or problems with DVDFab 8.QT Final Multilang, you can contact the customer service of DVDFab by doing one of the following:</p>
-<ul>
-<li>Email them at <a href="mailto:[email protected]">[email protected]</a>.</li>
-<li>Livchat with them on their website <a href="https://www.dvdfab.cn/">here</a>.</li>
-<li>Call them at +86-10-84913343 (Monday-Friday: 9:00-18:00 GMT+08:00).</li>
-<li>Fax them at +86-10-84913343 (Monday-Friday: 9:00-18:00 GMT+08:00).</li>
-<li>Social media platforms such as Facebook <a href="https://www.facebook.com/dvdfabsoftware">here</a>, Twitter <a href="https://twitter.com/DVDFab">here</a>, YouTube <a href="https://www.youtube.com/user/dvdfabsoftware">here</a>, etc.</li>
-</ul>
-<li><strong>How to get a refund for DVDFab 8.QT Final Multilang?</strong></li>
-<p>If you are not satisfied with DVDFab 8.QT Final Multilang for any reason, you can request a refund within 30 days of purchase by doing one of the following:</p>
-<ul>
-<li>Email your order number and reason for refund to <a href="mailto:[email protected]">[email protected]</a>.</li>
-<li>Livchat with the customer service on their website <a href="https://www.dvdfab.cn/">here</a>.</li>
-<li>Contact your payment platform such as PayPal, Visa, MasterCard, etc. and ask for a chargeback.</li>
-</ul>
-<li><strong>How to get a discount for DVDFab 8.QT Final Multilang?</strong></li>
-<p>If you want to get a discount for DVDFab 8.QT Final Multilang, you can do one of the following:</p>
-<ul>
-<li>Catch their seasonal promotions or special offers on their website <a href="https://www.dvdfab.cn/">here</a>.</li>
-<li>Become their member or VIP by registering on their website <a href="https://www.dvdfab.cn/member.htm">here</a>.</li>
-<li>Become their affiliate or partner by applying on their website <a href="https://www.dvdfab.cn/affiliate.htm">here</a>.</li>
-<li>Become their fan or follower on their social media platforms such as Facebook <a href="https://www.facebook.com/dvdfabsoftware">here</a>, Twitter <a href="https://twitter.com/DVDFab">here</a>, YouTube <a href="https://www.youtube.com/user/dvdfabsoftware">here</a>, etc. and get exclusive coupons or codes.</li>
-<li>Contact their customer service and negotiate a discount with them.</li>
-</ul>
-</p> 0a6ba089eb<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Danfe View Keygen [NEW].md
DELETED
@@ -1,22 +0,0 @@
-
-<h1>What is DANFE View and how can it help you manage your electronic invoices?</h1>
-<p>DANFE View is a software for Windows that allows you to receive, store, view and print the XML files of NF-e, NFC-e, MDF-e and CT-e. These are different types of electronic invoices that are used in Brazil for tax purposes. With DANFE View, you can have all your electronic invoices, whether received or issued, in one place, organized and accessible.</p>
-<p>Electronic invoices are mandatory in Brazil for most businesses and transactions. They are digital documents that contain all the information about the products or services sold, the buyer and seller, the taxes and fees involved, and the authorization code from the tax authority. They are sent and received in XML format, which is a standard for data exchange on the web.</p>
-<h2>Danfe View Keygen</h2><br /><p><b><b>Download Zip</b> ⚹⚹⚹ <a href="https://byltly.com/2uKyUJ">https://byltly.com/2uKyUJ</a></b></p><br /><br />
-<p>However, XML files are not easy to read or print by humans. That's why there is a need for a software like DANFE View, which can convert XML files into more user-friendly formats, such as PDF or HTML. DANFE View can also generate and print the DANFE (Documento Auxiliar da Nota Fiscal Eletrônica), which is a simplified version of the NF-e that can be used as a proof of purchase or delivery.</p>
-<p>But DANFE View is not just a viewer or printer of electronic invoices. It is also a powerful tool for managing and organizing them. With DANFE View, you can:</p>
-<ul>
-<li>Download and store automatically the XML files from your email or from the tax authority website (SEFAZ).</li>
-<li>Search quickly for any XML file by period, series, number, value, CNPJ (tax identification number) of the issuer or recipient.</li>
-<li>Mark the XML files with custom tags for easy classification.</li>
-<li>Manifest your agreement or disagreement with the NF-e issued by your suppliers or customers.</li>
-<li>Make periodic and automatic backup of your XML files.</li>
-<li>Send automatically your XML files to your accounting office.</li>
-<li>Emit reports by supplier, recipient, products and CFOP (tax code).</li>
-</ul>
-<p>DANFE View is available in three versions: Free, Plus and Office. The Free version has some limitations in terms of storage capacity, number of companies managed and features available. The Plus version allows you to manage unlimited XML files related to one CNPJ or CPF (individual tax identification number). The Office version allows you to manage unlimited XML files from any CNPJ or CPF.</p>
-<p>If you want to try DANFE View for free for 7 days, you can download it from the official website[^1^]. You can also find more information about the software features, prices and support on the website[^1^]. DANFE View is a reliable and secure software that respects the privacy and integrity of your data. It is also updated regularly to comply with the latest tax regulations and requirements.</p>
-<p>DANFE View is a must-have software for anyone who deals with electronic invoices in Brazil. It can save you time, money and hassle by making your life easier and more organized. Don't miss this opportunity and download DANFE View today!</p>
-<p></p> cec2833e83<br />
-<br />
-<br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discografia Evaldo Freire Torrent Download A Guide to the Musical Career of the Sertanejo Icon.md
DELETED
@@ -1,91 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Discografia Evaldo Freire Torrent Download</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>Are you a fan of Brazilian music? Do you enjoy listening to romantic songs with catchy melodies and heartfelt lyrics? If so, you may have heard of Evaldo Freire, one of the most popular singers of brega music in Brazil. But do you know how to download his discography via torrent?</p>
|
5 |
-
<h2>Discografia Evaldo Freire Torrent Download</h2><br /><p><b><b>DOWNLOAD</b> ✅ <a href="https://byltly.com/2uKwSw">https://byltly.com/2uKwSw</a></b></p><br /><br />
|
6 |
-
<p>In this article, we will tell you everything you need to know about Evaldo Freire and his musical style. We will also explain what a torrent is and how it works. And we will show you how to find, download, and play Evaldo Freire's discography via torrent. So sit back, relax, and enjoy this musical journey!</p>
|
7 |
-
<h2>Evaldo Freire: A Brazilian Brega Singer</h2>
|
8 |
-
<p>Evaldo Freire is a Brazilian singer who was born in 1945 in Pernambuco. He started his musical career in 1968 as part of a trio called Os Três Moraes. He later went solo and became one of the most successful artists of brega music in Brazil.</p>
|
9 |
-
<p>Brega music is a genre that emerged in Brazil in the 1960s and 1970s. It combines romantic ballads with pop and folk influences. The word brega means tacky or cheesy in Portuguese, but it also has a positive connotation of being authentic and sincere. Brega music often deals with themes such as love, betrayal, nostalgia, and social issues.</p>
|
10 |
-
<p>Some of Evaldo Freire's most famous songs and albums are "Chega" (Enough), "Só Quero" (I Only Want), "Meu Deus" (My God), "Não Vou Chorar" (I Won't Cry), "Eu Nunca Pensava" (I Never Thought), "Eu Encontrei Alguém" (I Found Someone), "Você Não Presta Pra Mim" (You're No Good For Me), "Onde Está Você" (Where Are You), "Eu Não Sou Lixo" (I'm Not Trash), "Não Me Deixe Só" (Don't Leave Me Alone), "O Amor é Tudo" (Love Is Everything), "Eu Preciso de Você" (I Need You), "Saudade de Você" (Missing You), "Eu Te Amo Demais" (I Love You Too Much), "Você Mudou Demais" (You Changed Too Much), among others.</p>
|
11 |
-
<h2>Torrent: A Peer-to-Peer File Sharing Protocol</h2>
|
12 |
-
<p>A torrent is a file that contains information about other files that are shared by users over the internet. It allows users to download large amounts of data from multiple sources at once, without relying on a central server.</p>
|
13 |
-
<p>Torrent works by using a peer-to-peer (P2P) protocol that connects users who have the same files or parts of them. Users who have the complete file are called seeders, while users who are downloading the file are called leechers. A tracker is a server that helps users find each other and coordinate the file transfer. A client is a software that enables users to create, download, and manage torrents.</p>
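<p>If you are curious what a torrent link actually contains, here is a small sketch (Python standard library only) that takes apart a made-up magnet link. The info hash is what peers and trackers use to identify the torrent; the "tr" entries are tracker URLs. This is only an illustration, not a downloader.</p>
<pre><code>
# Minimal sketch: inspecting the parts of a made-up magnet link with the
# Python standard library. "xt" carries the info hash that identifies the
# torrent; "dn" is a display name; "tr" entries are tracker URLs.
from urllib.parse import urlparse, parse_qs

magnet = ("magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567"
          "&dn=Example+Album&tr=udp%3A%2F%2Ftracker.example.org%3A6969")

params = parse_qs(urlparse(magnet).query)
print("Info hash:", params["xt"][0].split(":")[-1])
print("Name:     ", params["dn"][0])
print("Trackers: ", params["tr"])
</code></pre>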
|
14 |
-
<p>Some of the advantages of using torrents are that they can speed up the download process, reduce the load on the original source, and allow users to resume interrupted downloads. Some of the disadvantages are that they depend on the availability and generosity of other users, they may expose users to legal and ethical issues, and they may contain malicious or fake files.</p>
|
15 |
-
<p>Evaldo Freire discography torrent download free<br />
|
16 |
-
How to download Evaldo Freire albums torrent<br />
|
17 |
-
Evaldo Freire songs torrent download mp3<br />
|
18 |
-
Best sites to download Evaldo Freire discography torrent<br />
|
19 |
-
Evaldo Freire torrent download full discography<br />
|
20 |
-
Download Evaldo Freire music torrent online<br />
|
21 |
-
Evaldo Freire discografia completa torrent baixar<br />
|
22 |
-
Onde baixar discografia de Evaldo Freire torrent<br />
|
23 |
-
Músicas de Evaldo Freire torrent download grátis<br />
|
24 |
-
Melhores sites para baixar discografia de Evaldo Freire torrent<br />
|
25 |
-
Discografia Evaldo Freire download torrent magnet link<br />
|
26 |
-
Como baixar discos de Evaldo Freire torrent<br />
|
27 |
-
Evaldo Freire discography torrent download 320kbps<br />
|
28 |
-
Download Evaldo Freire albums torrent flac<br />
|
29 |
-
Evaldo Freire songs torrent download zip<br />
|
30 |
-
Discografia de Evaldo Freire torrent download rar<br />
|
31 |
-
Download all Evaldo Freire songs torrent<br />
|
32 |
-
Evaldo Freire discography torrent download blogspot<br />
|
33 |
-
Baixar discografia de Evaldo Freire torrent mega<br />
|
34 |
-
Discografia completa de Evaldo Freire torrent download mediafire<br />
|
35 |
-
Download Evaldo Freire discography torrent kickass<br />
|
36 |
-
Baixar discos de Evaldo Freire torrent the pirate bay<br />
|
37 |
-
Discografia de Evaldo Freire download torrent utorrent<br />
|
38 |
-
Baixar músicas de Evaldo Freire torrent bittorrent<br />
|
39 |
-
Download Evaldo Freire music torrent limetorrents<br />
|
40 |
-
Discografia de Evaldo Freire download torrent yify<br />
|
41 |
-
Baixar todas as músicas de Evaldo Freire torrent rarbg<br />
|
42 |
-
Download best of Evaldo Freire torrent extratorrent<br />
|
43 |
-
Discografia de Evaldo Freire download torrent isoHunt<br />
|
44 |
-
Baixar melhores músicas de Evaldo Freire torrent eztv<br />
|
45 |
-
Download greatest hits of Evaldo Freire torrent zooqle<br />
|
46 |
-
Discografia de Evaldo Freire download torrent torlock<br />
|
47 |
-
Baixar sucessos de Evaldo Freire torrent demonoid<br />
|
48 |
-
Download top songs of Evaldo Freire torrent idope<br />
|
49 |
-
Discografia de Evaldo Freire download torrent seedpeer<br />
|
50 |
-
Baixar canções de Evaldo Freire torrent monova<br />
|
51 |
-
Download popular songs of Evaldo Freire torrent yourbittorrent<br />
|
52 |
-
Discografia de Evaldo Freire download torrent btscene<br />
|
53 |
-
Baixar hits de Evaldo Freire torrent glodls<br />
|
54 |
-
Download classic songs of Evaldo Freire torrent 1337x</p>
|
55 |
-
<p>Some of the most popular torrent clients and websites are BitTorrent, uTorrent, The Pirate Bay, Kickass Torrents, RARBG, 1337x, YTS, EZTV, Zooqle, LimeTorrents, and Torrentz2. However, users should be careful when using these sites, as they may be blocked or banned in some countries or regions.</p>
|
56 |
-
<h2>How to Download Evaldo Freire's Discography via Torrent</h2>
|
57 |
-
<p>If you want to download Evaldo Freire's discography via torrent, you will need to follow these steps:</p>
|
58 |
-
<ol>
|
59 |
-
<li>Download and install a torrent client of your choice. We recommend BitTorrent or uTorrent for their simplicity and reliability.</li>
|
60 |
-
<li>Go to a torrent website of your choice. We recommend The Pirate Bay or Kickass Torrents for their variety and popularity.</li>
|
61 |
-
<li>Search for "Discografia Evaldo Freire" or "Evaldo Freire Discography" in the search bar. You will see a list of results with different torrent files for Evaldo Freire's discography.</li>
|
62 |
-
<li>Choose a torrent file that suits your preferences. You can check the size, quality, and availability of each file by looking at the columns labeled "Size", "Seeders", "Leechers", and "Health". Generally, you want to choose a file that has a high number of seeders and leechers, a good health ratio, and a reasonable size and quality.</li>
|
63 |
-
<li>Click on the torrent file that you want to download. You will be redirected to a page with more details about the file, such as the name, description, comments, and files included. You can also see a magnet link that looks like a horseshoe-shaped icon.</li>
|
64 |
-
<li>Click on the magnet link or copy and paste it into your torrent client. This will start the download process. You can monitor the progress of the download in your torrent client. You can also pause, resume, or cancel the download at any time.</li>
|
65 |
-
<li>Once the download is complete, you can open the folder where the files are stored. You will see a folder named "Discografia Evaldo Freire" or something similar. Inside this folder, you will find all the songs and albums of Evaldo Freire in MP3 format.</li>
|
66 |
-
<li>Enjoy listening to Evaldo Freire's music! You can play the songs using any media player of your choice. You can also transfer them to your mobile device or burn them to a CD if you want.</li>
|
67 |
-
</ol>
|
68 |
-
<p>To help you choose a torrent file for Evaldo Freire's discography, we have created a table that compares some of the options available on The Pirate Bay:</p>
|
69 |
-
<table border="1">
<tr><th>Name</th><th>Size</th><th>Quality</th><th>Seeders</th><th>Leechers</th><th>Health</th></tr>
<tr><td>Discografia Evaldo Freire</td><td>1.1 GB</td><td>320 kbps</td><td>12</td><td>4</td><td>Good</td></tr>
<tr><td>Evaldo Freire Discography</td><td>1.2 GB</td><td>256 kbps</td><td>8</td><td>6</td><td>Good</td></tr>
<tr><td>Evaldo Freire - Brega Collection</td><td>214 MB</td><td>128 kbps</td><td>5</td><td>3</td><td>Fair</td></tr>
<tr><td>Evaldo Freire - Chega e Mais Nada (1977)</td><td>35 MB</td><td>192 kbps</td><td>3</td><td>2</td><td>Fair</td></tr>
<tr><td>Evaldo Freire - Só Quero (1981)</td><td>40 MB</td><td>192 kbps</td><td>2</td><td>1</td><td>Poor</td></tr>
</table>
<p>As you can see, the first two options are the best ones in terms of size, quality, and availability. However, you can also choose other options if you only want specific albums or songs.</p>
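<p>The rule of thumb above (prefer more seeders and a healthier seeder-to-leecher ratio) can be written down in a few lines of code. The sketch below simply reuses the illustrative numbers from the table; it is not connected to any tracker or website.</p>
<pre><code>
# Sketch: ranking the illustrative options from the table by seeders,
# then by seeder-to-leecher ratio.
options = [
    ("Discografia Evaldo Freire",                "1.1 GB", "320 kbps", 12, 4),
    ("Evaldo Freire Discography",                "1.2 GB", "256 kbps",  8, 6),
    ("Evaldo Freire - Brega Collection",         "214 MB", "128 kbps",  5, 3),
    ("Evaldo Freire - Chega e Mais Nada (1977)", "35 MB",  "192 kbps",  3, 2),
    ("Evaldo Freire - Só Quero (1981)",          "40 MB",  "192 kbps",  2, 1),
]

def score(entry):
    name, size, quality, seeders, leechers = entry
    # Favour absolute seeder count first, then the seeder/leecher ratio.
    return (seeders, seeders / max(leechers, 1))

for name, size, quality, seeders, leechers in sorted(options, key=score, reverse=True):
    print(f"{name}: {seeders} seeders / {leechers} leechers ({quality}, {size})")
</code></pre>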
|
70 |
-
<p>However, keep in mind that downloading copyrighted content without permission is illegal in most countries, and you may face legal consequences such as fines or lawsuits from the content owners or authorities. You should also respect the rights and efforts of the artists and creators who produce the content. You should only download torrents from trusted and legal sources, and use a VPN or proxy to protect your privacy and security. You should also scan the files for viruses or malware before opening them.</p>
|
71 |
-
<h2>Conclusion</h2>
|
72 |
-
<p>In this article, we have learned about Evaldo Freire and his musical style. We have also learned about torrent and how it works. And we have learned how to download Evaldo Freire's discography via torrent.</p>
|
73 |
-
<p>If you are a fan of Brazilian music and brega music, you should definitely check out Evaldo Freire's songs and albums. He is one of the most popular and influential singers of this genre, and his music will touch your heart and soul. You can find his discography on various torrent websites, but make sure you do it legally and ethically.</p>
|
74 |
-
<p>What do you think of Evaldo Freire and his music? Have you downloaded his discography via torrent? How was your experience? Share your thoughts and opinions with us in the comments section below. We would love to hear from you!</p>
|
75 |
-
<p>Thank you for reading this article. We hope you enjoyed it and learned something new. If you liked this article, please share it with your friends and family. And don't forget to subscribe to our newsletter for more interesting and informative articles like this one.</p>
|
76 |
-
<h2>FAQs</h2>
|
77 |
-
<ul>
|
78 |
-
<li><b>Q: Who is Evaldo Freire?</b></li>
|
79 |
-
<li>A: Evaldo Freire is a Brazilian singer who specializes in brega music, a genre that mixes romantic ballads with pop and folk influences.</li>
|
80 |
-
<li><b>Q: What is brega music?</b></li>
|
81 |
-
<li>A: Brega music is a popular musical style in Brazil that emerged in the 1960s and 1970s. It is characterized by sentimental lyrics, simple melodies, and catchy rhythms. It often deals with themes such as love, betrayal, nostalgia, and social issues.</li>
|
82 |
-
<li><b>Q: What is a torrent?</b></li>
|
83 |
-
<li>A: A torrent is a file that contains information about other files that are shared by users over the internet. It allows users to download large amounts of data from multiple sources at once, without relying on a central server.</li>
|
84 |
-
<li><b>Q: How does torrent work?</b></li>
|
85 |
-
<li>A: Torrent works by using a peer-to-peer (P2P) protocol that connects users who have the same files or parts of them. Users who have the complete file are called seeders, while users who are downloading the file are called leechers. A tracker is a server that helps users find each other and coordinate the file transfer. A client is a software that enables users to create, download, and manage torrents.</li>
|
86 |
-
<li><b>Q: Is downloading torrents legal?</b></li>
|
87 |
-
<li>A: Downloading torrents is not illegal per se, but downloading copyrighted content without permission or paying for it is illegal in most countries and regions. Users who download torrents may face legal consequences such as fines or lawsuits from the content owners or authorities. Users should also be aware of the risks of downloading torrents from untrusted sources, such as malware infection or data theft.</li>
|
88 |
-
</ul>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ApkOnline A Web-Based Android Emulator and APK Installer.md
DELETED
@@ -1,108 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>ApkOnline Android Online Emulator: A Review</h1> <p>Have you ever wanted to run an Android app on your computer or browser without installing an emulator? If so, you might be interested in ApkOnline Android Online Emulator, a web-based tool that lets you access any Android app from anywhere. In this article, we will review ApkOnline Android Online Emulator, its features, benefits, and how to use it. We will also compare it with some alternatives and answer some frequently asked questions.</p>
|
3 |
-
<h2>What is ApkOnline Android Online Emulator?</h2>
|
4 |
-
<p>ApkOnline Android Online Emulator is a web browser extension that allows you to start the official free android online emulator with a simple click from your web browser. Its goal is to allow end users to run any Android app from anywhere when online using HTML5 and Javascript technologies. As a mobile emulator, ApkOnline allows users and developers to use their Android applications from anywhere in the world.</p>
|
5 |
-
<h2>apkonline android online emulator</h2><br /><p><b><b>Download File</b> ✵ <a href="https://urlin.us/2uT2kT">https://urlin.us/2uT2kT</a></b></p><br /><br />
|
6 |
-
<h3>Features of ApkOnline Android Online Emulator</h3>
|
7 |
-
<p>Some of the features of ApkOnline Android Online Emulator are:</p>
|
8 |
-
<ul>
|
9 |
-
<li>It supports almost all the features that exist in the real android devices, such as phone calls, text messages, device location, device rotation, hardware sensors, and Google Play Store.</li>
|
10 |
-
<li>It can simulate different configurations of the android emulator, such as Nexus 5 with Android 6.0 Marshmallow.</li>
|
11 |
-
<li>It has a graphical user interface that can be controlled with the mouse. It also provides access to the buttons via a menu on the right side of the emulator.</li>
|
12 |
-
<li>It has an APK downloader that can search for and download any Android app. It also looks for iPhone apps with links to download iPhone apps.</li>
|
13 |
-
<li>It has a free online android emulator that can run any android app online without downloading or installing anything.</li>
|
14 |
-
</ul>
|
15 |
-
<h3>Benefits of ApkOnline Android Online Emulator</h3>
|
16 |
-
<p>Some of the benefits of ApkOnline Android Online Emulator are:</p>
|
17 |
-
<ul>
|
18 |
-
<li>It is easy to use and does not require any installation or registration. You just need to install the browser extension and click on it to start the emulator.</li>
|
19 |
-
<li>It is fast and reliable. It runs on a cloud software platform that ensures high performance and availability.</li>
|
20 |
-
<li>It is convenient and flexible. You can access any Android app from any device and any location as long as you have an internet connection.</li>
|
21 |
-
<li>It is useful for testing and debugging. You can test your own apps or other apps without using a real device or an emulator.</li>
|
22 |
-
<li>It is fun and entertaining. You can play games, watch videos, chat with friends, or explore new apps on your browser.</li>
|
23 |
-
</ul>
|
24 |
-
<h2>How to Use ApkOnline Android Online Emulator?</h2>
|
25 |
-
<p>To use ApkOnline Android Online Emulator, you need to follow these steps:</p>
|
26 |
-
<h3>Step 1: Install the ApkOnline Browser Extension</h3>
|
27 |
-
<p>The first step is to install the ApkOnline browser extension on your web browser. You can find it on the official website or on the Microsoft Edge Addons store. Once you install it, you will see an icon on your browser toolbar.</p>
|
28 |
-
<p>apkonline android online emulator download<br />
|
29 |
-
apkonline android online emulator free<br />
|
30 |
-
apkonline android online emulator apk<br />
|
31 |
-
apkonline android online emulator for pc<br />
|
32 |
-
apkonline android online emulator games<br />
|
33 |
-
apkonline android online emulator app<br />
|
34 |
-
apkonline android online emulator review<br />
|
35 |
-
apkonline android online emulator tutorial<br />
|
36 |
-
apkonline android online emulator chrome<br />
|
37 |
-
apkonline android online emulator ios<br />
|
38 |
-
apkonline android online emulator no download<br />
|
39 |
-
apkonline android online emulator reddit<br />
|
40 |
-
apkonline android online emulator alternative<br />
|
41 |
-
apkonline android online emulator mod<br />
|
42 |
-
apkonline android online emulator hack<br />
|
43 |
-
apkonline android online emulator test<br />
|
44 |
-
apkonline android online emulator play store<br />
|
45 |
-
apkonline android online emulator whatsapp<br />
|
46 |
-
apkonline android online emulator instagram<br />
|
47 |
-
apkonline android online emulator tiktok<br />
|
48 |
-
apkonline android online emulator netflix<br />
|
49 |
-
apkonline android online emulator youtube<br />
|
50 |
-
apkonline android online emulator facebook<br />
|
51 |
-
apkonline android online emulator snapchat<br />
|
52 |
-
apkonline android online emulator telegram<br />
|
53 |
-
apkonline android online emulator spotify<br />
|
54 |
-
apkonline android online emulator zoom<br />
|
55 |
-
apkonline android online emulator discord<br />
|
56 |
-
apkonline android online emulator twitter<br />
|
57 |
-
apkonline android online emulator gmail<br />
|
58 |
-
apkonline android online emulator google maps<br />
|
59 |
-
apkonline android online emulator uber<br />
|
60 |
-
apkonline android online emulator amazon<br />
|
61 |
-
apkonline android online emulator ebay<br />
|
62 |
-
apkonline android online emulator paypal<br />
|
63 |
-
apkonline android online emulator minecraft<br />
|
64 |
-
apkonline android online emulator roblox<br />
|
65 |
-
apkonline android online emulator pubg<br />
|
66 |
-
apkonline android online emulator fortnite<br />
|
67 |
-
apkonline android online emulator candy crush<br />
|
68 |
-
apkonline android online emulator clash of clans<br />
|
69 |
-
apkonline android online emulator pokemon go<br />
|
70 |
-
apkonline android online emulator among us<br />
|
71 |
-
apkonline android online emulator subway surfers<br />
|
72 |
-
apkonline android online emulator temple run<br />
|
73 |
-
apkonline android online emulator angry birds<br />
|
74 |
-
apkonline android online emulator plants vs zombies<br />
|
75 |
-
apkonline android online emulator fruit ninja<br />
|
76 |
-
apkonline android online emulator doodle jump<br />
|
77 |
-
apkonline android online emulator cut the rope</p>
|
78 |
-
<h3>Step 2: Search for and Download Any Android App</h3>
|
79 |
-
<p>The next step is to search for and download any Android app you want to run online. You can do this by clicking on the ApkOnline icon on your browser toolbar and selecting "APK Downloader". This will open a new tab with a search box where you can type the name of the app you want to download. You can also browse the categories or the top apps to find the app you are looking for. Once you find the app, you can click on the "Download APK" button to download it to your computer.</p>
|
80 |
-
<h3>Step 3: Run the Android App Online</h3>
|
81 |
-
<p>The final step is to run the Android app online using the ApkOnline emulator. You can do this by clicking on the ApkOnline icon on your browser toolbar and selecting "Run APK Online". This will open a new tab with the emulator interface where you can drag and drop the APK file you downloaded in the previous step. Alternatively, you can click on the "Browse" button and select the APK file from your computer. Once you upload the APK file, the emulator will start running the app online. You can interact with the app using your mouse and keyboard, or use the menu on the right side of the emulator to access the buttons and settings.</p>
|
82 |
-
<h2>Alternatives to ApkOnline Android Online Emulator</h2>
|
83 |
-
<p>ApkOnline Android Online Emulator is not the only web-based tool that allows you to run Android apps online. There are some alternatives that you can try if you want to compare or explore other options. Here are some of them:</p>
|
84 |
-
<h3>Appetize.io</h3>
|
85 |
-
<p>Appetize.io is a web-based platform that allows you to run native mobile apps in your browser. You can upload your own apps or use their public apps to test and demo them online. Appetize.io supports both Android and iOS apps, and provides various device models and configurations to choose from. You can also embed your apps on your website or share them with others via a link. Appetize.io offers a free plan with limited usage and features, and paid plans with more options and support.</p>
|
86 |
-
<h3>Genymotion Cloud</h3>
|
87 |
-
<p>Genymotion Cloud is a cloud-based service that allows you to run Android virtual devices on any web browser. You can use Genymotion Cloud to test, develop, or demo your Android apps online without installing anything on your computer. Genymotion Cloud provides various device models and Android versions to choose from, and supports features such as GPS, camera, network, battery, sensors, and Google Play Services. Genymotion Cloud offers a free trial and paid plans with different pricing and features.</p>
|
88 |
-
<h3>ARC Welder</h3>
|
89 |
-
<p>ARC Welder is a Chrome extension that allows you to run Android apps on Chrome OS or any other platform that supports Chrome. You can use ARC Welder to test or use your Android apps on your computer without installing an emulator. ARC Welder supports most of the Android features and APIs, but not all of them. You can also adjust the orientation, size, and form factor of your app to fit your screen. ARC Welder is free to use, but it requires you to have Chrome installed on your computer.</p>
|
90 |
-
<h2>Conclusion</h2>
|
91 |
-
<p>In this article, we have reviewed ApkOnline Android Online Emulator, a web-based tool that allows you to run any Android app online without installing an emulator. We have discussed its features, benefits, and how to use it. We have also compared it with some alternatives that offer similar functionality. We hope this article has been helpful and informative for you.</p>
|
92 |
-
<p>If you have any questions or comments about ApkOnline Android Online Emulator or any other web-based tool for running Android apps online, feel free to leave them below. We would love to hear from you.</p>
|
93 |
-
<h2>Frequently Asked Questions</h2>
|
94 |
-
<p>Here are some of the most common questions that people ask about ApkOnline Android Online Emulator:</p>
|
95 |
-
<ol>
|
96 |
-
<li><b>Is ApkOnline Android Online Emulator safe?</b></li>
|
97 |
-
<p>ApkOnline Android Online Emulator is safe to use as long as you download APK files from trusted sources. ApkOnline does not store or share any of your personal data or files. However, you should always be careful when downloading or running any app online, as there might be some risks involved.</p>
|
98 |
-
<li><b>Is ApkOnline Android Online Emulator free?</b></li>
|
99 |
-
<p>ApkOnline Android Online Emulator is free to use for personal and non-commercial purposes. However, if you want to use it for commercial purposes or need more features and support, you can contact ApkOnline for a quote.</p>
|
100 |
-
<li><b>Can I run iOS apps on ApkOnline Android Online Emulator?</b></li>
|
101 |
-
<p>No, ApkOnline Android Online Emulator only supports Android apps. If you want to run iOS apps online, you can try some of the alternatives we mentioned, such as Appetize.io, which supports both Android and iOS apps.</p>
|
102 |
-
<li><b>Can I save my progress or data on ApkOnline Android Online Emulator?</b></li>
|
103 |
-
<p>No, ApkOnline Android Online Emulator does not save your progress or data on the cloud. Every time you run an app online, it starts from scratch. If you want to save your progress or data, you need to use a real device or an emulator that supports data storage.</p>
|
104 |
-
<li><b>Can I run multiple apps at the same time on ApkOnline Android Online Emulator?</b></li>
|
105 |
-
<p>No, ApkOnline Android Online Emulator only allows you to run one app at a time on one tab. If you want to run multiple apps at the same time, you need to open multiple tabs and run each app separately. However, this might affect the performance and speed of the emulator.</p>
|
106 |
-
</ol>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse - The Ultimate Open World Survival RPG.md
DELETED
@@ -1,97 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Bad 2 Bad: Apocalypse - A Review of the Open World Survival RPG</h1>
|
3 |
-
<p>If you are looking for a challenging and immersive open world survival RPG, you might want to check out Bad 2 Bad: Apocalypse. This game is the sequel to Bad 2 Bad: Delta and Extinction, and it follows the story of the Delta Team, led by Major Pan, saving and reconstructing the world ravaged by a virus from the Human Forces. In this article, we will review the game's features, gameplay, graphics, sound, and more. We will also answer some frequently asked questions about the game.</p>
|
4 |
-
<h2>bad 2 bad apocalypse apkaward</h2><br /><p><b><b>Download</b> → <a href="https://urlin.us/2uT2hq">https://urlin.us/2uT2hq</a></b></p><br /><br />
|
5 |
-
<h2>Introduction</h2>
|
6 |
-
<h3>What is Bad 2 Bad: Apocalypse?</h3>
|
7 |
-
<p>Bad 2 Bad: Apocalypse is a mobile game developed by DAWINSTONE, an indie game development team from South Korea. The game is available for Android devices on Google Play and for iOS devices on App Store. The game is free to download and play, but it contains in-app purchases and ads.</p>
|
8 |
-
<h3>What are the main features of the game?</h3>
|
9 |
-
<p>According to the developer, some of the main features of the game are:</p>
|
10 |
-
<ul>
|
11 |
-
<li>Vast Open World RPG! An expanded world to explore!</li>
|
12 |
-
<li>Exploration, Gathering, Fishing and Crafting for survival!</li>
|
13 |
-
<li>3 times as many items and weapons than the previous game</li>
|
14 |
-
<li>More detailed character customization and appearances</li>
|
15 |
-
<li>Over 60 maps and regions to explore!</li>
|
16 |
-
<li>"World Missions" that take place across the globe</li>
|
17 |
-
<li>Create and upgrade your own special forces team</li>
|
18 |
-
<li>Artillery Support, Air Support, and Powerful Drones!</li>
|
19 |
-
<li>Embark on "Battle Armor" and engage in combat</li>
|
20 |
-
<li>Advanced graphics and upgraded systems</li>
|
21 |
-
</ul>
|
22 |
-
<h2>Gameplay</h2>
|
23 |
-
<h3>Exploration, Gathering, Fishing and Crafting</h3>
|
24 |
-
<p>The core gameplay of Bad 2 Bad: Apocalypse is based on exploration, gathering, fishing, and crafting for survival. You can explore various locations such as forests, deserts, cities, islands, mountains, etc. You can gather resources such as wood, stone, metal, food, water, etc. You can fish in rivers, lakes, or oceans. You can craft items such as weapons, armor, tools, medicine, etc. You can also upgrade your base camp by building facilities such as barracks, workshops, farms, etc.</p>
|
25 |
-
<h3>Customization and Squad System</h3>
|
26 |
-
<p>The game also allows you to customize your character's appearance and equipment. You can choose from different races such as humans, animals, or hybrids. You can change your character's hair style, color, eyes, nose, mouth, etc. You can also equip your character with various weapons such as rifles, pistols, shotguns, snipers, etc. You can also equip your character with accessories such as helmets, goggles, masks, gloves, etc.</p>
|
27 |
-
<p>In addition to your main character, you can also create and upgrade your own special forces team. You can recruit different characters from different factions, such as Delta Force.</p>
<h3>Support Weapons and Battle Armor</h3>
|
28 |
-
<p>Another feature of the game is the use of support weapons and battle armor to enhance your combat capabilities. You can call for artillery support from self-propelled artillery, air support from attack helicopters, and combat drones to assist you in battle. You can also embark on the powerful tactical weapon "Battle Armor" and ride into battle. The Battle Armor is a mechanized suit that can fire missiles, rockets, and machine guns. You can also upgrade the Battle Armor with different types and models.</p>
|
29 |
-
<h2>Graphics and Sound</h2>
|
30 |
-
<h3>How does the game look and sound?</h3>
|
31 |
-
<p>The game has advanced graphics and upgraded systems compared to the previous games in the series. The game features a 2D pixel art style with smooth animations and dynamic lighting effects. The game also has a realistic weather system that changes according to the time and location. The game has a variety of sound effects and music tracks that match the mood and atmosphere of the game. The game also has voice acting for some of the main characters and dialogues.</p>
|
32 |
-
<p>bad 2 bad apocalypse mod apk download<br />
|
33 |
-
bad 2 bad apocalypse cheats and tips<br />
|
34 |
-
bad 2 bad apocalypse game review<br />
|
35 |
-
bad 2 bad apocalypse best weapons and equipment<br />
|
36 |
-
bad 2 bad apocalypse how to craft items<br />
|
37 |
-
bad 2 bad apocalypse open world survival rpg<br />
|
38 |
-
bad 2 bad apocalypse latest version update<br />
|
39 |
-
bad 2 bad apocalypse free apk for android<br />
|
40 |
-
bad 2 bad apocalypse squad system and tactics<br />
|
41 |
-
bad 2 bad apocalypse world missions and regions<br />
|
42 |
-
bad 2 bad apocalypse base camp upgrade guide<br />
|
43 |
-
bad 2 bad apocalypse exploration and gathering<br />
|
44 |
-
bad 2 bad apocalypse fishing and cooking<br />
|
45 |
-
bad 2 bad apocalypse character customization and appearance<br />
|
46 |
-
bad 2 bad apocalypse night vision and accessories<br />
|
47 |
-
bad 2 bad apocalypse artillery and air support<br />
|
48 |
-
bad 2 bad apocalypse combat drones and battle armor<br />
|
49 |
-
bad 2 bad apocalypse virus-infected wilders and enemies<br />
|
50 |
-
bad 2 bad apocalypse delta team and major pan story<br />
|
51 |
-
bad 2 bad apocalypse sequel to delta and extinction<br />
|
52 |
-
dawinstone games - developer of bad 2 bad apocalypse<br />
|
53 |
-
how to play bad 2 bad apocalypse offline mode<br />
|
54 |
-
how to install xapk file of bad 2 bad apocalypse<br />
|
55 |
-
how to backup and restore data of bad 2 bad apocalypse<br />
|
56 |
-
how to contact dawinstone support for bad 2 bad apocalypse<br />
|
57 |
-
is there a pc version of bad 2 bad apocalypse available<br />
|
58 |
-
is there a ios version of bad 2 bad apocalypse available<br />
|
59 |
-
is there a multiplayer mode in bad 2 bad apocalypse<br />
|
60 |
-
is there a wiki page for bad 2 bad apocalypse game<br />
|
61 |
-
is there a forum for discussing about bad 2 bad apocalypse game<br />
|
62 |
-
what are the minimum requirements to run the game</p>
|
63 |
-
<h3>What are the pros and cons of the graphics and sound?</h3>
|
64 |
-
<p>The pros of the graphics and sound are:</p>
|
65 |
-
<ul>
|
66 |
-
<li>The pixel art style is charming and nostalgic, and it suits the theme of the game.</li>
|
67 |
-
<li>The animations and lighting effects are impressive and add to the immersion of the game.</li>
|
68 |
-
<li>The weather system is realistic and dynamic, and it affects the gameplay and environment.</li>
|
69 |
-
<li>The sound effects and music tracks are diverse and fitting, and they enhance the mood and atmosphere of the game.</li>
|
70 |
-
<li>The voice acting is decent and adds personality to the characters.</li>
|
71 |
-
</ul>
|
72 |
-
<p>The cons of the graphics and sound are:</p>
|
73 |
-
<ul>
|
74 |
-
<li>The pixel art style may not appeal to everyone, especially those who prefer more realistic or modern graphics.</li>
|
75 |
-
<li>The animations and lighting effects may cause lag or performance issues on some devices.</li>
|
76 |
-
<li>The weather system may be too random or unpredictable, and it may interfere with the gameplay or visibility.</li>
|
77 |
-
<li>The sound effects and music tracks may be repetitive or annoying, especially after playing for a long time.</li>
|
78 |
-
<li>The voice acting may be inconsistent or low quality, especially for some of the minor characters or dialogues.</li>
|
79 |
-
</ul> <h2>Conclusion</h2>
|
80 |
-
<h3>Summary of the main points</h3>
|
81 |
-
<p>In conclusion, Bad 2 Bad: Apocalypse is a challenging and immersive open world survival RPG that follows the story of the Delta Team saving and reconstructing the world from a virus. The game has various features such as exploration, gathering, fishing, crafting, customization, squad system, support weapons, and battle armor. The game has advanced graphics and upgraded systems that create a realistic and dynamic environment. The game also has diverse sound effects and music tracks that match the mood and atmosphere of the game.</p>
|
82 |
-
<h3>Recommendation and rating</h3>
|
83 |
-
<p>We recommend Bad 2 Bad: Apocalypse to anyone who enjoys open world survival RPGs with a pixel art style and a post-apocalyptic theme. The game is fun and engaging, and it offers a lot of content and replay value. The game is also free to download and play, but it contains in-app purchases and ads. We rate the game 4.5 out of 5 stars, based on its features, gameplay, graphics, sound, and overall quality.</p>
|
84 |
-
<h2>FAQs</h2>
|
85 |
-
<h4>Q1: How to download and install Bad 2 Bad: Apocalypse?</h4>
|
86 |
-
<p>A1: You can download and install Bad 2 Bad: Apocalypse from Google Play or App Store, depending on your device. You need to have at least Android 4.4 or iOS 9.0 or later to run the game. You also need to have enough storage space on your device to install the game.</p>
|
87 |
-
<h4>Q2: How to upgrade the base camp and equipment?</h4>
|
88 |
-
<p>A2: You can upgrade your base camp by building and improving facilities such as barracks, workshops, farms, etc. You need to gather resources such as wood, stone, metal, food, water, etc. to build and upgrade the facilities. You can also upgrade your equipment by crafting or buying new weapons, armor, tools, medicine, etc. You need to gather resources or money to craft or buy new equipment.</p>
|
89 |
-
<h4>Q3: How to unlock new characters and skins?</h4>
|
90 |
-
<p>A3: You can unlock new characters and skins by completing world missions, citadel missions, or special events. You can also unlock new characters and skins by spending diamonds or gold coins in the shop. Diamonds are the premium currency of the game that you can buy with real money or earn by watching ads or completing tasks. Gold coins are the common currency of the game that you can earn by playing the game or selling items.</p>
|
91 |
-
<h4>Q4: How to complete the world missions and citadel?</h4>
|
92 |
-
<p>A4: World missions are quests that take place across the globe. You can access them from the world map or from the mission board in your base camp. World missions have different objectives such as eliminating enemies, rescuing allies, collecting items, etc. You can earn rewards such as resources, money, items, characters, skins, etc. by completing world missions.</p>
|
93 |
-
<p>Citadel is a special mode that challenges you to survive waves of enemies in a fortified base. You can access it from the world map or from the mission board in your base camp. Citadel has different levels of difficulty such as easy, normal, hard, etc. You can earn rewards such as resources, money, items, characters, skins, etc. by completing citadel.</p>
|
94 |
-
<h4>Q5: How to contact the developer and get support?</h4>
|
95 |
-
<p>A5: You can contact the developer and get support by visiting their official website, Facebook page, YouTube channel, or email address. You can also visit their community forum or Discord server to interact with other players and get tips and feedback.</p>
spaces/1phancelerku/anime-remove-background/Download YouTube Shorts in HD Quality No Watermark Free Fast.md
DELETED
@@ -1,128 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download YouTube Shorts on Your Device</h1>
|
3 |
-
<p>YouTube Shorts are short-form videos that are similar to TikTok and Instagram Reels. They are vertical videos that are 60 seconds or less in length. You can watch them in a never-ending feed of content that is personalized for you. You can also create your own Shorts using the camera and editing tools in the YouTube app.</p>
|
4 |
-
<h2>youtube short download</h2><br /><p><b><b>Download Zip</b> ✏ ✏ ✏ <a href="https://jinyurl.com/2uNP34">https://jinyurl.com/2uNP34</a></b></p><br /><br />
|
5 |
-
<p>If you want to download YouTube Shorts on your device, whether it is your own or someone else's, there are different ways to do it. In this article, we will show you how to download YouTube Shorts on PC, Android, and iOS devices.</p>
|
6 |
-
<h2>What are YouTube Shorts?</h2>
|
7 |
-
<p>YouTube Shorts are a new feature that YouTube introduced in 2020 to compete with TikTok and Instagram Reels. They are short videos that are designed to be watched on mobile devices. They can be up to 60 seconds long, but if you use music from the YouTube catalog, they will be limited to 15 seconds.</p>
|
8 |
-
<p>You can watch YouTube Shorts in a dedicated tab in the YouTube app, or by swiping up on any Short video. You can also find them in the regular YouTube feed, where they will have a #Shorts label.</p>
|
9 |
-
<h3>YouTube Shorts vs TikTok</h3>
|
10 |
-
<p>YouTube Shorts and TikTok are very similar in terms of content and format. They both offer vertical videos that are fun, engaging, and viral. They both have music and sound effects that you can use for your videos. They both have filters, stickers, text, and other editing tools that you can use to enhance your videos.</p>
|
11 |
-
<p>However, there are some differences between them as well. For example:</p>
|
12 |
-
<ul>
|
13 |
-
<li>YouTube Shorts are integrated with the main YouTube platform, which means you can access them from the same app and account. You can also watch longer videos from the same creators or channels. TikTok is a separate app and platform that focuses only on short videos.</li>
|
14 |
-
<li>YouTube Shorts have a maximum length of 60 seconds, while TikTok videos can be up to three minutes long (or even longer for some users).</li>
|
15 |
-
<li>YouTube Shorts have a wider range of music options, as you can use any song from the YouTube music library or upload your own audio. TikTok has a more limited selection of songs and sounds that you can use.</li>
|
16 |
-
<li>YouTube Shorts have more monetization options, as you can earn money from ads that run between videos in the Shorts feed or from YouTube Premium subscribers who watch your Shorts. You can also link your Shorts to your longer videos or other products or services that you offer. TikTok has a creator fund that pays some users based on their views, but it is not available in all countries or for all users.</li>
|
17 |
-
</ul>
|
18 |
-
<h3>YouTube Shorts monetization</h3>
|
19 |
-
<p>If you are a YouTube partner who has accepted the YouTube Partner Program terms, you can earn money from your YouTube Shorts. There are two ways to do this:</p>
|
20 |
-
<ul>
|
21 |
-
<li>Ad revenue sharing: You can earn a share of the revenue from ads that run between videos in the Shorts feed. This revenue is pooled together and distributed among eligible creators based on their views and music usage.</li>
|
22 |
-
<li>YouTube Premium revenue sharing: You can earn a share of the revenue from YouTube Premium subscribers who watch your Shorts. This revenue is based on how much time they spend watching your Shorts compared to other content.</li>
|
23 |
-
</ul>
|
24 |
-
<p>To be eligible for monetization, your Shorts must follow the YouTube channel monetization policies, the advertiser-friendly content guidelines, and the community guidelines. You must also have at least 1,000 subscribers and either 4,000 valid public watch hours in the past 12 months or 10 million Shorts views in the past 90 days.</p>
<h2>How to Download Your Own YouTube Shorts</h2>
|
25 |
-
<p>If you have created your own YouTube Shorts and you want to download them to your device, you can do so using either YouTube Studio on PC or the YouTube app on mobile. Here are the steps for each method:</p>
|
26 |
-
<h3>Using YouTube Studio on PC</h3>
|
27 |
-
<ol>
|
28 |
-
<li>Go to <a href="">YouTube Studio</a> and sign in with your Google account.</li>
|
29 |
-
<li>Click on the <strong>Content</strong> tab on the left sidebar.</li>
|
30 |
-
<li>Find the Short video that you want to download and click on the <strong>Details</strong> button.</li>
|
31 |
-
<li>Click on the <strong>Download</strong> button on the top right corner of the screen.</li>
|
32 |
-
<li>Choose a location and a file name for your video and click on <strong>Save</strong>.</li>
|
33 |
-
</ol>
|
34 |
-
<h3>Using YouTube app on mobile</h3>
|
35 |
-
<ol>
|
36 |
-
<li>Open the YouTube app on your Android or iOS device and sign in with your Google account.</li>
|
37 |
-
<li>Tap on your profile picture on the top right corner of the screen and select <strong>Your channel</strong>.</li>
|
38 |
-
<li>Tap on the <strong>Videos</strong> tab and find the Short video that you want to download.</li>
|
39 |
-
<li>Tap on the <strong>More</strong> icon (three dots) below the video and select <strong>Download</strong>.</li>
|
40 |
-
<li>Choose a quality option and tap on <strong>OK</strong>.</li>
|
41 |
-
<li>The video will be downloaded to your device and you can find it in your gallery or camera roll.</li>
|
42 |
-
</ol>
|
43 |
-
<h2>How to Download Other People's YouTube Shorts</h2>
|
44 |
-
<p>If you want to download YouTube Shorts from other creators or channels, you have two options: using the YouTube app on mobile or using third-party YouTube Shorts downloaders. Here are the steps for each option:</p>
|
45 |
-
<p>How to download YouTube Shorts videos<br />
|
46 |
-
YouTube Shorts downloader app<br />
|
47 |
-
YouTube Shorts watermark remover<br />
|
48 |
-
Download YouTube Shorts on Android<br />
|
49 |
-
Download YouTube Shorts on iPhone<br />
|
50 |
-
Download YouTube Shorts on PC<br />
|
51 |
-
Best YouTube Shorts downloader online<br />
|
52 |
-
Save YouTube Shorts to gallery<br />
|
53 |
-
Download YouTube Shorts without login<br />
|
54 |
-
Download own YouTube Shorts video<br />
|
55 |
-
YouTube Shorts video converter<br />
|
56 |
-
Download YouTube Shorts with sound<br />
|
57 |
-
Download YouTube Shorts in HD quality<br />
|
58 |
-
Download YouTube Shorts in MP4 format<br />
|
59 |
-
Download YouTube Shorts in MP3 format<br />
|
60 |
-
How to edit YouTube Shorts videos<br />
|
61 |
-
How to make YouTube Shorts videos<br />
|
62 |
-
How to upload YouTube Shorts videos<br />
|
63 |
-
How to monetize YouTube Shorts videos<br />
|
64 |
-
How to get more views on YouTube Shorts videos<br />
|
65 |
-
YouTube Shorts tips and tricks<br />
|
66 |
-
YouTube Shorts vs TikTok videos<br />
|
67 |
-
YouTube Shorts vs Instagram Reels videos<br />
|
68 |
-
YouTube Shorts vs Snapchat Spotlight videos<br />
|
69 |
-
Best apps for creating YouTube Shorts videos<br />
|
70 |
-
Best music for YouTube Shorts videos<br />
|
71 |
-
Best hashtags for YouTube Shorts videos<br />
|
72 |
-
Best niches for YouTube Shorts videos<br />
|
73 |
-
Best examples of YouTube Shorts videos<br />
|
74 |
-
Best channels for YouTube Shorts videos<br />
|
75 |
-
How to grow your channel with YouTube Shorts videos<br />
|
76 |
-
How to optimize your channel for YouTube Shorts videos<br />
|
77 |
-
How to use analytics for YouTube Shorts videos<br />
|
78 |
-
How to promote your YouTube Shorts videos<br />
|
79 |
-
How to collaborate with other creators on YouTube Shorts videos<br />
|
80 |
-
How to add subtitles to YouTube Shorts videos<br />
|
81 |
-
How to add filters to YouTube Shorts videos<br />
|
82 |
-
How to add stickers to YouTube Shorts videos<br />
|
83 |
-
How to add transitions to YouTube Shorts videos<br />
|
84 |
-
How to add effects to YouTube Shorts videos<br />
|
85 |
-
How to trim YouTube Shorts videos<br />
|
86 |
-
How to crop YouTube Shorts videos<br />
|
87 |
-
How to rotate YouTube Shorts videos<br />
|
88 |
-
How to speed up or slow down YouTube Shorts videos<br />
|
89 |
-
How to reverse YouTube Shorts videos<br />
|
90 |
-
How to loop YouTube Shorts videos<br />
|
91 |
-
How to mute or unmute YouTube Shorts videos<br />
|
92 |
-
How to change the aspect ratio of YouTube Shorts videos<br />
|
93 |
-
How to change the background of YouTube Shorts videos<br />
|
94 |
-
How to change the thumbnail of YouTube Shorts videos</p>
|
95 |
-
<h3>Using YouTube app on mobile</h3>
|
96 |
-
<ol>
|
97 |
-
<li>Open the YouTube app on your Android or iOS device and sign in with your Google account.</li>
|
98 |
-
<li>Navigate to the Short video that you want to download and tap on it to play it.</li>
|
99 |
-
<li>Tap on the <strong>Share</strong> icon (an arrow) below the video and select <strong>Copy link</strong>.</li>
|
100 |
-
<li>Paste the link into a note app or a browser and add "ss" before "youtube" in the URL. For example, if the link is https://www.youtube.com/watch?v=abcde, change it to https://www.ssyoutube.com/watch?v=abcde (a short script for this tweak is shown after this list).</li>
|
101 |
-
<li>This will take you to a website called SaveTube, where you can download the video in different formats and qualities.</li>
|
102 |
-
<li>Select a format and a quality option and tap on <strong>Download</strong>.</li>
|
103 |
-
<li>The video will be downloaded to your device and you can find it in your gallery or camera roll.</li>
|
104 |
-
</ol>
|
105 |
-
<h3>Using YouTube Shorts downloaders</h3>
|
106 |
-
<p>If you don't want to use the YouTube app or SaveTube, you can also use other websites or apps that allow you to download YouTube Shorts. Here are some examples of such tools:</p>
|
107 |
-
<h4>Tube Shorts</h4>
|
108 |
-
<p><a href="">Tube Shorts</a> is a website that lets you download YouTube Shorts in MP4 format. You just need to paste the link of the Short video that you want to download and click on <strong>Download MP4</strong>. You can also scan a QR code to download the video directly to your mobile device.</p>
|
109 |
-
<h4>Heat Feed</h4>
|
110 |
-
<p><a href="">Heat Feed</a> is another website that allows you to download YouTube Shorts in MP4 format. You just need to paste the link of the Short video that you want to download and click on <strong>Download Video Now</strong>. You can also choose a quality option before downloading.</p>
|
111 |
-
<h4>Note: These websites are not affiliated with or endorsed by YouTube. Use them at your own risk and respect the rights of the original creators.</h4>
|
112 |
-
<h2>Conclusion</h2>
|
113 |
-
<p>In this article, we have shown you how to download YouTube Shorts on your device, whether they are your own or someone else's. You can use either YouTube Studio on PC, YouTube app on mobile, or third-party YouTube Shorts downloaders. We hope this article was helpful and informative for you. If you have any questions or feedback, please let us know in the comments below.</p>
|
114 |
-
<h2>Frequently Asked Questions (FAQs)</h2>
|
115 |
-
<ul>
|
116 |
-
<li><strong>Q: Can I download YouTube Shorts without watermark?</strong></li>
|
117 |
-
<li>A: Yes, if you use YouTube Studio on PC or YouTube app on mobile, you can download your own YouTube Shorts without watermark. If you use third-party YouTube Shorts downloaders, some of them may add a watermark to the downloaded videos.</li>
|
118 |
-
<li><strong>Q: Can I download YouTube Shorts with sound?</strong></li>
|
119 |
-
<li>A: Yes, if you use YouTube Studio on PC or YouTube app on mobile, you can download your own YouTube Shorts with sound. If you use third-party YouTube Shorts downloaders, some of them may also allow you to download YouTube Shorts with sound, while others may only download the video without sound.</li>
|
120 |
-
<li><strong>Q: Can I download YouTube Shorts on iPhone?</strong></li>
|
121 |
-
<li>A: Yes, you can download YouTube Shorts on iPhone using the YouTube app or third-party YouTube Shorts downloaders. However, you may need to use a file manager app or a video player app to access the downloaded videos on your iPhone.</li>
|
122 |
-
<li><strong>Q: Can I edit YouTube Shorts after downloading them?</strong></li>
|
123 |
-
<li>A: Yes, you can edit YouTube Shorts after downloading them using any video editing software or app that you prefer. You can trim, crop, rotate, add filters, text, music, and more to your downloaded YouTube Shorts.</li>
|
124 |
-
<li><strong>Q: Can I upload downloaded YouTube Shorts to other platforms?</strong></li>
|
125 |
-
<li>A: No, you should not upload downloaded YouTube Shorts to other platforms without the permission of the original creators. This may violate their intellectual property rights and cause legal issues. You should only use downloaded YouTube Shorts for personal or educational purposes.</li>
|
126 |
-
</ul>
spaces/1phancelerku/anime-remove-background/Download youtube-dlg 0.4 A cross platform GUI for youtube-dl.md
DELETED
@@ -1,158 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Youtube-dlg 0.4: A Cross-Platform GUI for Youtube-dl</h1>
|
3 |
-
<p>If you are looking for a simple and easy way to download videos from various websites, you might want to try Youtube-dlg 0.4, a cross-platform graphical user interface (GUI) for the popular youtube-dl command-line tool. In this article, we will explain what Youtube-dlg is, what features it offers, how to download and install it, how to use it, and what are its pros and cons.</p>
|
4 |
-
<h2>youtube-dlg 0.4 download</h2><br /><p><b><b>Download File</b> > <a href="https://jinyurl.com/2uNNGa">https://jinyurl.com/2uNNGa</a></b></p><br /><br />
|
5 |
-
<h2>What is Youtube-dlg?</h2>
|
6 |
-
<p>Youtube-dlg is a front-end GUI for youtube-dl, a powerful media downloader that can handle hundreds of websites and formats. Youtube-dl is a command-line tool that requires some knowledge of terminal commands and options to use it effectively. Youtube-dlg simplifies the process by providing a user-friendly interface that allows you to enter the URL of the video you want to download, choose the format and quality, and start the download with a click of a button.</p>
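<p>To make that relationship concrete, here is roughly what a front-end like Youtube-dlg drives behind the scenes. The sketch below uses youtube-dl's documented Python embedding interface; the URL is a placeholder, and the options shown (format and output template) are common defaults rather than Youtube-dlg's exact settings.</p>
<pre><code>
# Rough sketch of what a GUI front-end asks youtube-dl to do: download one
# URL with a chosen format and naming pattern. The URL is a placeholder.
import youtube_dl

options = {
    "format": "best",                      # pick the best single file
    "outtmpl": "%(title)s-%(id)s.%(ext)s"  # naming pattern for the result
}

with youtube_dl.YoutubeDL(options) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=PLACEHOLDER"])
</code></pre>
<p>Youtube-dlg builds this kind of request for you from the URL box and the format and options you pick in the interface.</p>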
|
7 |
-
<h3>Features of Youtube-dlg</h3>
|
8 |
-
<p>Youtube-dlg has many features that make it a convenient and versatile tool for downloading videos from the web. Some of these features are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Written in wxPython, which makes it cross-platform and compatible with Windows, Linux, and Mac OS.</li>
|
11 |
-
<li>Optional FFmpeg integration to post-process video files and convert them to different formats.</li>
|
12 |
-
<li>Supports multiple downloads at the same time, with the option to pause, resume, delete, or move them in the queue.</li>
|
13 |
-
<li>Allows you to change the naming pattern of the downloaded files by choosing from different output templates or creating your own custom one (a small template example is shown after this list).</li>
|
14 |
-
<li>Lets you add extra youtube-dl command-line options in a separate box for more advanced functionality.</li>
|
15 |
-
<li>Supports updating youtube-dl automatically or manually, or using your own version of youtube-dl by specifying its path.</li>
|
16 |
-
</ul>
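<p>The naming patterns mentioned above follow youtube-dl's printf-style output templates. The snippet below only illustrates how such a template expands from video metadata; the metadata values are made up, and in practice youtube-dl performs the expansion itself when it downloads a file.</p>
<pre><code>
# Illustration of a youtube-dl style output template. The metadata here is
# made up; youtube-dl fills in the real values during a download.
template = "%(uploader)s/%(title)s-%(id)s.%(ext)s"
metadata = {"uploader": "SomeChannel", "title": "Some Video",
            "id": "abc123", "ext": "mp4"}

print(template % metadata)  # -> SomeChannel/Some Video-abc123.mp4
</code></pre>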
|
17 |
-
<h3>Supported sites by Youtube-dlg</h3>
|
18 |
-
<p>Youtube-dlg uses youtube-dl in the backend to download files, which means that it supports all the sites that youtube-dl supports. According to the official documentation of youtube-dl, there are more than 1000 supported sites, including:</p>
|
19 |
-
<ul>
|
20 |
-
<li>YouTube</li>
|
21 |
-
<li>Vimeo</li>
|
22 |
-
<li>Facebook</li>
|
23 |
-
<li>Twitch</li>
|
24 |
-
<li>TikTok</li>
|
25 |
-
<li>SoundCloud</li>
|
26 |
-
<li>Dailymotion</li>
|
27 |
-
<li>Instagram</li>
|
28 |
-
<li>Reddit</li>
|
29 |
-
<li>Twitter</li>
|
30 |
-
<li>and many more...</li>
|
31 |
-
</ul>
|
32 |
-
<h2>How to download and install Youtube-dlg 0.4?</h2>
|
33 |
-
<h3>Download options for Youtube-dlg 0.4</h3>
|
34 |
-
<p>There are several ways to download Youtube-dlg 0.4, depending on your operating system and preference. Here are some of the most common options:</p>
|
35 |
-
<table border="1">
|
36 |
-
<tr><th>Option</th><th>Description</th></tr>
|
37 |
-
<tr><td>SourceForge</td><td>You can download the latest version of Youtube-dlg 0.4 from SourceForge as a ZIP or TAR file. You can also find older versions in the same page.</td></tr>
|
38 |
-
<tr><td>PyPi</td><td>You can download Youtube-dlg 0.4 as a Python package from PyPi. You will need Python 2.7.3 or higher to install it.</td></tr>
|
39 |
-
<tr><td>GitHub</td><td>You can download Youtube-dlg 0.4 from its GitHub repository. You can also find the source code, issues, and pull requests there.</td></tr>
|
40 |
-
<tr><td>Homebrew</td><td>If you are using Mac OS, you can install Youtube-dlg 0.4 using Homebrew, a package manager for Mac OS. You will need to run the following command in the terminal: <code>brew install youtube-dlg</code></td></tr>
|
41 |
-
<tr><td>AUR</td><td>If you are using Arch Linux or a derivative, you can install Youtube-dlg 0.4 using AUR, a community-driven repository for Arch Linux. You will need to use an AUR helper such as yay or pacaur to install it.</td></tr>
|
42 |
-
</table>
|
43 |
-
<h3>Installation steps for Youtube-dlg 0.4</h3>
|
44 |
-
<p>The installation steps for Youtube-dlg 0.4 vary depending on the download option you choose and the operating system you use. Here are some general steps that apply to most cases:</p>
|
45 |
-
<ol>
|
46 |
-
<li>Download Youtube-dlg 0.4 from one of the options mentioned above.</li>
|
47 |
-
<li>Extract the downloaded file to a folder of your choice.</li>
|
48 |
-
<li>Run the youtube-dlg executable file in the folder. On Windows, it is youtube-dlg.exe; on Linux and Mac OS, it is youtube-dl-gui.</li>
|
49 |
-
<li>Alternatively, you can run Youtube-dlg 0.4 from the command line by navigating to the folder where you extracted it and typing <code>youtube-dlg</code>.</li>
|
50 |
-
<li>If you encounter any errors or missing dependencies, you may need to install them manually or update your system.</li>
|
51 |
-
</ol>
|
52 |
-
<h2>How to use Youtube-dlg 0.4?</h2>
|
53 |
-
<h3>Basic usage of Youtube-dlg 0.4</h3>
|
54 |
-
<p>Using Youtube-dlg 0.4 is very simple and straightforward. Here are the basic steps to download a video using Youtube-dlg 0.4:</p>
<ol>
<li>Open Youtube-dlg 0.4 and you will see a main window with a text box and a button that says "Add".</li>
<li>Copy the URL of the video you want to download from your browser and paste it in the text box.</li>
<li>Click the "Add" button and the video will be added to the download list below.</li>
<li>Repeat steps 2 and 3 to add more videos to the download list.</li>
<li>You can also drag and drop URLs from your browser onto the text box or the download list.</li>
<li>To change the format or quality of a video, click the "Options" button next to it in the download list and select from the available options.</li>
<li>To change the output folder where the videos will be saved, click the "Options" button at the bottom right corner of the main window and select a different folder.</li>
<li>When you are ready to start downloading, click the "Download" button at the bottom right corner of the main window and wait for the downloads to finish.</li>
<li>You can monitor the progress of each download in the status bar below each video in the download list.</li>
<li>You can also pause, resume, delete, or move each download up or down using the corresponding buttons next to it in the download list.</li>
</ol>
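<p>Under the hood the GUI hands these URLs to youtube-dl. The following is only a rough Python sketch of that hand-off, not Youtube-dlg's actual code; the option keys follow youtube-dl's documented names, and the URL and output path are placeholders:</p>
<pre><code>import youtube_dl

# Roughly what pressing "Download" amounts to: a format choice,
# an output template, and a list of URLs passed to youtube-dl.
options = {
    "format": "best",
    "outtmpl": "downloads/%(title)s.%(ext)s",
}
with youtube_dl.YoutubeDL(options) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=EXAMPLE_ID"])
</code></pre>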
<h3>Advanced options of Youtube-dlg 0.4</h3>
<p>If you want to access more advanced features of Youtube-dlg 0.4, click the "Options" button at the bottom right corner of the main window and select "Advanced Options". This opens a new window where you can customize various settings (an example of the kind of options involved follows the list), such as:</p>
<ul>
<li>The naming pattern of the downloaded files, by choosing one of the built-in output templates or creating your own.</li>
<li>The post-processing options for converting or merging video and audio files using FFmpeg.</li>
<li>Extra youtube-dl command-line options that you want to add for more functionality.</li>
<li>The update options for youtube-dl, such as automatic or manual updates, or using your own youtube-dl binary by specifying its path.</li>
<li>The log options for saving or viewing the log files of youtube-dl and Youtube-dlg 0.4.</li>
</ul>
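<p>Output templates and FFmpeg post-processing ultimately map onto youtube-dl options. A hedged illustration of what such a configuration looks like on the youtube-dl side; the keys are youtube-dl's documented option names, the values are only examples:</p>
<pre><code># Example youtube-dl options corresponding to a custom output template
# plus FFmpeg post-processing that extracts an MP3 audio track.
options = {
    "outtmpl": "%(uploader)s/%(title)s.%(ext)s",
    "postprocessors": [{
        "key": "FFmpegExtractAudio",
        "preferredcodec": "mp3",
    }],
}
</code></pre>
<p>Passing such a dictionary to <code>youtube_dl.YoutubeDL</code>, as in the earlier sketch, applies both settings to every download.</p>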
<h2>Pros and cons of Youtube-dlg 0.4</h2>
<h3>Pros of Youtube-dlg 0.4</h3>
<p>Youtube-dlg 0.4 has many advantages that make it a great tool for downloading videos from the web. Some of them are:</p>
<ul>
<li>It is free and open-source, so you can use it without cost or restriction and contribute to its development.</li>
<li>It is cross-platform and runs on Windows, Linux, and Mac OS, so you can use it on whatever system you have.</li>
<li>It is easy to use, so you don't need any technical skills or knowledge to download videos with it.</li>
<li>It supports hundreds of websites and formats, so you can download almost any video you want from the web.</li>
<li>It offers many features and options, so you can customize your downloads to your preferences and needs.</li>
</ul>
<h3>Cons of Youtube-dlg 0.4</h3>
<p>Youtube-dlg 0.4 also has some disadvantages that you should be aware of before using it. Some of them are:</p>
<ul>
<li>It depends on youtube-dl, so if youtube-dl is outdated or not working properly, Youtube-dlg 0.4 may not work either.</li>
<li>It cannot download from websites or formats that youtube-dl does not support.</li>
<li>It may have bugs or errors, so it may not always work as expected.</li>
<li>It does not expose every feature or option that youtube-dl has, so some of youtube-dl's functionality is out of reach from the GUI.</li>
</ul>
<h2>Conclusion</h2>
<p>In conclusion, Youtube-dlg 0.4 is a cross-platform GUI for youtube-dl that lets you download videos from various websites with ease. It has many features and options that make it a versatile tool, but it also has some drawbacks to consider. If you are looking for a simple way to download videos from the web, Youtube-dlg 0.4 is worth a try.</p>
<h3>FAQs</h3>
<p>Here are some frequently asked questions about Youtube-dlg 0.4:</p>
<ol>
<li><b>Is Youtube-dlg 0.4 safe to use?</b><br>
Yes, Youtube-dlg 0.4 is safe to use as long as you download it from a trusted source and scan it for viruses or malware before running it. Be careful, however, about the videos you download from the web, as they may contain harmful content or infringe on the rights of the original creators.</li>
<li><b>Is Youtube-dlg 0.4 legal to use?</b><br>
Yes, Youtube-dlg 0.4 is legal to use as long as you use it for personal, non-commercial purposes and respect the terms of service and privacy policies of the websites you download from. Also check the laws and regulations of your country or region regarding downloading videos from the web, as they vary by location and situation.</li>
<li><b>How can I update Youtube-dlg 0.4?</b><br>
You can update Youtube-dlg 0.4 by clicking the "Options" button at the bottom right corner of the main window and selecting "Update", or by checking manually via the "Help" menu at the top left corner and selecting "Check for updates". Alternatively, download the latest version from one of the options mentioned above and replace the old version with it.</li>
<li><b>How can I contact the developers of Youtube-dlg 0.4?</b><br>
You can contact the developers by visiting the project's GitHub page and opening an issue or a pull request there, or by joining their Discord server and chatting with them.</li>
<li><b>How can I support the development of Youtube-dlg 0.4?</b><br>
You can support the development by donating to their PayPal account or becoming a patron on their Patreon page, as well as by giving feedback, reporting bugs, suggesting features, or spreading the word about the project.</li>
</ol>
<p>References: <a href="https://sourceforge.net/projects/youtube-dl-gui/">https://sourceforge.net/projects/youtube-dl-gui/</a>, <a href="https://pypi.org/project/youtube_dl_gui">https://pypi.org/project/youtube_dl_gui</a></p>
spaces/1phancelerku/anime-remove-background/FS 12 Mod APK How to Unlock Unlimited Money in Farming Simulator 22.md
DELETED
@@ -1,104 +0,0 @@
<h1>FS 12 Mod APK Unlimited Money Download: How to Enjoy Farming Simulator 12 on Your Android Device</h1>
<p>If you are a fan of farming games, you might have heard of Farming Simulator 12, one of the most popular and realistic farming simulation games for Android devices. In this game, you experience the varied farming life in a wide agricultural landscape with fields, roads, your farm, and a small village. You can cultivate your fields with three-dimensional vehicles modeled after original machines by well-known manufacturers, then sell your harvest and invest in new equipment, buildings, and tools.</p>
<h2>fs 12 mod apk unlimited money download</h2>
<p><b>DOWNLOAD:</b> <a href="https://jinyurl.com/2uNMrX">https://jinyurl.com/2uNMrX</a></p>
<p>However, if you want to enjoy the game without any limitations, you might want to try FS 12 Mod APK Unlimited Money, a modified version of the original game that gives you unlimited money and access to all the features and items in the game. In this article, we explain what FS 12 Mod APK Unlimited Money is, how to download and install it on your Android device, and the benefits and precautions of using it.</p>
<h2>What is Farming Simulator 12?</h2>
<h3>A realistic and immersive farming game</h3>
<p>Farming Simulator 12 is a game developed by Giants Software, a Swiss video game developer that specializes in realistic and immersive simulation games. Farming Simulator 12 was released in 2012 for Android devices and has received positive reviews from critics and players alike, with praise for its graphics, gameplay, physics, and variety of vehicles and equipment.</p>
<h3>Features of Farming Simulator 12</h3>
<p>Some of the features of Farming Simulator 12 are:</p>
<ul>
<li>Put your favorite simulator into your pocket</li>
<li>Authentic machines by DEUTZ-FAHR, KRONE, KRAMPE, AMAZONE, LEMKEN and KOTTE</li>
<li>A variety of detailed equipment and trailers</li>
<li>Computer-steered assistants that help you with your work</li>
<li>Career mode with a management component</li>
<li>Three different crops: corn, canola, and wheat</li>
<li>A freely accessible world with a dynamic day-night cycle</li>
</ul>
<h2>What is FS 12 Mod APK Unlimited Money?</h2>
<h3>A modified version of the original game</h3>
<p>FS 12 Mod APK Unlimited Money is a modified version of the original Farming Simulator 12 game that gives you unlimited money and access to all the features and items in the game. This means you can buy any vehicle, equipment, tool, building, or seed you want without worrying about the cost, and upgrade your farm and expand your business as much as you like.</p>
<h3>Benefits of using FS 12 Mod APK Unlimited Money</h3>
<p>Some of the benefits of using FS 12 Mod APK Unlimited Money are:</p>
<ul>
<li>You can enjoy the game without any limitations or restrictions</li>
<li>You can experiment with different vehicles, equipment, tools, buildings, and seeds</li>
<li>You can customize your farm and make it look more attractive</li>
<li>You can save time and effort by using computer-steered assistants</li>
<li>You can have more fun exploring the open world with its dynamic day-night cycle</li>
</ul>
<h2>How to download and install FS 12 Mod APK Unlimited Money?</h2>
<h3>Steps to download and install FS 12 Mod APK Unlimited Money</h3>
<p>If you want to download and install FS 12 Mod APK Unlimited Money on your Android device, follow these steps:</p>
<ol>
<li>First, uninstall the original Farming Simulator 12 game from your device if you have it installed.</li>
<li>Second, enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
<li>Third, download the FS 12 Mod APK Unlimited Money file from a reliable source, such as the download link provided earlier in this article, and make sure you get the latest version of the mod.</li>
<li>Fourth, locate the downloaded file on your device and tap it to start the installation. Follow the on-screen instructions and wait for the installation to complete.</li>
<li>Fifth, launch the game and enjoy the unlimited money and features.</li>
</ol>
<h3>Tips and precautions for using FS 12 Mod APK Unlimited Money</h3>
<p>Before you start using FS 12 Mod APK Unlimited Money, keep these tips and precautions in mind:</p>
<ul>
<li>Make sure you have enough storage space on your device for the mod file and the game data.</li>
<li>Make sure you have a stable internet connection for downloading and installing the mod file.</li>
<li>Download the mod file from a trusted source and scan it for viruses or malware before installing it.</li>
<li>Back up your game data before uninstalling the original game or installing the mod file.</li>
<li>Do not use the mod file in online or multiplayer mode, as it may result in your account being banned or suspended.</li>
</ul>
<h2>Conclusion</h2>
<p>Farming Simulator 12 is a fun and realistic farming simulation game that lets you experience the varied farming life on your Android device. If you want to enjoy it without limitations, you can try FS 12 Mod APK Unlimited Money, a modified version of the game that gives you unlimited money and access to all features and items. You can download and install it by following the steps above, but make sure you also follow the tips and precautions. We hope this article was helpful and informative. Happy farming!</p>
<h2>FAQs</h2>
<h4>What is the difference between Farming Simulator 12 and Farming Simulator 14?</h4>
<p>Farming Simulator 12 and Farming Simulator 14 are two versions of the same game series by Giants Software. Farming Simulator 14 was released in 2013 for Android devices and adds improvements over Farming Simulator 12, such as new vehicles, equipment, crops, animals, maps, graphics, and gameplay modes.</p>
<h4>Is FS 12 Mod APK Unlimited Money safe to use?</h4>
<p>FS 12 Mod APK Unlimited Money is generally safe to use if you download it from a reliable source and scan it for viruses or malware before installing it. However, do not use it in online or multiplayer mode, as that may get your account banned or suspended, and back up your game data before uninstalling the original game or installing the mod file.</p>
<h4>How can I update FS 12 Mod APK Unlimited Money?</h4>
<p>To update FS 12 Mod APK Unlimited Money, uninstall the old version of the mod from your device, download the new version from a trusted source, and then follow the same installation steps as above. Back up your game data before uninstalling or installing the mod file.</p>
<h4>Can I play FS 12 Mod APK Unlimited Money on PC?</h4>
<p>To play FS 12 Mod APK Unlimited Money on PC, you need an Android emulator that lets you run Android apps and games on your computer, such as BlueStacks, NoxPlayer, MEmu, or LDPlayer. Install the emulator on your PC and then follow the same steps as above to download and install the mod inside it.</p>
<h4>Can I play FS 12 Mod APK Unlimited Money with my friends?</h4>
<p>You can play FS 12 Mod APK Unlimited Money with your friends over a local Wi-Fi network or a Bluetooth connection. Do not use the mod in online or multiplayer mode, as that may get your account banned or suspended, and make sure your friends have the same version of the mod file as you do.</p>
spaces/232labs/VToonify/vtoonify/model/raft/core/utils/flow_viz.py
DELETED
@@ -1,132 +0,0 @@
|
|
1 |
-
# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization
|
2 |
-
|
3 |
-
|
4 |
-
# MIT License
|
5 |
-
#
|
6 |
-
# Copyright (c) 2018 Tom Runia
|
7 |
-
#
|
8 |
-
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
9 |
-
# of this software and associated documentation files (the "Software"), to deal
|
10 |
-
# in the Software without restriction, including without limitation the rights
|
11 |
-
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
12 |
-
# copies of the Software, and to permit persons to whom the Software is
|
13 |
-
# furnished to do so, subject to conditions.
|
14 |
-
#
|
15 |
-
# Author: Tom Runia
|
16 |
-
# Date Created: 2018-08-03
|
17 |
-
|
18 |
-
import numpy as np
|
19 |
-
|
20 |
-
def make_colorwheel():
|
21 |
-
"""
|
22 |
-
Generates a color wheel for optical flow visualization as presented in:
|
23 |
-
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
|
24 |
-
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
|
25 |
-
|
26 |
-
Code follows the original C++ source code of Daniel Scharstein.
|
27 |
-
Code follows the the Matlab source code of Deqing Sun.
|
28 |
-
|
29 |
-
Returns:
|
30 |
-
np.ndarray: Color wheel
|
31 |
-
"""
|
32 |
-
|
33 |
-
RY = 15
|
34 |
-
YG = 6
|
35 |
-
GC = 4
|
36 |
-
CB = 11
|
37 |
-
BM = 13
|
38 |
-
MR = 6
|
39 |
-
|
40 |
-
ncols = RY + YG + GC + CB + BM + MR
|
41 |
-
colorwheel = np.zeros((ncols, 3))
|
42 |
-
col = 0
|
43 |
-
|
44 |
-
# RY
|
45 |
-
colorwheel[0:RY, 0] = 255
|
46 |
-
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
|
47 |
-
col = col+RY
|
48 |
-
# YG
|
49 |
-
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
|
50 |
-
colorwheel[col:col+YG, 1] = 255
|
51 |
-
col = col+YG
|
52 |
-
# GC
|
53 |
-
colorwheel[col:col+GC, 1] = 255
|
54 |
-
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
|
55 |
-
col = col+GC
|
56 |
-
# CB
|
57 |
-
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
|
58 |
-
colorwheel[col:col+CB, 2] = 255
|
59 |
-
col = col+CB
|
60 |
-
# BM
|
61 |
-
colorwheel[col:col+BM, 2] = 255
|
62 |
-
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
|
63 |
-
col = col+BM
|
64 |
-
# MR
|
65 |
-
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
|
66 |
-
colorwheel[col:col+MR, 0] = 255
|
67 |
-
return colorwheel
|
68 |
-
|
69 |
-
|
70 |
-
def flow_uv_to_colors(u, v, convert_to_bgr=False):
|
71 |
-
"""
|
72 |
-
Applies the flow color wheel to (possibly clipped) flow components u and v.
|
73 |
-
|
74 |
-
According to the C++ source code of Daniel Scharstein
|
75 |
-
According to the Matlab source code of Deqing Sun
|
76 |
-
|
77 |
-
Args:
|
78 |
-
u (np.ndarray): Input horizontal flow of shape [H,W]
|
79 |
-
v (np.ndarray): Input vertical flow of shape [H,W]
|
80 |
-
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
|
81 |
-
|
82 |
-
Returns:
|
83 |
-
np.ndarray: Flow visualization image of shape [H,W,3]
|
84 |
-
"""
|
85 |
-
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
|
86 |
-
colorwheel = make_colorwheel() # shape [55x3]
|
87 |
-
ncols = colorwheel.shape[0]
|
88 |
-
rad = np.sqrt(np.square(u) + np.square(v))
|
89 |
-
a = np.arctan2(-v, -u)/np.pi
|
90 |
-
fk = (a+1) / 2*(ncols-1)
|
91 |
-
k0 = np.floor(fk).astype(np.int32)
|
92 |
-
k1 = k0 + 1
|
93 |
-
k1[k1 == ncols] = 0
|
94 |
-
f = fk - k0
|
95 |
-
for i in range(colorwheel.shape[1]):
|
96 |
-
tmp = colorwheel[:,i]
|
97 |
-
col0 = tmp[k0] / 255.0
|
98 |
-
col1 = tmp[k1] / 255.0
|
99 |
-
col = (1-f)*col0 + f*col1
|
100 |
-
idx = (rad <= 1)
|
101 |
-
col[idx] = 1 - rad[idx] * (1-col[idx])
|
102 |
-
col[~idx] = col[~idx] * 0.75 # out of range
|
103 |
-
# Note the 2-i => BGR instead of RGB
|
104 |
-
ch_idx = 2-i if convert_to_bgr else i
|
105 |
-
flow_image[:,:,ch_idx] = np.floor(255 * col)
|
106 |
-
return flow_image
|
107 |
-
|
108 |
-
|
109 |
-
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
|
110 |
-
"""
|
111 |
-
Expects a two dimensional flow image of shape.
|
112 |
-
|
113 |
-
Args:
|
114 |
-
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
|
115 |
-
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
|
116 |
-
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
|
117 |
-
|
118 |
-
Returns:
|
119 |
-
np.ndarray: Flow visualization image of shape [H,W,3]
|
120 |
-
"""
|
121 |
-
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
|
122 |
-
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
|
123 |
-
if clip_flow is not None:
|
124 |
-
flow_uv = np.clip(flow_uv, 0, clip_flow)
|
125 |
-
u = flow_uv[:,:,0]
|
126 |
-
v = flow_uv[:,:,1]
|
127 |
-
rad = np.sqrt(np.square(u) + np.square(v))
|
128 |
-
rad_max = np.max(rad)
|
129 |
-
epsilon = 1e-5
|
130 |
-
u = u / (rad_max + epsilon)
|
131 |
-
v = v / (rad_max + epsilon)
|
132 |
-
return flow_uv_to_colors(u, v, convert_to_bgr)
|
spaces/44brabal/valentinafeve-yolos-fashionpedia/app.py
DELETED
@@ -1,7 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import torch
|
3 |
-
from transformers import pipeline
|
4 |
-
|
5 |
-
pipe = pipeline("object-detection", model="valentinafeve/yolos-fashionpedia")
|
6 |
-
|
7 |
-
gr.Interface.load("models/valentinafeve/yolos-fashionpedia").launch()
|
spaces/A00001/bingothoo/src/components/user-menu.tsx
DELETED
@@ -1,113 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import { useEffect, useState } from 'react'
|
4 |
-
import Image from 'next/image'
|
5 |
-
import { toast } from 'react-hot-toast'
|
6 |
-
import { Button } from '@/components/ui/button'
|
7 |
-
import pkg from '../../package.json'
|
8 |
-
import {
|
9 |
-
DropdownMenu,
|
10 |
-
DropdownMenuContent,
|
11 |
-
DropdownMenuItem,
|
12 |
-
DropdownMenuSeparator,
|
13 |
-
DropdownMenuTrigger
|
14 |
-
} from '@/components/ui/dropdown-menu'
|
15 |
-
import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons'
|
16 |
-
import SettingIcon from '@/assets/images/settings.svg'
|
17 |
-
import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard'
|
18 |
-
|
19 |
-
export function UserMenu() {
|
20 |
-
const [host, setHost] = useState('')
|
21 |
-
const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 })
|
22 |
-
useEffect(() => {
|
23 |
-
setHost(location.host)
|
24 |
-
}, [])
|
25 |
-
|
26 |
-
useEffect(() => {
|
27 |
-
if (isCopied) {
|
28 |
-
toast.success('复制成功')
|
29 |
-
}
|
30 |
-
}, [isCopied])
|
31 |
-
return (
|
32 |
-
<div className="flex items-center justify-between">
|
33 |
-
<DropdownMenu>
|
34 |
-
<DropdownMenuTrigger asChild>
|
35 |
-
<Button className="pl-0">
|
36 |
-
<div className="flex items-center justify-center text-xs font-medium uppercase rounded-full select-none h-7 w-7 shrink-0 bg-muted/50 text-muted-foreground">
|
37 |
-
<Image alt="settings" src={SettingIcon} width={20} />
|
38 |
-
</div>
|
39 |
-
<span className="ml-2">设置</span>
|
40 |
-
</Button>
|
41 |
-
</DropdownMenuTrigger>
|
42 |
-
<DropdownMenuContent sideOffset={8} align="start" className="w-[180px] bg-background">
|
43 |
-
<DropdownMenuItem
|
44 |
-
onClick={() =>
|
45 |
-
location.href='#dialog="settings"'
|
46 |
-
}
|
47 |
-
className="cursor-pointer"
|
48 |
-
>
|
49 |
-
设置用户
|
50 |
-
</DropdownMenuItem>
|
51 |
-
<DropdownMenuSeparator />
|
52 |
-
<DropdownMenuItem
|
53 |
-
onClick={() =>
|
54 |
-
location.href='#dialog="voice"'
|
55 |
-
}
|
56 |
-
className="cursor-pointer"
|
57 |
-
>
|
58 |
-
语音设置
|
59 |
-
</DropdownMenuItem>
|
60 |
-
<DropdownMenuSeparator />
|
61 |
-
<DropdownMenuItem asChild>
|
62 |
-
<a
|
63 |
-
href="https://github.com/weaigc/bingo/"
|
64 |
-
target="_blank"
|
65 |
-
rel="noopener noreferrer"
|
66 |
-
className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
|
67 |
-
>
|
68 |
-
开源地址
|
69 |
-
<IconGitHub />
|
70 |
-
<IconExternalLink className="w-3 h-3 ml-auto" />
|
71 |
-
</a>
|
72 |
-
</DropdownMenuItem>
|
73 |
-
<DropdownMenuSeparator />
|
74 |
-
<DropdownMenuItem asChild>
|
75 |
-
<a
|
76 |
-
href="https://huggingface.co/spaces/hf4all/bingo"
|
77 |
-
target="_blank"
|
78 |
-
rel="noopener noreferrer"
|
79 |
-
className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
|
80 |
-
>
|
81 |
-
托管地址
|
82 |
-
🤗
|
83 |
-
<IconExternalLink className="w-3 h-3 ml-auto" />
|
84 |
-
</a>
|
85 |
-
</DropdownMenuItem>
|
86 |
-
<DropdownMenuSeparator />
|
87 |
-
<DropdownMenuItem asChild>
|
88 |
-
<a
|
89 |
-
href="https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic"
|
90 |
-
target="_blank"
|
91 |
-
rel="noopener noreferrer"
|
92 |
-
className="inline-flex items-center justify-between w-full gap-2 cursor-pointer"
|
93 |
-
>
|
94 |
-
复制站点
|
95 |
-
<IconExternalLink className="w-3 h-3 ml-auto" />
|
96 |
-
</a>
|
97 |
-
</DropdownMenuItem>
|
98 |
-
<DropdownMenuSeparator />
|
99 |
-
<DropdownMenuItem className="flex-col items-start">
|
100 |
-
<div className="font-medium">版本信息 {pkg.version}</div>
|
101 |
-
</DropdownMenuItem>
|
102 |
-
<DropdownMenuSeparator />
|
103 |
-
<DropdownMenuItem className="flex-col items-start">
|
104 |
-
<div className="font-medium">站点域名</div>
|
105 |
-
<div onClick={() => copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer">
|
106 |
-
{host} <IconCopy />
|
107 |
-
</div>
|
108 |
-
</DropdownMenuItem>
|
109 |
-
</DropdownMenuContent>
|
110 |
-
</DropdownMenu>
|
111 |
-
</div>
|
112 |
-
)
|
113 |
-
}
|
spaces/AEUPH/AethericGPT/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: AethericGPT
|
3 |
-
emoji: 🏃
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: purple
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.40.1
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: cc-by-3.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py
DELETED
@@ -1,150 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import torch
|
4 |
-
import socket
|
5 |
-
|
6 |
-
try:
|
7 |
-
import horovod.torch as hvd
|
8 |
-
except ImportError:
|
9 |
-
hvd = None
|
10 |
-
|
11 |
-
|
12 |
-
def is_global_master(args):
|
13 |
-
return args.rank == 0
|
14 |
-
|
15 |
-
|
16 |
-
def is_local_master(args):
|
17 |
-
return args.local_rank == 0
|
18 |
-
|
19 |
-
|
20 |
-
def is_master(args, local=False):
|
21 |
-
return is_local_master(args) if local else is_global_master(args)
|
22 |
-
|
23 |
-
|
24 |
-
def is_using_horovod():
|
25 |
-
# NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
|
26 |
-
# Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
|
27 |
-
ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
|
28 |
-
pmi_vars = ["PMI_RANK", "PMI_SIZE"]
|
29 |
-
if all([var in os.environ for var in ompi_vars]) or all(
|
30 |
-
[var in os.environ for var in pmi_vars]
|
31 |
-
):
|
32 |
-
return True
|
33 |
-
else:
|
34 |
-
return False
|
35 |
-
|
36 |
-
|
37 |
-
def is_using_distributed():
|
38 |
-
if "WORLD_SIZE" in os.environ:
|
39 |
-
return int(os.environ["WORLD_SIZE"]) > 1
|
40 |
-
if "SLURM_NTASKS" in os.environ:
|
41 |
-
return int(os.environ["SLURM_NTASKS"]) > 1
|
42 |
-
return False
|
43 |
-
|
44 |
-
|
45 |
-
def world_info_from_env():
|
46 |
-
local_rank = 0
|
47 |
-
for v in (
|
48 |
-
"SLURM_LOCALID",
|
49 |
-
"MPI_LOCALRANKID",
|
50 |
-
"OMPI_COMM_WORLD_LOCAL_RANK",
|
51 |
-
"LOCAL_RANK",
|
52 |
-
):
|
53 |
-
if v in os.environ:
|
54 |
-
local_rank = int(os.environ[v])
|
55 |
-
break
|
56 |
-
global_rank = 0
|
57 |
-
for v in ("SLURM_PROCID", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "RANK"):
|
58 |
-
if v in os.environ:
|
59 |
-
global_rank = int(os.environ[v])
|
60 |
-
break
|
61 |
-
world_size = 1
|
62 |
-
for v in ("SLURM_NTASKS", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "WORLD_SIZE"):
|
63 |
-
if v in os.environ:
|
64 |
-
world_size = int(os.environ[v])
|
65 |
-
break
|
66 |
-
|
67 |
-
return local_rank, global_rank, world_size
|
68 |
-
|
69 |
-
|
70 |
-
def init_distributed_device(args):
|
71 |
-
# Distributed training = training on more than one GPU.
|
72 |
-
# Works in both single and multi-node scenarios.
|
73 |
-
args.distributed = False
|
74 |
-
args.world_size = 1
|
75 |
-
args.rank = 0 # global rank
|
76 |
-
args.local_rank = 0
|
77 |
-
if args.horovod:
|
78 |
-
assert hvd is not None, "Horovod is not installed"
|
79 |
-
hvd.init()
|
80 |
-
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
|
81 |
-
world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
|
82 |
-
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
|
83 |
-
args.local_rank = local_rank
|
84 |
-
args.rank = world_rank
|
85 |
-
args.world_size = world_size
|
86 |
-
# args.local_rank = int(hvd.local_rank())
|
87 |
-
# args.rank = hvd.rank()
|
88 |
-
# args.world_size = hvd.size()
|
89 |
-
args.distributed = True
|
90 |
-
os.environ["LOCAL_RANK"] = str(args.local_rank)
|
91 |
-
os.environ["RANK"] = str(args.rank)
|
92 |
-
os.environ["WORLD_SIZE"] = str(args.world_size)
|
93 |
-
print(
|
94 |
-
f"Distributed training: local_rank={args.local_rank}, "
|
95 |
-
f"rank={args.rank}, world_size={args.world_size}, "
|
96 |
-
f"hostname={socket.gethostname()}, pid={os.getpid()}"
|
97 |
-
)
|
98 |
-
elif is_using_distributed():
|
99 |
-
if "SLURM_PROCID" in os.environ:
|
100 |
-
# DDP via SLURM
|
101 |
-
args.local_rank, args.rank, args.world_size = world_info_from_env()
|
102 |
-
# SLURM var -> torch.distributed vars in case needed
|
103 |
-
os.environ["LOCAL_RANK"] = str(args.local_rank)
|
104 |
-
os.environ["RANK"] = str(args.rank)
|
105 |
-
os.environ["WORLD_SIZE"] = str(args.world_size)
|
106 |
-
torch.distributed.init_process_group(
|
107 |
-
backend=args.dist_backend,
|
108 |
-
init_method=args.dist_url,
|
109 |
-
world_size=args.world_size,
|
110 |
-
rank=args.rank,
|
111 |
-
)
|
112 |
-
elif "OMPI_COMM_WORLD_SIZE" in os.environ: # using Summit cluster
|
113 |
-
world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"])
|
114 |
-
world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"])
|
115 |
-
local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
|
116 |
-
args.local_rank = local_rank
|
117 |
-
args.rank = world_rank
|
118 |
-
args.world_size = world_size
|
119 |
-
torch.distributed.init_process_group(
|
120 |
-
backend=args.dist_backend,
|
121 |
-
init_method=args.dist_url,
|
122 |
-
world_size=args.world_size,
|
123 |
-
rank=args.rank,
|
124 |
-
)
|
125 |
-
else:
|
126 |
-
# DDP via torchrun, torch.distributed.launch
|
127 |
-
args.local_rank, _, _ = world_info_from_env()
|
128 |
-
torch.distributed.init_process_group(
|
129 |
-
backend=args.dist_backend, init_method=args.dist_url
|
130 |
-
)
|
131 |
-
args.world_size = torch.distributed.get_world_size()
|
132 |
-
args.rank = torch.distributed.get_rank()
|
133 |
-
args.distributed = True
|
134 |
-
print(
|
135 |
-
f"Distributed training: local_rank={args.local_rank}, "
|
136 |
-
f"rank={args.rank}, world_size={args.world_size}, "
|
137 |
-
f"hostname={socket.gethostname()}, pid={os.getpid()}"
|
138 |
-
)
|
139 |
-
|
140 |
-
if torch.cuda.is_available():
|
141 |
-
if args.distributed and not args.no_set_device_rank:
|
142 |
-
device = "cuda:%d" % args.local_rank
|
143 |
-
else:
|
144 |
-
device = "cuda:0"
|
145 |
-
torch.cuda.set_device(device)
|
146 |
-
else:
|
147 |
-
device = "cpu"
|
148 |
-
args.device = device
|
149 |
-
device = torch.device(device)
|
150 |
-
return device
|
spaces/AIGText/GlyphControl/ldm/modules/midas/__init__.py
DELETED
File without changes
|
spaces/AISuperheroes/README/README.md
DELETED
@@ -1,15 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: README
|
3 |
-
emoji: 🐠
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: blue
|
6 |
-
sdk: static
|
7 |
-
pinned: false
|
8 |
-
---
|
9 |
-
<div class="grid lg:grid-cols-3 gap-x-4 gap-y-7">
|
10 |
-
<p class="lg:col-span-3">
|
11 |
-
This classroom is a public open source forum to create and teach AI together. Our goal is turning Pain to Joy and ultimately creating Superpowers for those in need.
|
12 |
-
More info and long term documentation of our progress is at 🌲<b> <a href="https://github.com/AaronCWacker/Yggdrasil/">Yggdrasil</a> </b> Yggdrasil 🌲<br />
|
13 |
-
Intended audience are those interested in learning, or teaching AI and creative technologies for health care and clinical experts, but making it fast and easy to meet goals of anyone interested in learning new technologies like the 🥇Huggingface AI Platform🥇, Streamlit, Gradio, ML Models, Datasets and 🥇HF Spaces🥇.
|
14 |
-
</p>
|
15 |
-
</div>
|
spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/__init__.py
DELETED
File without changes
|
spaces/Abhilashvj/planogram-compliance/utils/segment/__init__.py
DELETED
File without changes
|
spaces/Adapter/T2I-Adapter/configs/mm/faster_rcnn_r50_fpn_coco.py
DELETED
@@ -1,182 +0,0 @@
|
|
1 |
-
checkpoint_config = dict(interval=1)
|
2 |
-
# yapf:disable
|
3 |
-
log_config = dict(
|
4 |
-
interval=50,
|
5 |
-
hooks=[
|
6 |
-
dict(type='TextLoggerHook'),
|
7 |
-
# dict(type='TensorboardLoggerHook')
|
8 |
-
])
|
9 |
-
# yapf:enable
|
10 |
-
dist_params = dict(backend='nccl')
|
11 |
-
log_level = 'INFO'
|
12 |
-
load_from = None
|
13 |
-
resume_from = None
|
14 |
-
workflow = [('train', 1)]
|
15 |
-
# optimizer
|
16 |
-
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
|
17 |
-
optimizer_config = dict(grad_clip=None)
|
18 |
-
# learning policy
|
19 |
-
lr_config = dict(
|
20 |
-
policy='step',
|
21 |
-
warmup='linear',
|
22 |
-
warmup_iters=500,
|
23 |
-
warmup_ratio=0.001,
|
24 |
-
step=[8, 11])
|
25 |
-
total_epochs = 12
|
26 |
-
|
27 |
-
model = dict(
|
28 |
-
type='FasterRCNN',
|
29 |
-
pretrained='torchvision://resnet50',
|
30 |
-
backbone=dict(
|
31 |
-
type='ResNet',
|
32 |
-
depth=50,
|
33 |
-
num_stages=4,
|
34 |
-
out_indices=(0, 1, 2, 3),
|
35 |
-
frozen_stages=1,
|
36 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
37 |
-
norm_eval=True,
|
38 |
-
style='pytorch'),
|
39 |
-
neck=dict(
|
40 |
-
type='FPN',
|
41 |
-
in_channels=[256, 512, 1024, 2048],
|
42 |
-
out_channels=256,
|
43 |
-
num_outs=5),
|
44 |
-
rpn_head=dict(
|
45 |
-
type='RPNHead',
|
46 |
-
in_channels=256,
|
47 |
-
feat_channels=256,
|
48 |
-
anchor_generator=dict(
|
49 |
-
type='AnchorGenerator',
|
50 |
-
scales=[8],
|
51 |
-
ratios=[0.5, 1.0, 2.0],
|
52 |
-
strides=[4, 8, 16, 32, 64]),
|
53 |
-
bbox_coder=dict(
|
54 |
-
type='DeltaXYWHBBoxCoder',
|
55 |
-
target_means=[.0, .0, .0, .0],
|
56 |
-
target_stds=[1.0, 1.0, 1.0, 1.0]),
|
57 |
-
loss_cls=dict(
|
58 |
-
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
|
59 |
-
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
|
60 |
-
roi_head=dict(
|
61 |
-
type='StandardRoIHead',
|
62 |
-
bbox_roi_extractor=dict(
|
63 |
-
type='SingleRoIExtractor',
|
64 |
-
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
|
65 |
-
out_channels=256,
|
66 |
-
featmap_strides=[4, 8, 16, 32]),
|
67 |
-
bbox_head=dict(
|
68 |
-
type='Shared2FCBBoxHead',
|
69 |
-
in_channels=256,
|
70 |
-
fc_out_channels=1024,
|
71 |
-
roi_feat_size=7,
|
72 |
-
num_classes=80,
|
73 |
-
bbox_coder=dict(
|
74 |
-
type='DeltaXYWHBBoxCoder',
|
75 |
-
target_means=[0., 0., 0., 0.],
|
76 |
-
target_stds=[0.1, 0.1, 0.2, 0.2]),
|
77 |
-
reg_class_agnostic=False,
|
78 |
-
loss_cls=dict(
|
79 |
-
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
|
80 |
-
loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
|
81 |
-
# model training and testing settings
|
82 |
-
train_cfg=dict(
|
83 |
-
rpn=dict(
|
84 |
-
assigner=dict(
|
85 |
-
type='MaxIoUAssigner',
|
86 |
-
pos_iou_thr=0.7,
|
87 |
-
neg_iou_thr=0.3,
|
88 |
-
min_pos_iou=0.3,
|
89 |
-
match_low_quality=True,
|
90 |
-
ignore_iof_thr=-1),
|
91 |
-
sampler=dict(
|
92 |
-
type='RandomSampler',
|
93 |
-
num=256,
|
94 |
-
pos_fraction=0.5,
|
95 |
-
neg_pos_ub=-1,
|
96 |
-
add_gt_as_proposals=False),
|
97 |
-
allowed_border=-1,
|
98 |
-
pos_weight=-1,
|
99 |
-
debug=False),
|
100 |
-
rpn_proposal=dict(
|
101 |
-
nms_pre=2000,
|
102 |
-
max_per_img=1000,
|
103 |
-
nms=dict(type='nms', iou_threshold=0.7),
|
104 |
-
min_bbox_size=0),
|
105 |
-
rcnn=dict(
|
106 |
-
assigner=dict(
|
107 |
-
type='MaxIoUAssigner',
|
108 |
-
pos_iou_thr=0.5,
|
109 |
-
neg_iou_thr=0.5,
|
110 |
-
min_pos_iou=0.5,
|
111 |
-
match_low_quality=False,
|
112 |
-
ignore_iof_thr=-1),
|
113 |
-
sampler=dict(
|
114 |
-
type='RandomSampler',
|
115 |
-
num=512,
|
116 |
-
pos_fraction=0.25,
|
117 |
-
neg_pos_ub=-1,
|
118 |
-
add_gt_as_proposals=True),
|
119 |
-
pos_weight=-1,
|
120 |
-
debug=False)),
|
121 |
-
test_cfg=dict(
|
122 |
-
rpn=dict(
|
123 |
-
nms_pre=1000,
|
124 |
-
max_per_img=1000,
|
125 |
-
nms=dict(type='nms', iou_threshold=0.7),
|
126 |
-
min_bbox_size=0),
|
127 |
-
rcnn=dict(
|
128 |
-
score_thr=0.05,
|
129 |
-
nms=dict(type='nms', iou_threshold=0.5),
|
130 |
-
max_per_img=100)
|
131 |
-
# soft-nms is also supported for rcnn testing
|
132 |
-
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
|
133 |
-
))
|
134 |
-
|
135 |
-
dataset_type = 'CocoDataset'
|
136 |
-
data_root = 'data/coco'
|
137 |
-
img_norm_cfg = dict(
|
138 |
-
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
139 |
-
train_pipeline = [
|
140 |
-
dict(type='LoadImageFromFile'),
|
141 |
-
dict(type='LoadAnnotations', with_bbox=True),
|
142 |
-
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
|
143 |
-
dict(type='RandomFlip', flip_ratio=0.5),
|
144 |
-
dict(type='Normalize', **img_norm_cfg),
|
145 |
-
dict(type='Pad', size_divisor=32),
|
146 |
-
dict(type='DefaultFormatBundle'),
|
147 |
-
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
|
148 |
-
]
|
149 |
-
test_pipeline = [
|
150 |
-
dict(type='LoadImageFromFile'),
|
151 |
-
dict(
|
152 |
-
type='MultiScaleFlipAug',
|
153 |
-
img_scale=(1333, 800),
|
154 |
-
flip=False,
|
155 |
-
transforms=[
|
156 |
-
dict(type='Resize', keep_ratio=True),
|
157 |
-
dict(type='RandomFlip'),
|
158 |
-
dict(type='Normalize', **img_norm_cfg),
|
159 |
-
dict(type='Pad', size_divisor=32),
|
160 |
-
dict(type='DefaultFormatBundle'),
|
161 |
-
dict(type='Collect', keys=['img']),
|
162 |
-
])
|
163 |
-
]
|
164 |
-
data = dict(
|
165 |
-
samples_per_gpu=2,
|
166 |
-
workers_per_gpu=2,
|
167 |
-
train=dict(
|
168 |
-
type=dataset_type,
|
169 |
-
ann_file=f'{data_root}/annotations/instances_train2017.json',
|
170 |
-
img_prefix=f'{data_root}/train2017/',
|
171 |
-
pipeline=train_pipeline),
|
172 |
-
val=dict(
|
173 |
-
type=dataset_type,
|
174 |
-
ann_file=f'{data_root}/annotations/instances_val2017.json',
|
175 |
-
img_prefix=f'{data_root}/val2017/',
|
176 |
-
pipeline=test_pipeline),
|
177 |
-
test=dict(
|
178 |
-
type=dataset_type,
|
179 |
-
ann_file=f'{data_root}/annotations/instances_val2017.json',
|
180 |
-
img_prefix=f'{data_root}/val2017/',
|
181 |
-
pipeline=test_pipeline))
|
182 |
-
evaluation = dict(interval=1, metric='bbox')
|
spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/app.py
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
from fastai.vision.all import *
|
3 |
-
import skimage
|
4 |
-
|
5 |
-
learn = load_learner('export.pkl')
|
6 |
-
|
7 |
-
labels = learn.dls.vocab
|
8 |
-
def predict(img):
|
9 |
-
img = PILImage.create(img)
|
10 |
-
pred,pred_idx,probs = learn.predict(img)
|
11 |
-
prediction = str(pred)
|
12 |
-
|
13 |
-
return prediction
|
14 |
-
|
15 |
-
|
16 |
-
title = "Breast cancer detection with Deep Transfer Learning(ResNet18)"
|
17 |
-
description = "<p style='text-align: center'><b>As a radiologist or oncologist, it is crucial to know what is wrong with a breast x-ray image.<b><br><b>Upload the breast X-ray image to know what is wrong with a patients breast with or without inplant<b><p>"
|
18 |
-
article="<p style='text-align: center'>Web app is built and managed by Addai Fosberg<b></p>"
|
19 |
-
examples = ['img1.jpeg', 'img2.jpeg']
|
20 |
-
enable_queue=True
|
21 |
-
#interpretation='default'
|
22 |
-
|
23 |
-
gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(512, 512)),outputs=gr.outputs.Label(num_top_classes=3),title=title,description=description,article=article,examples=examples,enable_queue=enable_queue).launch()
|
spaces/AdityaVishwakarma/LiveChecker/README.md
DELETED
@@ -1,13 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: LiveChecker
|
3 |
-
emoji: 🦀
|
4 |
-
colorFrom: purple
|
5 |
-
colorTo: pink
|
6 |
-
sdk: streamlit
|
7 |
-
sdk_version: 1.27.2
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: apache-2.0
|
11 |
-
---
|
12 |
-
|
13 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/PreTest.js
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
1. Test if there has any matched line after chess swapping
|
3 |
-
*/
|
4 |
-
|
5 |
-
import RefreshSymbolCache from './match/RefreshSymbolCache.js';
|
6 |
-
import AnyMatch from './match/AnyMatch.js';
|
7 |
-
|
8 |
-
var PreTest = function () {
|
9 |
-
var match = this.match;
|
10 |
-
var directions = this.board.grid.halfDirections;
|
11 |
-
var tileB;
|
12 |
-
RefreshSymbolCache.call(this); // only refresh symbol cache once
|
13 |
-
for (var tileY = (this.board.height / 2), rowCnt = this.board.height; tileY < rowCnt; tileY++) {
|
14 |
-
for (var tileX = 0, colCnt = this.board.width; tileX < colCnt; tileX++) {
|
15 |
-
tileA.x = tileX;
|
16 |
-
tileA.y = tileY;
|
17 |
-
for (var dir = 0, dirCnt = directions.length; dir < dirCnt; dir++) {
|
18 |
-
tileB = this.board.getNeighborTileXY(tileA, dir);
|
19 |
-
// swap symbol
|
20 |
-
swapSymbols(match, tileA, tileB);
|
21 |
-
// any match?
|
22 |
-
this.preTestResult = AnyMatch.call(this, 3);
|
23 |
-
// swap symbol back
|
24 |
-
swapSymbols(match, tileA, tileB);
|
25 |
-
|
26 |
-
if (this.preTestResult) {
|
27 |
-
return true;
|
28 |
-
}
|
29 |
-
}
|
30 |
-
}
|
31 |
-
}
|
32 |
-
return false;
|
33 |
-
}
|
34 |
-
|
35 |
-
var swapSymbols = function (match, tileA, tileB) {
|
36 |
-
var symbolA = match.getSymbol(tileA.x, tileA.y);
|
37 |
-
var symbolB = match.getSymbol(tileB.x, tileB.y);
|
38 |
-
match.setSymbol(tileA.x, tileA.y, symbolB);
|
39 |
-
match.setSymbol(tileB.x, tileB.y, symbolA);
|
40 |
-
};
|
41 |
-
|
42 |
-
var tileA = {
|
43 |
-
x: 0,
|
44 |
-
y: 0
|
45 |
-
};
|
46 |
-
|
47 |
-
export default PreTest;
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/LayoutChildren.js
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
import ResizeGameObject from '../../../plugins/utils/size/ResizeGameObject.js';
|
2 |
-
import PreLayoutChild from '../basesizer/utils/PreLayoutChild.js';
|
3 |
-
import LayoutChild from '../basesizer/utils/LayoutChild.js';
|
4 |
-
import CheckSize from '../basesizer/utils/CheckSize.js';
|
5 |
-
|
6 |
-
var LayoutChildren = function () {
|
7 |
-
var child, childConfig, padding;
|
8 |
-
var startX = this.innerLeft,
|
9 |
-
startY = this.innerTop;
|
10 |
-
var itemX,
|
11 |
-
itemY = startY;
|
12 |
-
var x, y, width, height; // Align zone
|
13 |
-
var childWidth, childHeight;
|
14 |
-
// Layout grid children
|
15 |
-
var columnSpace = this.space.column,
|
16 |
-
rowSpace = this.space.row,
|
17 |
-
indentLeftOdd = this.space.indentLeftOdd,
|
18 |
-
indentLeftEven = this.space.indentLeftEven,
|
19 |
-
indentTopOdd = this.space.indentTopOdd,
|
20 |
-
indentTopEven = this.space.indentTopEven;
|
21 |
-
|
22 |
-
var colWidth, rowHeight;
|
23 |
-
var indentLeft, indentTop;
|
24 |
-
for (var rowIndex = 0; rowIndex < this.rowCount; rowIndex++) {
|
25 |
-
rowHeight = this.getRowHeight(rowIndex);
|
26 |
-
|
27 |
-
indentLeft = (rowIndex % 2) ? indentLeftEven : indentLeftOdd;
|
28 |
-
itemX = startX + indentLeft;
|
29 |
-
for (var columnIndex = 0; columnIndex < this.columnCount; columnIndex++) {
|
30 |
-
colWidth = this.getColumnWidth(columnIndex);
|
31 |
-
|
32 |
-
child = this.getChildAt(columnIndex, rowIndex);
|
33 |
-
if ((!child) || (child.rexSizer.hidden)) {
|
34 |
-
itemX += (colWidth + columnSpace[columnIndex]);
|
35 |
-
continue;
|
36 |
-
}
|
37 |
-
|
38 |
-
PreLayoutChild.call(this, child);
|
39 |
-
|
40 |
-
childWidth = this.getExpandedChildWidth(child, colWidth);
|
41 |
-
childHeight = this.getExpandedChildHeight(child, rowHeight);
|
42 |
-
if (child.isRexSizer) {
|
43 |
-
child.runLayout(this, childWidth, childHeight);
|
44 |
-
CheckSize(child, this);
|
45 |
-
} else {
|
46 |
-
ResizeGameObject(child, childWidth, childHeight);
|
47 |
-
}
|
48 |
-
|
49 |
-
childConfig = child.rexSizer;
|
50 |
-
padding = childConfig.padding;
|
51 |
-
|
52 |
-
x = (itemX + padding.left);
|
53 |
-
width = colWidth - padding.left - padding.right;
|
54 |
-
|
55 |
-
indentTop = (columnIndex % 2) ? indentTopEven : indentTopOdd;
|
56 |
-
y = (itemY + indentTop + padding.top);
|
57 |
-
height = rowHeight - padding.top - padding.bottom;
|
58 |
-
|
59 |
-
LayoutChild.call(this, child, x, y, width, height, childConfig.align);
|
60 |
-
|
61 |
-
itemX += (colWidth + columnSpace[columnIndex]);
|
62 |
-
}
|
63 |
-
|
64 |
-
itemY += (rowHeight + rowSpace[rowIndex]);
|
65 |
-
}
|
66 |
-
}
|
67 |
-
|
68 |
-
export default LayoutChildren;
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.js
DELETED
@@ -1,2 +0,0 @@
|
|
1 |
-
import { Pan } from '../../../plugins/gestures';
|
2 |
-
export default Pan;
|
spaces/Alycer/VITS-Umamusume-voice-synthesizer/mel_processing.py
DELETED
@@ -1,101 +0,0 @@
-import torch
-import torch.utils.data
-from librosa.filters import mel as librosa_mel_fn
-
-MAX_WAV_VALUE = 32768.0
-
-
-def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
-    """
-    PARAMS
-    ------
-    C: compression factor
-    """
-    return torch.log(torch.clamp(x, min=clip_val) * C)
-
-
-def dynamic_range_decompression_torch(x, C=1):
-    """
-    PARAMS
-    ------
-    C: compression factor used to compress
-    """
-    return torch.exp(x) / C
-
-
-def spectral_normalize_torch(magnitudes):
-    output = dynamic_range_compression_torch(magnitudes)
-    return output
-
-
-def spectral_de_normalize_torch(magnitudes):
-    output = dynamic_range_decompression_torch(magnitudes)
-    return output
-
-
-mel_basis = {}
-hann_window = {}
-
-
-def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-    return spec
-
-
-def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
-    global mel_basis
-    dtype_device = str(spec.dtype) + '_' + str(spec.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-    return spec
-
-
-def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
-    if torch.min(y) < -1.:
-        print('min value is ', torch.min(y))
-    if torch.max(y) > 1.:
-        print('max value is ', torch.max(y))
-
-    global mel_basis, hann_window
-    dtype_device = str(y.dtype) + '_' + str(y.device)
-    fmax_dtype_device = str(fmax) + '_' + dtype_device
-    wnsize_dtype_device = str(win_size) + '_' + dtype_device
-    if fmax_dtype_device not in mel_basis:
-        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
-        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device)
-    if wnsize_dtype_device not in hann_window:
-        hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device)
-
-    y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect')
-    y = y.squeeze(1)
-
-    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
-                      center=center, pad_mode='reflect', normalized=False, onesided=True)
-
-    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
-
-    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
-    spec = spectral_normalize_torch(spec)
-
-    return spec

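For orientation, here is a minimal usage sketch for the deleted helper above. It is not taken from the Space itself; the waveform, STFT parameters and version constraints are assumptions (an older PyTorch that accepts the `torch.stft` call style used above, and librosa < 0.10, since `librosa_mel_fn` is called with positional arguments).

```python
# Hypothetical call into the deleted mel_processing module (assumes it is importable,
# plus librosa < 0.10 and a PyTorch version that accepts the torch.stft usage above).
import torch
from mel_processing import mel_spectrogram_torch

waveform = torch.rand(1, 22050 * 2) * 2 - 1  # two seconds of fake audio in [-1, 1]
mel = mel_spectrogram_torch(
    waveform,
    n_fft=1024, num_mels=80, sampling_rate=22050,
    hop_size=256, win_size=1024,
    fmin=0.0, fmax=None,  # fmax=None lets librosa default to sampling_rate / 2
    center=False)
print(mel.shape)  # -> torch.Size([1, 80, frames])
```
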
spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/custom_ops.py
DELETED
@@ -1,126 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-import os
-import glob
-import torch
-import torch.utils.cpp_extension
-import importlib
-import hashlib
-import shutil
-from pathlib import Path
-
-from torch.utils.file_baton import FileBaton
-
-#----------------------------------------------------------------------------
-# Global options.
-
-verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
-
-#----------------------------------------------------------------------------
-# Internal helper funcs.
-
-def _find_compiler_bindir():
-    patterns = [
-        'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
-        'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
-        'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
-        'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
-    ]
-    for pattern in patterns:
-        matches = sorted(glob.glob(pattern))
-        if len(matches):
-            return matches[-1]
-    return None
-
-#----------------------------------------------------------------------------
-# Main entry point for compiling and loading C++/CUDA plugins.
-
-_cached_plugins = dict()
-
-def get_plugin(module_name, sources, **build_kwargs):
-    assert verbosity in ['none', 'brief', 'full']
-
-    # Already cached?
-    if module_name in _cached_plugins:
-        return _cached_plugins[module_name]
-
-    # Print status.
-    if verbosity == 'full':
-        print(f'Setting up PyTorch plugin "{module_name}"...')
-    elif verbosity == 'brief':
-        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
-
-    try: # pylint: disable=too-many-nested-blocks
-        # Make sure we can find the necessary compiler binaries.
-        if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
-            compiler_bindir = _find_compiler_bindir()
-            if compiler_bindir is None:
-                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
-            os.environ['PATH'] += ';' + compiler_bindir
-
-        # Compile and load.
-        verbose_build = (verbosity == 'full')
-
-        # Incremental build md5sum trickery. Copies all the input source files
-        # into a cached build directory under a combined md5 digest of the input
-        # source files. Copying is done only if the combined digest has changed.
-        # This keeps input file timestamps and filenames the same as in previous
-        # extension builds, allowing for fast incremental rebuilds.
-        #
-        # This optimization is done only in case all the source files reside in
-        # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
-        # environment variable is set (we take this as a signal that the user
-        # actually cares about this.)
-        source_dirs_set = set(os.path.dirname(source) for source in sources)
-        if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ):
-            all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file()))
-
-            # Compute a combined hash digest for all source files in the same
-            # custom op directory (usually .cu, .cpp, .py and .h files).
-            hash_md5 = hashlib.md5()
-            for src in all_source_files:
-                with open(src, 'rb') as f:
-                    hash_md5.update(f.read())
-            build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
-            digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())
-
-            if not os.path.isdir(digest_build_dir):
-                os.makedirs(digest_build_dir, exist_ok=True)
-                baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
-                if baton.try_acquire():
-                    try:
-                        for src in all_source_files:
-                            shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
-                    finally:
-                        baton.release()
-                else:
-                    # Someone else is copying source files under the digest dir,
-                    # wait until done and continue.
-                    baton.wait()
-            digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
-            torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir,
-                                           verbose=verbose_build, sources=digest_sources, **build_kwargs)
-        else:
-            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
-        module = importlib.import_module(module_name)
-
-    except:
-        if verbosity == 'brief':
-            print('Failed!')
-        raise
-
-    # Print status and add to cache.
-    if verbosity == 'full':
-        print(f'Done setting up PyTorch plugin "{module_name}".')
-    elif verbosity == 'brief':
-        print('Done.')
-    _cached_plugins[module_name] = module
-    return module
-
-#----------------------------------------------------------------------------

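In the StyleGAN-family code this module ships with, `get_plugin` is what JIT-compiles the fused CUDA kernels on first use. A rough invocation sketch follows; the module name, source paths and extra flags are illustrative assumptions, and an NVCC/MSVC toolchain must be available on PATH.

```python
# Hypothetical call; paths and names are placeholders, not taken from this repo.
import os
import custom_ops  # the module shown above

custom_ops.verbosity = 'full'  # show the full torch.utils.cpp_extension build log

plugin = custom_ops.get_plugin(
    module_name='bias_act_plugin',
    sources=[os.path.join('torch_utils', 'ops', 'bias_act.cpp'),
             os.path.join('torch_utils', 'ops', 'bias_act.cu')],
    extra_cuda_cflags=['--use_fast_math'],  # forwarded to torch.utils.cpp_extension.load
)
# Subsequent calls with the same module_name return the cached extension module.
```
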
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/import_utils.py
DELETED
@@ -1,655 +0,0 @@
|
|
1 |
-
# Copyright 2023 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
"""
|
15 |
-
Import utilities: Utilities related to imports and our lazy inits.
|
16 |
-
"""
|
17 |
-
import importlib.util
|
18 |
-
import operator as op
|
19 |
-
import os
|
20 |
-
import sys
|
21 |
-
from collections import OrderedDict
|
22 |
-
from typing import Union
|
23 |
-
|
24 |
-
from huggingface_hub.utils import is_jinja_available # noqa: F401
|
25 |
-
from packaging import version
|
26 |
-
from packaging.version import Version, parse
|
27 |
-
|
28 |
-
from . import logging
|
29 |
-
|
30 |
-
|
31 |
-
# The package importlib_metadata is in a different place, depending on the python version.
|
32 |
-
if sys.version_info < (3, 8):
|
33 |
-
import importlib_metadata
|
34 |
-
else:
|
35 |
-
import importlib.metadata as importlib_metadata
|
36 |
-
|
37 |
-
|
38 |
-
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
39 |
-
|
40 |
-
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
|
41 |
-
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
|
42 |
-
|
43 |
-
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
|
44 |
-
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
|
45 |
-
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
|
46 |
-
USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper()
|
47 |
-
|
48 |
-
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
|
49 |
-
|
50 |
-
_torch_version = "N/A"
|
51 |
-
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
|
52 |
-
_torch_available = importlib.util.find_spec("torch") is not None
|
53 |
-
if _torch_available:
|
54 |
-
try:
|
55 |
-
_torch_version = importlib_metadata.version("torch")
|
56 |
-
logger.info(f"PyTorch version {_torch_version} available.")
|
57 |
-
except importlib_metadata.PackageNotFoundError:
|
58 |
-
_torch_available = False
|
59 |
-
else:
|
60 |
-
logger.info("Disabling PyTorch because USE_TORCH is set")
|
61 |
-
_torch_available = False
|
62 |
-
|
63 |
-
|
64 |
-
_tf_version = "N/A"
|
65 |
-
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
|
66 |
-
_tf_available = importlib.util.find_spec("tensorflow") is not None
|
67 |
-
if _tf_available:
|
68 |
-
candidates = (
|
69 |
-
"tensorflow",
|
70 |
-
"tensorflow-cpu",
|
71 |
-
"tensorflow-gpu",
|
72 |
-
"tf-nightly",
|
73 |
-
"tf-nightly-cpu",
|
74 |
-
"tf-nightly-gpu",
|
75 |
-
"intel-tensorflow",
|
76 |
-
"intel-tensorflow-avx512",
|
77 |
-
"tensorflow-rocm",
|
78 |
-
"tensorflow-macos",
|
79 |
-
"tensorflow-aarch64",
|
80 |
-
)
|
81 |
-
_tf_version = None
|
82 |
-
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
|
83 |
-
for pkg in candidates:
|
84 |
-
try:
|
85 |
-
_tf_version = importlib_metadata.version(pkg)
|
86 |
-
break
|
87 |
-
except importlib_metadata.PackageNotFoundError:
|
88 |
-
pass
|
89 |
-
_tf_available = _tf_version is not None
|
90 |
-
if _tf_available:
|
91 |
-
if version.parse(_tf_version) < version.parse("2"):
|
92 |
-
logger.info(f"TensorFlow found but with version {_tf_version}. Diffusers requires version 2 minimum.")
|
93 |
-
_tf_available = False
|
94 |
-
else:
|
95 |
-
logger.info(f"TensorFlow version {_tf_version} available.")
|
96 |
-
else:
|
97 |
-
logger.info("Disabling Tensorflow because USE_TORCH is set")
|
98 |
-
_tf_available = False
|
99 |
-
|
100 |
-
_jax_version = "N/A"
|
101 |
-
_flax_version = "N/A"
|
102 |
-
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
|
103 |
-
_flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
|
104 |
-
if _flax_available:
|
105 |
-
try:
|
106 |
-
_jax_version = importlib_metadata.version("jax")
|
107 |
-
_flax_version = importlib_metadata.version("flax")
|
108 |
-
logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
|
109 |
-
except importlib_metadata.PackageNotFoundError:
|
110 |
-
_flax_available = False
|
111 |
-
else:
|
112 |
-
_flax_available = False
|
113 |
-
|
114 |
-
if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES:
|
115 |
-
_safetensors_available = importlib.util.find_spec("safetensors") is not None
|
116 |
-
if _safetensors_available:
|
117 |
-
try:
|
118 |
-
_safetensors_version = importlib_metadata.version("safetensors")
|
119 |
-
logger.info(f"Safetensors version {_safetensors_version} available.")
|
120 |
-
except importlib_metadata.PackageNotFoundError:
|
121 |
-
_safetensors_available = False
|
122 |
-
else:
|
123 |
-
logger.info("Disabling Safetensors because USE_TF is set")
|
124 |
-
_safetensors_available = False
|
125 |
-
|
126 |
-
_transformers_available = importlib.util.find_spec("transformers") is not None
|
127 |
-
try:
|
128 |
-
_transformers_version = importlib_metadata.version("transformers")
|
129 |
-
logger.debug(f"Successfully imported transformers version {_transformers_version}")
|
130 |
-
except importlib_metadata.PackageNotFoundError:
|
131 |
-
_transformers_available = False
|
132 |
-
|
133 |
-
|
134 |
-
_inflect_available = importlib.util.find_spec("inflect") is not None
|
135 |
-
try:
|
136 |
-
_inflect_version = importlib_metadata.version("inflect")
|
137 |
-
logger.debug(f"Successfully imported inflect version {_inflect_version}")
|
138 |
-
except importlib_metadata.PackageNotFoundError:
|
139 |
-
_inflect_available = False
|
140 |
-
|
141 |
-
|
142 |
-
_unidecode_available = importlib.util.find_spec("unidecode") is not None
|
143 |
-
try:
|
144 |
-
_unidecode_version = importlib_metadata.version("unidecode")
|
145 |
-
logger.debug(f"Successfully imported unidecode version {_unidecode_version}")
|
146 |
-
except importlib_metadata.PackageNotFoundError:
|
147 |
-
_unidecode_available = False
|
148 |
-
|
149 |
-
|
150 |
-
_onnxruntime_version = "N/A"
|
151 |
-
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
|
152 |
-
if _onnx_available:
|
153 |
-
candidates = (
|
154 |
-
"onnxruntime",
|
155 |
-
"onnxruntime-gpu",
|
156 |
-
"ort_nightly_gpu",
|
157 |
-
"onnxruntime-directml",
|
158 |
-
"onnxruntime-openvino",
|
159 |
-
"ort_nightly_directml",
|
160 |
-
"onnxruntime-rocm",
|
161 |
-
"onnxruntime-training",
|
162 |
-
)
|
163 |
-
_onnxruntime_version = None
|
164 |
-
# For the metadata, we have to look for both onnxruntime and onnxruntime-gpu
|
165 |
-
for pkg in candidates:
|
166 |
-
try:
|
167 |
-
_onnxruntime_version = importlib_metadata.version(pkg)
|
168 |
-
break
|
169 |
-
except importlib_metadata.PackageNotFoundError:
|
170 |
-
pass
|
171 |
-
_onnx_available = _onnxruntime_version is not None
|
172 |
-
if _onnx_available:
|
173 |
-
logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}")
|
174 |
-
|
175 |
-
# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed.
|
176 |
-
# _opencv_available = importlib.util.find_spec("opencv-python") is not None
|
177 |
-
try:
|
178 |
-
candidates = (
|
179 |
-
"opencv-python",
|
180 |
-
"opencv-contrib-python",
|
181 |
-
"opencv-python-headless",
|
182 |
-
"opencv-contrib-python-headless",
|
183 |
-
)
|
184 |
-
_opencv_version = None
|
185 |
-
for pkg in candidates:
|
186 |
-
try:
|
187 |
-
_opencv_version = importlib_metadata.version(pkg)
|
188 |
-
break
|
189 |
-
except importlib_metadata.PackageNotFoundError:
|
190 |
-
pass
|
191 |
-
_opencv_available = _opencv_version is not None
|
192 |
-
if _opencv_available:
|
193 |
-
logger.debug(f"Successfully imported cv2 version {_opencv_version}")
|
194 |
-
except importlib_metadata.PackageNotFoundError:
|
195 |
-
_opencv_available = False
|
196 |
-
|
197 |
-
_scipy_available = importlib.util.find_spec("scipy") is not None
|
198 |
-
try:
|
199 |
-
_scipy_version = importlib_metadata.version("scipy")
|
200 |
-
logger.debug(f"Successfully imported scipy version {_scipy_version}")
|
201 |
-
except importlib_metadata.PackageNotFoundError:
|
202 |
-
_scipy_available = False
|
203 |
-
|
204 |
-
_librosa_available = importlib.util.find_spec("librosa") is not None
|
205 |
-
try:
|
206 |
-
_librosa_version = importlib_metadata.version("librosa")
|
207 |
-
logger.debug(f"Successfully imported librosa version {_librosa_version}")
|
208 |
-
except importlib_metadata.PackageNotFoundError:
|
209 |
-
_librosa_available = False
|
210 |
-
|
211 |
-
_accelerate_available = importlib.util.find_spec("accelerate") is not None
|
212 |
-
try:
|
213 |
-
_accelerate_version = importlib_metadata.version("accelerate")
|
214 |
-
logger.debug(f"Successfully imported accelerate version {_accelerate_version}")
|
215 |
-
except importlib_metadata.PackageNotFoundError:
|
216 |
-
_accelerate_available = False
|
217 |
-
|
218 |
-
_xformers_available = importlib.util.find_spec("xformers") is not None
|
219 |
-
try:
|
220 |
-
_xformers_version = importlib_metadata.version("xformers")
|
221 |
-
if _torch_available:
|
222 |
-
import torch
|
223 |
-
|
224 |
-
if version.Version(torch.__version__) < version.Version("1.12"):
|
225 |
-
raise ValueError("PyTorch should be >= 1.12")
|
226 |
-
logger.debug(f"Successfully imported xformers version {_xformers_version}")
|
227 |
-
except importlib_metadata.PackageNotFoundError:
|
228 |
-
_xformers_available = False
|
229 |
-
|
230 |
-
_k_diffusion_available = importlib.util.find_spec("k_diffusion") is not None
|
231 |
-
try:
|
232 |
-
_k_diffusion_version = importlib_metadata.version("k_diffusion")
|
233 |
-
logger.debug(f"Successfully imported k-diffusion version {_k_diffusion_version}")
|
234 |
-
except importlib_metadata.PackageNotFoundError:
|
235 |
-
_k_diffusion_available = False
|
236 |
-
|
237 |
-
_note_seq_available = importlib.util.find_spec("note_seq") is not None
|
238 |
-
try:
|
239 |
-
_note_seq_version = importlib_metadata.version("note_seq")
|
240 |
-
logger.debug(f"Successfully imported note-seq version {_note_seq_version}")
|
241 |
-
except importlib_metadata.PackageNotFoundError:
|
242 |
-
_note_seq_available = False
|
243 |
-
|
244 |
-
_wandb_available = importlib.util.find_spec("wandb") is not None
|
245 |
-
try:
|
246 |
-
_wandb_version = importlib_metadata.version("wandb")
|
247 |
-
logger.debug(f"Successfully imported wandb version {_wandb_version }")
|
248 |
-
except importlib_metadata.PackageNotFoundError:
|
249 |
-
_wandb_available = False
|
250 |
-
|
251 |
-
_omegaconf_available = importlib.util.find_spec("omegaconf") is not None
|
252 |
-
try:
|
253 |
-
_omegaconf_version = importlib_metadata.version("omegaconf")
|
254 |
-
logger.debug(f"Successfully imported omegaconf version {_omegaconf_version}")
|
255 |
-
except importlib_metadata.PackageNotFoundError:
|
256 |
-
_omegaconf_available = False
|
257 |
-
|
258 |
-
_tensorboard_available = importlib.util.find_spec("tensorboard")
|
259 |
-
try:
|
260 |
-
_tensorboard_version = importlib_metadata.version("tensorboard")
|
261 |
-
logger.debug(f"Successfully imported tensorboard version {_tensorboard_version}")
|
262 |
-
except importlib_metadata.PackageNotFoundError:
|
263 |
-
_tensorboard_available = False
|
264 |
-
|
265 |
-
|
266 |
-
_compel_available = importlib.util.find_spec("compel")
|
267 |
-
try:
|
268 |
-
_compel_version = importlib_metadata.version("compel")
|
269 |
-
logger.debug(f"Successfully imported compel version {_compel_version}")
|
270 |
-
except importlib_metadata.PackageNotFoundError:
|
271 |
-
_compel_available = False
|
272 |
-
|
273 |
-
|
274 |
-
_ftfy_available = importlib.util.find_spec("ftfy") is not None
|
275 |
-
try:
|
276 |
-
_ftfy_version = importlib_metadata.version("ftfy")
|
277 |
-
logger.debug(f"Successfully imported ftfy version {_ftfy_version}")
|
278 |
-
except importlib_metadata.PackageNotFoundError:
|
279 |
-
_ftfy_available = False
|
280 |
-
|
281 |
-
|
282 |
-
_bs4_available = importlib.util.find_spec("bs4") is not None
|
283 |
-
try:
|
284 |
-
# importlib metadata under different name
|
285 |
-
_bs4_version = importlib_metadata.version("beautifulsoup4")
|
286 |
-
logger.debug(f"Successfully imported ftfy version {_bs4_version}")
|
287 |
-
except importlib_metadata.PackageNotFoundError:
|
288 |
-
_bs4_available = False
|
289 |
-
|
290 |
-
_torchsde_available = importlib.util.find_spec("torchsde") is not None
|
291 |
-
try:
|
292 |
-
_torchsde_version = importlib_metadata.version("torchsde")
|
293 |
-
logger.debug(f"Successfully imported torchsde version {_torchsde_version}")
|
294 |
-
except importlib_metadata.PackageNotFoundError:
|
295 |
-
_torchsde_available = False
|
296 |
-
|
297 |
-
_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None
|
298 |
-
try:
|
299 |
-
_invisible_watermark_version = importlib_metadata.version("invisible-watermark")
|
300 |
-
logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}")
|
301 |
-
except importlib_metadata.PackageNotFoundError:
|
302 |
-
_invisible_watermark_available = False
|
303 |
-
|
304 |
-
|
305 |
-
def is_torch_available():
|
306 |
-
return _torch_available
|
307 |
-
|
308 |
-
|
309 |
-
def is_safetensors_available():
|
310 |
-
return _safetensors_available
|
311 |
-
|
312 |
-
|
313 |
-
def is_tf_available():
|
314 |
-
return _tf_available
|
315 |
-
|
316 |
-
|
317 |
-
def is_flax_available():
|
318 |
-
return _flax_available
|
319 |
-
|
320 |
-
|
321 |
-
def is_transformers_available():
|
322 |
-
return _transformers_available
|
323 |
-
|
324 |
-
|
325 |
-
def is_inflect_available():
|
326 |
-
return _inflect_available
|
327 |
-
|
328 |
-
|
329 |
-
def is_unidecode_available():
|
330 |
-
return _unidecode_available
|
331 |
-
|
332 |
-
|
333 |
-
def is_onnx_available():
|
334 |
-
return _onnx_available
|
335 |
-
|
336 |
-
|
337 |
-
def is_opencv_available():
|
338 |
-
return _opencv_available
|
339 |
-
|
340 |
-
|
341 |
-
def is_scipy_available():
|
342 |
-
return _scipy_available
|
343 |
-
|
344 |
-
|
345 |
-
def is_librosa_available():
|
346 |
-
return _librosa_available
|
347 |
-
|
348 |
-
|
349 |
-
def is_xformers_available():
|
350 |
-
return _xformers_available
|
351 |
-
|
352 |
-
|
353 |
-
def is_accelerate_available():
|
354 |
-
return _accelerate_available
|
355 |
-
|
356 |
-
|
357 |
-
def is_k_diffusion_available():
|
358 |
-
return _k_diffusion_available
|
359 |
-
|
360 |
-
|
361 |
-
def is_note_seq_available():
|
362 |
-
return _note_seq_available
|
363 |
-
|
364 |
-
|
365 |
-
def is_wandb_available():
|
366 |
-
return _wandb_available
|
367 |
-
|
368 |
-
|
369 |
-
def is_omegaconf_available():
|
370 |
-
return _omegaconf_available
|
371 |
-
|
372 |
-
|
373 |
-
def is_tensorboard_available():
|
374 |
-
return _tensorboard_available
|
375 |
-
|
376 |
-
|
377 |
-
def is_compel_available():
|
378 |
-
return _compel_available
|
379 |
-
|
380 |
-
|
381 |
-
def is_ftfy_available():
|
382 |
-
return _ftfy_available
|
383 |
-
|
384 |
-
|
385 |
-
def is_bs4_available():
|
386 |
-
return _bs4_available
|
387 |
-
|
388 |
-
|
389 |
-
def is_torchsde_available():
|
390 |
-
return _torchsde_available
|
391 |
-
|
392 |
-
|
393 |
-
def is_invisible_watermark_available():
|
394 |
-
return _invisible_watermark_available
|
395 |
-
|
396 |
-
|
397 |
-
# docstyle-ignore
|
398 |
-
FLAX_IMPORT_ERROR = """
|
399 |
-
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
|
400 |
-
installation page: https://github.com/google/flax and follow the ones that match your environment.
|
401 |
-
"""
|
402 |
-
|
403 |
-
# docstyle-ignore
|
404 |
-
INFLECT_IMPORT_ERROR = """
|
405 |
-
{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install
|
406 |
-
inflect`
|
407 |
-
"""
|
408 |
-
|
409 |
-
# docstyle-ignore
|
410 |
-
PYTORCH_IMPORT_ERROR = """
|
411 |
-
{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
|
412 |
-
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
|
413 |
-
"""
|
414 |
-
|
415 |
-
# docstyle-ignore
|
416 |
-
ONNX_IMPORT_ERROR = """
|
417 |
-
{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip
|
418 |
-
install onnxruntime`
|
419 |
-
"""
|
420 |
-
|
421 |
-
# docstyle-ignore
|
422 |
-
OPENCV_IMPORT_ERROR = """
|
423 |
-
{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip
|
424 |
-
install opencv-python`
|
425 |
-
"""
|
426 |
-
|
427 |
-
# docstyle-ignore
|
428 |
-
SCIPY_IMPORT_ERROR = """
|
429 |
-
{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install
|
430 |
-
scipy`
|
431 |
-
"""
|
432 |
-
|
433 |
-
# docstyle-ignore
|
434 |
-
LIBROSA_IMPORT_ERROR = """
|
435 |
-
{0} requires the librosa library but it was not found in your environment. Checkout the instructions on the
|
436 |
-
installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment.
|
437 |
-
"""
|
438 |
-
|
439 |
-
# docstyle-ignore
|
440 |
-
TRANSFORMERS_IMPORT_ERROR = """
|
441 |
-
{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip
|
442 |
-
install transformers`
|
443 |
-
"""
|
444 |
-
|
445 |
-
# docstyle-ignore
|
446 |
-
UNIDECODE_IMPORT_ERROR = """
|
447 |
-
{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install
|
448 |
-
Unidecode`
|
449 |
-
"""
|
450 |
-
|
451 |
-
# docstyle-ignore
|
452 |
-
K_DIFFUSION_IMPORT_ERROR = """
|
453 |
-
{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip
|
454 |
-
install k-diffusion`
|
455 |
-
"""
|
456 |
-
|
457 |
-
# docstyle-ignore
|
458 |
-
NOTE_SEQ_IMPORT_ERROR = """
|
459 |
-
{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip
|
460 |
-
install note-seq`
|
461 |
-
"""
|
462 |
-
|
463 |
-
# docstyle-ignore
|
464 |
-
WANDB_IMPORT_ERROR = """
|
465 |
-
{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip
|
466 |
-
install wandb`
|
467 |
-
"""
|
468 |
-
|
469 |
-
# docstyle-ignore
|
470 |
-
OMEGACONF_IMPORT_ERROR = """
|
471 |
-
{0} requires the omegaconf library but it was not found in your environment. You can install it with pip: `pip
|
472 |
-
install omegaconf`
|
473 |
-
"""
|
474 |
-
|
475 |
-
# docstyle-ignore
|
476 |
-
TENSORBOARD_IMPORT_ERROR = """
|
477 |
-
{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip
|
478 |
-
install tensorboard`
|
479 |
-
"""
|
480 |
-
|
481 |
-
|
482 |
-
# docstyle-ignore
|
483 |
-
COMPEL_IMPORT_ERROR = """
|
484 |
-
{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel`
|
485 |
-
"""
|
486 |
-
|
487 |
-
# docstyle-ignore
|
488 |
-
BS4_IMPORT_ERROR = """
|
489 |
-
{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
|
490 |
-
`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
|
491 |
-
"""
|
492 |
-
|
493 |
-
# docstyle-ignore
|
494 |
-
FTFY_IMPORT_ERROR = """
|
495 |
-
{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the
|
496 |
-
installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
|
497 |
-
that match your environment. Please note that you may need to restart your runtime after installation.
|
498 |
-
"""
|
499 |
-
|
500 |
-
# docstyle-ignore
|
501 |
-
TORCHSDE_IMPORT_ERROR = """
|
502 |
-
{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde`
|
503 |
-
"""
|
504 |
-
|
505 |
-
# docstyle-ignore
|
506 |
-
INVISIBLE_WATERMARK_IMPORT_ERROR = """
|
507 |
-
{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0`
|
508 |
-
"""
|
509 |
-
|
510 |
-
|
511 |
-
BACKENDS_MAPPING = OrderedDict(
|
512 |
-
[
|
513 |
-
("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
|
514 |
-
("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
|
515 |
-
("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)),
|
516 |
-
("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)),
|
517 |
-
("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)),
|
518 |
-
("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
|
519 |
-
("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
|
520 |
-
("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),
|
521 |
-
("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),
|
522 |
-
("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)),
|
523 |
-
("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)),
|
524 |
-
("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)),
|
525 |
-
("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)),
|
526 |
-
("omegaconf", (is_omegaconf_available, OMEGACONF_IMPORT_ERROR)),
|
527 |
-
("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)),
|
528 |
-
("compel", (is_compel_available, COMPEL_IMPORT_ERROR)),
|
529 |
-
("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
|
530 |
-
("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)),
|
531 |
-
("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)),
|
532 |
-
]
|
533 |
-
)
|
534 |
-
|
535 |
-
|
536 |
-
def requires_backends(obj, backends):
|
537 |
-
if not isinstance(backends, (list, tuple)):
|
538 |
-
backends = [backends]
|
539 |
-
|
540 |
-
name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
|
541 |
-
checks = (BACKENDS_MAPPING[backend] for backend in backends)
|
542 |
-
failed = [msg.format(name) for available, msg in checks if not available()]
|
543 |
-
if failed:
|
544 |
-
raise ImportError("".join(failed))
|
545 |
-
|
546 |
-
if name in [
|
547 |
-
"VersatileDiffusionTextToImagePipeline",
|
548 |
-
"VersatileDiffusionPipeline",
|
549 |
-
"VersatileDiffusionDualGuidedPipeline",
|
550 |
-
"StableDiffusionImageVariationPipeline",
|
551 |
-
"UnCLIPPipeline",
|
552 |
-
] and is_transformers_version("<", "4.25.0"):
|
553 |
-
raise ImportError(
|
554 |
-
f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install"
|
555 |
-
" --upgrade transformers \n```"
|
556 |
-
)
|
557 |
-
|
558 |
-
if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version(
|
559 |
-
"<", "4.26.0"
|
560 |
-
):
|
561 |
-
raise ImportError(
|
562 |
-
f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install"
|
563 |
-
" --upgrade transformers \n```"
|
564 |
-
)
|
565 |
-
|
566 |
-
|
567 |
-
class DummyObject(type):
|
568 |
-
"""
|
569 |
-
Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
|
570 |
-
`requires_backend` each time a user tries to access any method of that class.
|
571 |
-
"""
|
572 |
-
|
573 |
-
def __getattr__(cls, key):
|
574 |
-
if key.startswith("_") and key != "_load_connected_pipes":
|
575 |
-
return super().__getattr__(cls, key)
|
576 |
-
requires_backends(cls, cls._backends)
|
577 |
-
|
578 |
-
|
579 |
-
# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319
|
580 |
-
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
|
581 |
-
"""
|
582 |
-
Args:
|
583 |
-
Compares a library version to some requirement using a given operation.
|
584 |
-
library_or_version (`str` or `packaging.version.Version`):
|
585 |
-
A library name or a version to check.
|
586 |
-
operation (`str`):
|
587 |
-
A string representation of an operator, such as `">"` or `"<="`.
|
588 |
-
requirement_version (`str`):
|
589 |
-
The version to compare the library version against
|
590 |
-
"""
|
591 |
-
if operation not in STR_OPERATION_TO_FUNC.keys():
|
592 |
-
raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
|
593 |
-
operation = STR_OPERATION_TO_FUNC[operation]
|
594 |
-
if isinstance(library_or_version, str):
|
595 |
-
library_or_version = parse(importlib_metadata.version(library_or_version))
|
596 |
-
return operation(library_or_version, parse(requirement_version))
|
597 |
-
|
598 |
-
|
599 |
-
# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338
|
600 |
-
def is_torch_version(operation: str, version: str):
|
601 |
-
"""
|
602 |
-
Args:
|
603 |
-
Compares the current PyTorch version to a given reference with an operation.
|
604 |
-
operation (`str`):
|
605 |
-
A string representation of an operator, such as `">"` or `"<="`
|
606 |
-
version (`str`):
|
607 |
-
A string version of PyTorch
|
608 |
-
"""
|
609 |
-
return compare_versions(parse(_torch_version), operation, version)
|
610 |
-
|
611 |
-
|
612 |
-
def is_transformers_version(operation: str, version: str):
|
613 |
-
"""
|
614 |
-
Args:
|
615 |
-
Compares the current Transformers version to a given reference with an operation.
|
616 |
-
operation (`str`):
|
617 |
-
A string representation of an operator, such as `">"` or `"<="`
|
618 |
-
version (`str`):
|
619 |
-
A version string
|
620 |
-
"""
|
621 |
-
if not _transformers_available:
|
622 |
-
return False
|
623 |
-
return compare_versions(parse(_transformers_version), operation, version)
|
624 |
-
|
625 |
-
|
626 |
-
def is_accelerate_version(operation: str, version: str):
|
627 |
-
"""
|
628 |
-
Args:
|
629 |
-
Compares the current Accelerate version to a given reference with an operation.
|
630 |
-
operation (`str`):
|
631 |
-
A string representation of an operator, such as `">"` or `"<="`
|
632 |
-
version (`str`):
|
633 |
-
A version string
|
634 |
-
"""
|
635 |
-
if not _accelerate_available:
|
636 |
-
return False
|
637 |
-
return compare_versions(parse(_accelerate_version), operation, version)
|
638 |
-
|
639 |
-
|
640 |
-
def is_k_diffusion_version(operation: str, version: str):
|
641 |
-
"""
|
642 |
-
Args:
|
643 |
-
Compares the current k-diffusion version to a given reference with an operation.
|
644 |
-
operation (`str`):
|
645 |
-
A string representation of an operator, such as `">"` or `"<="`
|
646 |
-
version (`str`):
|
647 |
-
A version string
|
648 |
-
"""
|
649 |
-
if not _k_diffusion_available:
|
650 |
-
return False
|
651 |
-
return compare_versions(parse(_k_diffusion_version), operation, version)
|
652 |
-
|
653 |
-
|
654 |
-
class OptionalDependencyNotAvailable(BaseException):
|
655 |
-
"""An error indicating that an optional dependency of Diffusers was not found in the environment."""
|
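The file above is the per-backend availability registry: one `_xxx_available` flag per optional package plus `requires_backends`, which raises the matching formatted message. A small sketch of how downstream code typically guards on it; the class name is made up, and it assumes a diffusers install that still ships this module.

```python
# Hypothetical guard built on the helpers defined above (class name is illustrative).
from diffusers.utils.import_utils import is_scipy_available, requires_backends


class ScipyBackedSampler:
    def __init__(self):
        # Raises ImportError with the SCIPY_IMPORT_ERROR text when scipy is missing.
        requires_backends(self, ["scipy"])


if is_scipy_available():
    sampler = ScipyBackedSampler()
```
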
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/custom_init_isort.py
DELETED
@@ -1,252 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 The HuggingFace Inc. team.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import argparse
|
17 |
-
import os
|
18 |
-
import re
|
19 |
-
|
20 |
-
|
21 |
-
PATH_TO_TRANSFORMERS = "src/diffusers"
|
22 |
-
|
23 |
-
# Pattern that looks at the indentation in a line.
|
24 |
-
_re_indent = re.compile(r"^(\s*)\S")
|
25 |
-
# Pattern that matches `"key":" and puts `key` in group 0.
|
26 |
-
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
|
27 |
-
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
|
28 |
-
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
|
29 |
-
# Pattern that matches `"key",` and puts `key` in group 0.
|
30 |
-
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
|
31 |
-
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
|
32 |
-
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
|
33 |
-
|
34 |
-
|
35 |
-
def get_indent(line):
|
36 |
-
"""Returns the indent in `line`."""
|
37 |
-
search = _re_indent.search(line)
|
38 |
-
return "" if search is None else search.groups()[0]
|
39 |
-
|
40 |
-
|
41 |
-
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
|
42 |
-
"""
|
43 |
-
Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after
|
44 |
-
`start_prompt` and stops at `end_prompt` (but returns what's before `start_prompt` as a first block and what's
|
45 |
-
after `end_prompt` as a last block, so `code` is always the same as joining the result of this function).
|
46 |
-
"""
|
47 |
-
# Let's split the code into lines and move to start_index.
|
48 |
-
index = 0
|
49 |
-
lines = code.split("\n")
|
50 |
-
if start_prompt is not None:
|
51 |
-
while not lines[index].startswith(start_prompt):
|
52 |
-
index += 1
|
53 |
-
blocks = ["\n".join(lines[:index])]
|
54 |
-
else:
|
55 |
-
blocks = []
|
56 |
-
|
57 |
-
# We split into blocks until we get to the `end_prompt` (or the end of the block).
|
58 |
-
current_block = [lines[index]]
|
59 |
-
index += 1
|
60 |
-
while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
|
61 |
-
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
|
62 |
-
if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
|
63 |
-
current_block.append(lines[index])
|
64 |
-
blocks.append("\n".join(current_block))
|
65 |
-
if index < len(lines) - 1:
|
66 |
-
current_block = [lines[index + 1]]
|
67 |
-
index += 1
|
68 |
-
else:
|
69 |
-
current_block = []
|
70 |
-
else:
|
71 |
-
blocks.append("\n".join(current_block))
|
72 |
-
current_block = [lines[index]]
|
73 |
-
else:
|
74 |
-
current_block.append(lines[index])
|
75 |
-
index += 1
|
76 |
-
|
77 |
-
# Adds current block if it's nonempty.
|
78 |
-
if len(current_block) > 0:
|
79 |
-
blocks.append("\n".join(current_block))
|
80 |
-
|
81 |
-
# Add final block after end_prompt if provided.
|
82 |
-
if end_prompt is not None and index < len(lines):
|
83 |
-
blocks.append("\n".join(lines[index:]))
|
84 |
-
|
85 |
-
return blocks
|
86 |
-
|
87 |
-
|
88 |
-
def ignore_underscore(key):
|
89 |
-
"Wraps a `key` (that maps an object to string) to lower case and remove underscores."
|
90 |
-
|
91 |
-
def _inner(x):
|
92 |
-
return key(x).lower().replace("_", "")
|
93 |
-
|
94 |
-
return _inner
|
95 |
-
|
96 |
-
|
97 |
-
def sort_objects(objects, key=None):
|
98 |
-
"Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str."
|
99 |
-
|
100 |
-
# If no key is provided, we use a noop.
|
101 |
-
def noop(x):
|
102 |
-
return x
|
103 |
-
|
104 |
-
if key is None:
|
105 |
-
key = noop
|
106 |
-
# Constants are all uppercase, they go first.
|
107 |
-
constants = [obj for obj in objects if key(obj).isupper()]
|
108 |
-
# Classes are not all uppercase but start with a capital, they go second.
|
109 |
-
classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
|
110 |
-
# Functions begin with a lowercase, they go last.
|
111 |
-
functions = [obj for obj in objects if not key(obj)[0].isupper()]
|
112 |
-
|
113 |
-
key1 = ignore_underscore(key)
|
114 |
-
return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
|
115 |
-
|
116 |
-
|
117 |
-
def sort_objects_in_import(import_statement):
|
118 |
-
"""
|
119 |
-
Return the same `import_statement` but with objects properly sorted.
|
120 |
-
"""
|
121 |
-
|
122 |
-
# This inner function sort imports between [ ].
|
123 |
-
def _replace(match):
|
124 |
-
imports = match.groups()[0]
|
125 |
-
if "," not in imports:
|
126 |
-
return f"[{imports}]"
|
127 |
-
keys = [part.strip().replace('"', "") for part in imports.split(",")]
|
128 |
-
# We will have a final empty element if the line finished with a comma.
|
129 |
-
if len(keys[-1]) == 0:
|
130 |
-
keys = keys[:-1]
|
131 |
-
return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"
|
132 |
-
|
133 |
-
lines = import_statement.split("\n")
|
134 |
-
if len(lines) > 3:
|
135 |
-
# Here we have to sort internal imports that are on several lines (one per name):
|
136 |
-
# key: [
|
137 |
-
# "object1",
|
138 |
-
# "object2",
|
139 |
-
# ...
|
140 |
-
# ]
|
141 |
-
|
142 |
-
# We may have to ignore one or two lines on each side.
|
143 |
-
idx = 2 if lines[1].strip() == "[" else 1
|
144 |
-
keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
|
145 |
-
sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
|
146 |
-
sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
|
147 |
-
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
|
148 |
-
elif len(lines) == 3:
|
149 |
-
# Here we have to sort internal imports that are on one separate line:
|
150 |
-
# key: [
|
151 |
-
# "object1", "object2", ...
|
152 |
-
# ]
|
153 |
-
if _re_bracket_content.search(lines[1]) is not None:
|
154 |
-
lines[1] = _re_bracket_content.sub(_replace, lines[1])
|
155 |
-
else:
|
156 |
-
keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
|
157 |
-
# We will have a final empty element if the line finished with a comma.
|
158 |
-
if len(keys[-1]) == 0:
|
159 |
-
keys = keys[:-1]
|
160 |
-
lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
|
161 |
-
return "\n".join(lines)
|
162 |
-
else:
|
163 |
-
# Finally we have to deal with imports fitting on one line
|
164 |
-
import_statement = _re_bracket_content.sub(_replace, import_statement)
|
165 |
-
return import_statement
|
166 |
-
|
167 |
-
|
168 |
-
def sort_imports(file, check_only=True):
|
169 |
-
"""
|
170 |
-
Sort `_import_structure` imports in `file`, `check_only` determines if we only check or overwrite.
|
171 |
-
"""
|
172 |
-
with open(file, "r") as f:
|
173 |
-
code = f.read()
|
174 |
-
|
175 |
-
if "_import_structure" not in code:
|
176 |
-
return
|
177 |
-
|
178 |
-
# Blocks of indent level 0
|
179 |
-
main_blocks = split_code_in_indented_blocks(
|
180 |
-
code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
|
181 |
-
)
|
182 |
-
|
183 |
-
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
|
184 |
-
for block_idx in range(1, len(main_blocks) - 1):
|
185 |
-
# Check if the block contains some `_import_structure`s thingy to sort.
|
186 |
-
block = main_blocks[block_idx]
|
187 |
-
block_lines = block.split("\n")
|
188 |
-
|
189 |
-
# Get to the start of the imports.
|
190 |
-
line_idx = 0
|
191 |
-
while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
|
192 |
-
# Skip dummy import blocks
|
193 |
-
if "import dummy" in block_lines[line_idx]:
|
194 |
-
line_idx = len(block_lines)
|
195 |
-
else:
|
196 |
-
line_idx += 1
|
197 |
-
if line_idx >= len(block_lines):
|
198 |
-
continue
|
199 |
-
|
200 |
-
# Ignore beginning and last line: they don't contain anything.
|
201 |
-
internal_block_code = "\n".join(block_lines[line_idx:-1])
|
202 |
-
indent = get_indent(block_lines[1])
|
203 |
-
# Slit the internal block into blocks of indent level 1.
|
204 |
-
internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
|
205 |
-
# We have two categories of import key: list or _import_structure[key].append/extend
|
206 |
-
pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
|
207 |
-
# Grab the keys, but there is a trap: some lines are empty or just comments.
|
208 |
-
keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
|
209 |
-
# We only sort the lines with a key.
|
210 |
-
keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
|
211 |
-
sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
|
212 |
-
|
213 |
-
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
|
214 |
-
count = 0
|
215 |
-
reordered_blocks = []
|
216 |
-
for i in range(len(internal_blocks)):
|
217 |
-
if keys[i] is None:
|
218 |
-
reordered_blocks.append(internal_blocks[i])
|
219 |
-
else:
|
220 |
-
block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
|
221 |
-
reordered_blocks.append(block)
|
222 |
-
count += 1
|
223 |
-
|
224 |
-
# And we put our main block back together with its first and last line.
|
225 |
-
main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])
|
226 |
-
|
227 |
-
if code != "\n".join(main_blocks):
|
228 |
-
if check_only:
|
229 |
-
return True
|
230 |
-
else:
|
231 |
-
print(f"Overwriting {file}.")
|
232 |
-
with open(file, "w") as f:
|
233 |
-
f.write("\n".join(main_blocks))
|
234 |
-
|
235 |
-
|
236 |
-
def sort_imports_in_all_inits(check_only=True):
|
237 |
-
failures = []
|
238 |
-
for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
|
239 |
-
if "__init__.py" in files:
|
240 |
-
result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
|
241 |
-
if result:
|
242 |
-
failures = [os.path.join(root, "__init__.py")]
|
243 |
-
if len(failures) > 0:
|
244 |
-
raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
|
245 |
-
|
246 |
-
|
247 |
-
if __name__ == "__main__":
|
248 |
-
parser = argparse.ArgumentParser()
|
249 |
-
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
|
250 |
-
args = parser.parse_args()
|
251 |
-
|
252 |
-
sort_imports_in_all_inits(check_only=args.check_only)
|
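This script is the repo's isort replacement for `_import_structure` blocks; per its own argparse section it is run as `python utils/custom_init_isort.py --check_only` to verify and without the flag to rewrite files. The ordering rule in `sort_objects` (constants, then classes, then functions, each alphabetized case-insensitively with underscores ignored) can be checked in isolation; a quick sketch, assuming the script is importable from the working directory and with made-up names:

```python
# Hypothetical check of the sorting rule defined above (names are illustrative).
from custom_init_isort import sort_objects

names = ["load_model", "MyPipeline", "SOME_CONSTANT", "AutoLoader", "build"]
print(sort_objects(names))
# ['SOME_CONSTANT', 'AutoLoader', 'MyPipeline', 'build', 'load_model']
```
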
spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
DELETED
@@ -1,8 +0,0 @@
-_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
-model = dict(
-    backbone=dict(plugins=[
-        dict(
-            cfg=dict(type='ContextBlock', ratio=1. / 4),
-            stages=(False, True, True, True),
-            position='after_conv3')
-    ]))

spaces/Andy1621/uniformer_image_detection/configs/gfl/README.md
DELETED
@@ -1,32 +0,0 @@
-# Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection
-
-## Introduction
-
-[ALGORITHM]
-
-We provide config files to reproduce the object detection results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388)
-
-```latex
-@article{li2020generalized,
-  title={Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection},
-  author={Li, Xiang and Wang, Wenhai and Wu, Lijun and Chen, Shuo and Hu, Xiaolin and Li, Jun and Tang, Jinhui and Yang, Jian},
-  journal={arXiv preprint arXiv:2006.04388},
-  year={2020}
-}
-```
-
-## Results and Models
-
-| Backbone | Style | Lr schd | Multi-scale Training| Inf time (fps) | box AP | Config | Download |
-|:-----------------:|:-------:|:-------:|:-------------------:|:--------------:|:------:|:------:|:--------:|
-| R-50 | pytorch | 1x | No | 19.5 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244.log.json) |
-| R-50 | pytorch | 2x | Yes | 19.5 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802.log.json) |
-| R-101 | pytorch | 2x | Yes | 14.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126.log.json) |
-| R-101-dcnv2 | pytorch | 2x | Yes | 12.9 | 47.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002.log.json) |
-| X-101-32x4d | pytorch | 2x | Yes | 12.1 | 45.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002.log.json) |
-| X-101-32x4d-dcnv2 | pytorch | 2x | Yes | 10.7 | 48.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002.log.json) |
-
-[1] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \
-[2] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \
-[3] *`dcnv2` denotes deformable convolutional networks v2.* \
-[4] *FPS is tested with a single GeForce RTX 2080Ti GPU, using a batch size of 1.*

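For reference, checkpoints like the ones listed in this table are normally consumed through MMDetection's high-level API. A rough sketch, assuming an MMDetection 2.x environment that matches these configs; the local paths and test image are placeholders.

```python
# Hypothetical inference sketch for one of the GFL checkpoints in the table above.
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/gfl/gfl_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class lists of detected boxes
```
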
spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
DELETED
@@ -1,13 +0,0 @@
-_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py'
-model = dict(
-    pretrained='open-mmlab://resnext101_64x4d',
-    backbone=dict(
-        type='ResNeXt',
-        depth=101,
-        groups=64,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        style='pytorch'))
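The deleted config above (and the similar ones that follow) only overrides a few fields; everything else is inherited from the file named in `_base_`. A minimal sketch of how that merge is resolved, assuming mmcv 1.x and a checkout where the relative config path exists:

```python
# Minimal sketch: resolve the `_base_` inheritance used by the config files in this diff.
from mmcv import Config

cfg = Config.fromfile(
    'configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py')
# Only the backbone is overridden here; the rest is merged in from the base config.
print(cfg.model.backbone.type, cfg.model.backbone.groups)  # ResNeXt 64
```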
spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py
DELETED
@@ -1,13 +0,0 @@
-_base_ = './rpn_r50_fpn_1x_coco.py'
-model = dict(
-    pretrained='open-mmlab://resnext101_32x4d',
-    backbone=dict(
-        type='ResNeXt',
-        depth=101,
-        groups=32,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        style='pytorch'))
spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
-_base_ = [
-    '../_base_/models/ann_r50-d8.py',
-    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
-    decode_head=dict(align_corners=True),
-    auxiliary_head=dict(align_corners=True),
-    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,2 +0,0 @@
-_base_ = './emanet_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py
DELETED
@@ -1,9 +0,0 @@
-_base_ = './fcn_hr18_512x1024_40k_cityscapes.py'
-model = dict(
-    pretrained='open-mmlab://msra/hrnetv2_w18_small',
-    backbone=dict(
-        extra=dict(
-            stage1=dict(num_blocks=(2, )),
-            stage2=dict(num_blocks=(2, 2)),
-            stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
-            stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
spaces/AnnonSubmission/xai-cl/ssl_models/simsiam.py
DELETED
@@ -1,91 +0,0 @@
-import torch
-import torch.nn as nn
-import torchvision
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-"""from https://github.com/facebookresearch/simsiam"""
-
-class SimSiam(nn.Module):
-
-    def __init__(self, base_encoder, dim, pred_dim):
-        """
-        dim: feature dimension (default: 2048)
-        pred_dim: hidden dimension of the predictor (default: 512)
-        symmetric is True only when training
-        """
-        super(SimSiam, self).__init__()
-
-        # create the encoder
-        # num_classes is the output fc dimension, zero-initialize last BNs
-        self.encoder = base_encoder(num_classes=dim, zero_init_residual=True)
-
-        # build a 3-layer projector
-        prev_dim = self.encoder.fc.weight.shape[1]
-        self.encoder.fc = nn.Sequential(nn.Linear(prev_dim, prev_dim, bias=False),
-                                        nn.BatchNorm1d(prev_dim),
-                                        nn.ReLU(inplace=True), # first layer
-                                        nn.Linear(prev_dim, prev_dim, bias=False),
-                                        nn.BatchNorm1d(prev_dim),
-                                        nn.ReLU(inplace=True), # second layer
-                                        self.encoder.fc,
-                                        nn.BatchNorm1d(dim, affine=False)) # output layer
-        self.encoder.fc[6].bias.requires_grad = False # hack: not use bias as it is followed by BN
-
-        # build a 2-layer predictor
-        self.predictor = nn.Sequential(nn.Linear(dim, pred_dim, bias=False),
-                                       nn.BatchNorm1d(pred_dim),
-                                       nn.ReLU(inplace=True), # hidden layer
-                                       nn.Linear(pred_dim, dim)) # output layer
-
-    def forward(self, x1, x2):
-        z1 = self.encoder(x1).detach() # NxC
-        z2 = self.encoder(x2).detach() # NxC
-
-        p1 = self.predictor(z1) # NxC
-        p2 = self.predictor(z2) # NxC
-
-        loss = -(nn.CosineSimilarity(dim=1)(p1, z2).mean() + nn.CosineSimilarity(dim=1)(p2, z1).mean()) * 0.5
-
-        return loss
-
-class ResNet(nn.Module):
-    def __init__(self, backbone):
-        super().__init__()
-
-        modules = list(backbone.children())[:-2]
-        self.net = nn.Sequential(*modules)
-
-    def forward(self, x):
-        return self.net(x).mean(dim=[2, 3])
-
-class RestructuredSimSiam(nn.Module):
-    def __init__(self, model):
-        super().__init__()
-
-        self.encoder = ResNet(model.encoder)
-        self.mlp_encoder = model.encoder.fc
-        self.mlp_encoder[6].bias.requires_grad = False
-        self.contrastive_head = model.predictor
-
-    def forward(self, x, run_head = True):
-
-        x = self.mlp_encoder(self.encoder(x)) # don't detach since we will do backprop for explainability
-
-        if run_head:
-            x = self.contrastive_head(x)
-
-        return x
-
-
-def get_simsiam(ckpt_path = 'checkpoint_0099.pth.tar'):
-
-    model = SimSiam(base_encoder = torchvision.models.resnet50,
-                    dim = 2048,
-                    pred_dim = 512)
-
-    checkpoint = torch.load('pretrained_models/simsiam_models/'+ ckpt_path, map_location='cpu')
-    state_dic = checkpoint['state_dict']
-    state_dic = {k.replace("module.", ""): v for k, v in state_dic.items()}
-    model.load_state_dict(state_dic)
-    restructured_model = RestructuredSimSiam(model)
-    return restructured_model.to(device)
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py
DELETED
@@ -1,204 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from torch import nn
-from torch.autograd import Function
-from torch.autograd.function import once_differentiable
-from torch.nn.modules.utils import _pair
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext(
-    '_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward'])
-
-
-class DeformRoIPoolFunction(Function):
-
-    @staticmethod
-    def symbolic(g, input, rois, offset, output_size, spatial_scale,
-                 sampling_ratio, gamma):
-        return g.op(
-            'mmcv::MMCVDeformRoIPool',
-            input,
-            rois,
-            offset,
-            pooled_height_i=output_size[0],
-            pooled_width_i=output_size[1],
-            spatial_scale_f=spatial_scale,
-            sampling_ratio_f=sampling_ratio,
-            gamma_f=gamma)
-
-    @staticmethod
-    def forward(ctx,
-                input,
-                rois,
-                offset,
-                output_size,
-                spatial_scale=1.0,
-                sampling_ratio=0,
-                gamma=0.1):
-        if offset is None:
-            offset = input.new_zeros(0)
-        ctx.output_size = _pair(output_size)
-        ctx.spatial_scale = float(spatial_scale)
-        ctx.sampling_ratio = int(sampling_ratio)
-        ctx.gamma = float(gamma)
-
-        assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
-
-        output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
-                        ctx.output_size[1])
-        output = input.new_zeros(output_shape)
-
-        ext_module.deform_roi_pool_forward(
-            input,
-            rois,
-            offset,
-            output,
-            pooled_height=ctx.output_size[0],
-            pooled_width=ctx.output_size[1],
-            spatial_scale=ctx.spatial_scale,
-            sampling_ratio=ctx.sampling_ratio,
-            gamma=ctx.gamma)
-
-        ctx.save_for_backward(input, rois, offset)
-        return output
-
-    @staticmethod
-    @once_differentiable
-    def backward(ctx, grad_output):
-        input, rois, offset = ctx.saved_tensors
-        grad_input = grad_output.new_zeros(input.shape)
-        grad_offset = grad_output.new_zeros(offset.shape)
-
-        ext_module.deform_roi_pool_backward(
-            grad_output,
-            input,
-            rois,
-            offset,
-            grad_input,
-            grad_offset,
-            pooled_height=ctx.output_size[0],
-            pooled_width=ctx.output_size[1],
-            spatial_scale=ctx.spatial_scale,
-            sampling_ratio=ctx.sampling_ratio,
-            gamma=ctx.gamma)
-        if grad_offset.numel() == 0:
-            grad_offset = None
-        return grad_input, None, grad_offset, None, None, None, None
-
-
-deform_roi_pool = DeformRoIPoolFunction.apply
-
-
-class DeformRoIPool(nn.Module):
-
-    def __init__(self,
-                 output_size,
-                 spatial_scale=1.0,
-                 sampling_ratio=0,
-                 gamma=0.1):
-        super(DeformRoIPool, self).__init__()
-        self.output_size = _pair(output_size)
-        self.spatial_scale = float(spatial_scale)
-        self.sampling_ratio = int(sampling_ratio)
-        self.gamma = float(gamma)
-
-    def forward(self, input, rois, offset=None):
-        return deform_roi_pool(input, rois, offset, self.output_size,
-                               self.spatial_scale, self.sampling_ratio,
-                               self.gamma)
-
-
-class DeformRoIPoolPack(DeformRoIPool):
-
-    def __init__(self,
-                 output_size,
-                 output_channels,
-                 deform_fc_channels=1024,
-                 spatial_scale=1.0,
-                 sampling_ratio=0,
-                 gamma=0.1):
-        super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale,
-                                                sampling_ratio, gamma)
-
-        self.output_channels = output_channels
-        self.deform_fc_channels = deform_fc_channels
-
-        self.offset_fc = nn.Sequential(
-            nn.Linear(
-                self.output_size[0] * self.output_size[1] *
-                self.output_channels, self.deform_fc_channels),
-            nn.ReLU(inplace=True),
-            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
-            nn.ReLU(inplace=True),
-            nn.Linear(self.deform_fc_channels,
-                      self.output_size[0] * self.output_size[1] * 2))
-        self.offset_fc[-1].weight.data.zero_()
-        self.offset_fc[-1].bias.data.zero_()
-
-    def forward(self, input, rois):
-        assert input.size(1) == self.output_channels
-        x = deform_roi_pool(input, rois, None, self.output_size,
-                            self.spatial_scale, self.sampling_ratio,
-                            self.gamma)
-        rois_num = rois.size(0)
-        offset = self.offset_fc(x.view(rois_num, -1))
-        offset = offset.view(rois_num, 2, self.output_size[0],
-                             self.output_size[1])
-        return deform_roi_pool(input, rois, offset, self.output_size,
-                               self.spatial_scale, self.sampling_ratio,
-                               self.gamma)
-
-
-class ModulatedDeformRoIPoolPack(DeformRoIPool):
-
-    def __init__(self,
-                 output_size,
-                 output_channels,
-                 deform_fc_channels=1024,
-                 spatial_scale=1.0,
-                 sampling_ratio=0,
-                 gamma=0.1):
-        super(ModulatedDeformRoIPoolPack,
-              self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
-
-        self.output_channels = output_channels
-        self.deform_fc_channels = deform_fc_channels
-
-        self.offset_fc = nn.Sequential(
-            nn.Linear(
-                self.output_size[0] * self.output_size[1] *
-                self.output_channels, self.deform_fc_channels),
-            nn.ReLU(inplace=True),
-            nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
-            nn.ReLU(inplace=True),
-            nn.Linear(self.deform_fc_channels,
-                      self.output_size[0] * self.output_size[1] * 2))
-        self.offset_fc[-1].weight.data.zero_()
-        self.offset_fc[-1].bias.data.zero_()
-
-        self.mask_fc = nn.Sequential(
-            nn.Linear(
-                self.output_size[0] * self.output_size[1] *
-                self.output_channels, self.deform_fc_channels),
-            nn.ReLU(inplace=True),
-            nn.Linear(self.deform_fc_channels,
-                      self.output_size[0] * self.output_size[1] * 1),
-            nn.Sigmoid())
-        self.mask_fc[2].weight.data.zero_()
-        self.mask_fc[2].bias.data.zero_()
-
-    def forward(self, input, rois):
-        assert input.size(1) == self.output_channels
-        x = deform_roi_pool(input, rois, None, self.output_size,
-                            self.spatial_scale, self.sampling_ratio,
-                            self.gamma)
-        rois_num = rois.size(0)
-        offset = self.offset_fc(x.view(rois_num, -1))
-        offset = offset.view(rois_num, 2, self.output_size[0],
-                             self.output_size[1])
-        mask = self.mask_fc(x.view(rois_num, -1))
-        mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1])
-        d = deform_roi_pool(input, rois, offset, self.output_size,
-                            self.spatial_scale, self.sampling_ratio,
-                            self.gamma)
-        return d * mask
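A hedged sketch of the packed variant defined above. It assumes an mmcv build that ships the compiled `deform_roi_pool` C++/CUDA extension and a CUDA device; the tensor shapes, RoI coordinates, and `spatial_scale` are illustrative placeholders.

```python
# Hedged sketch, assuming mmcv was built with its CUDA ops.
import torch
from mmcv.ops import DeformRoIPoolPack

feats = torch.randn(1, 256, 32, 32).cuda()                 # one FPN-level feature map
rois = torch.tensor([[0., 16., 16., 200., 180.]]).cuda()   # (batch_idx, x1, y1, x2, y2) in image coords
pool = DeformRoIPoolPack(output_size=7, output_channels=256, spatial_scale=1.0 / 8).cuda()
pooled = pool(feats, rois)                                 # -> (num_rois, 256, 7, 7)
```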
spaces/Apex-X/GODROOP/roop/processors/frame/face_enhancer.py
DELETED
@@ -1,81 +0,0 @@
-from typing import Any, List, Callable
-import cv2
-import threading
-import gfpgan
-
-import roop.globals
-import roop.processors.frame.core
-from roop.core import update_status
-from roop.face_analyser import get_one_face
-from roop.typing import Frame, Face
-from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
-FACE_ENHANCER = None
-THREAD_SEMAPHORE = threading.Semaphore()
-THREAD_LOCK = threading.Lock()
-NAME = 'ROOP.FACE-ENHANCER'
-
-
-def get_face_enhancer() -> Any:
-    global FACE_ENHANCER
-
-    with THREAD_LOCK:
-        if FACE_ENHANCER is None:
-            model_path = resolve_relative_path('../models/GFPGANv1.4')
-            # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
-            FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined]
-    return FACE_ENHANCER
-
-
-def pre_check() -> bool:
-    download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/th2w33knd/GFPGANv1.4/resolve/main/GFPGANv1.4.pth'])
-    return True
-
-
-def pre_start() -> bool:
-    if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
-        update_status('Select an image or video for target path.', NAME)
-        return False
-    return True
-
-
-def post_process() -> None:
-    global FACE_ENHANCER
-
-    FACE_ENHANCER = None
-
-
-def enhance_face(temp_frame: Frame) -> Frame:
-    with THREAD_SEMAPHORE:
-        _, _, temp_frame = get_face_enhancer().enhance(
-            temp_frame,
-            paste_back=True
-        )
-    return temp_frame
-
-
-def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
-    target_face = get_one_face(temp_frame)
-    if target_face:
-        temp_frame = enhance_face(temp_frame)
-    return temp_frame
-
-
-def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
-    for temp_frame_path in temp_frame_paths:
-        temp_frame = cv2.imread(temp_frame_path)
-        result = process_frame(None, temp_frame)
-        cv2.imwrite(temp_frame_path, result)
-        if update:
-            update()
-
-
-def process_image(source_path: str, target_path: str, output_path: str) -> None:
-    target_frame = cv2.imread(target_path)
-    result = process_frame(None, target_frame)
-    cv2.imwrite(output_path, result)
-
-
-def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-    roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
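A hypothetical one-off invocation of the frame processor above, outside roop's normal pipeline. All file paths are placeholders, the module path follows the file location shown in this diff, and `pre_check()` is relied on to download the GFPGANv1.4 weights if they are missing.

```python
# Hypothetical sketch; paths are placeholders and roop's globals must point at a valid target.
import roop.globals
from roop.processors.frame import face_enhancer

roop.globals.target_path = 'target.jpg'
if face_enhancer.pre_check() and face_enhancer.pre_start():
    face_enhancer.process_image('source.jpg', 'target.jpg', 'enhanced.jpg')
```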
spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_utils.py
DELETED
@@ -1,110 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import julius
-import torch
-import pytest
-
-from audiocraft.data.audio_utils import (
-    _clip_wav,
-    convert_audio_channels,
-    convert_audio,
-    normalize_audio
-)
-from ..common_utils import get_batch_white_noise
-
-
-class TestConvertAudioChannels:
-
-    def test_convert_audio_channels_downmix(self):
-        b, c, t = 2, 3, 100
-        audio = get_batch_white_noise(b, c, t)
-        mixed = convert_audio_channels(audio, channels=2)
-        assert list(mixed.shape) == [b, 2, t]
-
-    def test_convert_audio_channels_nochange(self):
-        b, c, t = 2, 3, 100
-        audio = get_batch_white_noise(b, c, t)
-        mixed = convert_audio_channels(audio, channels=c)
-        assert list(mixed.shape) == list(audio.shape)
-
-    def test_convert_audio_channels_upmix(self):
-        b, c, t = 2, 1, 100
-        audio = get_batch_white_noise(b, c, t)
-        mixed = convert_audio_channels(audio, channels=3)
-        assert list(mixed.shape) == [b, 3, t]
-
-    def test_convert_audio_channels_upmix_error(self):
-        b, c, t = 2, 2, 100
-        audio = get_batch_white_noise(b, c, t)
-        with pytest.raises(ValueError):
-            convert_audio_channels(audio, channels=3)
-
-
-class TestConvertAudio:
-
-    def test_convert_audio_channels_downmix(self):
-        b, c, dur = 2, 3, 4.
-        sr = 128
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2)
-        assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]]
-
-    def test_convert_audio_channels_upmix(self):
-        b, c, dur = 2, 1, 4.
-        sr = 128
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3)
-        assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]]
-
-    def test_convert_audio_upsample(self):
-        b, c, dur = 2, 1, 4.
-        sr = 2
-        new_sr = 3
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
-        out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
-        assert torch.allclose(out, out_j)
-
-    def test_convert_audio_resample(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        new_sr = 2
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
-        out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
-        assert torch.allclose(out, out_j)
-
-
-class TestNormalizeAudio:
-
-    def test_clip_wav(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        _clip_wav(audio)
-        assert audio.abs().max() <= 1
-
-    def test_normalize_audio_clip(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        norm_audio = normalize_audio(audio, strategy='clip')
-        assert norm_audio.abs().max() <= 1
-
-    def test_normalize_audio_rms(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        norm_audio = normalize_audio(audio, strategy='rms')
-        assert norm_audio.abs().max() <= 1
-
-    def test_normalize_audio_peak(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        norm_audio = normalize_audio(audio, strategy='peak')
-        assert norm_audio.abs().max() <= 1
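A hedged sketch of the audiocraft helpers exercised by the tests above; the call signatures follow the test code, while the sample rates and tensor sizes are illustrative.

```python
# Hedged sketch of convert_audio / normalize_audio, as called in the tests above.
import torch
from audiocraft.data.audio_utils import convert_audio, normalize_audio

wav = torch.randn(2, 1, 32000)                                   # (batch, channels, time), mono
stereo_16k = convert_audio(wav, from_rate=32000, to_rate=16000, to_channels=2)
print(stereo_16k.shape)                                          # torch.Size([2, 2, 16000])
safe = normalize_audio(stereo_16k, strategy='peak')              # peak-normalize into [-1, 1]
```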
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/diagram/__init__.py
DELETED
@@ -1,642 +0,0 @@
|
|
1 |
-
import railroad
|
2 |
-
from pip._vendor import pyparsing
|
3 |
-
import typing
|
4 |
-
from typing import (
|
5 |
-
List,
|
6 |
-
NamedTuple,
|
7 |
-
Generic,
|
8 |
-
TypeVar,
|
9 |
-
Dict,
|
10 |
-
Callable,
|
11 |
-
Set,
|
12 |
-
Iterable,
|
13 |
-
)
|
14 |
-
from jinja2 import Template
|
15 |
-
from io import StringIO
|
16 |
-
import inspect
|
17 |
-
|
18 |
-
|
19 |
-
jinja2_template_source = """\
|
20 |
-
<!DOCTYPE html>
|
21 |
-
<html>
|
22 |
-
<head>
|
23 |
-
{% if not head %}
|
24 |
-
<style type="text/css">
|
25 |
-
.railroad-heading {
|
26 |
-
font-family: monospace;
|
27 |
-
}
|
28 |
-
</style>
|
29 |
-
{% else %}
|
30 |
-
{{ head | safe }}
|
31 |
-
{% endif %}
|
32 |
-
</head>
|
33 |
-
<body>
|
34 |
-
{{ body | safe }}
|
35 |
-
{% for diagram in diagrams %}
|
36 |
-
<div class="railroad-group">
|
37 |
-
<h1 class="railroad-heading">{{ diagram.title }}</h1>
|
38 |
-
<div class="railroad-description">{{ diagram.text }}</div>
|
39 |
-
<div class="railroad-svg">
|
40 |
-
{{ diagram.svg }}
|
41 |
-
</div>
|
42 |
-
</div>
|
43 |
-
{% endfor %}
|
44 |
-
</body>
|
45 |
-
</html>
|
46 |
-
"""
|
47 |
-
|
48 |
-
template = Template(jinja2_template_source)
|
49 |
-
|
50 |
-
# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
|
51 |
-
NamedDiagram = NamedTuple(
|
52 |
-
"NamedDiagram",
|
53 |
-
[("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
|
54 |
-
)
|
55 |
-
"""
|
56 |
-
A simple structure for associating a name with a railroad diagram
|
57 |
-
"""
|
58 |
-
|
59 |
-
T = TypeVar("T")
|
60 |
-
|
61 |
-
|
62 |
-
class EachItem(railroad.Group):
|
63 |
-
"""
|
64 |
-
Custom railroad item to compose a:
|
65 |
-
- Group containing a
|
66 |
-
- OneOrMore containing a
|
67 |
-
- Choice of the elements in the Each
|
68 |
-
with the group label indicating that all must be matched
|
69 |
-
"""
|
70 |
-
|
71 |
-
all_label = "[ALL]"
|
72 |
-
|
73 |
-
def __init__(self, *items):
|
74 |
-
choice_item = railroad.Choice(len(items) - 1, *items)
|
75 |
-
one_or_more_item = railroad.OneOrMore(item=choice_item)
|
76 |
-
super().__init__(one_or_more_item, label=self.all_label)
|
77 |
-
|
78 |
-
|
79 |
-
class AnnotatedItem(railroad.Group):
|
80 |
-
"""
|
81 |
-
Simple subclass of Group that creates an annotation label
|
82 |
-
"""
|
83 |
-
|
84 |
-
def __init__(self, label: str, item):
|
85 |
-
super().__init__(item=item, label="[{}]".format(label) if label else label)
|
86 |
-
|
87 |
-
|
88 |
-
class EditablePartial(Generic[T]):
|
89 |
-
"""
|
90 |
-
Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
|
91 |
-
constructed.
|
92 |
-
"""
|
93 |
-
|
94 |
-
# We need this here because the railroad constructors actually transform the data, so can't be called until the
|
95 |
-
# entire tree is assembled
|
96 |
-
|
97 |
-
def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
|
98 |
-
self.func = func
|
99 |
-
self.args = args
|
100 |
-
self.kwargs = kwargs
|
101 |
-
|
102 |
-
@classmethod
|
103 |
-
def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
|
104 |
-
"""
|
105 |
-
If you call this function in the same way that you would call the constructor, it will store the arguments
|
106 |
-
as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
|
107 |
-
"""
|
108 |
-
return EditablePartial(func=func, args=list(args), kwargs=kwargs)
|
109 |
-
|
110 |
-
@property
|
111 |
-
def name(self):
|
112 |
-
return self.kwargs["name"]
|
113 |
-
|
114 |
-
def __call__(self) -> T:
|
115 |
-
"""
|
116 |
-
Evaluate the partial and return the result
|
117 |
-
"""
|
118 |
-
args = self.args.copy()
|
119 |
-
kwargs = self.kwargs.copy()
|
120 |
-
|
121 |
-
# This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
|
122 |
-
# args=['list', 'of', 'things'])
|
123 |
-
arg_spec = inspect.getfullargspec(self.func)
|
124 |
-
if arg_spec.varargs in self.kwargs:
|
125 |
-
args += kwargs.pop(arg_spec.varargs)
|
126 |
-
|
127 |
-
return self.func(*args, **kwargs)
|
128 |
-
|
129 |
-
|
130 |
-
def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
|
131 |
-
"""
|
132 |
-
Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
|
133 |
-
:params kwargs: kwargs to be passed in to the template
|
134 |
-
"""
|
135 |
-
data = []
|
136 |
-
for diagram in diagrams:
|
137 |
-
if diagram.diagram is None:
|
138 |
-
continue
|
139 |
-
io = StringIO()
|
140 |
-
diagram.diagram.writeSvg(io.write)
|
141 |
-
title = diagram.name
|
142 |
-
if diagram.index == 0:
|
143 |
-
title += " (root)"
|
144 |
-
data.append({"title": title, "text": "", "svg": io.getvalue()})
|
145 |
-
|
146 |
-
return template.render(diagrams=data, **kwargs)
|
147 |
-
|
148 |
-
|
149 |
-
def resolve_partial(partial: "EditablePartial[T]") -> T:
|
150 |
-
"""
|
151 |
-
Recursively resolves a collection of Partials into whatever type they are
|
152 |
-
"""
|
153 |
-
if isinstance(partial, EditablePartial):
|
154 |
-
partial.args = resolve_partial(partial.args)
|
155 |
-
partial.kwargs = resolve_partial(partial.kwargs)
|
156 |
-
return partial()
|
157 |
-
elif isinstance(partial, list):
|
158 |
-
return [resolve_partial(x) for x in partial]
|
159 |
-
elif isinstance(partial, dict):
|
160 |
-
return {key: resolve_partial(x) for key, x in partial.items()}
|
161 |
-
else:
|
162 |
-
return partial
|
163 |
-
|
164 |
-
|
165 |
-
def to_railroad(
|
166 |
-
element: pyparsing.ParserElement,
|
167 |
-
diagram_kwargs: typing.Optional[dict] = None,
|
168 |
-
vertical: int = 3,
|
169 |
-
show_results_names: bool = False,
|
170 |
-
show_groups: bool = False,
|
171 |
-
) -> List[NamedDiagram]:
|
172 |
-
"""
|
173 |
-
Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
|
174 |
-
creation if you want to access the Railroad tree before it is converted to HTML
|
175 |
-
:param element: base element of the parser being diagrammed
|
176 |
-
:param diagram_kwargs: kwargs to pass to the Diagram() constructor
|
177 |
-
:param vertical: (optional) - int - limit at which number of alternatives should be
|
178 |
-
shown vertically instead of horizontally
|
179 |
-
:param show_results_names - bool to indicate whether results name annotations should be
|
180 |
-
included in the diagram
|
181 |
-
:param show_groups - bool to indicate whether groups should be highlighted with an unlabeled
|
182 |
-
surrounding box
|
183 |
-
"""
|
184 |
-
# Convert the whole tree underneath the root
|
185 |
-
lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
|
186 |
-
_to_diagram_element(
|
187 |
-
element,
|
188 |
-
lookup=lookup,
|
189 |
-
parent=None,
|
190 |
-
vertical=vertical,
|
191 |
-
show_results_names=show_results_names,
|
192 |
-
show_groups=show_groups,
|
193 |
-
)
|
194 |
-
|
195 |
-
root_id = id(element)
|
196 |
-
# Convert the root if it hasn't been already
|
197 |
-
if root_id in lookup:
|
198 |
-
if not element.customName:
|
199 |
-
lookup[root_id].name = ""
|
200 |
-
lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
|
201 |
-
|
202 |
-
# Now that we're finished, we can convert from intermediate structures into Railroad elements
|
203 |
-
diags = list(lookup.diagrams.values())
|
204 |
-
if len(diags) > 1:
|
205 |
-
# collapse out duplicate diags with the same name
|
206 |
-
seen = set()
|
207 |
-
deduped_diags = []
|
208 |
-
for d in diags:
|
209 |
-
# don't extract SkipTo elements, they are uninformative as subdiagrams
|
210 |
-
if d.name == "...":
|
211 |
-
continue
|
212 |
-
if d.name is not None and d.name not in seen:
|
213 |
-
seen.add(d.name)
|
214 |
-
deduped_diags.append(d)
|
215 |
-
resolved = [resolve_partial(partial) for partial in deduped_diags]
|
216 |
-
else:
|
217 |
-
# special case - if just one diagram, always display it, even if
|
218 |
-
# it has no name
|
219 |
-
resolved = [resolve_partial(partial) for partial in diags]
|
220 |
-
return sorted(resolved, key=lambda diag: diag.index)
|
221 |
-
|
222 |
-
|
223 |
-
def _should_vertical(
|
224 |
-
specification: int, exprs: Iterable[pyparsing.ParserElement]
|
225 |
-
) -> bool:
|
226 |
-
"""
|
227 |
-
Returns true if we should return a vertical list of elements
|
228 |
-
"""
|
229 |
-
if specification is None:
|
230 |
-
return False
|
231 |
-
else:
|
232 |
-
return len(_visible_exprs(exprs)) >= specification
|
233 |
-
|
234 |
-
|
235 |
-
class ElementState:
|
236 |
-
"""
|
237 |
-
State recorded for an individual pyparsing Element
|
238 |
-
"""
|
239 |
-
|
240 |
-
# Note: this should be a dataclass, but we have to support Python 3.5
|
241 |
-
def __init__(
|
242 |
-
self,
|
243 |
-
element: pyparsing.ParserElement,
|
244 |
-
converted: EditablePartial,
|
245 |
-
parent: EditablePartial,
|
246 |
-
number: int,
|
247 |
-
name: str = None,
|
248 |
-
parent_index: typing.Optional[int] = None,
|
249 |
-
):
|
250 |
-
#: The pyparsing element that this represents
|
251 |
-
self.element: pyparsing.ParserElement = element
|
252 |
-
#: The name of the element
|
253 |
-
self.name: typing.Optional[str] = name
|
254 |
-
#: The output Railroad element in an unconverted state
|
255 |
-
self.converted: EditablePartial = converted
|
256 |
-
#: The parent Railroad element, which we store so that we can extract this if it's duplicated
|
257 |
-
self.parent: EditablePartial = parent
|
258 |
-
#: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
|
259 |
-
self.number: int = number
|
260 |
-
#: The index of this inside its parent
|
261 |
-
self.parent_index: typing.Optional[int] = parent_index
|
262 |
-
#: If true, we should extract this out into a subdiagram
|
263 |
-
self.extract: bool = False
|
264 |
-
#: If true, all of this element's children have been filled out
|
265 |
-
self.complete: bool = False
|
266 |
-
|
267 |
-
def mark_for_extraction(
|
268 |
-
self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
|
269 |
-
):
|
270 |
-
"""
|
271 |
-
Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
|
272 |
-
:param el_id: id of the element
|
273 |
-
:param state: element/diagram state tracker
|
274 |
-
:param name: name to use for this element's text
|
275 |
-
:param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
|
276 |
-
root element when we know we're finished
|
277 |
-
"""
|
278 |
-
self.extract = True
|
279 |
-
|
280 |
-
# Set the name
|
281 |
-
if not self.name:
|
282 |
-
if name:
|
283 |
-
# Allow forcing a custom name
|
284 |
-
self.name = name
|
285 |
-
elif self.element.customName:
|
286 |
-
self.name = self.element.customName
|
287 |
-
else:
|
288 |
-
self.name = ""
|
289 |
-
|
290 |
-
# Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
|
291 |
-
# to be added
|
292 |
-
# Also, if this is just a string literal etc, don't bother extracting it
|
293 |
-
if force or (self.complete and _worth_extracting(self.element)):
|
294 |
-
state.extract_into_diagram(el_id)
|
295 |
-
|
296 |
-
|
297 |
-
class ConverterState:
|
298 |
-
"""
|
299 |
-
Stores some state that persists between recursions into the element tree
|
300 |
-
"""
|
301 |
-
|
302 |
-
def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
|
303 |
-
#: A dictionary mapping ParserElements to state relating to them
|
304 |
-
self._element_diagram_states: Dict[int, ElementState] = {}
|
305 |
-
#: A dictionary mapping ParserElement IDs to subdiagrams generated from them
|
306 |
-
self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
|
307 |
-
#: The index of the next unnamed element
|
308 |
-
self.unnamed_index: int = 1
|
309 |
-
#: The index of the next element. This is used for sorting
|
310 |
-
self.index: int = 0
|
311 |
-
#: Shared kwargs that are used to customize the construction of diagrams
|
312 |
-
self.diagram_kwargs: dict = diagram_kwargs or {}
|
313 |
-
self.extracted_diagram_names: Set[str] = set()
|
314 |
-
|
315 |
-
def __setitem__(self, key: int, value: ElementState):
|
316 |
-
self._element_diagram_states[key] = value
|
317 |
-
|
318 |
-
def __getitem__(self, key: int) -> ElementState:
|
319 |
-
return self._element_diagram_states[key]
|
320 |
-
|
321 |
-
def __delitem__(self, key: int):
|
322 |
-
del self._element_diagram_states[key]
|
323 |
-
|
324 |
-
def __contains__(self, key: int):
|
325 |
-
return key in self._element_diagram_states
|
326 |
-
|
327 |
-
def generate_unnamed(self) -> int:
|
328 |
-
"""
|
329 |
-
Generate a number used in the name of an otherwise unnamed diagram
|
330 |
-
"""
|
331 |
-
self.unnamed_index += 1
|
332 |
-
return self.unnamed_index
|
333 |
-
|
334 |
-
def generate_index(self) -> int:
|
335 |
-
"""
|
336 |
-
Generate a number used to index a diagram
|
337 |
-
"""
|
338 |
-
self.index += 1
|
339 |
-
return self.index
|
340 |
-
|
341 |
-
def extract_into_diagram(self, el_id: int):
|
342 |
-
"""
|
343 |
-
Used when we encounter the same token twice in the same tree. When this
|
344 |
-
happens, we replace all instances of that token with a terminal, and
|
345 |
-
create a new subdiagram for the token
|
346 |
-
"""
|
347 |
-
position = self[el_id]
|
348 |
-
|
349 |
-
# Replace the original definition of this element with a regular block
|
350 |
-
if position.parent:
|
351 |
-
ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
|
352 |
-
if "item" in position.parent.kwargs:
|
353 |
-
position.parent.kwargs["item"] = ret
|
354 |
-
elif "items" in position.parent.kwargs:
|
355 |
-
position.parent.kwargs["items"][position.parent_index] = ret
|
356 |
-
|
357 |
-
# If the element we're extracting is a group, skip to its content but keep the title
|
358 |
-
if position.converted.func == railroad.Group:
|
359 |
-
content = position.converted.kwargs["item"]
|
360 |
-
else:
|
361 |
-
content = position.converted
|
362 |
-
|
363 |
-
self.diagrams[el_id] = EditablePartial.from_call(
|
364 |
-
NamedDiagram,
|
365 |
-
name=position.name,
|
366 |
-
diagram=EditablePartial.from_call(
|
367 |
-
railroad.Diagram, content, **self.diagram_kwargs
|
368 |
-
),
|
369 |
-
index=position.number,
|
370 |
-
)
|
371 |
-
|
372 |
-
del self[el_id]
|
373 |
-
|
374 |
-
|
375 |
-
def _worth_extracting(element: pyparsing.ParserElement) -> bool:
|
376 |
-
"""
|
377 |
-
Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
|
378 |
-
themselves have children, then its complex enough to extract
|
379 |
-
"""
|
380 |
-
children = element.recurse()
|
381 |
-
return any(child.recurse() for child in children)
|
382 |
-
|
383 |
-
|
384 |
-
def _apply_diagram_item_enhancements(fn):
|
385 |
-
"""
|
386 |
-
decorator to ensure enhancements to a diagram item (such as results name annotations)
|
387 |
-
get applied on return from _to_diagram_element (we do this since there are several
|
388 |
-
returns in _to_diagram_element)
|
389 |
-
"""
|
390 |
-
|
391 |
-
def _inner(
|
392 |
-
element: pyparsing.ParserElement,
|
393 |
-
parent: typing.Optional[EditablePartial],
|
394 |
-
lookup: ConverterState = None,
|
395 |
-
vertical: int = None,
|
396 |
-
index: int = 0,
|
397 |
-
name_hint: str = None,
|
398 |
-
show_results_names: bool = False,
|
399 |
-
show_groups: bool = False,
|
400 |
-
) -> typing.Optional[EditablePartial]:
|
401 |
-
|
402 |
-
ret = fn(
|
403 |
-
element,
|
404 |
-
parent,
|
405 |
-
lookup,
|
406 |
-
vertical,
|
407 |
-
index,
|
408 |
-
name_hint,
|
409 |
-
show_results_names,
|
410 |
-
show_groups,
|
411 |
-
)
|
412 |
-
|
413 |
-
# apply annotation for results name, if present
|
414 |
-
if show_results_names and ret is not None:
|
415 |
-
element_results_name = element.resultsName
|
416 |
-
if element_results_name:
|
417 |
-
# add "*" to indicate if this is a "list all results" name
|
418 |
-
element_results_name += "" if element.modalResults else "*"
|
419 |
-
ret = EditablePartial.from_call(
|
420 |
-
railroad.Group, item=ret, label=element_results_name
|
421 |
-
)
|
422 |
-
|
423 |
-
return ret
|
424 |
-
|
425 |
-
return _inner
|
426 |
-
|
427 |
-
|
428 |
-
def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
|
429 |
-
non_diagramming_exprs = (
|
430 |
-
pyparsing.ParseElementEnhance,
|
431 |
-
pyparsing.PositionToken,
|
432 |
-
pyparsing.And._ErrorStop,
|
433 |
-
)
|
434 |
-
return [
|
435 |
-
e
|
436 |
-
for e in exprs
|
437 |
-
if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
|
438 |
-
]
|
439 |
-
|
440 |
-
|
441 |
-
@_apply_diagram_item_enhancements
|
442 |
-
def _to_diagram_element(
|
443 |
-
element: pyparsing.ParserElement,
|
444 |
-
parent: typing.Optional[EditablePartial],
|
445 |
-
lookup: ConverterState = None,
|
446 |
-
vertical: int = None,
|
447 |
-
index: int = 0,
|
448 |
-
name_hint: str = None,
|
449 |
-
show_results_names: bool = False,
|
450 |
-
show_groups: bool = False,
|
451 |
-
) -> typing.Optional[EditablePartial]:
|
452 |
-
"""
|
453 |
-
Recursively converts a PyParsing Element to a railroad Element
|
454 |
-
:param lookup: The shared converter state that keeps track of useful things
|
455 |
-
:param index: The index of this element within the parent
|
456 |
-
:param parent: The parent of this element in the output tree
|
457 |
-
:param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
|
458 |
-
it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
|
459 |
-
do so
|
460 |
-
:param name_hint: If provided, this will override the generated name
|
461 |
-
:param show_results_names: bool flag indicating whether to add annotations for results names
|
462 |
-
:returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
|
463 |
-
:param show_groups: bool flag indicating whether to show groups using bounding box
|
464 |
-
"""
|
465 |
-
exprs = element.recurse()
|
466 |
-
name = name_hint or element.customName or element.__class__.__name__
|
467 |
-
|
468 |
-
# Python's id() is used to provide a unique identifier for elements
|
469 |
-
el_id = id(element)
|
470 |
-
|
471 |
-
element_results_name = element.resultsName
|
472 |
-
|
473 |
-
# Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
|
474 |
-
if not element.customName:
|
475 |
-
if isinstance(
|
476 |
-
element,
|
477 |
-
(
|
478 |
-
# pyparsing.TokenConverter,
|
479 |
-
# pyparsing.Forward,
|
480 |
-
pyparsing.Located,
|
481 |
-
),
|
482 |
-
):
|
483 |
-
# However, if this element has a useful custom name, and its child does not, we can pass it on to the child
|
484 |
-
if exprs:
|
485 |
-
if not exprs[0].customName:
|
486 |
-
propagated_name = name
|
487 |
-
else:
|
488 |
-
propagated_name = None
|
489 |
-
|
490 |
-
return _to_diagram_element(
|
491 |
-
element.expr,
|
492 |
-
parent=parent,
|
493 |
-
lookup=lookup,
|
494 |
-
vertical=vertical,
|
495 |
-
index=index,
|
496 |
-
name_hint=propagated_name,
|
497 |
-
show_results_names=show_results_names,
|
498 |
-
show_groups=show_groups,
|
499 |
-
)
|
500 |
-
|
501 |
-
# If the element isn't worth extracting, we always treat it as the first time we say it
|
502 |
-
if _worth_extracting(element):
|
503 |
-
if el_id in lookup:
|
504 |
-
# If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
|
505 |
-
# so we have to extract it into a new diagram.
|
506 |
-
looked_up = lookup[el_id]
|
507 |
-
looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
|
508 |
-
ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
|
509 |
-
return ret
|
510 |
-
|
511 |
-
elif el_id in lookup.diagrams:
|
512 |
-
# If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
|
513 |
-
# just put in a marker element that refers to the sub-diagram
|
514 |
-
ret = EditablePartial.from_call(
|
515 |
-
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
|
516 |
-
)
|
517 |
-
return ret
|
518 |
-
|
519 |
-
# Recursively convert child elements
|
520 |
-
# Here we find the most relevant Railroad element for matching pyparsing Element
|
521 |
-
# We use ``items=[]`` here to hold the place for where the child elements will go once created
|
522 |
-
if isinstance(element, pyparsing.And):
|
523 |
-
# detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
|
524 |
-
# (all will have the same name, and resultsName)
|
525 |
-
if not exprs:
|
526 |
-
return None
|
527 |
-
if len(set((e.name, e.resultsName) for e in exprs)) == 1:
|
528 |
-
ret = EditablePartial.from_call(
|
529 |
-
railroad.OneOrMore, item="", repeat=str(len(exprs))
|
530 |
-
)
|
531 |
-
elif _should_vertical(vertical, exprs):
|
532 |
-
ret = EditablePartial.from_call(railroad.Stack, items=[])
|
533 |
-
else:
|
534 |
-
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
535 |
-
elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
|
536 |
-
if not exprs:
|
537 |
-
return None
|
538 |
-
if _should_vertical(vertical, exprs):
|
539 |
-
ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
|
540 |
-
else:
|
541 |
-
ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
|
542 |
-
elif isinstance(element, pyparsing.Each):
|
543 |
-
if not exprs:
|
544 |
-
return None
|
545 |
-
ret = EditablePartial.from_call(EachItem, items=[])
|
546 |
-
elif isinstance(element, pyparsing.NotAny):
|
547 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
|
548 |
-
elif isinstance(element, pyparsing.FollowedBy):
|
549 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
|
550 |
-
elif isinstance(element, pyparsing.PrecededBy):
|
551 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
|
552 |
-
elif isinstance(element, pyparsing.Group):
|
553 |
-
if show_groups:
|
554 |
-
ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
|
555 |
-
else:
|
556 |
-
ret = EditablePartial.from_call(railroad.Group, label="", item="")
|
557 |
-
elif isinstance(element, pyparsing.TokenConverter):
|
558 |
-
ret = EditablePartial.from_call(
|
559 |
-
AnnotatedItem, label=type(element).__name__.lower(), item=""
|
560 |
-
)
|
561 |
-
elif isinstance(element, pyparsing.Opt):
|
562 |
-
ret = EditablePartial.from_call(railroad.Optional, item="")
|
563 |
-
elif isinstance(element, pyparsing.OneOrMore):
|
564 |
-
ret = EditablePartial.from_call(railroad.OneOrMore, item="")
|
565 |
-
elif isinstance(element, pyparsing.ZeroOrMore):
|
566 |
-
ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
|
567 |
-
elif isinstance(element, pyparsing.Group):
|
568 |
-
ret = EditablePartial.from_call(
|
569 |
-
railroad.Group, item=None, label=element_results_name
|
570 |
-
)
|
571 |
-
elif isinstance(element, pyparsing.Empty) and not element.customName:
|
572 |
-
# Skip unnamed "Empty" elements
|
573 |
-
ret = None
|
574 |
-
elif len(exprs) > 1:
|
575 |
-
ret = EditablePartial.from_call(railroad.Sequence, items=[])
|
576 |
-
elif len(exprs) > 0 and not element_results_name:
|
577 |
-
ret = EditablePartial.from_call(railroad.Group, item="", label=name)
|
578 |
-
else:
|
579 |
-
terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
|
580 |
-
ret = terminal
|
581 |
-
|
582 |
-
if ret is None:
|
583 |
-
return
|
584 |
-
|
585 |
-
# Indicate this element's position in the tree so we can extract it if necessary
|
586 |
-
lookup[el_id] = ElementState(
|
587 |
-
element=element,
|
588 |
-
converted=ret,
|
589 |
-
parent=parent,
|
590 |
-
parent_index=index,
|
591 |
-
number=lookup.generate_index(),
|
592 |
-
)
|
593 |
-
if element.customName:
|
594 |
-
lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
|
595 |
-
|
596 |
-
i = 0
|
597 |
-
for expr in exprs:
|
598 |
-
# Add a placeholder index in case we have to extract the child before we even add it to the parent
|
599 |
-
if "items" in ret.kwargs:
|
600 |
-
ret.kwargs["items"].insert(i, None)
|
601 |
-
|
602 |
-
item = _to_diagram_element(
|
603 |
-
expr,
|
604 |
-
parent=ret,
|
605 |
-
lookup=lookup,
|
606 |
-
vertical=vertical,
|
607 |
-
index=i,
|
608 |
-
show_results_names=show_results_names,
|
609 |
-
show_groups=show_groups,
|
610 |
-
)
|
611 |
-
|
612 |
-
# Some elements don't need to be shown in the diagram
|
613 |
-
if item is not None:
|
614 |
-
if "item" in ret.kwargs:
|
615 |
-
ret.kwargs["item"] = item
|
616 |
-
elif "items" in ret.kwargs:
|
617 |
-
# If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
|
618 |
-
ret.kwargs["items"][i] = item
|
619 |
-
i += 1
|
620 |
-
elif "items" in ret.kwargs:
|
621 |
-
# If we're supposed to skip this element, remove it from the parent
|
622 |
-
del ret.kwargs["items"][i]
|
623 |
-
|
624 |
-
# If all this items children are none, skip this item
|
625 |
-
if ret and (
|
626 |
-
("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
|
627 |
-
or ("item" in ret.kwargs and ret.kwargs["item"] is None)
|
628 |
-
):
|
629 |
-
ret = EditablePartial.from_call(railroad.Terminal, name)
|
630 |
-
|
631 |
-
# Mark this element as "complete", ie it has all of its children
|
632 |
-
if el_id in lookup:
|
633 |
-
lookup[el_id].complete = True
|
634 |
-
|
635 |
-
if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
|
636 |
-
lookup.extract_into_diagram(el_id)
|
637 |
-
if ret is not None:
|
638 |
-
ret = EditablePartial.from_call(
|
639 |
-
railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
|
640 |
-
)
|
641 |
-
|
642 |
-
return ret
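A hedged sketch using the two entry points defined in the railroad-diagram module above, `to_railroad` and `railroad_to_html`. The import path assumes the standalone pyparsing >= 3 package (which ships this module as `pyparsing.diagram`) together with its optional `railroad-diagrams` and `jinja2` dependencies; the output filename is a placeholder.

```python
# Hedged sketch: render a railroad diagram for a small grammar to an HTML page.
import pyparsing as pp
from pyparsing.diagram import to_railroad, railroad_to_html

number = pp.pyparsing_common.number("number")
expr = pp.infix_notation(number, [
    (pp.one_of("* /"), 2, pp.opAssoc.LEFT),
    (pp.one_of("+ -"), 2, pp.opAssoc.LEFT),
])

html = railroad_to_html(to_railroad(expr))   # List[NamedDiagram] -> single HTML document
with open("grammar_diagram.html", "w") as fh:
    fh.write(html)
```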
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/json.py
DELETED
@@ -1,140 +0,0 @@
-from pathlib import Path
-from json import loads, dumps
-from typing import Any, Callable, Optional, Union
-
-from .text import Text
-from .highlighter import JSONHighlighter, NullHighlighter
-
-
-class JSON:
-    """A renderable which pretty prints JSON.
-
-    Args:
-        json (str): JSON encoded data.
-        indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
-        highlight (bool, optional): Enable highlighting. Defaults to True.
-        skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
-        ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
-        check_circular (bool, optional): Check for circular references. Defaults to True.
-        allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
-        default (Callable, optional): A callable that converts values that can not be encoded
-            in to something that can be JSON encoded. Defaults to None.
-        sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
-    """
-
-    def __init__(
-        self,
-        json: str,
-        indent: Union[None, int, str] = 2,
-        highlight: bool = True,
-        skip_keys: bool = False,
-        ensure_ascii: bool = False,
-        check_circular: bool = True,
-        allow_nan: bool = True,
-        default: Optional[Callable[[Any], Any]] = None,
-        sort_keys: bool = False,
-    ) -> None:
-        data = loads(json)
-        json = dumps(
-            data,
-            indent=indent,
-            skipkeys=skip_keys,
-            ensure_ascii=ensure_ascii,
-            check_circular=check_circular,
-            allow_nan=allow_nan,
-            default=default,
-            sort_keys=sort_keys,
-        )
-        highlighter = JSONHighlighter() if highlight else NullHighlighter()
-        self.text = highlighter(json)
-        self.text.no_wrap = True
-        self.text.overflow = None
-
-    @classmethod
-    def from_data(
-        cls,
-        data: Any,
-        indent: Union[None, int, str] = 2,
-        highlight: bool = True,
-        skip_keys: bool = False,
-        ensure_ascii: bool = False,
-        check_circular: bool = True,
-        allow_nan: bool = True,
-        default: Optional[Callable[[Any], Any]] = None,
-        sort_keys: bool = False,
-    ) -> "JSON":
-        """Encodes a JSON object from arbitrary data.
-
-        Args:
-            data (Any): An object that may be encoded in to JSON
-            indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
-            highlight (bool, optional): Enable highlighting. Defaults to True.
-            default (Callable, optional): Optional callable which will be called for objects that cannot be serialized. Defaults to None.
-            skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
-            ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
-            check_circular (bool, optional): Check for circular references. Defaults to True.
-            allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
-            default (Callable, optional): A callable that converts values that can not be encoded
-                in to something that can be JSON encoded. Defaults to None.
-            sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
-
-        Returns:
-            JSON: New JSON object from the given data.
-        """
-        json_instance: "JSON" = cls.__new__(cls)
-        json = dumps(
-            data,
-            indent=indent,
-            skipkeys=skip_keys,
-            ensure_ascii=ensure_ascii,
-            check_circular=check_circular,
-            allow_nan=allow_nan,
-            default=default,
-            sort_keys=sort_keys,
-        )
-        highlighter = JSONHighlighter() if highlight else NullHighlighter()
-        json_instance.text = highlighter(json)
-        json_instance.text.no_wrap = True
-        json_instance.text.overflow = None
-        return json_instance
-
-    def __rich__(self) -> Text:
-        return self.text
-
-
-if __name__ == "__main__":
-
-    import argparse
-    import sys
-
-    parser = argparse.ArgumentParser(description="Pretty print json")
-    parser.add_argument(
-        "path",
-        metavar="PATH",
-        help="path to file, or - for stdin",
-    )
-    parser.add_argument(
-        "-i",
-        "--indent",
-        metavar="SPACES",
-        type=int,
-        help="Number of spaces in an indent",
-        default=2,
-    )
-    args = parser.parse_args()
-
-    from pip._vendor.rich.console import Console
-
-    console = Console()
-    error_console = Console(stderr=True)
-
-    try:
-        if args.path == "-":
-            json_data = sys.stdin.read()
-        else:
-            json_data = Path(args.path).read_text()
-    except Exception as error:
-        error_console.print(f"Unable to read {args.path!r}; {error}")
-        sys.exit(-1)
-
-    console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
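A minimal sketch of the renderable defined above. The copy in this diff is vendored under `pip._vendor` and not meant for direct use, but the standalone `rich` package exposes the same class as `rich.json.JSON`; the sample data is illustrative.

```python
# Minimal sketch using the standalone rich package (same class as the vendored file above).
from rich.console import Console
from rich.json import JSON

console = Console()
console.print(JSON('{"model": "GFL", "box_AP": 42.9}'))                   # pretty-print a JSON string
console.print(JSON.from_data({"backbone": "R-50", "lr_schedule": "2x"}))  # or encode Python data directly
```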
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/losses.py
DELETED
@@ -1,133 +0,0 @@
import math
import torch


def diou_loss(
    boxes1: torch.Tensor,
    boxes2: torch.Tensor,
    reduction: str = "none",
    eps: float = 1e-7,
) -> torch.Tensor:
    """
    Distance Intersection over Union Loss (Zhaohui Zheng et. al)
    https://arxiv.org/abs/1911.08287
    Args:
        boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
        reduction: 'none' | 'mean' | 'sum'
                 'none': No reduction will be applied to the output.
                 'mean': The output will be averaged.
                 'sum': The output will be summed.
        eps (float): small number to prevent division by zero
    """

    x1, y1, x2, y2 = boxes1.unbind(dim=-1)
    x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)

    # TODO: use torch._assert_async() when pytorch 1.8 support is dropped
    assert (x2 >= x1).all(), "bad box: x1 larger than x2"
    assert (y2 >= y1).all(), "bad box: y1 larger than y2"

    # Intersection keypoints
    xkis1 = torch.max(x1, x1g)
    ykis1 = torch.max(y1, y1g)
    xkis2 = torch.min(x2, x2g)
    ykis2 = torch.min(y2, y2g)

    intsct = torch.zeros_like(x1)
    mask = (ykis2 > ykis1) & (xkis2 > xkis1)
    intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
    union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
    iou = intsct / union

    # smallest enclosing box
    xc1 = torch.min(x1, x1g)
    yc1 = torch.min(y1, y1g)
    xc2 = torch.max(x2, x2g)
    yc2 = torch.max(y2, y2g)
    diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps

    # centers of boxes
    x_p = (x2 + x1) / 2
    y_p = (y2 + y1) / 2
    x_g = (x1g + x2g) / 2
    y_g = (y1g + y2g) / 2
    distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)

    # Eqn. (7)
    loss = 1 - iou + (distance / diag_len)
    if reduction == "mean":
        loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
    elif reduction == "sum":
        loss = loss.sum()

    return loss


def ciou_loss(
    boxes1: torch.Tensor,
    boxes2: torch.Tensor,
    reduction: str = "none",
    eps: float = 1e-7,
) -> torch.Tensor:
    """
    Complete Intersection over Union Loss (Zhaohui Zheng et. al)
    https://arxiv.org/abs/1911.08287
    Args:
        boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,).
        reduction: 'none' | 'mean' | 'sum'
                 'none': No reduction will be applied to the output.
                 'mean': The output will be averaged.
                 'sum': The output will be summed.
        eps (float): small number to prevent division by zero
    """

    x1, y1, x2, y2 = boxes1.unbind(dim=-1)
    x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1)

    # TODO: use torch._assert_async() when pytorch 1.8 support is dropped
    assert (x2 >= x1).all(), "bad box: x1 larger than x2"
    assert (y2 >= y1).all(), "bad box: y1 larger than y2"

    # Intersection keypoints
    xkis1 = torch.max(x1, x1g)
    ykis1 = torch.max(y1, y1g)
    xkis2 = torch.min(x2, x2g)
    ykis2 = torch.min(y2, y2g)

    intsct = torch.zeros_like(x1)
    mask = (ykis2 > ykis1) & (xkis2 > xkis1)
    intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask])
    union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps
    iou = intsct / union

    # smallest enclosing box
    xc1 = torch.min(x1, x1g)
    yc1 = torch.min(y1, y1g)
    xc2 = torch.max(x2, x2g)
    yc2 = torch.max(y2, y2g)
    diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps

    # centers of boxes
    x_p = (x2 + x1) / 2
    y_p = (y2 + y1) / 2
    x_g = (x1g + x2g) / 2
    y_g = (y1g + y2g) / 2
    distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2)

    # width and height of boxes
    w_pred = x2 - x1
    h_pred = y2 - y1
    w_gt = x2g - x1g
    h_gt = y2g - y1g
    v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
    with torch.no_grad():
        alpha = v / (1 - iou + v + eps)

    # Eqn. (10)
    loss = 1 - iou + (distance / diag_len) + alpha * v
    if reduction == "mean":
        loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum()
    elif reduction == "sum":
        loss = loss.sum()

    return loss
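For reference, a minimal usage sketch of the two deleted loss helpers. This assumes an upstream detectron2 install still exposes them at detectron2.layers.losses; the box values below are made up for illustration only.

import torch
from detectron2.layers.losses import diou_loss, ciou_loss  # assumed import path

# One predicted box and one ground-truth box in XYXY format (hypothetical values).
pred_boxes = torch.tensor([[10.0, 10.0, 50.0, 60.0]])
gt_boxes = torch.tensor([[12.0, 8.0, 48.0, 62.0]])

# With reduction="none" both functions return a per-box loss tensor;
# with "mean" or "sum" they return a scalar.
print(diou_loss(pred_boxes, gt_boxes, reduction="mean"))
print(ciou_loss(pred_boxes, gt_boxes, reduction="mean"))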
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py
DELETED
@@ -1,175 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import numpy as np
|
3 |
-
import unittest
|
4 |
-
import torch
|
5 |
-
|
6 |
-
from detectron2.layers import DeformConv, ModulatedDeformConv
|
7 |
-
from detectron2.utils.env import TORCH_VERSION
|
8 |
-
|
9 |
-
|
10 |
-
@unittest.skipIf(
|
11 |
-
TORCH_VERSION == (1, 8) and torch.cuda.is_available(),
|
12 |
-
"This test fails under cuda11 + torch1.8.",
|
13 |
-
)
|
14 |
-
class DeformableTest(unittest.TestCase):
|
15 |
-
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
|
16 |
-
def test_forward_output(self):
|
17 |
-
device = torch.device("cuda")
|
18 |
-
N, C, H, W = shape = 1, 1, 5, 5
|
19 |
-
kernel_size = 3
|
20 |
-
padding = 1
|
21 |
-
|
22 |
-
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device)
|
23 |
-
"""
|
24 |
-
0 1 2 3 4
|
25 |
-
5 6 7 8 9
|
26 |
-
10 11 12 13 14
|
27 |
-
15 16 17 18 19
|
28 |
-
20 21 22 23 24
|
29 |
-
"""
|
30 |
-
offset_channels = kernel_size * kernel_size * 2
|
31 |
-
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device)
|
32 |
-
|
33 |
-
# Test DCN v1
|
34 |
-
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
|
35 |
-
deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight))
|
36 |
-
output = deform(inputs, offset)
|
37 |
-
output = output.detach().cpu().numpy()
|
38 |
-
deform_results = np.array(
|
39 |
-
[
|
40 |
-
[30, 41.25, 48.75, 45, 28.75],
|
41 |
-
[62.25, 81, 90, 80.25, 50.25],
|
42 |
-
[99.75, 126, 135, 117.75, 72.75],
|
43 |
-
[105, 131.25, 138.75, 120, 73.75],
|
44 |
-
[71.75, 89.25, 93.75, 80.75, 49.5],
|
45 |
-
]
|
46 |
-
)
|
47 |
-
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten()))
|
48 |
-
|
49 |
-
# Test DCN v2
|
50 |
-
mask_channels = kernel_size * kernel_size
|
51 |
-
mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device)
|
52 |
-
modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to(
|
53 |
-
device
|
54 |
-
)
|
55 |
-
modulate_deform.weight = deform.weight
|
56 |
-
output = modulate_deform(inputs, offset, mask)
|
57 |
-
output = output.detach().cpu().numpy()
|
58 |
-
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5))
|
59 |
-
|
60 |
-
def test_forward_output_on_cpu(self):
|
61 |
-
device = torch.device("cpu")
|
62 |
-
N, C, H, W = shape = 1, 1, 5, 5
|
63 |
-
kernel_size = 3
|
64 |
-
padding = 1
|
65 |
-
|
66 |
-
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device)
|
67 |
-
|
68 |
-
offset_channels = kernel_size * kernel_size * 2
|
69 |
-
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device)
|
70 |
-
|
71 |
-
# Test DCN v1 on cpu
|
72 |
-
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
|
73 |
-
deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight))
|
74 |
-
output = deform(inputs, offset)
|
75 |
-
output = output.detach().cpu().numpy()
|
76 |
-
deform_results = np.array(
|
77 |
-
[
|
78 |
-
[30, 41.25, 48.75, 45, 28.75],
|
79 |
-
[62.25, 81, 90, 80.25, 50.25],
|
80 |
-
[99.75, 126, 135, 117.75, 72.75],
|
81 |
-
[105, 131.25, 138.75, 120, 73.75],
|
82 |
-
[71.75, 89.25, 93.75, 80.75, 49.5],
|
83 |
-
]
|
84 |
-
)
|
85 |
-
self.assertTrue(np.allclose(output.flatten(), deform_results.flatten()))
|
86 |
-
|
87 |
-
@unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu access")
|
88 |
-
def test_forward_output_on_cpu_equals_output_on_gpu(self):
|
89 |
-
N, C, H, W = shape = 2, 4, 10, 10
|
90 |
-
kernel_size = 3
|
91 |
-
padding = 1
|
92 |
-
|
93 |
-
for groups in [1, 2]:
|
94 |
-
inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape)
|
95 |
-
offset_channels = kernel_size * kernel_size * 2
|
96 |
-
offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32)
|
97 |
-
|
98 |
-
deform_gpu = DeformConv(
|
99 |
-
C, C, kernel_size=kernel_size, padding=padding, groups=groups
|
100 |
-
).to("cuda")
|
101 |
-
deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight))
|
102 |
-
output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy()
|
103 |
-
|
104 |
-
deform_cpu = DeformConv(
|
105 |
-
C, C, kernel_size=kernel_size, padding=padding, groups=groups
|
106 |
-
).to("cpu")
|
107 |
-
deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight))
|
108 |
-
output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy()
|
109 |
-
|
110 |
-
self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten()))
|
111 |
-
|
112 |
-
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
|
113 |
-
def test_small_input(self):
|
114 |
-
device = torch.device("cuda")
|
115 |
-
for kernel_size in [3, 5]:
|
116 |
-
padding = kernel_size // 2
|
117 |
-
N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1)
|
118 |
-
|
119 |
-
inputs = torch.rand(shape).to(device) # input size is smaller than kernel size
|
120 |
-
|
121 |
-
offset_channels = kernel_size * kernel_size * 2
|
122 |
-
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
|
123 |
-
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
|
124 |
-
output = deform(inputs, offset)
|
125 |
-
self.assertTrue(output.shape == inputs.shape)
|
126 |
-
|
127 |
-
mask_channels = kernel_size * kernel_size
|
128 |
-
mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device)
|
129 |
-
modulate_deform = ModulatedDeformConv(
|
130 |
-
C, C, kernel_size, padding=padding, bias=False
|
131 |
-
).to(device)
|
132 |
-
output = modulate_deform(inputs, offset, mask)
|
133 |
-
self.assertTrue(output.shape == inputs.shape)
|
134 |
-
|
135 |
-
@unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu")
|
136 |
-
def test_raise_exception(self):
|
137 |
-
device = torch.device("cuda")
|
138 |
-
N, C, H, W = shape = 1, 1, 3, 3
|
139 |
-
kernel_size = 3
|
140 |
-
padding = 1
|
141 |
-
|
142 |
-
inputs = torch.rand(shape, dtype=torch.float32).to(device)
|
143 |
-
offset_channels = kernel_size * kernel_size # This is wrong channels for offset
|
144 |
-
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
|
145 |
-
deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device)
|
146 |
-
self.assertRaises(RuntimeError, deform, inputs, offset)
|
147 |
-
|
148 |
-
offset_channels = kernel_size * kernel_size * 2
|
149 |
-
offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device)
|
150 |
-
mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask
|
151 |
-
mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device)
|
152 |
-
modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to(
|
153 |
-
device
|
154 |
-
)
|
155 |
-
self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask)
|
156 |
-
|
157 |
-
def test_repr(self):
|
158 |
-
module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2)
|
159 |
-
correct_string = (
|
160 |
-
"DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), "
|
161 |
-
"stride=(1, 1), padding=(1, 1), dilation=(1, 1), "
|
162 |
-
"groups=1, deformable_groups=2, bias=False)"
|
163 |
-
)
|
164 |
-
self.assertEqual(repr(module), correct_string)
|
165 |
-
|
166 |
-
module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2)
|
167 |
-
correct_string = (
|
168 |
-
"ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), "
|
169 |
-
"stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)"
|
170 |
-
)
|
171 |
-
self.assertEqual(repr(module), correct_string)
|
172 |
-
|
173 |
-
|
174 |
-
if __name__ == "__main__":
|
175 |
-
unittest.main()
|
spaces/Bart92/RVC_HF/train/utils.py
DELETED
@@ -1,500 +0,0 @@
|
|
1 |
-
import os, traceback
|
2 |
-
import glob
|
3 |
-
import sys
|
4 |
-
import argparse
|
5 |
-
import logging
|
6 |
-
import json
|
7 |
-
import subprocess
|
8 |
-
import numpy as np
|
9 |
-
from scipy.io.wavfile import read
|
10 |
-
import torch
|
11 |
-
|
12 |
-
MATPLOTLIB_FLAG = False
|
13 |
-
|
14 |
-
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
|
15 |
-
logger = logging
|
16 |
-
|
17 |
-
|
18 |
-
def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
|
19 |
-
assert os.path.isfile(checkpoint_path)
|
20 |
-
checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
|
21 |
-
|
22 |
-
##################
|
23 |
-
def go(model, bkey):
|
24 |
-
saved_state_dict = checkpoint_dict[bkey]
|
25 |
-
if hasattr(model, "module"):
|
26 |
-
state_dict = model.module.state_dict()
|
27 |
-
else:
|
28 |
-
state_dict = model.state_dict()
|
29 |
-
new_state_dict = {}
|
30 |
-
for k, v in state_dict.items(): # 模型需要的shape
|
31 |
-
try:
|
32 |
-
new_state_dict[k] = saved_state_dict[k]
|
33 |
-
if saved_state_dict[k].shape != state_dict[k].shape:
|
34 |
-
print(
|
35 |
-
"shape-%s-mismatch|need-%s|get-%s"
|
36 |
-
% (k, state_dict[k].shape, saved_state_dict[k].shape)
|
37 |
-
) #
|
38 |
-
raise KeyError
|
39 |
-
except:
|
40 |
-
# logger.info(traceback.format_exc())
|
41 |
-
logger.info("%s is not in the checkpoint" % k) # pretrain缺失的
|
42 |
-
new_state_dict[k] = v # 模型自带的随机值
|
43 |
-
if hasattr(model, "module"):
|
44 |
-
model.module.load_state_dict(new_state_dict, strict=False)
|
45 |
-
else:
|
46 |
-
model.load_state_dict(new_state_dict, strict=False)
|
47 |
-
|
48 |
-
go(combd, "combd")
|
49 |
-
go(sbd, "sbd")
|
50 |
-
#############
|
51 |
-
logger.info("Loaded model weights")
|
52 |
-
|
53 |
-
iteration = checkpoint_dict["iteration"]
|
54 |
-
learning_rate = checkpoint_dict["learning_rate"]
|
55 |
-
if (
|
56 |
-
optimizer is not None and load_opt == 1
|
57 |
-
): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch
|
58 |
-
# try:
|
59 |
-
optimizer.load_state_dict(checkpoint_dict["optimizer"])
|
60 |
-
# except:
|
61 |
-
# traceback.print_exc()
|
62 |
-
logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
|
63 |
-
return model, optimizer, learning_rate, iteration
|
64 |
-
|
65 |
-
|
66 |
-
# def load_checkpoint(checkpoint_path, model, optimizer=None):
|
67 |
-
# assert os.path.isfile(checkpoint_path)
|
68 |
-
# checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
|
69 |
-
# iteration = checkpoint_dict['iteration']
|
70 |
-
# learning_rate = checkpoint_dict['learning_rate']
|
71 |
-
# if optimizer is not None:
|
72 |
-
# optimizer.load_state_dict(checkpoint_dict['optimizer'])
|
73 |
-
# # print(1111)
|
74 |
-
# saved_state_dict = checkpoint_dict['model']
|
75 |
-
# # print(1111)
|
76 |
-
#
|
77 |
-
# if hasattr(model, 'module'):
|
78 |
-
# state_dict = model.module.state_dict()
|
79 |
-
# else:
|
80 |
-
# state_dict = model.state_dict()
|
81 |
-
# new_state_dict= {}
|
82 |
-
# for k, v in state_dict.items():
|
83 |
-
# try:
|
84 |
-
# new_state_dict[k] = saved_state_dict[k]
|
85 |
-
# except:
|
86 |
-
# logger.info("%s is not in the checkpoint" % k)
|
87 |
-
# new_state_dict[k] = v
|
88 |
-
# if hasattr(model, 'module'):
|
89 |
-
# model.module.load_state_dict(new_state_dict)
|
90 |
-
# else:
|
91 |
-
# model.load_state_dict(new_state_dict)
|
92 |
-
# logger.info("Loaded checkpoint '{}' (epoch {})" .format(
|
93 |
-
# checkpoint_path, iteration))
|
94 |
-
# return model, optimizer, learning_rate, iteration
|
95 |
-
def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
|
96 |
-
assert os.path.isfile(checkpoint_path)
|
97 |
-
checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
|
98 |
-
|
99 |
-
saved_state_dict = checkpoint_dict["model"]
|
100 |
-
if hasattr(model, "module"):
|
101 |
-
state_dict = model.module.state_dict()
|
102 |
-
else:
|
103 |
-
state_dict = model.state_dict()
|
104 |
-
new_state_dict = {}
|
105 |
-
for k, v in state_dict.items(): # 模型需要的shape
|
106 |
-
try:
|
107 |
-
new_state_dict[k] = saved_state_dict[k]
|
108 |
-
if saved_state_dict[k].shape != state_dict[k].shape:
|
109 |
-
print(
|
110 |
-
"shape-%s-mismatch|need-%s|get-%s"
|
111 |
-
% (k, state_dict[k].shape, saved_state_dict[k].shape)
|
112 |
-
) #
|
113 |
-
raise KeyError
|
114 |
-
except:
|
115 |
-
# logger.info(traceback.format_exc())
|
116 |
-
logger.info("%s is not in the checkpoint" % k) # pretrain缺失的
|
117 |
-
new_state_dict[k] = v # 模型自带的随机值
|
118 |
-
if hasattr(model, "module"):
|
119 |
-
model.module.load_state_dict(new_state_dict, strict=False)
|
120 |
-
else:
|
121 |
-
model.load_state_dict(new_state_dict, strict=False)
|
122 |
-
logger.info("Loaded model weights")
|
123 |
-
|
124 |
-
iteration = checkpoint_dict["iteration"]
|
125 |
-
learning_rate = checkpoint_dict["learning_rate"]
|
126 |
-
if (
|
127 |
-
optimizer is not None and load_opt == 1
|
128 |
-
): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch
|
129 |
-
# try:
|
130 |
-
optimizer.load_state_dict(checkpoint_dict["optimizer"])
|
131 |
-
# except:
|
132 |
-
# traceback.print_exc()
|
133 |
-
logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
|
134 |
-
return model, optimizer, learning_rate, iteration
|
135 |
-
|
136 |
-
|
137 |
-
def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
|
138 |
-
logger.info(
|
139 |
-
"Saving model and optimizer state at epoch {} to {}".format(
|
140 |
-
iteration, checkpoint_path
|
141 |
-
)
|
142 |
-
)
|
143 |
-
if hasattr(model, "module"):
|
144 |
-
state_dict = model.module.state_dict()
|
145 |
-
else:
|
146 |
-
state_dict = model.state_dict()
|
147 |
-
torch.save(
|
148 |
-
{
|
149 |
-
"model": state_dict,
|
150 |
-
"iteration": iteration,
|
151 |
-
"optimizer": optimizer.state_dict(),
|
152 |
-
"learning_rate": learning_rate,
|
153 |
-
},
|
154 |
-
checkpoint_path,
|
155 |
-
)
|
156 |
-
|
157 |
-
|
158 |
-
def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path):
|
159 |
-
logger.info(
|
160 |
-
"Saving model and optimizer state at epoch {} to {}".format(
|
161 |
-
iteration, checkpoint_path
|
162 |
-
)
|
163 |
-
)
|
164 |
-
if hasattr(combd, "module"):
|
165 |
-
state_dict_combd = combd.module.state_dict()
|
166 |
-
else:
|
167 |
-
state_dict_combd = combd.state_dict()
|
168 |
-
if hasattr(sbd, "module"):
|
169 |
-
state_dict_sbd = sbd.module.state_dict()
|
170 |
-
else:
|
171 |
-
state_dict_sbd = sbd.state_dict()
|
172 |
-
torch.save(
|
173 |
-
{
|
174 |
-
"combd": state_dict_combd,
|
175 |
-
"sbd": state_dict_sbd,
|
176 |
-
"iteration": iteration,
|
177 |
-
"optimizer": optimizer.state_dict(),
|
178 |
-
"learning_rate": learning_rate,
|
179 |
-
},
|
180 |
-
checkpoint_path,
|
181 |
-
)
|
182 |
-
|
183 |
-
|
184 |
-
def summarize(
|
185 |
-
writer,
|
186 |
-
global_step,
|
187 |
-
scalars={},
|
188 |
-
histograms={},
|
189 |
-
images={},
|
190 |
-
audios={},
|
191 |
-
audio_sampling_rate=22050,
|
192 |
-
):
|
193 |
-
for k, v in scalars.items():
|
194 |
-
writer.add_scalar(k, v, global_step)
|
195 |
-
for k, v in histograms.items():
|
196 |
-
writer.add_histogram(k, v, global_step)
|
197 |
-
for k, v in images.items():
|
198 |
-
writer.add_image(k, v, global_step, dataformats="HWC")
|
199 |
-
for k, v in audios.items():
|
200 |
-
writer.add_audio(k, v, global_step, audio_sampling_rate)
|
201 |
-
|
202 |
-
|
203 |
-
def latest_checkpoint_path(dir_path, regex="G_*.pth"):
|
204 |
-
f_list = glob.glob(os.path.join(dir_path, regex))
|
205 |
-
f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
|
206 |
-
x = f_list[-1]
|
207 |
-
print(x)
|
208 |
-
return x
|
209 |
-
|
210 |
-
|
211 |
-
def plot_spectrogram_to_numpy(spectrogram):
|
212 |
-
global MATPLOTLIB_FLAG
|
213 |
-
if not MATPLOTLIB_FLAG:
|
214 |
-
import matplotlib
|
215 |
-
|
216 |
-
matplotlib.use("Agg")
|
217 |
-
MATPLOTLIB_FLAG = True
|
218 |
-
mpl_logger = logging.getLogger("matplotlib")
|
219 |
-
mpl_logger.setLevel(logging.WARNING)
|
220 |
-
import matplotlib.pylab as plt
|
221 |
-
import numpy as np
|
222 |
-
|
223 |
-
fig, ax = plt.subplots(figsize=(10, 2))
|
224 |
-
im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
|
225 |
-
plt.colorbar(im, ax=ax)
|
226 |
-
plt.xlabel("Frames")
|
227 |
-
plt.ylabel("Channels")
|
228 |
-
plt.tight_layout()
|
229 |
-
|
230 |
-
fig.canvas.draw()
|
231 |
-
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
|
232 |
-
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
233 |
-
plt.close()
|
234 |
-
return data
|
235 |
-
|
236 |
-
|
237 |
-
def plot_alignment_to_numpy(alignment, info=None):
|
238 |
-
global MATPLOTLIB_FLAG
|
239 |
-
if not MATPLOTLIB_FLAG:
|
240 |
-
import matplotlib
|
241 |
-
|
242 |
-
matplotlib.use("Agg")
|
243 |
-
MATPLOTLIB_FLAG = True
|
244 |
-
mpl_logger = logging.getLogger("matplotlib")
|
245 |
-
mpl_logger.setLevel(logging.WARNING)
|
246 |
-
import matplotlib.pylab as plt
|
247 |
-
import numpy as np
|
248 |
-
|
249 |
-
fig, ax = plt.subplots(figsize=(6, 4))
|
250 |
-
im = ax.imshow(
|
251 |
-
alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
|
252 |
-
)
|
253 |
-
fig.colorbar(im, ax=ax)
|
254 |
-
xlabel = "Decoder timestep"
|
255 |
-
if info is not None:
|
256 |
-
xlabel += "\n\n" + info
|
257 |
-
plt.xlabel(xlabel)
|
258 |
-
plt.ylabel("Encoder timestep")
|
259 |
-
plt.tight_layout()
|
260 |
-
|
261 |
-
fig.canvas.draw()
|
262 |
-
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
|
263 |
-
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
|
264 |
-
plt.close()
|
265 |
-
return data
|
266 |
-
|
267 |
-
|
268 |
-
def load_wav_to_torch(full_path):
|
269 |
-
sampling_rate, data = read(full_path)
|
270 |
-
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
|
271 |
-
|
272 |
-
|
273 |
-
def load_filepaths_and_text(filename, split="|"):
|
274 |
-
with open(filename, encoding='utf-8') as f:
|
275 |
-
filepaths_and_text = [line.strip().split(split) for line in f]
|
276 |
-
filepaths_and_text = [item for item in filepaths_and_text if len(item) == 5] # ensure there are 5 items.
|
277 |
-
return filepaths_and_text
|
278 |
-
|
279 |
-
|
280 |
-
def get_hparams(init=True):
|
281 |
-
"""
|
282 |
-
todo:
|
283 |
-
结尾七人组:
|
284 |
-
保存频率、总epoch done
|
285 |
-
bs done
|
286 |
-
pretrainG、pretrainD done
|
287 |
-
卡号:os.en["CUDA_VISIBLE_DEVICES"] done
|
288 |
-
if_latest done
|
289 |
-
模型:if_f0 done
|
290 |
-
采样率:自动选择config done
|
291 |
-
是否缓存数据集进GPU:if_cache_data_in_gpu done
|
292 |
-
|
293 |
-
-m:
|
294 |
-
自动决定training_files路径,改掉train_nsf_load_pretrain.py里的hps.data.training_files done
|
295 |
-
-c不要了
|
296 |
-
"""
|
297 |
-
parser = argparse.ArgumentParser()
|
298 |
-
# parser.add_argument('-c', '--config', type=str, default="configs/40k.json",help='JSON file for configuration')
|
299 |
-
parser.add_argument(
|
300 |
-
"-se",
|
301 |
-
"--save_every_epoch",
|
302 |
-
type=int,
|
303 |
-
required=True,
|
304 |
-
help="checkpoint save frequency (epoch)",
|
305 |
-
)
|
306 |
-
parser.add_argument(
|
307 |
-
"-te", "--total_epoch", type=int, required=True, help="total_epoch"
|
308 |
-
)
|
309 |
-
parser.add_argument(
|
310 |
-
"-pg", "--pretrainG", type=str, default="", help="Pretrained Discriminator path"
|
311 |
-
)
|
312 |
-
parser.add_argument(
|
313 |
-
"-pd", "--pretrainD", type=str, default="", help="Pretrained Generator path"
|
314 |
-
)
|
315 |
-
parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -")
|
316 |
-
parser.add_argument(
|
317 |
-
"-bs", "--batch_size", type=int, required=True, help="batch size"
|
318 |
-
)
|
319 |
-
parser.add_argument(
|
320 |
-
"-e", "--experiment_dir", type=str, required=True, help="experiment dir"
|
321 |
-
) # -m
|
322 |
-
parser.add_argument(
|
323 |
-
"-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k"
|
324 |
-
)
|
325 |
-
parser.add_argument(
|
326 |
-
"-sw",
|
327 |
-
"--save_every_weights",
|
328 |
-
type=str,
|
329 |
-
default="0",
|
330 |
-
help="save the extracted model in weights directory when saving checkpoints",
|
331 |
-
)
|
332 |
-
parser.add_argument(
|
333 |
-
"-v", "--version", type=str, required=True, help="model version"
|
334 |
-
)
|
335 |
-
parser.add_argument(
|
336 |
-
"-f0",
|
337 |
-
"--if_f0",
|
338 |
-
type=int,
|
339 |
-
required=True,
|
340 |
-
help="use f0 as one of the inputs of the model, 1 or 0",
|
341 |
-
)
|
342 |
-
parser.add_argument(
|
343 |
-
"-l",
|
344 |
-
"--if_latest",
|
345 |
-
type=int,
|
346 |
-
required=True,
|
347 |
-
help="if only save the latest G/D pth file, 1 or 0",
|
348 |
-
)
|
349 |
-
parser.add_argument(
|
350 |
-
"-c",
|
351 |
-
"--if_cache_data_in_gpu",
|
352 |
-
type=int,
|
353 |
-
required=True,
|
354 |
-
help="if caching the dataset in GPU memory, 1 or 0",
|
355 |
-
)
|
356 |
-
parser.add_argument(
|
357 |
-
"-li", "--log_interval", type=int, required=True, help="log interval"
|
358 |
-
)
|
359 |
-
|
360 |
-
args = parser.parse_args()
|
361 |
-
name = args.experiment_dir
|
362 |
-
experiment_dir = os.path.join("./logs", args.experiment_dir)
|
363 |
-
|
364 |
-
if not os.path.exists(experiment_dir):
|
365 |
-
os.makedirs(experiment_dir)
|
366 |
-
|
367 |
-
if args.version == "v1" or args.sample_rate == "40k":
|
368 |
-
config_path = "configs/%s.json" % args.sample_rate
|
369 |
-
else:
|
370 |
-
config_path = "configs/%s_v2.json" % args.sample_rate
|
371 |
-
config_save_path = os.path.join(experiment_dir, "config.json")
|
372 |
-
if init:
|
373 |
-
with open(config_path, "r") as f:
|
374 |
-
data = f.read()
|
375 |
-
with open(config_save_path, "w") as f:
|
376 |
-
f.write(data)
|
377 |
-
else:
|
378 |
-
with open(config_save_path, "r") as f:
|
379 |
-
data = f.read()
|
380 |
-
config = json.loads(data)
|
381 |
-
|
382 |
-
hparams = HParams(**config)
|
383 |
-
hparams.model_dir = hparams.experiment_dir = experiment_dir
|
384 |
-
hparams.save_every_epoch = args.save_every_epoch
|
385 |
-
hparams.name = name
|
386 |
-
hparams.total_epoch = args.total_epoch
|
387 |
-
hparams.pretrainG = args.pretrainG
|
388 |
-
hparams.pretrainD = args.pretrainD
|
389 |
-
hparams.version = args.version
|
390 |
-
hparams.gpus = args.gpus
|
391 |
-
hparams.train.batch_size = args.batch_size
|
392 |
-
hparams.sample_rate = args.sample_rate
|
393 |
-
hparams.if_f0 = args.if_f0
|
394 |
-
hparams.if_latest = args.if_latest
|
395 |
-
hparams.save_every_weights = args.save_every_weights
|
396 |
-
hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu
|
397 |
-
hparams.data.training_files = "%s/filelist.txt" % experiment_dir
|
398 |
-
|
399 |
-
hparams.train.log_interval = args.log_interval
|
400 |
-
|
401 |
-
# Update log_interval in the 'train' section of the config dictionary
|
402 |
-
config["train"]["log_interval"] = args.log_interval
|
403 |
-
|
404 |
-
# Save the updated config back to the config_save_path
|
405 |
-
with open(config_save_path, "w") as f:
|
406 |
-
json.dump(config, f, indent=4)
|
407 |
-
|
408 |
-
return hparams
|
409 |
-
|
410 |
-
|
411 |
-
def get_hparams_from_dir(model_dir):
|
412 |
-
config_save_path = os.path.join(model_dir, "config.json")
|
413 |
-
with open(config_save_path, "r") as f:
|
414 |
-
data = f.read()
|
415 |
-
config = json.loads(data)
|
416 |
-
|
417 |
-
hparams = HParams(**config)
|
418 |
-
hparams.model_dir = model_dir
|
419 |
-
return hparams
|
420 |
-
|
421 |
-
|
422 |
-
def get_hparams_from_file(config_path):
|
423 |
-
with open(config_path, "r") as f:
|
424 |
-
data = f.read()
|
425 |
-
config = json.loads(data)
|
426 |
-
|
427 |
-
hparams = HParams(**config)
|
428 |
-
return hparams
|
429 |
-
|
430 |
-
|
431 |
-
def check_git_hash(model_dir):
|
432 |
-
source_dir = os.path.dirname(os.path.realpath(__file__))
|
433 |
-
if not os.path.exists(os.path.join(source_dir, ".git")):
|
434 |
-
logger.warn(
|
435 |
-
"{} is not a git repository, therefore hash value comparison will be ignored.".format(
|
436 |
-
source_dir
|
437 |
-
)
|
438 |
-
)
|
439 |
-
return
|
440 |
-
|
441 |
-
cur_hash = subprocess.getoutput("git rev-parse HEAD")
|
442 |
-
|
443 |
-
path = os.path.join(model_dir, "githash")
|
444 |
-
if os.path.exists(path):
|
445 |
-
saved_hash = open(path).read()
|
446 |
-
if saved_hash != cur_hash:
|
447 |
-
logger.warn(
|
448 |
-
"git hash values are different. {}(saved) != {}(current)".format(
|
449 |
-
saved_hash[:8], cur_hash[:8]
|
450 |
-
)
|
451 |
-
)
|
452 |
-
else:
|
453 |
-
open(path, "w").write(cur_hash)
|
454 |
-
|
455 |
-
|
456 |
-
def get_logger(model_dir, filename="train.log"):
|
457 |
-
global logger
|
458 |
-
logger = logging.getLogger(os.path.basename(model_dir))
|
459 |
-
logger.setLevel(logging.DEBUG)
|
460 |
-
|
461 |
-
formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
|
462 |
-
if not os.path.exists(model_dir):
|
463 |
-
os.makedirs(model_dir)
|
464 |
-
h = logging.FileHandler(os.path.join(model_dir, filename))
|
465 |
-
h.setLevel(logging.DEBUG)
|
466 |
-
h.setFormatter(formatter)
|
467 |
-
logger.addHandler(h)
|
468 |
-
return logger
|
469 |
-
|
470 |
-
|
471 |
-
class HParams:
|
472 |
-
def __init__(self, **kwargs):
|
473 |
-
for k, v in kwargs.items():
|
474 |
-
if type(v) == dict:
|
475 |
-
v = HParams(**v)
|
476 |
-
self[k] = v
|
477 |
-
|
478 |
-
def keys(self):
|
479 |
-
return self.__dict__.keys()
|
480 |
-
|
481 |
-
def items(self):
|
482 |
-
return self.__dict__.items()
|
483 |
-
|
484 |
-
def values(self):
|
485 |
-
return self.__dict__.values()
|
486 |
-
|
487 |
-
def __len__(self):
|
488 |
-
return len(self.__dict__)
|
489 |
-
|
490 |
-
def __getitem__(self, key):
|
491 |
-
return getattr(self, key)
|
492 |
-
|
493 |
-
def __setitem__(self, key, value):
|
494 |
-
return setattr(self, key, value)
|
495 |
-
|
496 |
-
def __contains__(self, key):
|
497 |
-
return key in self.__dict__
|
498 |
-
|
499 |
-
def __repr__(self):
|
500 |
-
return self.__dict__.__repr__()
|
spaces/Benson/text-generation/Examples/Descargar Dummy Mp4 Video.md
DELETED
@@ -1,79 +0,0 @@
<br />
<h1>How to Download a Dummy MP4 Video for Testing Purposes</h1>
<p>Do you need a dummy mp4 video to test your video-related features and functions? If so, you are not alone. Many developers, designers, testers, and users need sample videos to verify the performance, quality, compatibility, and functionality of their video applications, websites, software, and devices. In this article, we will show you how to download a dummy mp4 video from different websites and how to use it for various testing purposes.</p>
<h2>What is a dummy mp4 video and why use it?</h2>
<h3>A dummy mp4 video is a sample video file with different resolutions and sizes</h3>
<p>A dummy mp4 video is a sample video file that comes in different resolutions and sizes. For example, you can find dummy mp4 videos with resolutions ranging from 144p to 1080p and sizes ranging from a few KBs to several MBs. A dummy mp4 video usually has generic content that is not tied to any specific topic or theme. It can be a simple animation, a color pattern, a text message, or a random clip.</p>
<h2>download dummy mp4 video</h2><br /><p><b><b>Download</b> ✑ <a href="https://bltlly.com/2v6KpX">https://bltlly.com/2v6KpX</a></b></p><br /><br />
<h3>It is useful for testing video playback, design, and development on various devices and platforms</h3>
<p>A dummy mp4 video is useful for testing many aspects of video playback, design, and development on different devices and platforms. For example, you can use a dummy mp4 video to test how your video application or website handles different resolutions, aspect ratios, buffering speeds, and compatibility issues. You can also use a dummy mp4 video to test how your video editing, encoding, transcoding, compression, and conversion tools and software behave with different formats and qualities. A dummy mp4 video can help you identify and fix any problems or bugs in your video-related features and functions.</p>
<h2>How to find a dummy mp4 video online?</h2>
<h3>There are many websites that offer free sample videos for download</h3>

<h3>Some examples are Sample-Videos.com, Learning Container[ 2 ] and Gist</h3>
<p>Some examples of websites that offer free sample videos for download are Sample-Videos.com, Learning Container, and Gist. These websites host a variety of sample videos in different formats, resolutions, sizes, and durations. You can download them directly from their websites or use their links to download them from other sources. The table below lists some of the sample videos available on these websites:</p>
<table>
<tr>
<th>Website</th>
<th>Format</th>
<th>Resolution</th>
<th>Size</th>
<th>Duration</th>
</tr>
<tr>
<td>Sample-Videos.com</td>
<td>MP4</td>
<td>144p, 240p, 360p, 480p, 720p, 1080p</td>
<td>0.1 MB, 0.3 MB, 1.2 MB, 2.4 MB, 6.1 MB, 13 MB</td>
<td>30 seconds</td>
</tr>
<tr>
<td>Learning Container</td>
<td>MP4</td>
<td>1280x720, 1920x1080, 3840x2160</td>
<td>5.8 MB, 12.9 MB, 52.4 MB</td>
<td>5 seconds</td>
</tr>
<tr>
<td>Gist</td>
<td>MP4</td>
<td>640x360, 1280x720, 1920x1080</td>
<td>1.6 MB, 5.7 MB, 12.8 MB</td>
<td>10 seconds</td>
</tr>
</table>
<h2>How to download a dummy mp4 video from different websites?</h2>
<h3>The steps vary by website, but generally involve copying the video URL and pasting it into a download tool</h3>
<p>The steps to download a dummy mp4 video from different websites may vary depending on the website, but they generally involve copying the video URL and pasting it into a download tool. A download tool is a piece of software or a website that lets you download videos from various sources by entering their URLs. Some examples of download tools are SaveFrom.net, Y2mate.com, and OnlineVideoConverter.com. Here are the common steps to download a dummy mp4 video using a download tool:</p>
<ol>
<li>Go to the website that offers the sample video you want and copy its URL.</li>
<li>Go to the download tool's website and paste the URL into the input box.</li>

<li>The download tool will process the URL and generate a download link for the video.</li>
<li>Click the download link and save the video file to your preferred location.</li>
<li>Open the video file with a media player and check that it works as expected.</li>
</ol>
<h3>Some websites let you choose the format and quality of the video, while others provide predefined options</h3>
<p>Some websites that offer sample videos let you choose the format and quality of the video before downloading it. For example, Sample-Videos.com lets you select from different resolutions and sizes of mp4 videos. Learning Container lets you select from different resolutions of mp4 videos. Gist lets you select from different resolutions and formats of videos.</p>
<p>Other websites provide predefined options for downloading sample videos. For example, SaveFrom.net provides a list of available formats and qualities for each video URL you enter. Y2mate.com provides a list of available formats and sizes for each video URL you enter. OnlineVideoConverter.com provides a list of available formats for each video URL you enter.</p>
<h2>How to use a dummy mp4 video for testing purposes?</h2>
<h3>A dummy mp4 video can be used to test video playback, resolution, aspect ratio, buffering, and compatibility on various devices and platforms</h3>
<p>A dummy mp4 video can be used to test many aspects of video playback on different devices and platforms. For example, you can use a dummy mp4 video to test how your video application or website handles different resolutions, aspect ratios, buffering speeds, and compatibility issues across browsers, operating systems, and devices. You can also use a dummy mp4 video to test how your video player or device renders different qualities and formats of videos.</p>
<h3>A dummy mp4 video can also be used to test video editing, encoding, transcoding, compression, and conversion tools and software</h3>

<h2>Conclusion</h2>
<p>A dummy mp4 video is a useful resource for testing video-related features and functions. It can easily be downloaded from various websites in a few simple steps and used for purposes such as video design, development, editing, and optimization. By using a dummy mp4 video, you can make sure your video applications, websites, software, and devices work smoothly and efficiently.</p>
<h2>Frequently Asked Questions</h2>
<h3>What are some common video formats besides mp4?</h3>
<p>Some common video formats besides mp4 are AVI, WMV, MOV, MKV, FLV, WEBM, and MPEG. Each format has its own advantages and disadvantages in terms of quality, compatibility, and file size.</p>
<p></p>
<h3>What are some popular video streaming websites that support the mp4 format?</h3>
<p>Some popular video streaming websites that support the mp4 format are YouTube, Vimeo, Dailymotion, Facebook, and Instagram. These websites allow users to upload and watch videos in mp4 format.</p>
<h3>What are some advantages and disadvantages of the mp4 format?</h3>
<p>Some advantages of the mp4 format are high quality, small file size, wide compatibility, and support for multiple audio and video streams. Some disadvantages are that it may not support certain codecs or features, it can be corrupted or damaged fairly easily, and it may have licensing issues.</p>
<h3>How to convert other video formats to mp4?</h3>
<p>To convert other video formats to mp4, you can use online or offline video converter tools or software. Online video converter tools are websites that let you upload your video file and choose the output format and quality. Offline video converter software is a program you install on your computer and use to convert the video file. Some examples of online video converter tools are CloudConvert.com, Online-Convert.com, and Zamzar.com. Some examples of offline video converter software are HandBrake, VLC Media Player, and Freemake Video Converter.</p>

<p>To reduce the file size of mp4 videos, you can use online or offline video compressor tools or software. Online video compressor tools are websites that let you upload your video file and choose the output size and quality. Offline video compressor software is a program you install on your computer and use to compress the video file. Some examples of online video compressor tools are Compressify.io, YouCompress.com, and Clideo.com. Some examples of offline video compressor software are WinX Video Converter, Free Video Compressor, and Any Video Converter.</p> 64aa2da5cf<br />
<br />
<br />
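As a supplement to the manual steps described in the article above, here is a minimal Python sketch for fetching a sample MP4 once you have its direct URL. The URL below is a placeholder rather than a real sample-file link, and the sketch assumes the requests package is installed.

import requests

# Placeholder: replace with a direct link to a sample MP4 from one of the sites above.
url = "https://example.com/sample_720p.mp4"

with requests.get(url, stream=True, timeout=30) as response:
    response.raise_for_status()
    # Stream the body to disk in chunks so large sample files do not load into memory.
    with open("dummy_sample.mp4", "wb") as out_file:
        for chunk in response.iter_content(chunk_size=8192):
            out_file.write(chunk)

print("Saved dummy_sample.mp4")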
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/cells.py
DELETED
@@ -1,154 +0,0 @@
import re
from functools import lru_cache
from typing import Callable, List

from ._cell_widths import CELL_WIDTHS

# Regex to match sequence of the most common character ranges
_is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0\u02ff\u0370-\u0482]*$").match


@lru_cache(4096)
def cached_cell_len(text: str) -> int:
    """Get the number of cells required to display text.

    This method always caches, which may use up a lot of memory. It is recommended to use
    `cell_len` over this method.

    Args:
        text (str): Text to display.

    Returns:
        int: Get the number of cells required to display text.
    """
    _get_size = get_character_cell_size
    total_size = sum(_get_size(character) for character in text)
    return total_size


def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int:
    """Get the number of cells required to display text.

    Args:
        text (str): Text to display.

    Returns:
        int: Get the number of cells required to display text.
    """
    if len(text) < 512:
        return _cell_len(text)
    _get_size = get_character_cell_size
    total_size = sum(_get_size(character) for character in text)
    return total_size


@lru_cache(maxsize=4096)
def get_character_cell_size(character: str) -> int:
    """Get the cell size of a character.

    Args:
        character (str): A single character.

    Returns:
        int: Number of cells (0, 1 or 2) occupied by that character.
    """
    return _get_codepoint_cell_size(ord(character))


@lru_cache(maxsize=4096)
def _get_codepoint_cell_size(codepoint: int) -> int:
    """Get the cell size of a character.

    Args:
        codepoint (int): Codepoint of a character.

    Returns:
        int: Number of cells (0, 1 or 2) occupied by that character.
    """

    _table = CELL_WIDTHS
    lower_bound = 0
    upper_bound = len(_table) - 1
    index = (lower_bound + upper_bound) // 2
    while True:
        start, end, width = _table[index]
        if codepoint < start:
            upper_bound = index - 1
        elif codepoint > end:
            lower_bound = index + 1
        else:
            return 0 if width == -1 else width
        if upper_bound < lower_bound:
            break
        index = (lower_bound + upper_bound) // 2
    return 1


def set_cell_size(text: str, total: int) -> str:
    """Set the length of a string to fit within given number of cells."""

    if _is_single_cell_widths(text):
        size = len(text)
        if size < total:
            return text + " " * (total - size)
        return text[:total]

    if total <= 0:
        return ""
    cell_size = cell_len(text)
    if cell_size == total:
        return text
    if cell_size < total:
        return text + " " * (total - cell_size)

    start = 0
    end = len(text)

    # Binary search until we find the right size
    while True:
        pos = (start + end) // 2
        before = text[: pos + 1]
        before_len = cell_len(before)
        if before_len == total + 1 and cell_len(before[-1]) == 2:
            return before[:-1] + " "
        if before_len == total:
            return before
        if before_len > total:
            end = pos
        else:
            start = pos


# TODO: This is inefficient
# TODO: This might not work with CWJ type characters
def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
    """Break text in to equal (cell) length strings, returning the characters in reverse
    order"""
    _get_character_cell_size = get_character_cell_size
    characters = [
        (character, _get_character_cell_size(character)) for character in text
    ]
    total_size = position
    lines: List[List[str]] = [[]]
    append = lines[-1].append

    for character, size in reversed(characters):
        if total_size + size > max_size:
            lines.append([character])
            append = lines[-1].append
            total_size = size
        else:
            total_size += size
            append(character)

    return ["".join(line) for line in lines]


if __name__ == "__main__":  # pragma: no cover

    print(get_character_cell_size("😽"))
    for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8):
        print(line)
    for n in range(80, 1, -1):
        print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|")
        print("x" * n)
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_scripts.py
DELETED
@@ -1,173 +0,0 @@
|
|
1 |
-
"""distutils.command.build_scripts
|
2 |
-
|
3 |
-
Implements the Distutils 'build_scripts' command."""
|
4 |
-
|
5 |
-
import os
|
6 |
-
import re
|
7 |
-
from stat import ST_MODE
|
8 |
-
from distutils import sysconfig
|
9 |
-
from distutils.core import Command
|
10 |
-
from distutils.dep_util import newer
|
11 |
-
from distutils.util import convert_path
|
12 |
-
from distutils import log
|
13 |
-
import tokenize
|
14 |
-
|
15 |
-
shebang_pattern = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
|
16 |
-
"""
|
17 |
-
Pattern matching a Python interpreter indicated in first line of a script.
|
18 |
-
"""
|
19 |
-
|
20 |
-
# for Setuptools compatibility
|
21 |
-
first_line_re = shebang_pattern
|
22 |
-
|
23 |
-
|
24 |
-
class build_scripts(Command):
|
25 |
-
|
26 |
-
description = "\"build\" scripts (copy and fixup #! line)"
|
27 |
-
|
28 |
-
user_options = [
|
29 |
-
('build-dir=', 'd', "directory to \"build\" (copy) to"),
|
30 |
-
('force', 'f', "forcibly build everything (ignore file timestamps"),
|
31 |
-
('executable=', 'e', "specify final destination interpreter path"),
|
32 |
-
]
|
33 |
-
|
34 |
-
boolean_options = ['force']
|
35 |
-
|
36 |
-
def initialize_options(self):
|
37 |
-
self.build_dir = None
|
38 |
-
self.scripts = None
|
39 |
-
self.force = None
|
40 |
-
self.executable = None
|
41 |
-
|
42 |
-
def finalize_options(self):
|
43 |
-
self.set_undefined_options(
|
44 |
-
'build',
|
45 |
-
('build_scripts', 'build_dir'),
|
46 |
-
('force', 'force'),
|
47 |
-
('executable', 'executable'),
|
48 |
-
)
|
49 |
-
self.scripts = self.distribution.scripts
|
50 |
-
|
51 |
-
def get_source_files(self):
|
52 |
-
return self.scripts
|
53 |
-
|
54 |
-
def run(self):
|
55 |
-
if not self.scripts:
|
56 |
-
return
|
57 |
-
self.copy_scripts()
|
58 |
-
|
59 |
-
def copy_scripts(self):
|
60 |
-
"""
|
61 |
-
Copy each script listed in ``self.scripts``.
|
62 |
-
|
63 |
-
If a script is marked as a Python script (first line matches
|
64 |
-
'shebang_pattern', i.e. starts with ``#!`` and contains
|
65 |
-
"python"), then adjust in the copy the first line to refer to
|
66 |
-
the current Python interpreter.
|
67 |
-
"""
|
68 |
-
self.mkpath(self.build_dir)
|
69 |
-
outfiles = []
|
70 |
-
updated_files = []
|
71 |
-
for script in self.scripts:
|
72 |
-
-            self._copy_script(script, outfiles, updated_files)
-
-        self._change_modes(outfiles)
-
-        return outfiles, updated_files
-
-    def _copy_script(self, script, outfiles, updated_files):  # noqa: C901
-        shebang_match = None
-        script = convert_path(script)
-        outfile = os.path.join(self.build_dir, os.path.basename(script))
-        outfiles.append(outfile)
-
-        if not self.force and not newer(script, outfile):
-            log.debug("not copying %s (up-to-date)", script)
-            return
-
-        # Always open the file, but ignore failures in dry-run mode
-        # in order to attempt to copy directly.
-        try:
-            f = tokenize.open(script)
-        except OSError:
-            if not self.dry_run:
-                raise
-            f = None
-        else:
-            first_line = f.readline()
-            if not first_line:
-                self.warn("%s is an empty file (skipping)" % script)
-                return
-
-            shebang_match = shebang_pattern.match(first_line)
-
-        updated_files.append(outfile)
-        if shebang_match:
-            log.info("copying and adjusting %s -> %s", script, self.build_dir)
-            if not self.dry_run:
-                if not sysconfig.python_build:
-                    executable = self.executable
-                else:
-                    executable = os.path.join(
-                        sysconfig.get_config_var("BINDIR"),
-                        "python%s%s"
-                        % (
-                            sysconfig.get_config_var("VERSION"),
-                            sysconfig.get_config_var("EXE"),
-                        ),
-                    )
-                post_interp = shebang_match.group(1) or ''
-                shebang = "#!" + executable + post_interp + "\n"
-                self._validate_shebang(shebang, f.encoding)
-                with open(outfile, "w", encoding=f.encoding) as outf:
-                    outf.write(shebang)
-                    outf.writelines(f.readlines())
-                if f:
-                    f.close()
-        else:
-            if f:
-                f.close()
-            self.copy_file(script, outfile)
-
-    def _change_modes(self, outfiles):
-        if os.name != 'posix':
-            return
-
-        for file in outfiles:
-            self._change_mode(file)
-
-    def _change_mode(self, file):
-        if self.dry_run:
-            log.info("changing mode of %s", file)
-            return
-
-        oldmode = os.stat(file)[ST_MODE] & 0o7777
-        newmode = (oldmode | 0o555) & 0o7777
-        if newmode != oldmode:
-            log.info("changing mode of %s from %o to %o", file, oldmode, newmode)
-            os.chmod(file, newmode)
-
-    @staticmethod
-    def _validate_shebang(shebang, encoding):
-        # Python parser starts to read a script using UTF-8 until
-        # it gets a #coding:xxx cookie. The shebang has to be the
-        # first line of a file, the #coding:xxx cookie cannot be
-        # written before. So the shebang has to be encodable to
-        # UTF-8.
-        try:
-            shebang.encode('utf-8')
-        except UnicodeEncodeError:
-            raise ValueError(
-                "The shebang ({!r}) is not encodable " "to utf-8".format(shebang)
-            )
-
-        # If the script is encoded to a custom encoding (use a
-        # #coding:xxx cookie), the shebang has to be encodable to
-        # the script encoding too.
-        try:
-            shebang.encode(encoding)
-        except UnicodeEncodeError:
-            raise ValueError(
-                "The shebang ({!r}) is not encodable "
-                "to the script encoding ({})".format(shebang, encoding)
-            )
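The deleted `_copy_script` above rewrites a script's shebang so it points at the interpreter that will install the script, preserving any interpreter flags captured by `shebang_pattern` (defined earlier in the deleted file, outside this hunk). The sketch below illustrates that rewrite in isolation; the regex is an assumption about what `shebang_pattern` roughly looks like, not a copy of it.

```python
# Minimal sketch of the shebang rewrite, assuming shebang_pattern is roughly
# the regex below (the real pattern lives earlier in build_scripts.py).
import re

shebang_pattern = re.compile(r"^#!.*python[0-9.]*([ \t].*)?$")


def adjust_shebang(first_line: str, executable: str) -> str:
    """Return a shebang pointing at `executable`, keeping any flags."""
    match = shebang_pattern.match(first_line)
    if not match:
        return first_line  # not a Python shebang; leave untouched
    post_interp = match.group(1) or ""
    return "#!" + executable + post_interp + "\n"


print(adjust_shebang("#!/usr/bin/python3 -O\n", "/opt/py/bin/python3.11"))
# -> #!/opt/py/bin/python3.11 -O
```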
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_compat.py
DELETED
@@ -1,71 +0,0 @@
-import sys
-import platform
-
-
-__all__ = ['install', 'NullFinder', 'Protocol']
-
-
-try:
-    from typing import Protocol
-except ImportError:  # pragma: no cover
-    from ..typing_extensions import Protocol  # type: ignore
-
-
-def install(cls):
-    """
-    Class decorator for installation on sys.meta_path.
-
-    Adds the backport DistributionFinder to sys.meta_path and
-    attempts to disable the finder functionality of the stdlib
-    DistributionFinder.
-    """
-    sys.meta_path.append(cls())
-    disable_stdlib_finder()
-    return cls
-
-
-def disable_stdlib_finder():
-    """
-    Give the backport primacy for discovering path-based distributions
-    by monkey-patching the stdlib O_O.
-
-    See #91 for more background for rationale on this sketchy
-    behavior.
-    """
-
-    def matches(finder):
-        return getattr(
-            finder, '__module__', None
-        ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
-
-    for finder in filter(matches, sys.meta_path):  # pragma: nocover
-        del finder.find_distributions
-
-
-class NullFinder:
-    """
-    A "Finder" (aka "MetaClassFinder") that never finds any modules,
-    but may find distributions.
-    """
-
-    @staticmethod
-    def find_spec(*args, **kwargs):
-        return None
-
-    # In Python 2, the import system requires finders
-    # to have a find_module() method, but this usage
-    # is deprecated in Python 3 in favor of find_spec().
-    # For the purposes of this finder (i.e. being present
-    # on sys.meta_path but having no other import
-    # system functionality), the two methods are identical.
-    find_module = find_spec
-
-
-def pypy_partial(val):
-    """
-    Adjust for variable stacklevel on partial under PyPy.
-
-    Workaround for #327.
-    """
-    is_pypy = platform.python_implementation() == 'PyPy'
-    return val + is_pypy
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_musllinux.py
DELETED
@@ -1,136 +0,0 @@
-"""PEP 656 support.
-
-This module implements logic to detect if the currently running Python is
-linked against musl, and what musl version is used.
-"""
-
-import contextlib
-import functools
-import operator
-import os
-import re
-import struct
-import subprocess
-import sys
-from typing import IO, Iterator, NamedTuple, Optional, Tuple
-
-
-def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
-    return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
-def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
-    """Detect musl libc location by parsing the Python executable.
-
-    Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
-    ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
-    """
-    f.seek(0)
-    try:
-        ident = _read_unpacked(f, "16B")
-    except struct.error:
-        return None
-    if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
-        return None
-    f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.
-
-    try:
-        # e_fmt: Format for program header.
-        # p_fmt: Format for section header.
-        # p_idx: Indexes to find p_type, p_offset, and p_filesz.
-        e_fmt, p_fmt, p_idx = {
-            1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
-            2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
-        }[ident[4]]
-    except KeyError:
-        return None
-    else:
-        p_get = operator.itemgetter(*p_idx)
-
-    # Find the interpreter section and return its content.
-    try:
-        _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
-    except struct.error:
-        return None
-    for i in range(e_phnum + 1):
-        f.seek(e_phoff + e_phentsize * i)
-        try:
-            p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
-        except struct.error:
-            return None
-        if p_type != 3:  # Not PT_INTERP.
-            continue
-        f.seek(p_offset)
-        interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
-        if "musl" not in interpreter:
-            return None
-        return interpreter
-    return None
-
-
-class _MuslVersion(NamedTuple):
-    major: int
-    minor: int
-
-
-def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
-    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
-    if len(lines) < 2 or lines[0][:4] != "musl":
-        return None
-    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
-    if not m:
-        return None
-    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
-
-@functools.lru_cache()
-def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
-    """Detect currently-running musl runtime version.
-
-    This is done by checking the specified executable's dynamic linking
-    information, and invoking the loader to parse its output for a version
-    string. If the loader is musl, the output would be something like::
-
-        musl libc (x86_64)
-        Version 1.2.2
-        Dynamic Program Loader
-    """
-    with contextlib.ExitStack() as stack:
-        try:
-            f = stack.enter_context(open(executable, "rb"))
-        except OSError:
-            return None
-        ld = _parse_ld_musl_from_elf(f)
-    if not ld:
-        return None
-    proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
-    return _parse_musl_version(proc.stderr)
-
-
-def platform_tags(arch: str) -> Iterator[str]:
-    """Generate musllinux tags compatible to the current platform.
-
-    :param arch: Should be the part of platform tag after the ``linux_``
-        prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
-        prerequisite for the current platform to be musllinux-compatible.
-
-    :returns: An iterator of compatible musllinux tags.
-    """
-    sys_musl = _get_musl_version(sys.executable)
-    if sys_musl is None:  # Python not dynamically linked against musl.
-        return
-    for minor in range(sys_musl.minor, -1, -1):
-        yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
-
-if __name__ == "__main__":  # pragma: no cover
-    import sysconfig
-
-    plat = sysconfig.get_platform()
-    assert plat.startswith("linux-"), "not linux"
-
-    print("plat:", plat)
-    print("musl:", _get_musl_version(sys.executable))
-    print("tags:", end=" ")
-    for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
-        print(t, end="\n      ")