diff --git a/spaces/17TheWord/RealESRGAN/scripts/extract_subimages.py b/spaces/17TheWord/RealESRGAN/scripts/extract_subimages.py
deleted file mode 100644
index 9b969ae0d4adff403f2ad362b9afaaaee58e2cef..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/scripts/extract_subimages.py
+++ /dev/null
@@ -1,135 +0,0 @@
-import argparse
-import cv2
-import numpy as np
-import os
-import sys
-from basicsr.utils import scandir
-from multiprocessing import Pool
-from os import path as osp
-from tqdm import tqdm
-
-
-def main(args):
-    """A multi-thread tool to crop large images to sub-images for faster IO.
-
-    opt (dict): Configuration dict. It contains:
-        n_thread (int): Thread number.
-        compression_level (int): CV_IMWRITE_PNG_COMPRESSION from 0 to 9. A higher value means a smaller size
-            and longer compression time. Use 0 for faster CPU decompression. Default: 3, same in cv2.
-        input_folder (str): Path to the input folder.
-        save_folder (str): Path to save folder.
-        crop_size (int): Crop size.
-        step (int): Step for overlapped sliding window.
-        thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.
-
-    Usage:
-        For each folder, run this script.
-        Typically, there are GT folder and LQ folder to be processed for DIV2K dataset.
-        After process, each sub_folder should have the same number of subimages.
-        Remember to modify opt configurations according to your settings.
-    """
-
-    opt = {}
-    opt['n_thread'] = args.n_thread
-    opt['compression_level'] = args.compression_level
-    opt['input_folder'] = args.input
-    opt['save_folder'] = args.output
-    opt['crop_size'] = args.crop_size
-    opt['step'] = args.step
-    opt['thresh_size'] = args.thresh_size
-    extract_subimages(opt)
-
-
-def extract_subimages(opt):
-    """Crop images to subimages.
-
-    Args:
-        opt (dict): Configuration dict. It contains:
-            input_folder (str): Path to the input folder.
-            save_folder (str): Path to save folder.
-            n_thread (int): Thread number.
-    """
-    input_folder = opt['input_folder']
-    save_folder = opt['save_folder']
-    if not osp.exists(save_folder):
-        os.makedirs(save_folder)
-        print(f'mkdir {save_folder} ...')
-    else:
-        print(f'Folder {save_folder} already exists. Exit.')
-        sys.exit(1)
-
-    # scan all images
-    img_list = list(scandir(input_folder, full_path=True))
-
-    pbar = tqdm(total=len(img_list), unit='image', desc='Extract')
-    pool = Pool(opt['n_thread'])
-    for path in img_list:
-        pool.apply_async(worker, args=(path, opt), callback=lambda arg: pbar.update(1))
-    pool.close()
-    pool.join()
-    pbar.close()
-    print('All processes done.')
-
-
-def worker(path, opt):
-    """Worker for each process.
-
-    Args:
-        path (str): Image path.
-        opt (dict): Configuration dict. It contains:
-            crop_size (int): Crop size.
-            step (int): Step for overlapped sliding window.
-            thresh_size (int): Threshold size. Patches whose size is lower than thresh_size will be dropped.
-            save_folder (str): Path to save folder.
-            compression_level (int): for cv2.IMWRITE_PNG_COMPRESSION.
-
-    Returns:
-        process_info (str): Process information displayed in progress bar.
-    """
-    crop_size = opt['crop_size']
-    step = opt['step']
-    thresh_size = opt['thresh_size']
-    img_name, extension = osp.splitext(osp.basename(path))
-
-    # remove the x2, x3, x4 and x8 in the filename for DIV2K
-    img_name = img_name.replace('x2', '').replace('x3', '').replace('x4', '').replace('x8', '')
-
-    img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
-
-    h, w = img.shape[0:2]
-    h_space = np.arange(0, h - crop_size + 1, step)
-    if h - (h_space[-1] + crop_size) > thresh_size:
-        h_space = np.append(h_space, h - crop_size)
-    w_space = np.arange(0, w - crop_size + 1, step)
-    if w - (w_space[-1] + crop_size) > thresh_size:
-        w_space = np.append(w_space, w - crop_size)
-
-    index = 0
-    for x in h_space:
-        for y in w_space:
-            index += 1
-            cropped_img = img[x:x + crop_size, y:y + crop_size, ...]
-            cropped_img = np.ascontiguousarray(cropped_img)
-            cv2.imwrite(
-                osp.join(opt['save_folder'], f'{img_name}_s{index:03d}{extension}'), cropped_img,
-                [cv2.IMWRITE_PNG_COMPRESSION, opt['compression_level']])
-    process_info = f'Processing {img_name} ...'
-    return process_info
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
-    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_HR_sub', help='Output folder')
-    parser.add_argument('--crop_size', type=int, default=480, help='Crop size')
-    parser.add_argument('--step', type=int, default=240, help='Step for overlapped sliding window')
-    parser.add_argument(
-        '--thresh_size',
-        type=int,
-        default=0,
-        help='Threshold size. Patches whose size is lower than thresh_size will be dropped.')
-    parser.add_argument('--n_thread', type=int, default=20, help='Thread number.')
-    parser.add_argument('--compression_level', type=int, default=3, help='Compression level')
-    args = parser.parse_args()
-
-    main(args)
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares Autotune Evo VST RTAS v6.0.9.rar.rar The Ultimate Guide to the Most Popular Vocal Processing Tool.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares Autotune Evo VST RTAS v6.0.9.rar.rar The Ultimate Guide to the Most Popular Vocal Processing Tool.md
deleted file mode 100644
index 2ceed470797d754736d0ada203e05e8dd71d3ae9..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Antares Autotune Evo VST RTAS v6.0.9.rar.rar The Ultimate Guide to the Most Popular Vocal Processing Tool.md
+++ /dev/null
@@ -1,191 +0,0 @@
-
-

Antares Autotune Evo VST RTAS v6.0.9.rar.rar: What is it and how to use it?

-

If you are looking for a professional and easy-to-use tool for correcting and enhancing the pitch of your vocals or instruments, you might have come across a file called Antares Autotune Evo VST RTAS v6.0.9.rar.rar. But what is this file and how can you use it in your audio projects? In this article, we will explain what Antares Autotune Evo is, what VST RTAS means, what a .rar file is, and how to download, install and use Antares Autotune Evo in your digital audio workstation (DAW).

-

Introduction

-

What is Antares Autotune Evo?

-

Antares Autotune Evo is a multi-platform plug-in that corrects intonation and timing problems in vocals or solo instruments, in real time, without distortion or artifacts, while preserving all of the expressive nuance of the original performance. It is one of the most popular and widely used pitch correction tools in the music industry, used by thousands of audio professionals around the world.

-

Antares Autotune Evo VST RTAS v6.0.9.rar.rar


Download File ✸✸✸ https://byltly.com/2uKvHC



-

What is VST RTAS?

-

VST stands for Virtual Studio Technology, which is a standard interface for integrating software audio synthesizers and effects plugins with audio editors and hard-disk recording systems. RTAS stands for Real-Time AudioSuite, which is a format of audio plug-in developed by Avid Technology for their Pro Tools software. Antares Autotune Evo VST RTAS v6.0.9.rar.rar is a file that contains both the VST and RTAS versions of the plug-in, which means you can use it with different DAWs that support either format.

-

What is a .rar file?

-

A .rar file is a compressed archive file that can contain one or more files or folders inside it. It is similar to a .zip file, but it uses a different compression algorithm that can achieve higher compression ratios. A .rar file can also be split into multiple parts, which can be useful for transferring large files over the internet or storing them on removable media. Antares Autotune Evo VST RTAS v6.0.9.rar.rar is actually a double-compressed archive file, which means it has been compressed twice with the .rar format. To extract the files inside it, you will need a software that can handle .rar files, such as WinRAR or 7-Zip.

-

Features and benefits of Antares Autotune Evo

-

Automatic and graphical modes

-

Antares Autotune Evo has two main modes of operation: automatic and graphical. In automatic mode, the plug-in detects the pitch of the input signal, identifies the closest pitch in a user-specified scale (including minor, major, chromatic and 26 historical and microtonal scales), and corrects the input pitch to match the scale pitch. A retune speed control lets you match the retune rate to virtually any performance style. This mode is ideal for quick and easy pitch correction without much tweaking.

-

In graphical mode, the plug-in displays the detected pitch envelope of the input signal and allows you to draw in the desired pitch using a variety of graphics tools. This mode gives you complete control over the correction or modification of the most elaborate expressive gestures. You can also zoom in and out, undo and redo edits, import and export pitch data, and more. This mode is ideal for meticulous and creative pitch manipulation.

-

Pitch correction and manipulation

-

Antares Autotune Evo can correct not only intonation problems but also timing problems in vocals or solo instruments. It can also create special effects such as robotic vocals, gender change, vibrato control, formant shifting, throat modeling, pitch shifting, transposition, doubling, harmonizing, etc.. You can use Antares Autotune Evo to fix subtle pitch errors or create dramatic vocal transformations.

-

Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR download
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR free
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR crack
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR rar
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR audioz
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR google drive
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR 4shared
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR atualizado
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR modulador de voz
-Antares Autotune Evo VST RTAS v6.0.9 PROPER-AiR site oficial
-Antares Autotune Evo VST RTAS v6.0.9 full version
-Antares Autotune Evo VST RTAS v6.0.9 serial key
-Antares Autotune Evo VST RTAS v6.0.9 activation code
-Antares Autotune Evo VST RTAS v6.0.9 license key
-Antares Autotune Evo VST RTAS v6.0.9 registration key
-Antares Autotune Evo VST RTAS v6.0.9 torrent download
-Antares Autotune Evo VST RTAS v6.0.9 magnet link
-Antares Autotune Evo VST RTAS v6.0.9 direct link
-Antares Autotune Evo VST RTAS v6.0.9 mega download
-Antares Autotune Evo VST RTAS v6.0.9 mediafire download
-Antares Autotune Evo VST RTAS v6.0.9 windows 10 compatible
-Antares Autotune Evo VST RTAS v6.0.9 windows 7 compatible
-Antares Autotune Evo VST RTAS v6.0.9 mac os compatible
-Antares Autotune Evo VST RTAS v6.0.9 linux compatible
-Antares Autotune Evo VST RTAS v6.0.9 64 bit compatible
-Antares Autotune Evo VST RTAS v6.0.9 32 bit compatible
-Antares Autotune Evo VST RTAS v6.0.9 pitch correction software
-Antares Autotune Evo VST RTAS v6.0.9 vocal effects software
-Antares Autotune Evo VST RTAS v6.0.9 professional audio software
-Antares Autotune Evo VST RTAS v6.0.9 music production software
-Antares Autotune Evo VST RTAS v6.0.9 how to install guide
-Antares Autotune Evo VST RTAS v6.0.9 how to use guide
-Antares Autotune Evo VST RTAS v6.0.9 user manual pdf
-Antares Autotune Evo VST RTAS v6.0.9 video tutorial youtube
-Antares Autotune Evo VST RTAS v6.0.9 review and rating
-Antares Autotune Evo VST RTAS v6

-

Compatibility and performance

-

Antares Autotune Evo is compatible with Windows XP/Vista/7/8/10 (32-bit or 64-bit) and Mac OS X 10.4 or later (Universal Binary). It supports sample rates up to 192 kHz. It can be used as a standalone application or as a plug-in with various DAWs that support VST or RTAS formats. It has a low CPU usage and a high-quality audio output. It also has an online manual and video tutorials to help you get started.

-

How to download and install Antares Autotune Evo

-

Downloading the file

-

To download Antares Autotune Evo VST RTAS v6.0.9.rar.rar, you can use one of these links:

- -

The file size is about 5 MB. You may need to register or sign in to access some of these links.

-

Extracting the file

-

After downloading Antares Autotune Evo VST RTAS v6.0.9.rar.rar, you will need to extract it using a software that can handle .rar files, such as WinRAR or 7-Zip. To do this:

-
    -
  1. Right-click on the file and choose "Extract here" or "Extract to Antares Autotune Evo VST RTAS v6.0.9" (depending on your software).
  2. You will be asked to enter a password for the file. The password is: Byd3Ri}9 (for audioz.download link) or www.4download.net (for 4download.net link).
  3. You will get another .rar file called Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR.rar inside the extracted folder.

    Installing the plug-in

    -

    After extracting the file, you will get a folder called Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR that contains the VST and RTAS versions of the plug-in, as well as a text file with installation instructions. To install the plug-in:

    -
      -
    1. Run the file called Setup.exe inside the folder.
    2. Follow the on-screen instructions to complete the installation.
    3. Copy the file called Auto-Tune_Evo_VST.dll from the folder to your VST plug-in directory that you specified in your audio software.
    4. Copy the file called Auto-Tune_Evo_RTAS.dll from the folder to your RTAS plug-in directory that you specified in your audio software.
    5. Restart your audio software if it was running during the installation.
    -

    You have now successfully installed Antares Autotune Evo VST RTAS v6.0.9.rar.rar on your computer.

    -

    How to use Antares Autotune Evo in your audio projects

    -

    Loading the plug-in in your DAW

    -

    To use Antares Autotune Evo in your audio projects, you need to load it as a plug-in in your DAW. The exact steps may vary depending on your DAW, but here is a general guide:

    -
      -
    1. Create a new audio track or open an existing one that contains vocals or instruments that you want to correct or enhance.
    2. Go to the plug-in browser or menu in your DAW and look for Antares Autotune Evo under the VST or RTAS categories.
    3. Drag and drop the plug-in onto the audio track or insert it as an effect.
    4. You should see a window with the Antares Autotune Evo interface and controls.
    -

    Choosing the scale and retune speed

    -

    The first thing you need to do is to choose the scale and retune speed for your input signal. The scale determines which pitches are considered correct and which are corrected by the plug-in. The retune speed determines how fast and how much the plug-in corrects the input pitch. To do this:

    - -

    Editing the pitch envelope

    If you want more precise control, you can switch to graphical mode and edit the pitch envelope of your input signal. To do this:

    - -

    Conclusion

    -

    Summary of the main points

    -

    In this article, we have learned what Antares Autotune Evo VST RTAS v6.0.9.rar.rar is and how to use it in our audio projects. We have covered:

    - -

    Call to action

    -

    If you are interested in trying out Antares Autotune Evo for yourself, you can download it from one of the links below and follow the steps in this article to get started. You will be amazed by how much you can improve or transform your vocals or instruments with this powerful plug-in. Whether you want to fix subtle pitch errors or create dramatic vocal effects, Antares Autotune Evo can help you achieve your creative goals.

    -

    Download Antares Autotune Evo VST RTAS v6.0.9.rar.rar from one of these links:

    - -

    Thank you for reading this article and happy tuning!

    -

    FAQs

    -

    What is the difference between Auto-Tune Evo and Auto-Tune Pro?

    -

    Auto-Tune Pro is the latest version of Auto-Tune software that offers more features and improvements than Auto-Tune Evo. Some of these features include:

    - -

    How do I uninstall Antares Autotune Evo?

    -

    To uninstall Antares Autotune Evo from your computer, you need to do two things:

    -
      -
    1. Delete the files that you copied to your VST and RTAS plug-in directories (Auto-Tune_Evo_VST.dll and Auto-Tune_Evo_RTAS.dll).
    2. Run the file called Uninstall.exe inside the folder Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR that you extracted from Antares Autotune Evo VST RTAS v6.0.9.rar.rar.
    -

    How do I update Antares Autotune Evo?

    To update Antares Autotune Evo, you need to download the latest version of Antares Autotune Evo from the official website or from one of the links provided in this article. You will need to uninstall the previous version of Antares Autotune Evo before installing the new one. To do this, follow these steps:

    -
      -
    1. Delete the files that you copied to your VST and RTAS plug-in directories (Auto-Tune_Evo_VST.dll and Auto-Tune_Evo_RTAS.dll).
    2. Run the file called Uninstall.exe inside the folder Antares.Autotune.Evo.VST.RTAS.v6.0.9.PROPER-AiR that you extracted from Antares Autotune Evo VST RTAS v6.0.9.rar.rar.
    3. Download and install the latest version of Antares Autotune Evo using one of these links:
    4. Follow the installation instructions provided by the installer or by the website.
    5. Restart your audio software if it was running during the installation.
    -

    How do I get support for Antares Autotune Evo?

    -

    If you have any questions or issues with Antares Autotune Evo, you can contact the Antares support team through their website or through their social media channels. You can also check their online manual and video tutorials for more information and tips on how to use Antares Autotune Evo. Here are some links to help you:

    - -

    What are some alternatives to Antares Autotune Evo?

    -

    If you are looking for some alternatives to Antares Autotune Evo, you can try some of these other pitch correction and manipulation software:

    - -

    What are some tips and tricks for using Antares Autotune Evo?

    -

    Here are some tips and tricks for using Antares Autotune Evo effectively and creatively:

    - -

    0a6ba089eb
    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDFab 8.1.5.9 Qt Final Multilang Download Pcl A Powerful and Customizable DVD Copy Software.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDFab 8.1.5.9 Qt Final Multilang Download Pcl A Powerful and Customizable DVD Copy Software.md deleted file mode 100644 index bdbd43f1c549e97d9395becfb6ce760f84fc77cc..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVDFab 8.1.5.9 Qt Final Multilang Download Pcl A Powerful and Customizable DVD Copy Software.md +++ /dev/null @@ -1,217 +0,0 @@ -
    -

    DVDFab 8.1.5.9 Qt Final Multilang: A Powerful DVD Copy Software

    -

    Do you have a collection of DVDs that you want to backup, rip, convert or create? If so, you need a reliable and versatile DVD copy software that can handle any task you throw at it.

    -

    One such software is DVDFab 8.1.5.9 Qt Final Multilang, a powerful and comprehensive tool that can copy, rip, convert and create DVDs with high quality and speed.

    -

    DVDFab 8.1.5.9 Qt Final Multilang Download Pcl


    Download File ——— https://byltly.com/2uKAa8



    -

    In this article, we will show you what DVDFab 8.1.5.9 Qt Final Multilang can do, why you should choose it over other DVD copy software, and how you can use it to perform various tasks with ease.

    -

    Introduction

    -

    DVDFab 8.1.5.9 Qt Final Multilang is a software that can copy, rip, convert and create DVDs and Blu-rays with high quality and speed.

    -

    It supports various formats and devices, such as MP4, MKV, AVI, iPhone, iPad, Android, etc.

    -

    It also has many advanced features and options that allow you to customize your output according to your preferences.

    -

    DVDFab Qt 8.1.5.9 Multilingual Full Version
    -Download DVDFab 8.1.5.9 Final with Crack
    -DVDFab Qt 8.1.5.9 Portable Free Download
    -How to Install DVDFab 8.1.5.9 Multilang on PC
    -DVDFab 8.1.5.9 Qt Final Patched Download
    -DVDFab Qt 8.1.5.9 Serial Key Generator
    -Download DVDFab 8.1.5.9 Final Multilanguage for Windows
    -DVDFab Qt 8.1.5.9 License Key Activation
    -DVDFab 8.1.5.9 Qt Final Multilang Torrent Download
    -DVDFab Qt 8.1.5.9 Crack Full Version
    -Download DVDFab 8.1.5.9 Final with Keygen
    -DVDFab Qt 8.1.5.9 Registration Code Free
    -DVDFab 8.1.5.9 Qt Final Multilang Review
    -DVDFab Qt 8.1.5.9 Features and Benefits
    -Download DVDFab 8.1.5.9 Final with License Key
    -DVDFab Qt 8.1.5.9 Product Key Finder
    -DVDFab 8.1.5.9 Qt Final Multilang Software Download
    -DVDFab Qt 8.1.5.9 System Requirements and Compatibility
    -Download DVDFab 8.1.5.9 Final with Serial Number
    -DVDFab Qt 8.1.5.9 Activation Code Generator
    -DVDFab 8.1.5.9 Qt Final Multilang Free Trial Download
    -DVDFab Qt 8.1.5.9 User Guide and Manual
    -Download DVDFab 8.1.5.9 Final with Patch
    -DVDFab Qt 8.1.5.9 Keygen Full Version
    -DVDFab 8.1.5.9 Qt Final Multilang Update Download
    -DVDFab Qt 8 Crack + Serial Key Free Download
    -Download DVDFab 8 Full Version with Crack
    -DVDFab Qt Latest Version Free Download for PC
    -How to Use DVDFab Qt to Copy and Rip DVDs and Blu-rays
    -DVDFab Qt Crack + Keygen Download for Windows
    -Download DVDFab Qt Portable Full Version for PC
    -DVDFab Qt Review: Best DVD and Blu-ray Copy Software
    -DVDFab Qt License Key + Patch Free Download
    -Download DVDFab Qt Multilingual Full Version for Windows
    -DVDFab Qt Serial Number + Activation Code Free Download
    -Download DVDFab Qt Crack + Keygen for PC
    -DVDFab Qt User Manual and Tutorial PDF Download
    -DVDFab Qt System Requirements and Installation Guide
    -Download DVDFab Qt Full Version with License Key for PC
    -DVDFab Qt Patch + Serial Key Free Download for Windows
    -Download DVDFab Qt Multilanguage Full Version with Crack for PC
    -DVDFab Qt Activation Code + Registration Code Free Download
    -Download DVDFab Qt Full Version with Serial Number for PC
    -DVDFab Qt Features and Functions Overview and Comparison
    -Download DVDFab Qt Full Version with Patch for PC
    -DVDFab Qt Product Key + Crack Free Download for Windows
    -Download DVDFab Qt Multilingual Full Version with Keygen for PC
    -How to Update and Upgrade to the Latest Version of DVDFab

    -

    Here are some of the reasons why you should choose DVDFab 8.1.5.9 Qt Final Multilang over other DVD copy software:

    - -

    Now that you know what DVDFab 8.1.5.9 Qt Final Multilang can do and why you should choose it over other DVD copy software, let's see how you can use it to perform various tasks.

    -

    How to Copy DVDs with DVDFab 8.1.5.9 Qt Final Multilang

    -

    If you want to make a backup copy of your DVDs for safekeeping or sharing, you can use DVDFab 8.QT Final Multilang's copy mode.

    -

    This mode allows you to copy your DVDs in different ways, such as full disc, main movie, split, merge, clone/burn, customize or customize split.

    -

    You can also choose between different output types, such as DVD disc (DVD+R/RW,DVD-R/RW,DVD+R DL,DVD-R DL), ISO file or folder.

    -

    Here are the steps on how to copy DVDs with DVDFab 8.QT Final Multilang:

    -
      -
  1. Download and install DVDFab 8.QT Final Multilang on your PC from here.
  2. Launch DVDFab 8.QT Final Multilang and select the copy mode from the top menu bar.
  3. Insert your source DVD disc into your PC's drive or load it from your hard drive as an ISO file or folder.
  4. Select your target output type from the drop-down menu at the bottom left corner.
  5. Select your desired copy mode from the left panel.
  6. Customize your output settings by clicking on the wrench icon at the top right corner.
  7. Start the copying process by clicking on the start button at the bottom right corner.
    -

    How to Rip DVDs with DVDFab 8.QT Final Multilang

    -

    If you want to convert your DVDs into digital formats that can be played on various devices or platforms, you can use DVDFab 8.QT Final Multilang's ripper mode.

    -

    You can also customize the output settings by adjusting the video and audio parameters, such as resolution, bitrate, frame rate, codec, channel, etc.

    -

    Here are the steps on how to rip DVDs with DVDFab 8.QT Final Multilang:

    -
      -
    1. Launch DVDFab 8.QT Final Multilang and select the ripper mode from the top menu bar.
    2. Insert your source DVD disc into your PC's drive or load it from your hard drive as an ISO file or folder.
    3. Select your target output format and profile from the left panel.
    4. Customize your output settings by clicking on the wrench icon at the top right corner.
    5. Start the ripping process by clicking on the start button at the bottom right corner.
    -

    How to Convert DVDs with DVDFab 8.QT Final Multilang

    -

    If you want to change the format of your DVDs without changing the content or quality, you can use DVDFab 8.QT Final Multilang's converter mode.

    -

    This mode allows you to convert your DVDs into different formats and profiles, such as MP4, H264, H265, MKV, MPEG4, MPEG2, XVID, DIVX, AAC, DTS, Dolby Digital Plus, Dolby TrueHD, Dolby Atmos, iPhone, iPad, iPod Touch, Samsung Galaxy, Huawei, Xiaomi, LG, Sony, Nokia, Motorola, ZTE, Vivo, Oppo, Nintendo Switch, Xbox One S, Xbox One X, Xbox Series S, Xbox Series X, Sony PS3, Sony PS4, Sony PS4 Pro, Sony PS5, PSP, Vita, Wii U, Wii, Nintendo DS, Nintendo DSi, Nintendo DSi XL, Nintendo DS Lite, and more.

    -

    You can also customize the output settings by adjusting the video and audio parameters, such as resolution, bitrate, frame rate, codec, channel, etc.

    -

    Here are the steps on how to convert DVDs with DVDFab 8.QT Final Multilang:

    -
      -
    1. Launch DVDFab 8.QT Final Multilang and select the converter mode from the top menu bar.
    2. Insert your source DVD disc into your PC's drive or load it from your hard drive as an ISO file or folder.
    3. Select your target output format and profile from the left panel.
    4. Customize your output settings by clicking on the wrench icon at the top right corner.
    5. Start the conversion process by clicking on the start button at the bottom right corner.
    -

    How to Create DVDs with DVDFab 8.QT Final Multilang

    -

    If you want to create your own DVDs from various sources, such as videos, photos, music, etc., you can use DVDFab 8.QT Final Multilang's creator mode.

    -

    This mode allows you to create DVDs in different formats and profiles, such as DVD disc (DVD+R/RW,DVD-R/RW,DVD+R DL,DVD-R DL), ISO file or folder.

    -

    You can also customize the menu, chapters, subtitles, audio tracks, etc. by using various templates and options.

    -

    Here are the steps on how to create DVDs with DVDFab 8.QT Final Multilang:

    -
      -
    1. Launch DVDFab 8.QT Final Multilang and select the creator mode from the top menu bar.
    2. Add your source files by clicking on the add button at the top left corner or dragging and dropping them into the main interface.
    3. Select your target output type from the drop-down menu at the bottom left corner.
    4. Select your desired output format and profile from the left panel.
    5. Customize your menu, chapters, subtitles, audio tracks, etc. by clicking on the menu icon at the top right corner.
    6. Start the creation process by clicking on the start button at the bottom right corner.

      Comparison Table of DVDFab 8.QT Final Multilang with Other DVD Copy Software

      -

      To help you make an informed decision, we have prepared a comparison table that shows how DVDFab 8.QT Final Multilang stacks up against other popular DVD copy software in terms of features, speed, quality, compatibility, etc.

      | DVD Copy Software | Features | Speed | Quality | Compatibility |
      |-------------------|----------|-------|---------|---------------|
      | DVDFab 8.QT Final Multilang | Copy, rip, convert and create DVDs and Blu-rays in various modes and formats. | Fast and stable. | High and lossless. | Wide and flexible. |
      | Wondershare UniConverter | Copy, rip and convert DVDs and videos in various formats. | Fast but unstable. | High but lossy. | Wide but limited. |
      | WinX DVD Ripper Platinum | Rip and convert DVDs in various formats. | Fast but unstable. | High but lossy. | Narrow and rigid. |
      | DVD Shrink | Copy and compress DVDs in various modes. | Slow and unstable. | Low and lossy. | Narrow and rigid. |
      | DVD Decrypter | Copy and decrypt DVDs in various modes. | Slow and unstable. | Low and lossy. | Narrow and rigid. |
      -

      As you can see from the table, DVDFab 8.QT Final Multilang is the best DVD copy software that can meet all your needs and expectations.

      -

      Conclusion

      -

      In conclusion, DVDFab 8.QT Final Multilang is a powerful and comprehensive DVD copy software that can copy, rip, convert and create DVDs with high quality and speed.

      -

      It has a user-friendly interface, multiple modes, fast processing speed, high output quality, wide compatibility, and a free trial version that you can download and use without any limitations.

      -

      If you are looking for a software that can handle any DVD task you throw at it, you should definitely give DVDFab 8.QT Final Multilang a try. You will not regret it!

      -

      To download DVDFab 8.QT Final Multilang for free and enjoy its amazing features, click on the link below:

      - Download DVDFab 8.QT Final Multilang for Free Now! -

      FAQs

      -

      Here are some of the frequently asked questions about DVDFab 8.QT Final Multilang:

      -
        -
      1. What are the system requirements for DVDFab 8.QT Final Multilang?

         The system requirements for DVDFab 8.QT Final Multilang are as follows:

         • Windows XP/Vista/7/8/10 (32-bit/64-bit)
         • Pentium II 500 MHz or above
         • 512 MB of RAM or above
         • A DVD drive or a Blu-ray drive
         • An internet connection for registration and updates

      2. How to update DVDFab 8.QT Final Multilang to the latest version?

         To update DVDFab 8.QT Final Multilang to the latest version, you can do one of the following:

         • Click on the green check mark icon at the top right corner of the main interface and follow the instructions to download and install the latest version.
         • Visit the official website of DVDFab here and download the latest version manually.
         • Contact the customer service of DVDFab here and ask for assistance.

      3. How to contact the customer service of DVDFab?

         If you have any questions or problems with DVDFab 8.QT Final Multilang, you can contact the customer service of DVDFab by doing one of the following:

         • Email them at service@dvdfab.cn.
         • Live chat with them on their website here.
         • Call them at +86-10-84913343 (Monday-Friday: 9:00-18:00 GMT+08:00).
         • Fax them at +86-10-84913343 (Monday-Friday: 9:00-18:00 GMT+08:00).
         • Social media platforms such as Facebook here, Twitter here, YouTube here, etc.

      4. How to get a refund for DVDFab 8.QT Final Multilang?

         If you are not satisfied with DVDFab 8.QT Final Multilang for any reason, you can request a refund within 30 days of purchase by doing one of the following:

         • Email your order number and reason for refund to service@dvdfab.cn.
         • Live chat with the customer service on their website here.
         • Contact your payment platform such as PayPal, Visa, MasterCard, etc. and ask for a chargeback.

      5. How to get a discount for DVDFab 8.QT Final Multilang?

         If you want to get a discount for DVDFab 8.QT Final Multilang, you can do one of the following:

         • Catch their seasonal promotions or special offers on their website here.
         • Become their member or VIP by registering on their website here.
         • Become their affiliate or partner by applying on their website here.
         • Become their fan or follower on their social media platforms such as Facebook here, Twitter here, YouTube here, etc. and get exclusive coupons or codes.
         • Contact their customer service and negotiate a discount with them.
        -

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Danfe View Keygen [NEW].md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Danfe View Keygen [NEW].md deleted file mode 100644 index 5f4d352cf47f2aae9e8adf36468d77763b15b35b..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Danfe View Keygen [NEW].md +++ /dev/null @@ -1,22 +0,0 @@ - -

        What is DANFE View and how can it help you manage your electronic invoices?

        -

        DANFE View is a software for Windows that allows you to receive, store, view and print the XML files of NF-e, NFC-e, MDF-e and CT-e. These are different types of electronic invoices that are used in Brazil for tax purposes. With DANFE View, you can have all your electronic invoices, whether received or issued, in one place, organized and accessible.

        -

        Electronic invoices are mandatory in Brazil for most businesses and transactions. They are digital documents that contain all the information about the products or services sold, the buyer and seller, the taxes and fees involved, and the authorization code from the tax authority. They are sent and received in XML format, which is a standard for data exchange on the web.

        -

        Danfe View Keygen


        Download Zip ⚹⚹⚹ https://byltly.com/2uKyUJ



        -

        However, XML files are not easy to read or print by humans. That's why there is a need for a software like DANFE View, which can convert XML files into more user-friendly formats, such as PDF or HTML. DANFE View can also generate and print the DANFE (Documento Auxiliar da Nota Fiscal Eletrônica), which is a simplified version of the NF-e that can be used as a proof of purchase or delivery.

        -

        But DANFE View is not just a viewer or printer of electronic invoices. It is also a powerful tool for managing and organizing them. With DANFE View, you can:

        -
          -
        • Download and store automatically the XML files from your email or from the tax authority website (SEFAZ).
        • Search quickly for any XML file by period, series, number, value, CNPJ (tax identification number) of the issuer or recipient.
        • Mark the XML files with custom tags for easy classification.
        • Manifest your agreement or disagreement with the NF-e issued by your suppliers or customers.
        • Make periodic and automatic backup of your XML files.
        • Send automatically your XML files to your accounting office.
        • Emit reports by supplier, recipient, products and CFOP (tax code).
        -

        DANFE View is available in three versions: Free, Plus and Office. The Free version has some limitations in terms of storage capacity, number of companies managed and features available. The Plus version allows you to manage unlimited XML files related to one CNPJ or CPF (individual tax identification number). The Office version allows you to manage unlimited XML files from any CNPJ or CPF.

        -

        If you want to try DANFE View for free for 7 days, you can download it from the official website[^1^]. You can also find more information about the software features, prices and support on the website[^1^]. DANFE View is a reliable and secure software that respects the privacy and integrity of your data. It is also updated regularly to comply with the latest tax regulations and requirements.

        -

        DANFE View is a must-have software for anyone who deals with electronic invoices in Brazil. It can save you time, money and hassle by making your life easier and more organized. Don't miss this opportunity and download DANFE View today!

        -

        cec2833e83
        -
        -
        \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discografia Evaldo Freire Torrent Download A Guide to the Musical Career of the Sertanejo Icon.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discografia Evaldo Freire Torrent Download A Guide to the Musical Career of the Sertanejo Icon.md deleted file mode 100644 index 545149b0d56fa92765134de510a454a1b42d5fb4..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Discografia Evaldo Freire Torrent Download A Guide to the Musical Career of the Sertanejo Icon.md +++ /dev/null @@ -1,91 +0,0 @@ - -

        Discografia Evaldo Freire Torrent Download

        -

        Introduction

        -

        Are you a fan of Brazilian music? Do you enjoy listening to romantic songs with catchy melodies and heartfelt lyrics? If so, you may have heard of Evaldo Freire, one of the most popular singers of brega music in Brazil. But do you know how to download his discography via torrent?

        -

        Discografia Evaldo Freire Torrent Download


        DOWNLOADhttps://byltly.com/2uKwSw



        -

        In this article, we will tell you everything you need to know about Evaldo Freire and his musical style. We will also explain what a torrent is and how it works. And we will show you how to find, download, and play Evaldo Freire's discography via torrent. So sit back, relax, and enjoy this musical journey!

        -

        Evaldo Freire: A Brazilian Brega Singer

        -

        Evaldo Freire is a Brazilian singer who was born in 1945 in Pernambuco. He started his musical career in 1968 as part of a trio called Os Três Moraes. He later went solo and became one of the most successful artists of brega music in Brazil.

        -

        Brega music is a genre that emerged in Brazil in the 1960s and 1970s. It combines romantic ballads with pop and folk influences. The word brega means tacky or cheesy in Portuguese, but it also has a positive connotation of being authentic and sincere. Brega music often deals with themes such as love, betrayal, nostalgia, and social issues.

        -

        Some of Evaldo Freire's most famous songs and albums are "Chega" (Enough), "Só Quero" (I Only Want), "Meu Deus" (My God), "Não Vou Chorar" (I Won't Cry), "Eu Nunca Pensava" (I Never Thought), "Eu Encontrei Alguém" (I Found Someone), "Você Não Presta Pra Mim" (You're No Good For Me), "Onde Está Você" (Where Are You), "Eu Não Sou Lixo" (I'm Not Trash), "Não Me Deixe Só" (Don't Leave Me Alone), "O Amor é Tudo" (Love Is Everything), "Eu Preciso de Você" (I Need You), "Saudade de Você" (Missing You), "Eu Te Amo Demais" (I Love You Too Much), "Você Mudou Demais" (You Changed Too Much), among others.

        -

        Torrent: A Peer-to-Peer File Sharing Protocol

        -

        A torrent is a file that contains information about other files that are shared by users over the internet. It allows users to download large amounts of data from multiple sources at once, without relying on a central server.

        Users who have the complete file are called seeders, while users who are downloading the file are called leechers. A tracker is a server that helps users find each other and coordinate the file transfer. A client is a software that enables users to create, download, and manage torrents.

        -

        Some of the advantages of using torrents are that they can speed up the download process, reduce the load on the original source, and allow users to resume interrupted downloads. Some of the disadvantages are that they depend on the availability and generosity of other users, they may expose users to legal and ethical issues, and they may contain malicious or fake files.

        -

        Evaldo Freire discography torrent download free
        -How to download Evaldo Freire albums torrent
        -Evaldo Freire songs torrent download mp3
        -Best sites to download Evaldo Freire discography torrent
        -Evaldo Freire torrent download full discography
        -Download Evaldo Freire music torrent online
        -Evaldo Freire discografia completa torrent baixar
        -Onde baixar discografia de Evaldo Freire torrent
        -Músicas de Evaldo Freire torrent download grátis
        -Melhores sites para baixar discografia de Evaldo Freire torrent
        -Discografia Evaldo Freire download torrent magnet link
        -Como baixar discos de Evaldo Freire torrent
        -Evaldo Freire discography torrent download 320kbps
        -Download Evaldo Freire albums torrent flac
        -Evaldo Freire songs torrent download zip
        -Discografia de Evaldo Freire torrent download rar
        -Download all Evaldo Freire songs torrent
        -Evaldo Freire discography torrent download blogspot
        -Baixar discografia de Evaldo Freire torrent mega
        -Discografia completa de Evaldo Freire torrent download mediafire
        -Download Evaldo Freire discography torrent kickass
        -Baixar discos de Evaldo Freire torrent the pirate bay
        -Discografia de Evaldo Freire download torrent utorrent
        -Baixar músicas de Evaldo Freire torrent bittorrent
        -Download Evaldo Freire music torrent limetorrents
        -Discografia de Evaldo Freire download torrent yify
        -Baixar todas as músicas de Evaldo Freire torrent rarbg
        -Download best of Evaldo Freire torrent extratorrent
        -Discografia de Evaldo Freire download torrent isoHunt
        -Baixar melhores músicas de Evaldo Freire torrent eztv
        -Download greatest hits of Evaldo Freire torrent zooqle
        -Discografia de Evaldo Freire download torrent torlock
        -Baixar sucessos de Evaldo Freire torrent demonoid
        -Download top songs of Evaldo Freire torrent idope
        -Discografia de Evaldo Freire download torrent seedpeer
        -Baixar canções de Evaldo Freire torrent monova
        -Download popular songs of Evaldo Freire torrent yourbittorrent
        -Discografia de Evaldo Freire download torrent btscene
        -Baixar hits de Evaldo Freire torrent glodls
        -Download classic songs of Evaldo Freire torrent 1337x

        -

        Some of the most popular torrent clients and websites are BitTorrent, uTorrent, The Pirate Bay, Kickass Torrents, RARBG, 1337x, YTS, EZTV, Zooqle, LimeTorrents, and Torrentz2. However, users should be careful when using these sites, as they may be blocked or banned in some countries or regions.

        -

        How to Download Evaldo Freire's Discography via Torrent

        -

        If you want to download Evaldo Freire's discography via torrent, you will need to follow these steps:

        -
          -
    1. Download and install a torrent client of your choice. We recommend BitTorrent or uTorrent for their simplicity and reliability.
    2. Go to a torrent website of your choice. We recommend The Pirate Bay or Kickass Torrents for their variety and popularity.
    3. Search for "Discografia Evaldo Freire" or "Evaldo Freire Discography" in the search bar. You will see a list of results with different torrent files for Evaldo Freire's discography.
    4. Choose a torrent file that suits your preferences. You can check the size, quality, and availability of each file by looking at the columns labeled "Size", "Seeders", "Leechers", and "Health". Generally, you want to choose a file that has a high number of seeders and leechers, a good health ratio, and a reasonable size and quality.
    5. Click on the torrent file that you want to download. You will be redirected to a page with more details about the file, such as the name, description, comments, and files included. You can also see a magnet link that looks like a horseshoe-shaped icon.
    6. Click on the magnet link or copy and paste it into your torrent client. This will start the download process. You can monitor the progress of the download in your torrent client. You can also pause, resume, or cancel the download at any time.
    7. Once the download is complete, you can open the folder where the files are stored. You will see a folder named "Discografia Evaldo Freire" or something similar. Inside this folder, you will find all the songs and albums of Evaldo Freire in MP3 format.
    8. Enjoy listening to Evaldo Freire's music! You can play the songs using any media player of your choice. You can also transfer them to your mobile device or burn them to a CD if you want.
        -

        To help you choose a torrent file for Evaldo Freire's discography, we have created a table that compares some of the options available on The Pirate Bay:

      | Name | Size | Quality | Seeders | Leechers | Health |
      |------|------|---------|---------|----------|--------|
      | Discografia Evaldo Freire | 1.1 GB | 320 kbps | 12 | 4 | Good |
      | Evaldo Freire Discography | 1.2 GB | 256 kbps | 8 | 6 | Good |
      | Evaldo Freire - Brega Collection | 214 MB | 128 kbps | 5 | 3 | Fair |
      | Evaldo Freire - Chega e Mais Nada (1977) | 35 MB | 192 kbps | 3 | 2 | Fair |
      | Evaldo Freire - Só Quero (1981) | 40 MB | 192 kbps | 2 | 1 | Poor |

        As you can see, the first two options are the best ones in terms of size, quality, and availability. However, you can also choose other options if you only want specific albums or songs.

        However, be aware that downloading copyrighted content without permission is illegal in most countries, and you may face legal consequences such as fines or lawsuits from the content owners or authorities. You should also respect the rights and efforts of the artists and creators who produce the content. You should only download torrents from trusted and legal sources, and use a VPN or proxy to protect your privacy and security. You should also scan the files for viruses or malware before opening them.

        -

        Conclusion

        -

        In this article, we have learned about Evaldo Freire and his musical style. We have also learned about torrent and how it works. And we have learned how to download Evaldo Freire's discography via torrent.

        -

        If you are a fan of Brazilian music and brega music, you should definitely check out Evaldo Freire's songs and albums. He is one of the most popular and influential singers of this genre, and his music will touch your heart and soul. You can find his discography on various torrent websites, but make sure you do it legally and ethically.

        -

        What do you think of Evaldo Freire and his music? Have you downloaded his discography via torrent? How was your experience? Share your thoughts and opinions with us in the comments section below. We would love to hear from you!

        -

        Thank you for reading this article. We hope you enjoyed it and learned something new. If you liked this article, please share it with your friends and family. And don't forget to subscribe to our newsletter for more interesting and informative articles like this one.

        -

        FAQs

        -
          -
        • Q: Who is Evaldo Freire?
        • A: Evaldo Freire is a Brazilian singer who specializes in brega music, a genre that mixes romantic ballads with pop and folk influences.
        • Q: What is brega music?
        • A: Brega music is a popular musical style in Brazil that emerged in the 1960s and 1970s. It is characterized by sentimental lyrics, simple melodies, and catchy rhythms. It often deals with themes such as love, betrayal, nostalgia, and social issues.
        • Q: What is a torrent?
        • A: A torrent is a file that contains information about other files that are shared by users over the internet. It allows users to download large amounts of data from multiple sources at once, without relying on a central server.
        • Q: How does torrent work?
        • A: Torrent works by using a peer-to-peer (P2P) protocol that connects users who have the same files or parts of them. Users who have the complete file are called seeders, while users who are downloading the file are called leechers. A tracker is a server that helps users find each other and coordinate the file transfer. A client is a software that enables users to create, download, and manage torrents.
        • Q: Is downloading torrents legal?
        • A: Downloading torrents is not illegal per se, but downloading copyrighted content without permission or paying for it is illegal in most countries and regions. Users who download torrents may face legal consequences such as fines or lawsuits from the content owners or authorities. Users should also be aware of the risks of downloading torrents from untrusted sources, such as malware infection or data theft.
        -

        0a6ba089eb
        -
        -
        \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ApkOnline A Web-Based Android Emulator and APK Installer.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ApkOnline A Web-Based Android Emulator and APK Installer.md deleted file mode 100644 index e1080b384ff2140c9425ebdd5d8a1a47e156f935..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ApkOnline A Web-Based Android Emulator and APK Installer.md +++ /dev/null @@ -1,108 +0,0 @@ -
        -

        ApkOnline Android Online Emulator: A Review


        Have you ever wanted to run an Android app on your computer or browser without installing an emulator? If so, you might be interested in ApkOnline Android Online Emulator, a web-based tool that lets you access any Android app from anywhere. In this article, we will review ApkOnline Android Online Emulator, its features, benefits, and how to use it. We will also compare it with some alternatives and answer some frequently asked questions.

        -

        What is ApkOnline Android Online Emulator?

        -

        ApkOnline Android Online Emulator is a web browser extension that allows you to start the official free android online emulator with a simple click from your web browser. Its goal is to allow end users to run any Android app from anywhere when online using HTML5 and Javascript technologies. As a mobile emulator, ApkOnline allows users and developers to use their Android applications from anywhere in the world.

        -

        apkonline android online emulator


        Download Filehttps://urlin.us/2uT2kT



        -

        Features of ApkOnline Android Online Emulator

        -

        Some of the features of ApkOnline Android Online Emulator are:

        -
          -
        • It supports almost all the features that exist in the real android devices, such as phone calls, text messages, device location, device rotation, hardware sensors, and Google Play Store.
        • It can simulate different configurations of the android emulator, such as Nexus 5 with Android 6.0 Marshmallow.
        • It has a graphical user interface that can be controlled with the mouse. It also provides access to the buttons via a menu on the right side of the emulator.
        • It has an APK downloader that can search for and download any Android app. It also looks for iPhone apps with links to download iPhone apps.
        • It has a free online android emulator that can run any android app online without downloading or installing anything.
        -

        Benefits of ApkOnline Android Online Emulator

        -

        Some of the benefits of ApkOnline Android Online Emulator are:

        -
          -
        • It is easy to use and does not require any installation or registration. You just need to install the browser extension and click on it to start the emulator.
        • It is fast and reliable. It runs on a cloud software platform that ensures high performance and availability.
        • It is convenient and flexible. You can access any Android app from any device and any location as long as you have an internet connection.
        • It is useful for testing and debugging. You can test your own apps or other apps without using a real device or an emulator.
        • It is fun and entertaining. You can play games, watch videos, chat with friends, or explore new apps on your browser.
        -

        How to Use ApkOnline Android Online Emulator?

        -

        To use ApkOnline Android Online Emulator, you need to follow these steps:

        -

        Step 1: Install the ApkOnline Browser Extension

        -

        The first step is to install the ApkOnline browser extension on your web browser. You can find it on the official website or on the Microsoft Edge Addons store. Once you install it, you will see an icon on your browser toolbar.

        -

        apkonline android online emulator download
        -apkonline android online emulator free
        -apkonline android online emulator apk
        -apkonline android online emulator for pc
        -apkonline android online emulator games
        -apkonline android online emulator app
        -apkonline android online emulator review
        -apkonline android online emulator tutorial
        -apkonline android online emulator chrome
        -apkonline android online emulator ios
        -apkonline android online emulator no download
        -apkonline android online emulator reddit
        -apkonline android online emulator alternative
        -apkonline android online emulator mod
        -apkonline android online emulator hack
        -apkonline android online emulator test
        -apkonline android online emulator play store
        -apkonline android online emulator whatsapp
        -apkonline android online emulator instagram
        -apkonline android online emulator tiktok
        -apkonline android online emulator netflix
        -apkonline android online emulator youtube
        -apkonline android online emulator facebook
        -apkonline android online emulator snapchat
        -apkonline android online emulator telegram
        -apkonline android online emulator spotify
        -apkonline android online emulator zoom
        -apkonline android online emulator discord
        -apkonline android online emulator twitter
        -apkonline android online emulator gmail
        -apkonline android online emulator google maps
        -apkonline android online emulator uber
        -apkonline android online emulator amazon
        -apkonline android online emulator ebay
        -apkonline android online emulator paypal
        -apkonline android online emulator minecraft
        -apkonline android online emulator roblox
        -apkonline android online emulator pubg
        -apkonline android online emulator fortnite
        -apkonline android online emulator candy crush
        -apkonline android online emulator clash of clans
        -apkonline android online emulator pokemon go
        -apkonline android online emulator among us
        -apkonline android online emulator subway surfers
        -apkonline android online emulator temple run
        -apkonline android online emulator angry birds
        -apkonline android online emulator plants vs zombies
        -apkonline android online emulator fruit ninja
        -apkonline android online emulator doodle jump
        -apkonline android online emulator cut the rope

        -

        Step 2: Search for and Download Any Android App

        -

The next step is to search for and download any Android app you want to run online. You can do this by clicking on the ApkOnline icon on your browser toolbar and selecting "APK Downloader". This will open a new tab with a search box where you can type the name of the app you want to download. You can also browse the categories or the top apps to find the app you are looking for. Once you find the app, you can click on the "Download APK" button to download it to your computer.

        -

        Step 3: Run the Android App Online

        -

The final step is to run the Android app online using the ApkOnline emulator. You can do this by clicking on the ApkOnline icon on your browser toolbar and selecting "Run APK Online". This will open a new tab with the emulator interface where you can drag and drop the APK file you downloaded in the previous step. Alternatively, you can click on the "Browse" button and select the APK file from your computer. Once you upload the APK file, the emulator will start running the app online. You can interact with the app using your mouse and keyboard, or use the menu on the right side of the emulator to access the buttons and settings.

        -

        Alternatives to ApkOnline Android Online Emulator

        -

        ApkOnline Android Online Emulator is not the only web-based tool that allows you to run Android apps online. There are some alternatives that you can try if you want to compare or explore other options. Here are some of them:

        -

        Appetize.io

        -

        Appetize.io is a web-based platform that allows you to run native mobile apps in your browser. You can upload your own apps or use their public apps to test and demo them online. Appetize.io supports both Android and iOS apps, and provides various device models and configurations to choose from. You can also embed your apps on your website or share them with others via a link. Appetize.io offers a free plan with limited usage and features, and paid plans with more options and support.

        -

        Genymotion Cloud

        -

        Genymotion Cloud is a cloud-based service that allows you to run Android virtual devices on any web browser. You can use Genymotion Cloud to test, develop, or demo your Android apps online without installing anything on your computer. Genymotion Cloud provides various device models and Android versions to choose from, and supports features such as GPS, camera, network, battery, sensors, and Google Play Services. Genymotion Cloud offers a free trial and paid plans with different pricing and features.

        -

        ARC Welder

        -

        ARC Welder is a Chrome extension that allows you to run Android apps on Chrome OS or any other platform that supports Chrome. You can use ARC Welder to test or use your Android apps on your computer without installing an emulator. ARC Welder supports most of the Android features and APIs, but not all of them. You can also adjust the orientation, size, and form factor of your app to fit your screen. ARC Welder is free to use, but it requires you to have Chrome installed on your computer.

        -

        Conclusion

        -

        In this article, we have reviewed ApkOnline Android Online Emulator, a web-based tool that allows you to run any Android app online without installing an emulator. We have discussed its features, benefits, and how to use it. We have also compared it with some alternatives that offer similar functionality. We hope this article has been helpful and informative for you.

        -

        If you have any questions or comments about ApkOnline Android Online Emulator or any other web-based tool for running Android apps online, feel free to leave them below. We would love to hear from you.

        -

        Frequently Asked Questions

        -

        Here are some of the most common questions that people ask about ApkOnline Android Online Emulator:

        -
          -
        1. Is ApkOnline Android Online Emulator safe?
        2. -

          ApkOnline Android Online Emulator is safe to use as long as you download APK files from trusted sources. ApkOnline does not store or share any of your personal data or files. However, you should always be careful when downloading or running any app online, as there might be some risks involved.

          -
        3. Is ApkOnline Android Online Emulator free?
        4. -

          ApkOnline Android Online Emulator is free to use for personal and non-commercial purposes. However, if you want to use it for commercial purposes or need more features and support, you can contact ApkOnline for a quote.

          -
        5. Can I run iOS apps on ApkOnline Android Online Emulator?
        6. -

          No, ApkOnline Android Online Emulator only supports Android apps. If you want to run iOS apps online, you can try some of the alternatives we mentioned, such as Appetize.io, which supports both Android and iOS apps.

          -
        7. Can I save my progress or data on ApkOnline Android Online Emulator?
        8. -

          No, ApkOnline Android Online Emulator does not save your progress or data on the cloud. Every time you run an app online, it starts from scratch. If you want to save your progress or data, you need to use a real device or an emulator that supports data storage.

          -
        9. Can I run multiple apps at the same time on ApkOnline Android Online Emulator?
        10. -

          No, ApkOnline Android Online Emulator only allows you to run one app at a time on one tab. If you want to run multiple apps at the same time, you need to open multiple tabs and run each app separately. However, this might affect the performance and speed of the emulator.

          -

        -
        -
        \ No newline at end of file diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse - The Ultimate Open World Survival RPG.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse - The Ultimate Open World Survival RPG.md deleted file mode 100644 index 014311dda44c76bb96d8284a56f80f8700c8cb14..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bad 2 Bad Apocalypse - The Ultimate Open World Survival RPG.md +++ /dev/null @@ -1,97 +0,0 @@ -
        -

        Bad 2 Bad: Apocalypse - A Review of the Open World Survival RPG

        -

        If you are looking for a challenging and immersive open world survival RPG, you might want to check out Bad 2 Bad: Apocalypse. This game is the sequel to Bad 2 Bad: Delta and Extinction, and it follows the story of the Delta Team, led by Major Pan, saving and reconstructing the world ravaged by a virus from the Human Forces. In this article, we will review the game's features, gameplay, graphics, sound, and more. We will also answer some frequently asked questions about the game.

        -

        bad 2 bad apocalypse apkaward


        Downloadhttps://urlin.us/2uT2hq



        -

        Introduction

        -

        What is Bad 2 Bad: Apocalypse?

        -

        Bad 2 Bad: Apocalypse is a mobile game developed by DAWINSTONE, an indie game development team from South Korea. The game is available for Android devices on Google Play and for iOS devices on App Store. The game is free to download and play, but it contains in-app purchases and ads.

        -

        What are the main features of the game?

        -

        According to the developer, some of the main features of the game are:

        -
          -
        • Vast Open World RPG! An expanded world to explore!
        • -
        • Exploration, Gathering, Fishing and Crafting for survival!
        • -
• Three times as many items and weapons as the previous game
        • -
        • More detailed character customization and appearances
        • -
        • Over 60 maps and regions to explore!
        • -
        • "World Missions" that take place across the globe
        • -
        • Create and upgrade your own special forces team
        • -
        • Artillery Support, Air Support, and Powerful Drones!
        • -
        • Embark on "Battle Armor" and engage in combat
        • -
        • Advanced graphics and upgraded systems
        • -
        -

        Gameplay

        -

        Exploration, Gathering, Fishing and Crafting

        -

        The core gameplay of Bad 2 Bad: Apocalypse is based on exploration, gathering, fishing, and crafting for survival. You can explore various locations such as forests, deserts, cities, islands, mountains, etc. You can gather resources such as wood, stone, metal, food, water, etc. You can fish in rivers, lakes, or oceans. You can craft items such as weapons, armor, tools, medicine, etc. You can also upgrade your base camp by building facilities such as barracks, workshops, farms, etc.

        -

        Customization and Squad System

        -

        The game also allows you to customize your character's appearance and equipment. You can choose from different races such as humans, animals, or hybrids. You can change your character's hair style, color, eyes, nose, mouth, etc. You can also equip your character with various weapons such as rifles, pistols, shotguns, snipers, etc. You can also equip your character with accessories such as helmets, goggles, masks, gloves, etc.

        -

In addition to your main character, you can also create and upgrade your own special forces team by recruiting characters from different factions, such as Delta Force.

        Support Weapons and Battle Armor

        -

        Another feature of the game is the use of support weapons and battle armor to enhance your combat capabilities. You can call for artillery support from self-propelled artilleries, air support from attack helicopters, and combat drones to assist you in battle . You can also embark on the powerful tactical weapon "Battle Armor" and ride into battle . The Battle Armor is a mechanized suit that can fire missiles, rockets, and machine guns. You can also upgrade the Battle Armor with different types and models.

        -

        Graphics and Sound

        -

        How does the game look and sound?

        -

        The game has advanced graphics and upgraded systems compared to the previous games in the series. The game features a 2D pixel art style with smooth animations and dynamic lighting effects. The game also has a realistic weather system that changes according to the time and location. The game has a variety of sound effects and music tracks that match the mood and atmosphere of the game. The game also has voice acting for some of the main characters and dialogues.

        -

        bad 2 bad apocalypse mod apk download
        -bad 2 bad apocalypse cheats and tips
        -bad 2 bad apocalypse game review
        -bad 2 bad apocalypse best weapons and equipment
        -bad 2 bad apocalypse how to craft items
        -bad 2 bad apocalypse open world survival rpg
        -bad 2 bad apocalypse latest version update
        -bad 2 bad apocalypse free apk for android
        -bad 2 bad apocalypse squad system and tactics
        -bad 2 bad apocalypse world missions and regions
        -bad 2 bad apocalypse base camp upgrade guide
        -bad 2 bad apocalypse exploration and gathering
        -bad 2 bad apocalypse fishing and cooking
        -bad 2 bad apocalypse character customization and appearance
        -bad 2 bad apocalypse night vision and accessories
        -bad 2 bad apocalypse artillery and air support
        -bad 2 bad apocalypse combat drones and battle armor
        -bad 2 bad apocalypse virus-infected wilders and enemies
        -bad 2 bad apocalypse delta team and major pan story
        -bad 2 bad apocalypse sequel to delta and extinction
        -dawinstone games - developer of bad 2 bad apocalypse
        -how to play bad 2 bad apocalypse offline mode
        -how to install xapk file of bad 2 bad apocalypse
        -how to backup and restore data of bad 2 bad apocalypse
        -how to contact dawinstone support for bad 2 bad apocalypse
        -is there a pc version of bad 2 bad apocalypse available
        -is there a ios version of bad 2 bad apocalypse available
        -is there a multiplayer mode in bad 2 bad apocalypse
        -is there a wiki page for bad 2 bad apocalypse game
        -is there a forum for discussing about bad 2 bad apocalypse game
-what are the minimum requirements to run the game

        -

        What are the pros and cons of the graphics and sound?

        -

        The pros of the graphics and sound are:

        -
          -
        • The pixel art style is charming and nostalgic, and it suits the theme of the game.
        • -
        • The animations and lighting effects are impressive and add to the immersion of the game.
        • -
        • The weather system is realistic and dynamic, and it affects the gameplay and environment.
        • -
        • The sound effects and music tracks are diverse and fitting, and they enhance the mood and atmosphere of the game.
        • -
        • The voice acting is decent and adds personality to the characters.
        • -
        -

        The cons of the graphics and sound are:

        -
          -
        • The pixel art style may not appeal to everyone, especially those who prefer more realistic or modern graphics.
        • -
        • The animations and lighting effects may cause lag or performance issues on some devices.
        • -
        • The weather system may be too random or unpredictable, and it may interfere with the gameplay or visibility.
        • -
        • The sound effects and music tracks may be repetitive or annoying, especially after playing for a long time.
        • -
        • The voice acting may be inconsistent or low quality, especially for some of the minor characters or dialogues.
        • -

        Conclusion

        -

        Summary of the main points

        -

        In conclusion, Bad 2 Bad: Apocalypse is a challenging and immersive open world survival RPG that follows the story of the Delta Team saving and reconstructing the world from a virus. The game has various features such as exploration, gathering, fishing, crafting, customization, squad system, support weapons, and battle armor. The game has advanced graphics and upgraded systems that create a realistic and dynamic environment. The game also has diverse sound effects and music tracks that match the mood and atmosphere of the game.

        -

        Recommendation and rating

        -

        We recommend Bad 2 Bad: Apocalypse to anyone who enjoys open world survival RPGs with a pixel art style and a post-apocalyptic theme. The game is fun and engaging, and it offers a lot of content and replay value. The game is also free to download and play, but it contains in-app purchases and ads. We rate the game 4.5 out of 5 stars, based on its features, gameplay, graphics, sound, and overall quality.

        -

        FAQs

        -

        Q1: How to download and install Bad 2 Bad: Apocalypse?

        -

        A1: You can download and install Bad 2 Bad: Apocalypse from Google Play or App Store, depending on your device. You need to have at least Android 4.4 or iOS 9.0 or later to run the game. You also need to have enough storage space on your device to install the game.

        -

        Q2: How to upgrade the base camp and equipment?

        -

        A2: You can upgrade your base camp by building and improving facilities such as barracks, workshops, farms, etc. You need to gather resources such as wood, stone, metal, food, water, etc. to build and upgrade the facilities. You can also upgrade your equipment by crafting or buying new weapons, armor, tools, medicine, etc. You need to gather resources or money to craft or buy new equipment.

        -

        Q3: How to unlock new characters and skins?

        -

        A3: You can unlock new characters and skins by completing world missions, citadel missions, or special events. You can also unlock new characters and skins by spending diamonds or gold coins in the shop. Diamonds are the premium currency of the game that you can buy with real money or earn by watching ads or completing tasks. Gold coins are the common currency of the game that you can earn by playing the game or selling items.

        -

        Q4: How to complete the world missions and citadel?

        -

        A4: World missions are quests that take place across the globe. You can access them from the world map or from the mission board in your base camp. World missions have different objectives such as eliminating enemies, rescuing allies, collecting items, etc. You can earn rewards such as resources, money, items, characters, skins, etc. by completing world missions.

        -

        Citadel is a special mode that challenges you to survive waves of enemies in a fortified base. You can access it from the world map or from the mission board in your base camp. Citadel has different levels of difficulty such as easy, normal, hard, etc. You can earn rewards such as resources, money, items, characters, skins, etc. by completing citadel.

        -

        Q5: How to contact the developer and get support?

        -

        A5: You can contact the developer and get support by visiting their official website, Facebook page, YouTube channel, or email address. You can also visit their community forum or Discord server to interact with other players and get tips and feedback.

        -
        -
        \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download YouTube Shorts in HD Quality No Watermark Free Fast.md b/spaces/1phancelerku/anime-remove-background/Download YouTube Shorts in HD Quality No Watermark Free Fast.md deleted file mode 100644 index e989e619ff7bc1695f228a32798227e3dde3d3f9..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download YouTube Shorts in HD Quality No Watermark Free Fast.md +++ /dev/null @@ -1,128 +0,0 @@ - -

        How to Download YouTube Shorts on Your Device

        -

        YouTube Shorts are short-form videos that are similar to TikTok and Instagram Reels. They are vertical videos that are 60 seconds or less in length. You can watch them in a never-ending feed of content that is personalized for you. You can also create your own Shorts using the camera and editing tools in the YouTube app.

        -

        youtube short download


        Download Zip ✏ ✏ ✏ https://jinyurl.com/2uNP34



        -

        If you want to download YouTube Shorts on your device, whether it is your own or someone else's, there are different ways to do it. In this article, we will show you how to download YouTube Shorts on PC, Android, and iOS devices.

        -

        What are YouTube Shorts?

        -

        YouTube Shorts are a new feature that YouTube introduced in 2020 to compete with TikTok and Instagram Reels. They are short videos that are designed to be watched on mobile devices. They can be up to 60 seconds long, but if you use music from the YouTube catalog, they will be limited to 15 seconds.

        -

        You can watch YouTube Shorts in a dedicated tab in the YouTube app, or by swiping up on any Short video. You can also find them in the regular YouTube feed, where they will have a #Shorts label.

        -

        YouTube Shorts vs TikTok

        -

        YouTube Shorts and TikTok are very similar in terms of content and format. They both offer vertical videos that are fun, engaging, and viral. They both have music and sound effects that you can use for your videos. They both have filters, stickers, text, and other editing tools that you can use to enhance your videos.

        -

        However, there are some differences between them as well. For example:

        -
          -
        • YouTube Shorts are integrated with the main YouTube platform, which means you can access them from the same app and account. You can also watch longer videos from the same creators or channels. TikTok is a separate app and platform that focuses only on short videos.
        • -
        • YouTube Shorts have a maximum length of 60 seconds, while TikTok videos can be up to three minutes long (or even longer for some users).
        • -
        • YouTube Shorts have a wider range of music options, as you can use any song from the YouTube music library or upload your own audio. TikTok has a more limited selection of songs and sounds that you can use.
        • -
        • YouTube Shorts have more monetization options, as you can earn money from ads that run between videos in the Shorts feed or from YouTube Premium subscribers who watch your Shorts. You can also link your Shorts to your longer videos or other products or services that you offer. TikTok has a creator fund that pays some users based on their views, but it is not available in all countries or for all users.
        • -
        -

        YouTube Shorts monetization

        -

        If you are a YouTube partner who has accepted the YouTube Partner Program terms, you can earn money from your YouTube Shorts. There are two ways to do this:

        -
          -
        • Ad revenue sharing: You can earn a share of the revenue from ads that run between videos in the Shorts feed. This revenue is pooled together and distributed among eligible creators based on their views and music usage.
        • -
        • YouTube Premium revenue sharing: You can earn a share of the revenue from YouTube Premium subscribers who watch your Shorts. This revenue is based on how much time they spend watching your Shorts compared to other content.
        • -
        -

To be eligible for monetization, your Shorts must follow the YouTube channel monetization policies, the advertiser-friendly content guidelines, and the community guidelines. You must also have at least 1,000 subscribers and either 4,000 valid public watch hours in the past 12 months or 10 million valid public Shorts views in the past 90 days.

        How to Download Your Own YouTube Shorts

        -

        If you have created your own YouTube Shorts and you want to download them to your device, you can do so using either YouTube Studio on PC or the YouTube app on mobile. Here are the steps for each method:

        -

        Using YouTube Studio on PC

        -
          -
        1. Go to YouTube Studio and sign in with your Google account.
        2. -
        3. Click on the Content tab on the left sidebar.
        4. -
        5. Find the Short video that you want to download and click on the Details button.
        6. -
        7. Click on the Download button on the top right corner of the screen.
        8. -
        9. Choose a location and a file name for your video and click on Save.
        10. -
        -

        Using YouTube app on mobile

        -
          -
        1. Open the YouTube app on your Android or iOS device and sign in with your Google account.
        2. -
        3. Tap on your profile picture on the top right corner of the screen and select Your channel.
        4. -
        5. Tap on the Videos tab and find the Short video that you want to download.
        6. -
        7. Tap on the More icon (three dots) below the video and select Download.
        8. -
        9. Choose a quality option and tap on OK.
        10. -
        11. The video will be downloaded to your device and you can find it in your gallery or camera roll.
        12. -
        -

        How to Download Other People's YouTube Shorts

        -

        If you want to download YouTube Shorts from other creators or channels, you have two options: using the YouTube app on mobile or using third-party YouTube Shorts downloaders. Here are the steps for each option:

        -

        How to download YouTube Shorts videos
        -YouTube Shorts downloader app
        -YouTube Shorts watermark remover
        -Download YouTube Shorts on Android
        -Download YouTube Shorts on iPhone
        -Download YouTube Shorts on PC
        -Best YouTube Shorts downloader online
        -Save YouTube Shorts to gallery
        -Download YouTube Shorts without login
        -Download own YouTube Shorts video
        -YouTube Shorts video converter
        -Download YouTube Shorts with sound
        -Download YouTube Shorts in HD quality
        -Download YouTube Shorts in MP4 format
        -Download YouTube Shorts in MP3 format
        -How to edit YouTube Shorts videos
        -How to make YouTube Shorts videos
        -How to upload YouTube Shorts videos
        -How to monetize YouTube Shorts videos
        -How to get more views on YouTube Shorts videos
        -YouTube Shorts tips and tricks
        -YouTube Shorts vs TikTok videos
        -YouTube Shorts vs Instagram Reels videos
        -YouTube Shorts vs Snapchat Spotlight videos
        -Best apps for creating YouTube Shorts videos
        -Best music for YouTube Shorts videos
        -Best hashtags for YouTube Shorts videos
        -Best niches for YouTube Shorts videos
        -Best examples of YouTube Shorts videos
        -Best channels for YouTube Shorts videos
        -How to grow your channel with YouTube Shorts videos
        -How to optimize your channel for YouTube Shorts videos
        -How to use analytics for YouTube Shorts videos
        -How to promote your YouTube Shorts videos
        -How to collaborate with other creators on YouTube Shorts videos
        -How to add subtitles to YouTube Shorts videos
        -How to add filters to YouTube Shorts videos
        -How to add stickers to YouTube Shorts videos
        -How to add transitions to YouTube Shorts videos
        -How to add effects to YouTube Shorts videos
        -How to trim YouTube Shorts videos
        -How to crop YouTube Shorts videos
        -How to rotate YouTube Shorts videos
        -How to speed up or slow down YouTube Shorts videos
        -How to reverse YouTube Shorts videos
        -How to loop YouTube Shorts videos
        -How to mute or unmute YouTube Shorts videos
        -How to change the aspect ratio of YouTube Shorts videos
        -How to change the background of YouTube Shorts videos
        -How to change the thumbnail of YouTube Shorts videos

        -

        Using YouTube app on mobile

        -
          -
        1. Open the YouTube app on your Android or iOS device and sign in with your Google account.
        2. -
        3. Navigate to the Short video that you want to download and tap on it to play it.
        4. -
        5. Tap on the Share icon (an arrow) below the video and select Copy link.
        6. -
7. Paste the link into a note app or a browser and add "ss" before "youtube" in the URL. For example, if the link is https://www.youtube.com/watch?v=abcde, change it to https://www.ssyoutube.com/watch?v=abcde. (A small scripted version of this step is shown right after this list.)
        8. -
        9. This will take you to a website called SaveTube, where you can download the video in different formats and qualities.
        10. -
        11. Select a format and a quality option and tap on Download.
        12. -
        13. The video will be downloaded to your device and you can find it in your gallery or camera roll.
        14. -
        -
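If you prefer to script the "ss" link change instead of editing the URL by hand, here is a tiny Python sketch of the same idea. It only rewrites the address and does not download anything, and the helper name is made up purely for illustration:

# Hypothetical helper: insert "ss" before "youtube" in a copied YouTube link.
def to_ss_url(url: str) -> str:
    # "https://www.youtube.com/watch?v=abcde" -> "https://www.ssyoutube.com/watch?v=abcde"
    return url.replace("youtube", "ssyoutube", 1)

print(to_ss_url("https://www.youtube.com/watch?v=abcde"))

Paste the printed address into your browser and the SaveTube download page described above should open as before.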

        Using YouTube Shorts downloaders

        -

        If you don't want to use the YouTube app or SaveTube, you can also use other websites or apps that allow you to download YouTube Shorts. Here are some examples of such tools:

        -

        Tube Shorts

        -

        Tube Shorts is a website that lets you download YouTube Shorts in MP4 format. You just need to paste the link of the Short video that you want to download and click on Download MP4. You can also scan a QR code to download the video directly to your mobile device.

        -

        Heat Feed

        -

        Heat Feed is another website that allows you to download YouTube Shorts in MP4 format. You just need to paste the link of the Short video that you want to download and click on Download Video Now. You can also choose a quality option before downloading.

        -

        Note: These websites are not affiliated with or endorsed by YouTube. Use them at your own risk and respect the rights of the original creators.

        -

        Conclusion

        -

        In this article, we have shown you how to download YouTube Shorts on your device, whether they are your own or someone else's. You can use either YouTube Studio on PC, YouTube app on mobile, or third-party YouTube Shorts downloaders. We hope this article was helpful and informative for you. If you have any questions or feedback, please let us know in the comments below.

        -

        Frequently Asked Questions (FAQs)

        -
          -
        • Q: Can I download YouTube Shorts without watermark?
        • -
        • A: Yes, if you use YouTube Studio on PC or YouTube app on mobile, you can download your own YouTube Shorts without watermark. If you use third-party YouTube Shorts downloaders, some of them may add a watermark to the downloaded videos.
        • -
        • Q: Can I download YouTube Shorts with sound?
        • -
        • A: Yes, if you use YouTube Studio on PC or YouTube app on mobile, you can download your own YouTube Shorts with sound. If you use third-party YouTube Shorts downloaders, some of them may also allow you to download YouTube Shorts with sound, while others may only download the video without sound.
        • -
        • Q: Can I download YouTube Shorts on iPhone?
        • -
        • A: Yes, you can download YouTube Shorts on iPhone using the YouTube app or third-party YouTube Shorts downloaders. However, you may need to use a file manager app or a video player app to access the downloaded videos on your iPhone.
        • -
        • Q: Can I edit YouTube Shorts after downloading them?
        • -
        • A: Yes, you can edit YouTube Shorts after downloading them using any video editing software or app that you prefer. You can trim, crop, rotate, add filters, text, music, and more to your downloaded YouTube Shorts.
        • -
        • Q: Can I upload downloaded YouTube Shorts to other platforms?
        • -
        • A: No, you should not upload downloaded YouTube Shorts to other platforms without the permission of the original creators. This may violate their intellectual property rights and cause legal issues. You should only use downloaded YouTube Shorts for personal or educational purposes.
        • -

        -
        -
        \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download youtube-dlg 0.4 A cross platform GUI for youtube-dl.md b/spaces/1phancelerku/anime-remove-background/Download youtube-dlg 0.4 A cross platform GUI for youtube-dl.md deleted file mode 100644 index f03b4b4b5339c3f060702673197a0f0da42f61d3..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download youtube-dlg 0.4 A cross platform GUI for youtube-dl.md +++ /dev/null @@ -1,158 +0,0 @@ - -

        Youtube-dlg 0.4: A Cross-Platform GUI for Youtube-dl

        -

        If you are looking for a simple and easy way to download videos from various websites, you might want to try Youtube-dlg 0.4, a cross-platform graphical user interface (GUI) for the popular youtube-dl command-line tool. In this article, we will explain what Youtube-dlg is, what features it offers, how to download and install it, how to use it, and what are its pros and cons.

        -

        youtube-dlg 0.4 download


        Download File > https://jinyurl.com/2uNNGa



        -

        What is Youtube-dlg?

        -

        Youtube-dlg is a front-end GUI for youtube-dl, a powerful media downloader that can handle hundreds of websites and formats. Youtube-dl is a command-line tool that requires some knowledge of terminal commands and options to use it effectively. Youtube-dlg simplifies the process by providing a user-friendly interface that allows you to enter the URL of the video you want to download, choose the format and quality, and start the download with a click of a button.

        -

        Features of Youtube-dlg

        -

        Youtube-dlg has many features that make it a convenient and versatile tool for downloading videos from the web. Some of these features are:

        -
          -
        • Written in wxPython, which makes it cross-platform and compatible with Windows, Linux, and Mac OS.
        • -
• Optional FFmpeg support to post-process video files and convert them to different formats.
        • -
        • Supports multiple downloads at the same time, with the option to pause, resume, delete, or move them in the queue.
        • -
        • Allows you to change the naming pattern of the downloaded files by choosing from different output templates or creating your own custom one.
        • -
        • Lets you add extra youtube-dl command-line options in a separate box for more advanced functionality.
        • -
        • Supports updating youtube-dl automatically or manually, or using your own version of youtube-dl by specifying its path.
        • -
        -

        Supported sites by Youtube-dlg

        -

Youtube-dlg uses youtube-dl in the backend to download files, which means that it supports all the sites that youtube-dl supports. According to the official documentation of youtube-dl, there are more than 1000 supported sites (a short script for printing the full list on your own machine appears right after this list), including:

        -
          -
        • YouTube
        • -
        • Vimeo
        • -
        • Facebook
        • -
        • Twitch
        • -
        • TikTok
        • -
        • SoundCloud
        • -
        • Dailymotion
        • -
        • Instagram
        • -
        • Reddit
        • -
        • Twitter
        • -
        • and many more...
        • -
        -
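If you want to see the exact extractor list bundled with the youtube-dl version installed on your own machine, a short Python sketch like the one below will print it. It assumes youtube-dl is installed as a Python package; gen_extractors() and IE_NAME are part of youtube-dl's extractor module, and the count you get depends on the release you have:

# Sketch: list the site extractors shipped with the installed youtube-dl,
# which is what determines the sites Youtube-dlg can handle.
import youtube_dl

extractors = youtube_dl.extractor.gen_extractors()
names = sorted(ie.IE_NAME for ie in extractors)
print(len(names), "extractors available, for example:", names[:10])

From the command line, youtube-dl --list-extractors prints the same information.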

        How to download and install Youtube-dlg 0.4?

        -

        Download options for Youtube-dlg 0.4

        -

        There are several ways to download Youtube-dlg 0.4, depending on your operating system and preference. Here are some of the most common options:

        - - - - - - - -
• SourceForge: You can download the latest version of Youtube-dlg 0.4 from SourceForge as a ZIP or TAR file. You can also find older versions on the same page.
• PyPi: You can download Youtube-dlg 0.4 as a Python package from PyPi. You will need Python 2.7.3 or higher to install it.
• GitHub: You can download Youtube-dlg 0.4 from its GitHub repository. You can also find the source code, issues, and pull requests there.
• Homebrew: If you are using Mac OS, you can install Youtube-dlg 0.4 using Homebrew, a package manager for Mac OS. You will need to run the following command in the terminal: brew install youtube-dlg
• AUR: If you are using Arch Linux or a derivative, you can install Youtube-dlg 0.4 from the AUR, a community-driven repository for Arch Linux. You will need to use an AUR helper such as yay or pacaur to install it.
        -

        Installation steps for Youtube-dlg 0.4

        -

        The installation steps for Youtube-dlg 0.4 vary depending on the download option you choose and the operating system you use. Here are some general steps that apply to most cases:

        -
          -
        1. Download Youtube-dlg 0.4 from one of the options mentioned above.
        2. -
        3. Extract the downloaded file to a folder of your choice.
        4. -
        5. Run the youtube-dlg executable file in the folder. On Windows, it is youtube-dlg.exe; on Linux and Mac OS, it is youtube-dl-gui.
        6. -
        7. Alternatively, you can run Youtube-dlg 0.4 from the command line by navigating to the folder where you extracted it and typing youtube-dlg.
        8. -
        9. If you encounter any errors or missing dependencies, you may need to install them manually or update your system.
        10. -
        -

        How to use Youtube-dlg 0.4?

        -

        Basic usage of Youtube-dlg 0.4

        -

        Using Youtube-dlg 0.4 is very simple and straightforward. Here are the basic steps to download a video using Youtube-dlg 0.4:

        -

        youtube-dlg 0.4 windows setup
        -youtube-dlg 0.4 portable zip
        -youtube-dlg 0.4 source code
        -youtube-dlg 0.4 linux install
        -youtube-dlg 0.4 mac os x
        -youtube-dlg 0.4 changelog
        -youtube-dlg 0.4 documentation
        -youtube-dlg 0.4 screenshots
        -youtube-dlg 0.4 supported sites
        -youtube-dlg 0.4 ffmpeg optional
        -youtube-dlg 0.4 new UI
        -youtube-dlg 0.4 post processing
        -youtube-dlg 0.4 output template
        -youtube-dlg 0.4 command line options
        -youtube-dlg 0.4 custom binary
        -youtube-dlg 0.4 issues and bugs
        -youtube-dlg 0.4 reviews and ratings
        -youtube-dlg 0.4 alternatives and competitors
        -youtube-dlg 0.4 features and benefits
        -youtube-dlg 0.4 FAQs and tutorials
        -youtube-dlg 0.4 license and terms
        -youtube-dlg 0.4 contributors and developers
        -youtube-dlg 0.4 translations and languages
        -youtube-dlg 0.4 updates and releases
        -youtube-dlg 0.4 github repository
        -youtube-dl gui for windows download
        -download videos with youtube-dl gui
        -how to use youtube-dl gui on linux
        -best settings for youtube-dl gui mac
        -latest version of youtube-dl gui python
        -free and open source youtube downloader gui
        -cross platform front-end for youtube downloader
        -download playlists and channels with youtube dl gui
        -how to install ffmpeg for youtube dl gui
        -how to change save path in youtube dl gui
        -how to add extra options in youtube dl gui
        -how to update youtube dl in youtube dl gui
        -how to fix errors in youtube dl gui
        -how to customize filename format in youtube dl gui
        -how to change number of workers in youtube dl gui
        -how to download subtitles with youtube dl gui
        -how to download audio only with youtube dl gui
        -how to download live streams with youtube dl gui
        -how to download multiple urls with youtube dl gui
        -how to download from unsupported sites with youtube dl gui
        -how to convert videos with post processing in youtube dl gui

        -
          -
        1. Open Youtube-dlg 0.4 and you will see a main window with a text box and a button that says "Add".
        2. -
        3. Copy the URL of the video you want to download from your browser and paste it in the text box.
        4. -
        5. Click on the "Add" button and the video will be added to the download list below.
        6. -
        7. You can repeat steps 2 and 3 to add more videos to the download list.
        8. -
        9. You can also drag and drop URLs from your browser to the text box or the download list.
        10. -
        11. If you want to change the format or quality of the video, you can click on the "Options" button next to each video in the download list and select from the available options.
        12. -
        13. If you want to change the output folder where the videos will be saved, you can click on the "Options" button at the bottom right corner of the main window and select a different folder.
        14. -
        15. When you are ready to start downloading, click on the "Download" button at the bottom right corner of the main window and wait for the download to finish.
        16. -
        17. You can monitor the progress of each download by looking at the status bar below each video in the download list.
        18. -
        19. You can also pause, resume, delete, or move up or down each download by clicking on the corresponding buttons next to each video in the download list.
        20. -
        -

        Advanced options of Youtube-dlg 0.4

        -

        If you want to access more advanced features of Youtube-dlg 0.4, you can click on the "Options" button at the bottom right corner of the main window and select "Advanced Options". This will open a new window where you can customize various settings of Youtube-dlg 0.4, such as:

        -
          -
        • The naming pattern of the downloaded files by choosing from different output templates or creating your own custom one.
        • -
        • The post-processing options for converting or merging video and audio files using FFmpeg.
        • -
• The extra youtube-dl command-line options that you want to add for more functionality (a short sketch of typical options appears right after this list).
        • -
        • The update options for youtube-dl, such as automatic or manual update, or using your own version of youtube-dl by specifying its path.
        • -
        • The log options for saving or viewing log files of youtube-dl and Youtube-dlg 0.4.
        • -
        -
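To make the output-template and extra-options fields more concrete, here is a minimal, hedged sketch of the same settings expressed through youtube-dl's Python API, which is the backend Youtube-dlg drives. The option keys (outtmpl, format, noplaylist) are standard youtube-dl options; the URL is only a placeholder, and this is not Youtube-dlg's own code:

# Minimal sketch: the kind of options that Youtube-dlg's output-template and
# extra-options boxes ultimately pass to youtube-dl. Placeholder URL only.
import youtube_dl

ydl_opts = {
    # Naming pattern, equivalent to a custom output template in the GUI.
    "outtmpl": "%(uploader)s - %(title)s.%(ext)s",
    # Prefer a single MP4 file, otherwise fall back to the best available format.
    "format": "best[ext=mp4]/best",
    # Example of an extra option: ignore playlists when a single video URL is given.
    "noplaylist": True,
}

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(["https://www.youtube.com/watch?v=abcde"])  # placeholder URL

In the GUI, the same effect comes from picking or writing an output template and typing the corresponding command-line flags (for example -f "best[ext=mp4]/best" --no-playlist) into the extra options box.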

        Pros and cons of Youtube-dlg 0.4

        -

        Pros of Youtube-dlg 0.4

        -

        Youtube-dlg 0.4 has many advantages that make it a great tool for downloading videos from the web. Some of these advantages are:

        -
          -
        • It is free and open-source, which means that you can use it without any cost or restriction, and you can also contribute to its development or improvement.
        • -
        • It is cross-platform and compatible with Windows, Linux, and Mac OS, which means that you can use it on any device or system that you have.
        • -
        • It is easy and simple to use, which means that you don't need any technical skills or knowledge to download videos with it.
        • -
        • It supports hundreds of websites and formats, which means that you can download almost any video that you want from the web.
        • -
        • It offers many features and options, which means that you can customize your downloads according to your preferences and needs.
        • -
        -

        Cons of Youtube-dlg 0.4

        -

        Youtube-dlg 0.4 also has some disadvantages that you should be aware of before using it. Some of these disadvantages are:

        -
          -
        • It depends on youtube-dl, which means that if youtube-dl is not updated or working properly, Youtube-dlg 0.4 may not work either.
        • -
        • It may not support some websites or formats that youtube-dl does not support, which means that you may not be able to download some videos that you want from the web.
        • -
        • It may have some bugs or errors, which means that it may not work as expected or cause some problems for your device or system.
        • -
        • It may not have some features or options that youtube-dl has, which means that you may not be able to access some functionality that youtube-dl offers.
        • -
        -

        Conclusion

        -

        In conclusion, Youtube-dlg 0.4 is a cross-platform GUI for youtube-dl that allows you to download videos from various websites with ease and convenience. It has many features and options that make it a versatile and powerful tool for downloading videos from the web. However, it also has some drawbacks that you should consider before using it. If you are looking for a simple and easy way to download videos from the web, you might want to give Youtube-dlg 0.4 a try.

        -

        FAQs

        -

        Here are some frequently asked questions about Youtube-dlg 0.4:

        -
          -
        1. Is Youtube-dlg 0.4 safe to use?
          -Yes, Youtube-dlg 0.4 is safe to use as long as you download it from a trusted source and scan it for viruses or malware before running it. However, you should also be careful about the videos that you download from the web, as they may contain harmful content or infringe on the rights of the original creators.
        2. -
        3. Is Youtube-dlg 0.4 legal to use?
          -Yes, Youtube-dlg 0.4 is legal to use as long as you use it for personal and non-commercial purposes and respect the terms of service and privacy policies of the websites that you download videos from. However, you should also be aware of the laws and regulations of your country or region regarding downloading videos from the web, as they may vary depending on the location and situation.
        4. -
        5. How can I update Youtube-dlg 0.4?
          -You can update Youtube-dlg 0.4 by clicking on the "Options" button at the bottom right corner of the main window and selecting "Update". You can also check for updates manually by clicking on the "Help" menu at the top left corner of the main window and selecting "Check for updates". Alternatively, you can download the latest version of Youtube-dlg 0.4 from one of the options mentioned above and replace the old version with it.
        6. -
        7. How can I contact the developers of Youtube-dlg 0.4?
          -You can contact the developers of Youtube-dlg 0.4 by visiting their GitHub page and opening an issue or a pull request there. You can also join their Discord server and chat with them there.
        8. -
        9. How can I support the development of Youtube-dlg 0.4?
          -You can support the development of Youtube-dlg 0.4 by donating to their PayPal account or by becoming a patron on their Patreon page. You can also support them by giving them feedback, reporting bugs, suggesting features, or spreading the word about their project.
        10. -
Sources: https://sourceforge.net/projects/youtube-dl-gui/ and https://pypi.org/project/youtube_dl_gui

        -
        -
        \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/FS 12 Mod APK How to Unlock Unlimited Money in Farming Simulator 22.md b/spaces/1phancelerku/anime-remove-background/FS 12 Mod APK How to Unlock Unlimited Money in Farming Simulator 22.md deleted file mode 100644 index 5cccec14e9fdcddacb4ff5480bda540ec97c24c6..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/FS 12 Mod APK How to Unlock Unlimited Money in Farming Simulator 22.md +++ /dev/null @@ -1,104 +0,0 @@ - -

        FS 12 Mod APK Unlimited Money Download: How to Enjoy Farming Simulator 12 on Your Android Device

        -

        If you are a fan of farming games, you might have heard of Farming Simulator 12, one of the most popular and realistic farming simulation games for Android devices. In this game, you can experience the varied farming life in a wide, agricultural scenery with fields, roads, your farm, and a small village. You can cultivate your fields with various three-dimensional vehicles modeled after original machines and vehicles by prestigious manufacturers. You can also sell your harvest and invest in new equipment, buildings, and tools.

        -

        fs 12 mod apk unlimited money download


        DOWNLOAD ——— https://jinyurl.com/2uNMrX



        -

        However, if you want to enjoy the game without any limitations or restrictions, you might want to try FS 12 Mod APK Unlimited Money, a modified version of the original game that gives you unlimited money and access to all the features and items in the game. In this article, we will tell you what is FS 12 Mod APK Unlimited Money, how to download and install it on your Android device, and what are the benefits and precautions of using it.

        -

        What is Farming Simulator 12?

        -

        A realistic and immersive farming game

        -

        Farming Simulator 12 is a game developed by Giants Software, a Swiss video game developer that specializes in creating realistic and immersive simulation games. Farming Simulator 12 was released in 2012 for Android devices, and it has received positive reviews from critics and players alike. The game has been praised for its graphics, gameplay, physics, and variety of vehicles and equipment.

        -

        Features of Farming Simulator 12

        -

        Some of the features of Farming Simulator 12 are:

        -
          -
        • Put your favorite simulator into your pocket
        • -
        • Authentic machines by DEUTZ-FAHR, KRONE, KRAMPE, AMAZONE, LEMKEN and KOTTE
        • -
        • Use a variety of detailed equipment and trailers
        • -
        • Computer-steered assistants help you with your work
        • -
        • Career-mode with management-part
        • -
        • Three different plants: corn, canola and wheat
        • -
        • Freely accessible world with dynamic day-night-cycle
        • -
        -

        What is FS 12 Mod APK Unlimited Money?

        -

        A modified version of the original game

        -

        FS 12 Mod APK Unlimited Money is a modified version of the original Farming Simulator 12 game that gives you unlimited money and access to all the features and items in the game. This means that you can buy any vehicle, equipment, tool, building, or seed that you want without worrying about the cost. You can also upgrade your farm and expand your business as much as you want.

        -

        Benefits of using FS 12 Mod APK Unlimited Money

        -

        Some of the benefits of using FS 12 Mod APK Unlimited Money are:

        -
          -
        • You can enjoy the game without any limitations or restrictions
        • -
        • You can experiment with different vehicles, equipment, tools, buildings, and seeds
        • -
        • You can customize your farm and make it look more attractive
        • -
        • You can save time and effort by using computer-steered assistants
        • -
        • You can have more fun and excitement by exploring the open world with dynamic day-night-cycle
        • -
        -

        How to download and install FS 12 Mod APK Unlimited Money?

        -

        Steps to download and install FS 12 Mod APK Unlimited Money

        -

        If you want to download and install FS 12 Mod APK Unlimited Money on your Android device, you need to follow these steps:

        -
          -
        1. First, you need to uninstall the original Farming Simulator 12 game from your device if you have it installed.
        2. -
        3. Second, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.
        4. -
        5. Third, you need to download the FS 12 Mod APK Unlimited Money file from a reliable source. You can search for it on Google or use the link below. Make sure you download the latest version of the mod.
        6. -
        7. Fourth, you need to locate the downloaded file on your device and tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to complete.
        8. -
        9. Fifth, you need to launch the game and enjoy the unlimited money and features.
        10. -
        -

        Tips and precautions for using FS 12 Mod APK Unlimited Money

        -

        Before you start using FS 12 Mod APK Unlimited Money, here are some tips and precautions that you should keep in mind:

        -
          -
        • Make sure you have enough storage space on your device for the mod file and the game data.
        • -
        • Make sure you have a stable internet connection for downloading and installing the mod file.
        • -
        • Make sure you download the mod file from a trusted source and scan it for viruses or malware before installing it.
        • -
        • Make sure you backup your game data before uninstalling the original game or installing the mod file.
        • -
        • Make sure you do not use the mod file for online or multiplayer mode, as it may result in banning or suspension of your account.
        • -
        -

        Conclusion

        -

        Farming Simulator 12 is a fun and realistic farming simulation game that lets you experience the varied farming life on your Android device. However, if you want to enjoy the game without any limitations or restrictions, you can try FS 12 Mod APK Unlimited Money, a modified version of the original game that gives you unlimited money and access to all the features and items in the game. You can download and install FS 12 Mod APK Unlimited Money by following the steps mentioned above, but make sure you follow the tips and precautions as well. We hope this article was helpful and informative for you. Happy farming!

        -

        fs 12 mod apk unlimited money download free
        -fs 12 mod apk unlimited money download latest version
        -fs 12 mod apk unlimited money download android
        -fs 12 mod apk unlimited money download no root
        -fs 12 mod apk unlimited money download offline
        -fs 12 mod apk unlimited money download for pc
        -fs 12 mod apk unlimited money download 2023
        -fs 12 mod apk unlimited money download hack
        -fs 12 mod apk unlimited money download obb
        -fs 12 mod apk unlimited money download mediafire
        -fs 12 mod apk unlimited money download rexdl
        -fs 12 mod apk unlimited money download revdl
        -fs 12 mod apk unlimited money download apkpure
        -fs 12 mod apk unlimited money download happymod
        -fs 12 mod apk unlimited money download uptodown
        -fs 12 farming simulator mod apk unlimited money download
        -fs 12 gold edition mod apk unlimited money download
        -fs 22 vs fs 12 mod apk unlimited money download
        -how to install fs 12 mod apk unlimited money download
        -how to play fs 12 mod apk unlimited money download
        -how to get fs 12 mod apk unlimited money download
        -how to update fs 12 mod apk unlimited money download
        -how to use fs 22 cheats in fs 12 mod apk unlimited money download[^1^]
        -best features of fs 12 mod apk unlimited money download
        -best tips and tricks for fs 12 mod apk unlimited money download
        -best vehicles and equipment in fs 12 mod apk unlimited money download
        -best crops and animals in fs 12 mod apk unlimited money download
        -best maps and locations in fs 12 mod apk unlimited money download
        -best mods and addons for fs 12 mod apk unlimited money download
        -best graphics and sound in fs 12 mod apk unlimited money download
        -pros and cons of fs 12 mod apk unlimited money download
        -reviews and ratings of fs 12 mod apk unlimited money download
        -alternatives and competitors of fs 12 mod apk unlimited money download
        -benefits and drawbacks of fs 12 mod apk unlimited money download
        -advantages and disadvantages of fs 12 mod apk unlimited money download
        -comparison and contrast of fs 12 mod apk unlimited money download
        -similarities and differences of fs 12 mod apk unlimited money download
        -pros and cons of farming simulator vs real life farming with fs 12 mod apk unlimited money download
        -reviews and ratings of farming simulator vs real life farming with fs 12 mod apk unlimited money download

        -

        FAQs

        -

        What is the difference between Farming Simulator 12 and Farming Simulator 14?

        -

        Farming Simulator 12 and Farming Simulator 14 are two different versions of the same game series developed by Giants Software. Farming Simulator 14 was released in 2013 for Android devices, and it has some improvements and additions over Farming Simulator 12, such as new vehicles, equipment, crops, animals, maps, graphics, and gameplay modes.

        -

        Is FS 12 Mod APK Unlimited Money safe to use?

        -

        FS 12 Mod APK Unlimited Money is generally safe to use if you download it from a reliable source and scan it for viruses or malware before installing it. However, you should be careful not to use it for online or multiplayer mode, as it may result in banning or suspension of your account. You should also backup your game data before uninstalling the original game or installing the mod file.

        -

        How can I update FS 12 Mod APK Unlimited Money?

        -

        If you want to update FS 12 Mod APK Unlimited Money, you need to uninstall the old version of the mod file from your device and download the new version of the mod file from a trusted source. Then, you need to follow the same steps as mentioned above for installing the mod file. Make sure you backup your game data before uninstalling or installing the mod file.

        -

        Can I play FS 12 Mod APK Unlimited Money on PC?

        -

        To play FS 12 Mod APK Unlimited Money on PC, you need Android emulator software that can run Android apps and games on your computer. Popular Android emulators include BlueStacks, NoxPlayer, MEmu, and LDPlayer. Download and install an emulator on your PC, then follow the same steps described above to download and install FS 12 Mod APK Unlimited Money inside the emulator.

        -

        Can I play FS 12 Mod APK Unlimited Money with my friends?

        -

        You can play FS 12 Mod APK Unlimited Money with your friends over a local Wi-Fi network or a Bluetooth connection. However, you should not use the mod in online multiplayer mode, as it may get your account banned or suspended. You should also make sure that your friends have the same version of the mod file as you do.

        401be4b1e0
        -
        -
        \ No newline at end of file diff --git a/spaces/232labs/VToonify/vtoonify/model/raft/core/utils/flow_viz.py b/spaces/232labs/VToonify/vtoonify/model/raft/core/utils/flow_viz.py deleted file mode 100644 index dcee65e89b91b07ee0496aeb4c7e7436abf99641..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/raft/core/utils/flow_viz.py +++ /dev/null @@ -1,132 +0,0 @@ -# Flow visualization code used from https://github.com/tomrunia/OpticalFlow_Visualization - - -# MIT License -# -# Copyright (c) 2018 Tom Runia -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to conditions. -# -# Author: Tom Runia -# Date Created: 2018-08-03 - -import numpy as np - -def make_colorwheel(): - """ - Generates a color wheel for optical flow visualization as presented in: - Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007) - URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf - - Code follows the original C++ source code of Daniel Scharstein. - Code follows the the Matlab source code of Deqing Sun. - - Returns: - np.ndarray: Color wheel - """ - - RY = 15 - YG = 6 - GC = 4 - CB = 11 - BM = 13 - MR = 6 - - ncols = RY + YG + GC + CB + BM + MR - colorwheel = np.zeros((ncols, 3)) - col = 0 - - # RY - colorwheel[0:RY, 0] = 255 - colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY) - col = col+RY - # YG - colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG) - colorwheel[col:col+YG, 1] = 255 - col = col+YG - # GC - colorwheel[col:col+GC, 1] = 255 - colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC) - col = col+GC - # CB - colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB) - colorwheel[col:col+CB, 2] = 255 - col = col+CB - # BM - colorwheel[col:col+BM, 2] = 255 - colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM) - col = col+BM - # MR - colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR) - colorwheel[col:col+MR, 0] = 255 - return colorwheel - - -def flow_uv_to_colors(u, v, convert_to_bgr=False): - """ - Applies the flow color wheel to (possibly clipped) flow components u and v. - - According to the C++ source code of Daniel Scharstein - According to the Matlab source code of Deqing Sun - - Args: - u (np.ndarray): Input horizontal flow of shape [H,W] - v (np.ndarray): Input vertical flow of shape [H,W] - convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. 
- - Returns: - np.ndarray: Flow visualization image of shape [H,W,3] - """ - flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8) - colorwheel = make_colorwheel() # shape [55x3] - ncols = colorwheel.shape[0] - rad = np.sqrt(np.square(u) + np.square(v)) - a = np.arctan2(-v, -u)/np.pi - fk = (a+1) / 2*(ncols-1) - k0 = np.floor(fk).astype(np.int32) - k1 = k0 + 1 - k1[k1 == ncols] = 0 - f = fk - k0 - for i in range(colorwheel.shape[1]): - tmp = colorwheel[:,i] - col0 = tmp[k0] / 255.0 - col1 = tmp[k1] / 255.0 - col = (1-f)*col0 + f*col1 - idx = (rad <= 1) - col[idx] = 1 - rad[idx] * (1-col[idx]) - col[~idx] = col[~idx] * 0.75 # out of range - # Note the 2-i => BGR instead of RGB - ch_idx = 2-i if convert_to_bgr else i - flow_image[:,:,ch_idx] = np.floor(255 * col) - return flow_image - - -def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False): - """ - Expects a two dimensional flow image of shape. - - Args: - flow_uv (np.ndarray): Flow UV image of shape [H,W,2] - clip_flow (float, optional): Clip maximum of flow values. Defaults to None. - convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False. - - Returns: - np.ndarray: Flow visualization image of shape [H,W,3] - """ - assert flow_uv.ndim == 3, 'input flow must have three dimensions' - assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]' - if clip_flow is not None: - flow_uv = np.clip(flow_uv, 0, clip_flow) - u = flow_uv[:,:,0] - v = flow_uv[:,:,1] - rad = np.sqrt(np.square(u) + np.square(v)) - rad_max = np.max(rad) - epsilon = 1e-5 - u = u / (rad_max + epsilon) - v = v / (rad_max + epsilon) - return flow_uv_to_colors(u, v, convert_to_bgr) \ No newline at end of file diff --git a/spaces/44brabal/valentinafeve-yolos-fashionpedia/app.py b/spaces/44brabal/valentinafeve-yolos-fashionpedia/app.py deleted file mode 100644 index 4225f32ae326ca8b647116c005054b67fe8a402b..0000000000000000000000000000000000000000 --- a/spaces/44brabal/valentinafeve-yolos-fashionpedia/app.py +++ /dev/null @@ -1,7 +0,0 @@ -import gradio as gr -import torch -from transformers import pipeline - -pipe = pipeline("object-detection", model="valentinafeve/yolos-fashionpedia") - -gr.Interface.load("models/valentinafeve/yolos-fashionpedia").launch() \ No newline at end of file diff --git a/spaces/A00001/bingothoo/src/components/user-menu.tsx b/spaces/A00001/bingothoo/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/A00001/bingothoo/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - return ( -
        - - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - 设置用户 - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - 语音设置 - - - - - 开源地址 - - - - - - - - 托管地址 - 🤗 - - - - - - - 复制站点 - - - - - -
        版本信息 {pkg.version}
        -
        - - -
        站点域名
        -
        copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
        -
        -
        -
        -
        - ) -} diff --git a/spaces/AEUPH/AethericGPT/README.md b/spaces/AEUPH/AethericGPT/README.md deleted file mode 100644 index 20f31811a1117b023056796d9b60baedb0bdd3f8..0000000000000000000000000000000000000000 --- a/spaces/AEUPH/AethericGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AethericGPT -emoji: 🏃 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.40.1 -app_file: app.py -pinned: false -license: cc-by-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py deleted file mode 100644 index 2fa61f76c5cc3ab9f6a9643042afa8e1f2e1cb7f..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/distributed.py +++ /dev/null @@ -1,150 +0,0 @@ -import os - -import torch -import socket - -try: - import horovod.torch as hvd -except ImportError: - hvd = None - - -def is_global_master(args): - return args.rank == 0 - - -def is_local_master(args): - return args.local_rank == 0 - - -def is_master(args, local=False): - return is_local_master(args) if local else is_global_master(args) - - -def is_using_horovod(): - # NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set - # Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required... - ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"] - pmi_vars = ["PMI_RANK", "PMI_SIZE"] - if all([var in os.environ for var in ompi_vars]) or all( - [var in os.environ for var in pmi_vars] - ): - return True - else: - return False - - -def is_using_distributed(): - if "WORLD_SIZE" in os.environ: - return int(os.environ["WORLD_SIZE"]) > 1 - if "SLURM_NTASKS" in os.environ: - return int(os.environ["SLURM_NTASKS"]) > 1 - return False - - -def world_info_from_env(): - local_rank = 0 - for v in ( - "SLURM_LOCALID", - "MPI_LOCALRANKID", - "OMPI_COMM_WORLD_LOCAL_RANK", - "LOCAL_RANK", - ): - if v in os.environ: - local_rank = int(os.environ[v]) - break - global_rank = 0 - for v in ("SLURM_PROCID", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "RANK"): - if v in os.environ: - global_rank = int(os.environ[v]) - break - world_size = 1 - for v in ("SLURM_NTASKS", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "WORLD_SIZE"): - if v in os.environ: - world_size = int(os.environ[v]) - break - - return local_rank, global_rank, world_size - - -def init_distributed_device(args): - # Distributed training = training on more than one GPU. - # Works in both single and multi-node scenarios. 
- args.distributed = False - args.world_size = 1 - args.rank = 0 # global rank - args.local_rank = 0 - if args.horovod: - assert hvd is not None, "Horovod is not installed" - hvd.init() - world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"]) - world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"]) - local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) - args.local_rank = local_rank - args.rank = world_rank - args.world_size = world_size - # args.local_rank = int(hvd.local_rank()) - # args.rank = hvd.rank() - # args.world_size = hvd.size() - args.distributed = True - os.environ["LOCAL_RANK"] = str(args.local_rank) - os.environ["RANK"] = str(args.rank) - os.environ["WORLD_SIZE"] = str(args.world_size) - print( - f"Distributed training: local_rank={args.local_rank}, " - f"rank={args.rank}, world_size={args.world_size}, " - f"hostname={socket.gethostname()}, pid={os.getpid()}" - ) - elif is_using_distributed(): - if "SLURM_PROCID" in os.environ: - # DDP via SLURM - args.local_rank, args.rank, args.world_size = world_info_from_env() - # SLURM var -> torch.distributed vars in case needed - os.environ["LOCAL_RANK"] = str(args.local_rank) - os.environ["RANK"] = str(args.rank) - os.environ["WORLD_SIZE"] = str(args.world_size) - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url, - world_size=args.world_size, - rank=args.rank, - ) - elif "OMPI_COMM_WORLD_SIZE" in os.environ: # using Summit cluster - world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"]) - world_rank = int(os.environ["OMPI_COMM_WORLD_RANK"]) - local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) - args.local_rank = local_rank - args.rank = world_rank - args.world_size = world_size - torch.distributed.init_process_group( - backend=args.dist_backend, - init_method=args.dist_url, - world_size=args.world_size, - rank=args.rank, - ) - else: - # DDP via torchrun, torch.distributed.launch - args.local_rank, _, _ = world_info_from_env() - torch.distributed.init_process_group( - backend=args.dist_backend, init_method=args.dist_url - ) - args.world_size = torch.distributed.get_world_size() - args.rank = torch.distributed.get_rank() - args.distributed = True - print( - f"Distributed training: local_rank={args.local_rank}, " - f"rank={args.rank}, world_size={args.world_size}, " - f"hostname={socket.gethostname()}, pid={os.getpid()}" - ) - - if torch.cuda.is_available(): - if args.distributed and not args.no_set_device_rank: - device = "cuda:%d" % args.local_rank - else: - device = "cuda:0" - torch.cuda.set_device(device) - else: - device = "cpu" - args.device = device - device = torch.device(device) - return device diff --git a/spaces/AIGText/GlyphControl/ldm/modules/midas/__init__.py b/spaces/AIGText/GlyphControl/ldm/modules/midas/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/AISuperheroes/README/README.md b/spaces/AISuperheroes/README/README.md deleted file mode 100644 index e2c1c848b92002614336721985be9a97de243365..0000000000000000000000000000000000000000 --- a/spaces/AISuperheroes/README/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: README -emoji: 🐠 -colorFrom: red -colorTo: blue -sdk: static -pinned: false ---- -
        -

        - This classroom is a public open-source forum for creating and teaching AI together. Our goal is turning Pain to Joy and ultimately creating Superpowers for those in need. - More info and long-term documentation of our progress is at 🌲 Yggdrasil 🌲
        - The intended audience is those interested in learning or teaching AI and creative technologies for health care and clinical experts, while making it fast and easy for anyone interested in new technologies like the 🥇Huggingface AI Platform🥇, Streamlit, Gradio, ML Models, Datasets and 🥇HF Spaces🥇 to meet their goals. -

        -
        \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/__init__.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/extensions/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Abhilashvj/planogram-compliance/utils/segment/__init__.py b/spaces/Abhilashvj/planogram-compliance/utils/segment/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Adapter/T2I-Adapter/configs/mm/faster_rcnn_r50_fpn_coco.py b/spaces/Adapter/T2I-Adapter/configs/mm/faster_rcnn_r50_fpn_coco.py deleted file mode 100644 index a9ad9528b22163ae7ce1390375b69227fd6eafd9..0000000000000000000000000000000000000000 --- a/spaces/Adapter/T2I-Adapter/configs/mm/faster_rcnn_r50_fpn_coco.py +++ /dev/null @@ -1,182 +0,0 @@ -checkpoint_config = dict(interval=1) -# yapf:disable -log_config = dict( - interval=50, - hooks=[ - dict(type='TextLoggerHook'), - # dict(type='TensorboardLoggerHook') - ]) -# yapf:enable -dist_params = dict(backend='nccl') -log_level = 'INFO' -load_from = None -resume_from = None -workflow = [('train', 1)] -# optimizer -optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) -optimizer_config = dict(grad_clip=None) -# learning policy -lr_config = dict( - policy='step', - warmup='linear', - warmup_iters=500, - warmup_ratio=0.001, - step=[8, 11]) -total_epochs = 12 - -model = dict( - type='FasterRCNN', - pretrained='torchvision://resnet50', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - norm_eval=True, - style='pytorch'), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=5), - rpn_head=dict( - type='RPNHead', - in_channels=256, - feat_channels=256, - anchor_generator=dict( - type='AnchorGenerator', - scales=[8], - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[1.0, 1.0, 1.0, 1.0]), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0)), - roi_head=dict( - type='StandardRoIHead', - bbox_roi_extractor=dict( - type='SingleRoIExtractor', - roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), - out_channels=256, - featmap_strides=[4, 8, 16, 32]), - bbox_head=dict( - type='Shared2FCBBoxHead', - in_channels=256, - fc_out_channels=1024, - roi_feat_size=7, - num_classes=80, - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[0., 0., 0., 0.], - target_stds=[0.1, 0.1, 0.2, 0.2]), - reg_class_agnostic=False, - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), - loss_bbox=dict(type='L1Loss', loss_weight=1.0))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - match_low_quality=True, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - pos_weight=-1, - debug=False), - rpn_proposal=dict( - nms_pre=2000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - 
neg_iou_thr=0.5, - min_pos_iou=0.5, - match_low_quality=False, - ignore_iof_thr=-1), - sampler=dict( - type='RandomSampler', - num=512, - pos_fraction=0.25, - neg_pos_ub=-1, - add_gt_as_proposals=True), - pos_weight=-1, - debug=False)), - test_cfg=dict( - rpn=dict( - nms_pre=1000, - max_per_img=1000, - nms=dict(type='nms', iou_threshold=0.7), - min_bbox_size=0), - rcnn=dict( - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100) - # soft-nms is also supported for rcnn testing - # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05) - )) - -dataset_type = 'CocoDataset' -data_root = 'data/coco' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=2, - workers_per_gpu=2, - train=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_train2017.json', - img_prefix=f'{data_root}/train2017/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=f'{data_root}/annotations/instances_val2017.json', - img_prefix=f'{data_root}/val2017/', - pipeline=test_pipeline)) -evaluation = dict(interval=1, metric='bbox') diff --git a/spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/app.py b/spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/app.py deleted file mode 100644 index 0de0c38786cb011ecfcbec7025b6e1ea3bce2207..0000000000000000000000000000000000000000 --- a/spaces/Addai/Breast_cancer_detection_with_deep_transfer_learning/app.py +++ /dev/null @@ -1,23 +0,0 @@ -import gradio as gr -from fastai.vision.all import * -import skimage - -learn = load_learner('export.pkl') - -labels = learn.dls.vocab -def predict(img): - img = PILImage.create(img) - pred,pred_idx,probs = learn.predict(img) - prediction = str(pred) - - return prediction - - -title = "Breast cancer detection with Deep Transfer Learning(ResNet18)" -description = "

        As a radiologist or oncologist, it is crucial to know what is wrong with a breast X-ray image.
        Upload a breast X-ray image to find out what is wrong with a patient's breast, with or without an implant.

        " -article="

        This web app is built and managed by Addai Fosberg.

        " -examples = ['img1.jpeg', 'img2.jpeg'] -enable_queue=True -#interpretation='default' - -gr.Interface(fn=predict,inputs=gr.inputs.Image(shape=(512, 512)),outputs=gr.outputs.Label(num_top_classes=3),title=title,description=description,article=article,examples=examples,enable_queue=enable_queue).launch() \ No newline at end of file diff --git a/spaces/AdityaVishwakarma/LiveChecker/README.md b/spaces/AdityaVishwakarma/LiveChecker/README.md deleted file mode 100644 index d82a107f3efd0eee86cbd2d994e2bc9302de6f53..0000000000000000000000000000000000000000 --- a/spaces/AdityaVishwakarma/LiveChecker/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: LiveChecker -emoji: 🦀 -colorFrom: purple -colorTo: pink -sdk: streamlit -sdk_version: 1.27.2 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/PreTest.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/PreTest.js deleted file mode 100644 index e2b90040e6e96887b214f66a347a16f62a0f49ed..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/board/PreTest.js +++ /dev/null @@ -1,47 +0,0 @@ -/* -1. Test if there has any matched line after chess swapping -*/ - -import RefreshSymbolCache from './match/RefreshSymbolCache.js'; -import AnyMatch from './match/AnyMatch.js'; - -var PreTest = function () { - var match = this.match; - var directions = this.board.grid.halfDirections; - var tileB; - RefreshSymbolCache.call(this); // only refresh symbol cache once - for (var tileY = (this.board.height / 2), rowCnt = this.board.height; tileY < rowCnt; tileY++) { - for (var tileX = 0, colCnt = this.board.width; tileX < colCnt; tileX++) { - tileA.x = tileX; - tileA.y = tileY; - for (var dir = 0, dirCnt = directions.length; dir < dirCnt; dir++) { - tileB = this.board.getNeighborTileXY(tileA, dir); - // swap symbol - swapSymbols(match, tileA, tileB); - // any match? 
- this.preTestResult = AnyMatch.call(this, 3); - // swap symbol back - swapSymbols(match, tileA, tileB); - - if (this.preTestResult) { - return true; - } - } - } - } - return false; -} - -var swapSymbols = function (match, tileA, tileB) { - var symbolA = match.getSymbol(tileA.x, tileA.y); - var symbolB = match.getSymbol(tileB.x, tileB.y); - match.setSymbol(tileA.x, tileA.y, symbolB); - match.setSymbol(tileB.x, tileB.y, symbolA); -}; - -var tileA = { - x: 0, - y: 0 -}; - -export default PreTest; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/LayoutChildren.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/LayoutChildren.js deleted file mode 100644 index 62b172e89efe147620c79e920adbd2b6f7eb61dc..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/LayoutChildren.js +++ /dev/null @@ -1,68 +0,0 @@ -import ResizeGameObject from '../../../plugins/utils/size/ResizeGameObject.js'; -import PreLayoutChild from '../basesizer/utils/PreLayoutChild.js'; -import LayoutChild from '../basesizer/utils/LayoutChild.js'; -import CheckSize from '../basesizer/utils/CheckSize.js'; - -var LayoutChildren = function () { - var child, childConfig, padding; - var startX = this.innerLeft, - startY = this.innerTop; - var itemX, - itemY = startY; - var x, y, width, height; // Align zone - var childWidth, childHeight; - // Layout grid children - var columnSpace = this.space.column, - rowSpace = this.space.row, - indentLeftOdd = this.space.indentLeftOdd, - indentLeftEven = this.space.indentLeftEven, - indentTopOdd = this.space.indentTopOdd, - indentTopEven = this.space.indentTopEven; - - var colWidth, rowHeight; - var indentLeft, indentTop; - for (var rowIndex = 0; rowIndex < this.rowCount; rowIndex++) { - rowHeight = this.getRowHeight(rowIndex); - - indentLeft = (rowIndex % 2) ? indentLeftEven : indentLeftOdd; - itemX = startX + indentLeft; - for (var columnIndex = 0; columnIndex < this.columnCount; columnIndex++) { - colWidth = this.getColumnWidth(columnIndex); - - child = this.getChildAt(columnIndex, rowIndex); - if ((!child) || (child.rexSizer.hidden)) { - itemX += (colWidth + columnSpace[columnIndex]); - continue; - } - - PreLayoutChild.call(this, child); - - childWidth = this.getExpandedChildWidth(child, colWidth); - childHeight = this.getExpandedChildHeight(child, rowHeight); - if (child.isRexSizer) { - child.runLayout(this, childWidth, childHeight); - CheckSize(child, this); - } else { - ResizeGameObject(child, childWidth, childHeight); - } - - childConfig = child.rexSizer; - padding = childConfig.padding; - - x = (itemX + padding.left); - width = colWidth - padding.left - padding.right; - - indentTop = (columnIndex % 2) ? 
indentTopEven : indentTopOdd; - y = (itemY + indentTop + padding.top); - height = rowHeight - padding.top - padding.bottom; - - LayoutChild.call(this, child, x, y, width, height, childConfig.align); - - itemX += (colWidth + columnSpace[columnIndex]); - } - - itemY += (rowHeight + rowSpace[rowIndex]); - } -} - -export default LayoutChildren; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.js deleted file mode 100644 index 90d3b003dfb3f918badd56f213d96b3357d36bf3..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pan/Pan.js +++ /dev/null @@ -1,2 +0,0 @@ -import { Pan } from '../../../plugins/gestures'; -export default Pan; \ No newline at end of file diff --git a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/mel_processing.py b/spaces/Alycer/VITS-Umamusume-voice-synthesizer/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/Alycer/VITS-Umamusume-voice-synthesizer/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) 
- if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/custom_ops.py b/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/custom_ops.py deleted file mode 100644 index 4cc4e43fc6f6ce79f2bd68a44ba87990b9b8564e..0000000000000000000000000000000000000000 --- a/spaces/Amrrs/DragGan-Inversion/PTI/torch_utils/custom_ops.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import glob -import torch -import torch.utils.cpp_extension -import importlib -import hashlib -import shutil -from pathlib import Path - -from torch.utils.file_baton import FileBaton - -#---------------------------------------------------------------------------- -# Global options. - -verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full' - -#---------------------------------------------------------------------------- -# Internal helper funcs. - -def _find_compiler_bindir(): - patterns = [ - 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64', - 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64', - 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64', - 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin', - ] - for pattern in patterns: - matches = sorted(glob.glob(pattern)) - if len(matches): - return matches[-1] - return None - -#---------------------------------------------------------------------------- -# Main entry point for compiling and loading C++/CUDA plugins. - -_cached_plugins = dict() - -def get_plugin(module_name, sources, **build_kwargs): - assert verbosity in ['none', 'brief', 'full'] - - # Already cached? - if module_name in _cached_plugins: - return _cached_plugins[module_name] - - # Print status. - if verbosity == 'full': - print(f'Setting up PyTorch plugin "{module_name}"...') - elif verbosity == 'brief': - print(f'Setting up PyTorch plugin "{module_name}"... 
', end='', flush=True) - - try: # pylint: disable=too-many-nested-blocks - # Make sure we can find the necessary compiler binaries. - if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0: - compiler_bindir = _find_compiler_bindir() - if compiler_bindir is None: - raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".') - os.environ['PATH'] += ';' + compiler_bindir - - # Compile and load. - verbose_build = (verbosity == 'full') - - # Incremental build md5sum trickery. Copies all the input source files - # into a cached build directory under a combined md5 digest of the input - # source files. Copying is done only if the combined digest has changed. - # This keeps input file timestamps and filenames the same as in previous - # extension builds, allowing for fast incremental rebuilds. - # - # This optimization is done only in case all the source files reside in - # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR - # environment variable is set (we take this as a signal that the user - # actually cares about this.) - source_dirs_set = set(os.path.dirname(source) for source in sources) - if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ): - all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file())) - - # Compute a combined hash digest for all source files in the same - # custom op directory (usually .cu, .cpp, .py and .h files). - hash_md5 = hashlib.md5() - for src in all_source_files: - with open(src, 'rb') as f: - hash_md5.update(f.read()) - build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access - digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest()) - - if not os.path.isdir(digest_build_dir): - os.makedirs(digest_build_dir, exist_ok=True) - baton = FileBaton(os.path.join(digest_build_dir, 'lock')) - if baton.try_acquire(): - try: - for src in all_source_files: - shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src))) - finally: - baton.release() - else: - # Someone else is copying source files under the digest dir, - # wait until done and continue. - baton.wait() - digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources] - torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir, - verbose=verbose_build, sources=digest_sources, **build_kwargs) - else: - torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs) - module = importlib.import_module(module_name) - - except: - if verbosity == 'brief': - print('Failed!') - raise - - # Print status and add to cache. - if verbosity == 'full': - print(f'Done setting up PyTorch plugin "{module_name}".') - elif verbosity == 'brief': - print('Done.') - _cached_plugins[module_name] = module - return module - -#---------------------------------------------------------------------------- diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/import_utils.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/import_utils.py deleted file mode 100644 index 449b8261d1366189ba12aec55132101fa09abec0..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/utils/import_utils.py +++ /dev/null @@ -1,655 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Import utilities: Utilities related to imports and our lazy inits. -""" -import importlib.util -import operator as op -import os -import sys -from collections import OrderedDict -from typing import Union - -from huggingface_hub.utils import is_jinja_available # noqa: F401 -from packaging import version -from packaging.version import Version, parse - -from . import logging - - -# The package importlib_metadata is in a different place, depending on the python version. -if sys.version_info < (3, 8): - import importlib_metadata -else: - import importlib.metadata as importlib_metadata - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"} -ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"}) - -USE_TF = os.environ.get("USE_TF", "AUTO").upper() -USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() -USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() -USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper() - -STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt} - -_torch_version = "N/A" -if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: - _torch_available = importlib.util.find_spec("torch") is not None - if _torch_available: - try: - _torch_version = importlib_metadata.version("torch") - logger.info(f"PyTorch version {_torch_version} available.") - except importlib_metadata.PackageNotFoundError: - _torch_available = False -else: - logger.info("Disabling PyTorch because USE_TORCH is set") - _torch_available = False - - -_tf_version = "N/A" -if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: - _tf_available = importlib.util.find_spec("tensorflow") is not None - if _tf_available: - candidates = ( - "tensorflow", - "tensorflow-cpu", - "tensorflow-gpu", - "tf-nightly", - "tf-nightly-cpu", - "tf-nightly-gpu", - "intel-tensorflow", - "intel-tensorflow-avx512", - "tensorflow-rocm", - "tensorflow-macos", - "tensorflow-aarch64", - ) - _tf_version = None - # For the metadata, we have to look for both tensorflow and tensorflow-cpu - for pkg in candidates: - try: - _tf_version = importlib_metadata.version(pkg) - break - except importlib_metadata.PackageNotFoundError: - pass - _tf_available = _tf_version is not None - if _tf_available: - if version.parse(_tf_version) < version.parse("2"): - logger.info(f"TensorFlow found but with version {_tf_version}. 
Diffusers requires version 2 minimum.") - _tf_available = False - else: - logger.info(f"TensorFlow version {_tf_version} available.") -else: - logger.info("Disabling Tensorflow because USE_TORCH is set") - _tf_available = False - -_jax_version = "N/A" -_flax_version = "N/A" -if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: - _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None - if _flax_available: - try: - _jax_version = importlib_metadata.version("jax") - _flax_version = importlib_metadata.version("flax") - logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.") - except importlib_metadata.PackageNotFoundError: - _flax_available = False -else: - _flax_available = False - -if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES: - _safetensors_available = importlib.util.find_spec("safetensors") is not None - if _safetensors_available: - try: - _safetensors_version = importlib_metadata.version("safetensors") - logger.info(f"Safetensors version {_safetensors_version} available.") - except importlib_metadata.PackageNotFoundError: - _safetensors_available = False -else: - logger.info("Disabling Safetensors because USE_TF is set") - _safetensors_available = False - -_transformers_available = importlib.util.find_spec("transformers") is not None -try: - _transformers_version = importlib_metadata.version("transformers") - logger.debug(f"Successfully imported transformers version {_transformers_version}") -except importlib_metadata.PackageNotFoundError: - _transformers_available = False - - -_inflect_available = importlib.util.find_spec("inflect") is not None -try: - _inflect_version = importlib_metadata.version("inflect") - logger.debug(f"Successfully imported inflect version {_inflect_version}") -except importlib_metadata.PackageNotFoundError: - _inflect_available = False - - -_unidecode_available = importlib.util.find_spec("unidecode") is not None -try: - _unidecode_version = importlib_metadata.version("unidecode") - logger.debug(f"Successfully imported unidecode version {_unidecode_version}") -except importlib_metadata.PackageNotFoundError: - _unidecode_available = False - - -_onnxruntime_version = "N/A" -_onnx_available = importlib.util.find_spec("onnxruntime") is not None -if _onnx_available: - candidates = ( - "onnxruntime", - "onnxruntime-gpu", - "ort_nightly_gpu", - "onnxruntime-directml", - "onnxruntime-openvino", - "ort_nightly_directml", - "onnxruntime-rocm", - "onnxruntime-training", - ) - _onnxruntime_version = None - # For the metadata, we have to look for both onnxruntime and onnxruntime-gpu - for pkg in candidates: - try: - _onnxruntime_version = importlib_metadata.version(pkg) - break - except importlib_metadata.PackageNotFoundError: - pass - _onnx_available = _onnxruntime_version is not None - if _onnx_available: - logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}") - -# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed. 
-# _opencv_available = importlib.util.find_spec("opencv-python") is not None -try: - candidates = ( - "opencv-python", - "opencv-contrib-python", - "opencv-python-headless", - "opencv-contrib-python-headless", - ) - _opencv_version = None - for pkg in candidates: - try: - _opencv_version = importlib_metadata.version(pkg) - break - except importlib_metadata.PackageNotFoundError: - pass - _opencv_available = _opencv_version is not None - if _opencv_available: - logger.debug(f"Successfully imported cv2 version {_opencv_version}") -except importlib_metadata.PackageNotFoundError: - _opencv_available = False - -_scipy_available = importlib.util.find_spec("scipy") is not None -try: - _scipy_version = importlib_metadata.version("scipy") - logger.debug(f"Successfully imported scipy version {_scipy_version}") -except importlib_metadata.PackageNotFoundError: - _scipy_available = False - -_librosa_available = importlib.util.find_spec("librosa") is not None -try: - _librosa_version = importlib_metadata.version("librosa") - logger.debug(f"Successfully imported librosa version {_librosa_version}") -except importlib_metadata.PackageNotFoundError: - _librosa_available = False - -_accelerate_available = importlib.util.find_spec("accelerate") is not None -try: - _accelerate_version = importlib_metadata.version("accelerate") - logger.debug(f"Successfully imported accelerate version {_accelerate_version}") -except importlib_metadata.PackageNotFoundError: - _accelerate_available = False - -_xformers_available = importlib.util.find_spec("xformers") is not None -try: - _xformers_version = importlib_metadata.version("xformers") - if _torch_available: - import torch - - if version.Version(torch.__version__) < version.Version("1.12"): - raise ValueError("PyTorch should be >= 1.12") - logger.debug(f"Successfully imported xformers version {_xformers_version}") -except importlib_metadata.PackageNotFoundError: - _xformers_available = False - -_k_diffusion_available = importlib.util.find_spec("k_diffusion") is not None -try: - _k_diffusion_version = importlib_metadata.version("k_diffusion") - logger.debug(f"Successfully imported k-diffusion version {_k_diffusion_version}") -except importlib_metadata.PackageNotFoundError: - _k_diffusion_available = False - -_note_seq_available = importlib.util.find_spec("note_seq") is not None -try: - _note_seq_version = importlib_metadata.version("note_seq") - logger.debug(f"Successfully imported note-seq version {_note_seq_version}") -except importlib_metadata.PackageNotFoundError: - _note_seq_available = False - -_wandb_available = importlib.util.find_spec("wandb") is not None -try: - _wandb_version = importlib_metadata.version("wandb") - logger.debug(f"Successfully imported wandb version {_wandb_version }") -except importlib_metadata.PackageNotFoundError: - _wandb_available = False - -_omegaconf_available = importlib.util.find_spec("omegaconf") is not None -try: - _omegaconf_version = importlib_metadata.version("omegaconf") - logger.debug(f"Successfully imported omegaconf version {_omegaconf_version}") -except importlib_metadata.PackageNotFoundError: - _omegaconf_available = False - -_tensorboard_available = importlib.util.find_spec("tensorboard") -try: - _tensorboard_version = importlib_metadata.version("tensorboard") - logger.debug(f"Successfully imported tensorboard version {_tensorboard_version}") -except importlib_metadata.PackageNotFoundError: - _tensorboard_available = False - - -_compel_available = importlib.util.find_spec("compel") -try: - _compel_version = 
importlib_metadata.version("compel") - logger.debug(f"Successfully imported compel version {_compel_version}") -except importlib_metadata.PackageNotFoundError: - _compel_available = False - - -_ftfy_available = importlib.util.find_spec("ftfy") is not None -try: - _ftfy_version = importlib_metadata.version("ftfy") - logger.debug(f"Successfully imported ftfy version {_ftfy_version}") -except importlib_metadata.PackageNotFoundError: - _ftfy_available = False - - -_bs4_available = importlib.util.find_spec("bs4") is not None -try: - # importlib metadata under different name - _bs4_version = importlib_metadata.version("beautifulsoup4") - logger.debug(f"Successfully imported ftfy version {_bs4_version}") -except importlib_metadata.PackageNotFoundError: - _bs4_available = False - -_torchsde_available = importlib.util.find_spec("torchsde") is not None -try: - _torchsde_version = importlib_metadata.version("torchsde") - logger.debug(f"Successfully imported torchsde version {_torchsde_version}") -except importlib_metadata.PackageNotFoundError: - _torchsde_available = False - -_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None -try: - _invisible_watermark_version = importlib_metadata.version("invisible-watermark") - logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}") -except importlib_metadata.PackageNotFoundError: - _invisible_watermark_available = False - - -def is_torch_available(): - return _torch_available - - -def is_safetensors_available(): - return _safetensors_available - - -def is_tf_available(): - return _tf_available - - -def is_flax_available(): - return _flax_available - - -def is_transformers_available(): - return _transformers_available - - -def is_inflect_available(): - return _inflect_available - - -def is_unidecode_available(): - return _unidecode_available - - -def is_onnx_available(): - return _onnx_available - - -def is_opencv_available(): - return _opencv_available - - -def is_scipy_available(): - return _scipy_available - - -def is_librosa_available(): - return _librosa_available - - -def is_xformers_available(): - return _xformers_available - - -def is_accelerate_available(): - return _accelerate_available - - -def is_k_diffusion_available(): - return _k_diffusion_available - - -def is_note_seq_available(): - return _note_seq_available - - -def is_wandb_available(): - return _wandb_available - - -def is_omegaconf_available(): - return _omegaconf_available - - -def is_tensorboard_available(): - return _tensorboard_available - - -def is_compel_available(): - return _compel_available - - -def is_ftfy_available(): - return _ftfy_available - - -def is_bs4_available(): - return _bs4_available - - -def is_torchsde_available(): - return _torchsde_available - - -def is_invisible_watermark_available(): - return _invisible_watermark_available - - -# docstyle-ignore -FLAX_IMPORT_ERROR = """ -{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the -installation page: https://github.com/google/flax and follow the ones that match your environment. -""" - -# docstyle-ignore -INFLECT_IMPORT_ERROR = """ -{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install -inflect` -""" - -# docstyle-ignore -PYTORCH_IMPORT_ERROR = """ -{0} requires the PyTorch library but it was not found in your environment. 
Checkout the instructions on the -installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment. -""" - -# docstyle-ignore -ONNX_IMPORT_ERROR = """ -{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip -install onnxruntime` -""" - -# docstyle-ignore -OPENCV_IMPORT_ERROR = """ -{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip -install opencv-python` -""" - -# docstyle-ignore -SCIPY_IMPORT_ERROR = """ -{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install -scipy` -""" - -# docstyle-ignore -LIBROSA_IMPORT_ERROR = """ -{0} requires the librosa library but it was not found in your environment. Checkout the instructions on the -installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment. -""" - -# docstyle-ignore -TRANSFORMERS_IMPORT_ERROR = """ -{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip -install transformers` -""" - -# docstyle-ignore -UNIDECODE_IMPORT_ERROR = """ -{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install -Unidecode` -""" - -# docstyle-ignore -K_DIFFUSION_IMPORT_ERROR = """ -{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip -install k-diffusion` -""" - -# docstyle-ignore -NOTE_SEQ_IMPORT_ERROR = """ -{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip -install note-seq` -""" - -# docstyle-ignore -WANDB_IMPORT_ERROR = """ -{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip -install wandb` -""" - -# docstyle-ignore -OMEGACONF_IMPORT_ERROR = """ -{0} requires the omegaconf library but it was not found in your environment. You can install it with pip: `pip -install omegaconf` -""" - -# docstyle-ignore -TENSORBOARD_IMPORT_ERROR = """ -{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip -install tensorboard` -""" - - -# docstyle-ignore -COMPEL_IMPORT_ERROR = """ -{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel` -""" - -# docstyle-ignore -BS4_IMPORT_ERROR = """ -{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: -`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation. -""" - -# docstyle-ignore -FTFY_IMPORT_ERROR = """ -{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the -installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones -that match your environment. Please note that you may need to restart your runtime after installation. -""" - -# docstyle-ignore -TORCHSDE_IMPORT_ERROR = """ -{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde` -""" - -# docstyle-ignore -INVISIBLE_WATERMARK_IMPORT_ERROR = """ -{0} requires the invisible-watermark library but it was not found in your environment. 
You can install it with pip: `pip install invisible-watermark>=0.2.0` -""" - - -BACKENDS_MAPPING = OrderedDict( - [ - ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), - ("flax", (is_flax_available, FLAX_IMPORT_ERROR)), - ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)), - ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)), - ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)), - ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)), - ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)), - ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)), - ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)), - ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)), - ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)), - ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)), - ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)), - ("omegaconf", (is_omegaconf_available, OMEGACONF_IMPORT_ERROR)), - ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)), - ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)), - ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)), - ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)), - ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)), - ] -) - - -def requires_backends(obj, backends): - if not isinstance(backends, (list, tuple)): - backends = [backends] - - name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__ - checks = (BACKENDS_MAPPING[backend] for backend in backends) - failed = [msg.format(name) for available, msg in checks if not available()] - if failed: - raise ImportError("".join(failed)) - - if name in [ - "VersatileDiffusionTextToImagePipeline", - "VersatileDiffusionPipeline", - "VersatileDiffusionDualGuidedPipeline", - "StableDiffusionImageVariationPipeline", - "UnCLIPPipeline", - ] and is_transformers_version("<", "4.25.0"): - raise ImportError( - f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install" - " --upgrade transformers \n```" - ) - - if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version( - "<", "4.26.0" - ): - raise ImportError( - f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install" - " --upgrade transformers \n```" - ) - - -class DummyObject(type): - """ - Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by - `requires_backend` each time a user tries to access any method of that class. - """ - - def __getattr__(cls, key): - if key.startswith("_") and key != "_load_connected_pipes": - return super().__getattr__(cls, key) - requires_backends(cls, cls._backends) - - -# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319 -def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str): - """ - Args: - Compares a library version to some requirement using a given operation. - library_or_version (`str` or `packaging.version.Version`): - A library name or a version to check. - operation (`str`): - A string representation of an operator, such as `">"` or `"<="`. 
- requirement_version (`str`): - The version to compare the library version against - """ - if operation not in STR_OPERATION_TO_FUNC.keys(): - raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}") - operation = STR_OPERATION_TO_FUNC[operation] - if isinstance(library_or_version, str): - library_or_version = parse(importlib_metadata.version(library_or_version)) - return operation(library_or_version, parse(requirement_version)) - - -# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338 -def is_torch_version(operation: str, version: str): - """ - Args: - Compares the current PyTorch version to a given reference with an operation. - operation (`str`): - A string representation of an operator, such as `">"` or `"<="` - version (`str`): - A string version of PyTorch - """ - return compare_versions(parse(_torch_version), operation, version) - - -def is_transformers_version(operation: str, version: str): - """ - Args: - Compares the current Transformers version to a given reference with an operation. - operation (`str`): - A string representation of an operator, such as `">"` or `"<="` - version (`str`): - A version string - """ - if not _transformers_available: - return False - return compare_versions(parse(_transformers_version), operation, version) - - -def is_accelerate_version(operation: str, version: str): - """ - Args: - Compares the current Accelerate version to a given reference with an operation. - operation (`str`): - A string representation of an operator, such as `">"` or `"<="` - version (`str`): - A version string - """ - if not _accelerate_available: - return False - return compare_versions(parse(_accelerate_version), operation, version) - - -def is_k_diffusion_version(operation: str, version: str): - """ - Args: - Compares the current k-diffusion version to a given reference with an operation. - operation (`str`): - A string representation of an operator, such as `">"` or `"<="` - version (`str`): - A version string - """ - if not _k_diffusion_available: - return False - return compare_versions(parse(_k_diffusion_version), operation, version) - - -class OptionalDependencyNotAvailable(BaseException): - """An error indicating that an optional dependency of Diffusers was not found in the environment.""" diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/custom_init_isort.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/custom_init_isort.py deleted file mode 100644 index f8ef799c5e6c83f864bc0db06f874324342802c5..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/custom_init_isort.py +++ /dev/null @@ -1,252 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import argparse -import os -import re - - -PATH_TO_TRANSFORMERS = "src/diffusers" - -# Pattern that looks at the indentation in a line. -_re_indent = re.compile(r"^(\s*)\S") -# Pattern that matches `"key":" and puts `key` in group 0. -_re_direct_key = re.compile(r'^\s*"([^"]+)":') -# Pattern that matches `_import_structure["key"]` and puts `key` in group 0. -_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]') -# Pattern that matches `"key",` and puts `key` in group 0. -_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$') -# Pattern that matches any `[stuff]` and puts `stuff` in group 0. -_re_bracket_content = re.compile(r"\[([^\]]+)\]") - - -def get_indent(line): - """Returns the indent in `line`.""" - search = _re_indent.search(line) - return "" if search is None else search.groups()[0] - - -def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None): - """ - Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after - `start_prompt` and stops at `end_prompt` (but returns what's before `start_prompt` as a first block and what's - after `end_prompt` as a last block, so `code` is always the same as joining the result of this function). - """ - # Let's split the code into lines and move to start_index. - index = 0 - lines = code.split("\n") - if start_prompt is not None: - while not lines[index].startswith(start_prompt): - index += 1 - blocks = ["\n".join(lines[:index])] - else: - blocks = [] - - # We split into blocks until we get to the `end_prompt` (or the end of the block). - current_block = [lines[index]] - index += 1 - while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)): - if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level: - if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "): - current_block.append(lines[index]) - blocks.append("\n".join(current_block)) - if index < len(lines) - 1: - current_block = [lines[index + 1]] - index += 1 - else: - current_block = [] - else: - blocks.append("\n".join(current_block)) - current_block = [lines[index]] - else: - current_block.append(lines[index]) - index += 1 - - # Adds current block if it's nonempty. - if len(current_block) > 0: - blocks.append("\n".join(current_block)) - - # Add final block after end_prompt if provided. - if end_prompt is not None and index < len(lines): - blocks.append("\n".join(lines[index:])) - - return blocks - - -def ignore_underscore(key): - "Wraps a `key` (that maps an object to string) to lower case and remove underscores." - - def _inner(x): - return key(x).lower().replace("_", "") - - return _inner - - -def sort_objects(objects, key=None): - "Sort a list of `objects` following the rules of isort. `key` optionally maps an object to a str." - - # If no key is provided, we use a noop. - def noop(x): - return x - - if key is None: - key = noop - # Constants are all uppercase, they go first. - constants = [obj for obj in objects if key(obj).isupper()] - # Classes are not all uppercase but start with a capital, they go second. - classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()] - # Functions begin with a lowercase, they go last. 
- functions = [obj for obj in objects if not key(obj)[0].isupper()] - - key1 = ignore_underscore(key) - return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1) - - -def sort_objects_in_import(import_statement): - """ - Return the same `import_statement` but with objects properly sorted. - """ - - # This inner function sort imports between [ ]. - def _replace(match): - imports = match.groups()[0] - if "," not in imports: - return f"[{imports}]" - keys = [part.strip().replace('"', "") for part in imports.split(",")] - # We will have a final empty element if the line finished with a comma. - if len(keys[-1]) == 0: - keys = keys[:-1] - return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]" - - lines = import_statement.split("\n") - if len(lines) > 3: - # Here we have to sort internal imports that are on several lines (one per name): - # key: [ - # "object1", - # "object2", - # ... - # ] - - # We may have to ignore one or two lines on each side. - idx = 2 if lines[1].strip() == "[" else 1 - keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])] - sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1]) - sorted_lines = [lines[x[0] + idx] for x in sorted_indices] - return "\n".join(lines[:idx] + sorted_lines + lines[-idx:]) - elif len(lines) == 3: - # Here we have to sort internal imports that are on one separate line: - # key: [ - # "object1", "object2", ... - # ] - if _re_bracket_content.search(lines[1]) is not None: - lines[1] = _re_bracket_content.sub(_replace, lines[1]) - else: - keys = [part.strip().replace('"', "") for part in lines[1].split(",")] - # We will have a final empty element if the line finished with a comma. - if len(keys[-1]) == 0: - keys = keys[:-1] - lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)]) - return "\n".join(lines) - else: - # Finally we have to deal with imports fitting on one line - import_statement = _re_bracket_content.sub(_replace, import_statement) - return import_statement - - -def sort_imports(file, check_only=True): - """ - Sort `_import_structure` imports in `file`, `check_only` determines if we only check or overwrite. - """ - with open(file, "r") as f: - code = f.read() - - if "_import_structure" not in code: - return - - # Blocks of indent level 0 - main_blocks = split_code_in_indented_blocks( - code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:" - ) - - # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). - for block_idx in range(1, len(main_blocks) - 1): - # Check if the block contains some `_import_structure`s thingy to sort. - block = main_blocks[block_idx] - block_lines = block.split("\n") - - # Get to the start of the imports. - line_idx = 0 - while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]: - # Skip dummy import blocks - if "import dummy" in block_lines[line_idx]: - line_idx = len(block_lines) - else: - line_idx += 1 - if line_idx >= len(block_lines): - continue - - # Ignore beginning and last line: they don't contain anything. - internal_block_code = "\n".join(block_lines[line_idx:-1]) - indent = get_indent(block_lines[1]) - # Slit the internal block into blocks of indent level 1. 
- internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent) - # We have two categories of import key: list or _import_structure[key].append/extend - pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key - # Grab the keys, but there is a trap: some lines are empty or just comments. - keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks] - # We only sort the lines with a key. - keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None] - sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])] - - # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. - count = 0 - reordered_blocks = [] - for i in range(len(internal_blocks)): - if keys[i] is None: - reordered_blocks.append(internal_blocks[i]) - else: - block = sort_objects_in_import(internal_blocks[sorted_indices[count]]) - reordered_blocks.append(block) - count += 1 - - # And we put our main block back together with its first and last line. - main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]]) - - if code != "\n".join(main_blocks): - if check_only: - return True - else: - print(f"Overwriting {file}.") - with open(file, "w") as f: - f.write("\n".join(main_blocks)) - - -def sort_imports_in_all_inits(check_only=True): - failures = [] - for root, _, files in os.walk(PATH_TO_TRANSFORMERS): - if "__init__.py" in files: - result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only) - if result: - failures = [os.path.join(root, "__init__.py")] - if len(failures) > 0: - raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") - args = parser.parse_args() - - sort_imports_in_all_inits(check_only=args.check_only) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 5ac908e60c1f964bdd6c3e61933a37c04d487bfb..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/gfl/README.md b/spaces/Andy1621/uniformer_image_detection/configs/gfl/README.md deleted file mode 100644 index 53ae22b75642130b229ea8982345384792f5d3a2..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/gfl/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection - -## Introduction - -[ALGORITHM] - -We provide config files to reproduce the object detection results in the paper [Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection](https://arxiv.org/abs/2006.04388) - -```latex -@article{li2020generalized, - title={Generalized Focal Loss: Learning Qualified and Distributed Bounding Boxes for Dense Object Detection}, - author={Li, Xiang and Wang, Wenhai and Wu, Lijun and Chen, Shuo and Hu, Xiaolin and Li, Jun and Tang, Jinhui and Yang, Jian}, - journal={arXiv preprint arXiv:2006.04388}, - year={2020} -} -``` - -## Results and Models - -| Backbone | Style | Lr schd | Multi-scale Training| Inf time (fps) | box AP | Config | Download | -|:-----------------:|:-------:|:-------:|:-------------------:|:--------------:|:------:|:------:|:--------:| -| R-50 | pytorch | 1x | No | 19.5 | 40.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244-25944287.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_1x_coco/gfl_r50_fpn_1x_coco_20200629_121244.log.json) | -| R-50 | pytorch | 2x | Yes | 19.5 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r50_fpn_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802-37bb1edc.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r50_fpn_mstrain_2x_coco/gfl_r50_fpn_mstrain_2x_coco_20200629_213802.log.json) | -| R-101 | pytorch | 2x | Yes | 14.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126-dd12f847.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_mstrain_2x_coco/gfl_r101_fpn_mstrain_2x_coco_20200629_200126.log.json) | -| R-101-dcnv2 | pytorch | 2x | Yes | 12.9 | 47.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002-134b07df.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco/gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20200630_102002.log.json) | -| X-101-32x4d | pytorch | 2x | Yes | 12.1 | 45.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002-50c1ffdb.pth) | 
[log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_mstrain_2x_coco/gfl_x101_32x4d_fpn_mstrain_2x_coco_20200630_102002.log.json) | -| X-101-32x4d-dcnv2 | pytorch | 2x | Yes | 10.7 | 48.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002-14a2bf25.pth) | [log](http://download.openmmlab.com/mmdetection/v2.0/gfl/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco/gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco_20200630_102002.log.json) | - -[1] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \ -[2] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping and etc..* \ -[3] *`dcnv2` denotes deformable convolutional networks v2.* \ -[4] *FPS is tested with a single GeForce RTX 2080Ti GPU, using a batch size of 1.* diff --git a/spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py deleted file mode 100644 index 7cf5f307442e56b29460fb5477cef64bfd3476b9..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/instaboost/cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py deleted file mode 100644 index 83bd70032cb24be6b96f988522ef84f7b4cc0e6a..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/rpn/rpn_x101_32x4d_fpn_1x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './rpn_r50_fpn_1x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 4912bdb9fb298518ae084eb7df0ad22d3e4ff84f..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = [ - '../_base_/models/ann_r50-d8.py', - '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(align_corners=True), - auxiliary_head=dict(align_corners=True), - test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513))) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py 
b/spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py deleted file mode 100644 index 58f28b43f55f54c7a604960735963e6b7c13b6f1..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './emanet_r50-d8_512x1024_80k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py deleted file mode 100644 index 4e31d26e093b6cb2d59b24bb3060c92bd7dccdea..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,9 +0,0 @@ -_base_ = './fcn_hr18_512x1024_40k_cityscapes.py' -model = dict( - pretrained='open-mmlab://msra/hrnetv2_w18_small', - backbone=dict( - extra=dict( - stage1=dict(num_blocks=(2, )), - stage2=dict(num_blocks=(2, 2)), - stage3=dict(num_modules=3, num_blocks=(2, 2, 2)), - stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2))))) diff --git a/spaces/AnnonSubmission/xai-cl/ssl_models/simsiam.py b/spaces/AnnonSubmission/xai-cl/ssl_models/simsiam.py deleted file mode 100644 index 2e55787cd6af24c82967878b94bcc168280a6995..0000000000000000000000000000000000000000 --- a/spaces/AnnonSubmission/xai-cl/ssl_models/simsiam.py +++ /dev/null @@ -1,91 +0,0 @@ -import torch -import torch.nn as nn -import torchvision -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -"""from https://github.com/facebookresearch/simsiam""" - -class SimSiam(nn.Module): - - def __init__(self, base_encoder, dim, pred_dim): - """ - dim: feature dimension (default: 2048) - pred_dim: hidden dimension of the predictor (default: 512) - symetric is True only when training - """ - super(SimSiam, self).__init__() - - # create the encoder - # num_classes is the output fc dimension, zero-initialize last BNs - self.encoder = base_encoder(num_classes=dim, zero_init_residual=True) - - # build a 3-layer projector - prev_dim = self.encoder.fc.weight.shape[1] - self.encoder.fc = nn.Sequential(nn.Linear(prev_dim, prev_dim, bias=False), - nn.BatchNorm1d(prev_dim), - nn.ReLU(inplace=True), # first layer - nn.Linear(prev_dim, prev_dim, bias=False), - nn.BatchNorm1d(prev_dim), - nn.ReLU(inplace=True), # second layer - self.encoder.fc, - nn.BatchNorm1d(dim, affine=False)) # output layer - self.encoder.fc[6].bias.requires_grad = False # hack: not use bias as it is followed by BN - - # build a 2-layer predictor - self.predictor = nn.Sequential(nn.Linear(dim, pred_dim, bias=False), - nn.BatchNorm1d(pred_dim), - nn.ReLU(inplace=True), # hidden layer - nn.Linear(pred_dim, dim)) # output layer - - def forward(self, x1, x2): - z1 = self.encoder(x1).detach() # NxC - z2 = self.encoder(x2).detach() # NxC - - p1 = self.predictor(z1) # NxC - p2 = self.predictor(z2) # NxC - - loss = -(nn.CosineSimilarity(dim=1)(p1, z2).mean() + nn.CosineSimilarity(dim=1)(p2, z1).mean()) * 0.5 - - return loss - -class ResNet(nn.Module): - def __init__(self, backbone): - super().__init__() - - modules = list(backbone.children())[:-2] - self.net = nn.Sequential(*modules) - - def forward(self, x): - return self.net(x).mean(dim=[2, 3]) - -class RestructuredSimSiam(nn.Module): - def __init__(self, model): - super().__init__() - - self.encoder 
= ResNet(model.encoder) - self.mlp_encoder = model.encoder.fc - self.mlp_encoder[6].bias.requires_grad = False - self.contrastive_head = model.predictor - - def forward(self, x, run_head = True): - - x = self.mlp_encoder(self.encoder(x)) # don't detach since we will do backprop for explainability - - if run_head: - x = self.contrastive_head(x) - - return x - - -def get_simsiam(ckpt_path = 'checkpoint_0099.pth.tar'): - - model = SimSiam(base_encoder = torchvision.models.resnet50, - dim = 2048, - pred_dim = 512) - - checkpoint = torch.load('pretrained_models/simsiam_models/'+ ckpt_path, map_location='cpu') - state_dic = checkpoint['state_dict'] - state_dic = {k.replace("module.", ""): v for k, v in state_dic.items()} - model.load_state_dict(state_dic) - restructured_model = RestructuredSimSiam(model) - return restructured_model.to(device) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py deleted file mode 100644 index cc245ba91fee252226ba22e76bb94a35db9a629b..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/deform_roi_pool.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from torch import nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext( - '_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward']) - - -class DeformRoIPoolFunction(Function): - - @staticmethod - def symbolic(g, input, rois, offset, output_size, spatial_scale, - sampling_ratio, gamma): - return g.op( - 'mmcv::MMCVDeformRoIPool', - input, - rois, - offset, - pooled_height_i=output_size[0], - pooled_width_i=output_size[1], - spatial_scale_f=spatial_scale, - sampling_ratio_f=sampling_ratio, - gamma_f=gamma) - - @staticmethod - def forward(ctx, - input, - rois, - offset, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - if offset is None: - offset = input.new_zeros(0) - ctx.output_size = _pair(output_size) - ctx.spatial_scale = float(spatial_scale) - ctx.sampling_ratio = int(sampling_ratio) - ctx.gamma = float(gamma) - - assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' 
- - output_shape = (rois.size(0), input.size(1), ctx.output_size[0], - ctx.output_size[1]) - output = input.new_zeros(output_shape) - - ext_module.deform_roi_pool_forward( - input, - rois, - offset, - output, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - gamma=ctx.gamma) - - ctx.save_for_backward(input, rois, offset) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, rois, offset = ctx.saved_tensors - grad_input = grad_output.new_zeros(input.shape) - grad_offset = grad_output.new_zeros(offset.shape) - - ext_module.deform_roi_pool_backward( - grad_output, - input, - rois, - offset, - grad_input, - grad_offset, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale, - sampling_ratio=ctx.sampling_ratio, - gamma=ctx.gamma) - if grad_offset.numel() == 0: - grad_offset = None - return grad_input, None, grad_offset, None, None, None, None - - -deform_roi_pool = DeformRoIPoolFunction.apply - - -class DeformRoIPool(nn.Module): - - def __init__(self, - output_size, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(DeformRoIPool, self).__init__() - self.output_size = _pair(output_size) - self.spatial_scale = float(spatial_scale) - self.sampling_ratio = int(sampling_ratio) - self.gamma = float(gamma) - - def forward(self, input, rois, offset=None): - return deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - - -class DeformRoIPoolPack(DeformRoIPool): - - def __init__(self, - output_size, - output_channels, - deform_fc_channels=1024, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale, - sampling_ratio, gamma) - - self.output_channels = output_channels - self.deform_fc_channels = deform_fc_channels - - self.offset_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - self.output_size[0] * self.output_size[1] * 2)) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - def forward(self, input, rois): - assert input.size(1) == self.output_channels - x = deform_roi_pool(input, rois, None, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - rois_num = rois.size(0) - offset = self.offset_fc(x.view(rois_num, -1)) - offset = offset.view(rois_num, 2, self.output_size[0], - self.output_size[1]) - return deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - - -class ModulatedDeformRoIPoolPack(DeformRoIPool): - - def __init__(self, - output_size, - output_channels, - deform_fc_channels=1024, - spatial_scale=1.0, - sampling_ratio=0, - gamma=0.1): - super(ModulatedDeformRoIPoolPack, - self).__init__(output_size, spatial_scale, sampling_ratio, gamma) - - self.output_channels = output_channels - self.deform_fc_channels = deform_fc_channels - - self.offset_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - 
self.output_size[0] * self.output_size[1] * 2)) - self.offset_fc[-1].weight.data.zero_() - self.offset_fc[-1].bias.data.zero_() - - self.mask_fc = nn.Sequential( - nn.Linear( - self.output_size[0] * self.output_size[1] * - self.output_channels, self.deform_fc_channels), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_channels, - self.output_size[0] * self.output_size[1] * 1), - nn.Sigmoid()) - self.mask_fc[2].weight.data.zero_() - self.mask_fc[2].bias.data.zero_() - - def forward(self, input, rois): - assert input.size(1) == self.output_channels - x = deform_roi_pool(input, rois, None, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - rois_num = rois.size(0) - offset = self.offset_fc(x.view(rois_num, -1)) - offset = offset.view(rois_num, 2, self.output_size[0], - self.output_size[1]) - mask = self.mask_fc(x.view(rois_num, -1)) - mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1]) - d = deform_roi_pool(input, rois, offset, self.output_size, - self.spatial_scale, self.sampling_ratio, - self.gamma) - return d * mask diff --git a/spaces/Apex-X/GODROOP/roop/processors/frame/face_enhancer.py b/spaces/Apex-X/GODROOP/roop/processors/frame/face_enhancer.py deleted file mode 100644 index ba920dcc142a40462c68c22bad86811c6a30f973..0000000000000000000000000000000000000000 --- a/spaces/Apex-X/GODROOP/roop/processors/frame/face_enhancer.py +++ /dev/null @@ -1,81 +0,0 @@ -from typing import Any, List, Callable -import cv2 -import threading -import gfpgan - -import roop.globals -import roop.processors.frame.core -from roop.core import update_status -from roop.face_analyser import get_one_face -from roop.typing import Frame, Face -from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video - -FACE_ENHANCER = None -THREAD_SEMAPHORE = threading.Semaphore() -THREAD_LOCK = threading.Lock() -NAME = 'ROOP.FACE-ENHANCER' - - -def get_face_enhancer() -> Any: - global FACE_ENHANCER - - with THREAD_LOCK: - if FACE_ENHANCER is None: - model_path = resolve_relative_path('../models/GFPGANv1.4') - # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399 - FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined] - return FACE_ENHANCER - - -def pre_check() -> bool: - download_directory_path = resolve_relative_path('../models') - conditional_download(download_directory_path, ['https://huggingface.co/th2w33knd/GFPGANv1.4/resolve/main/GFPGANv1.4.pth']) - return True - - -def pre_start() -> bool: - if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path): - update_status('Select an image or video for target path.', NAME) - return False - return True - - -def post_process() -> None: - global FACE_ENHANCER - - FACE_ENHANCER = None - - -def enhance_face(temp_frame: Frame) -> Frame: - with THREAD_SEMAPHORE: - _, _, temp_frame = get_face_enhancer().enhance( - temp_frame, - paste_back=True - ) - return temp_frame - - -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = enhance_face(temp_frame) - return temp_frame - - -def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: - for temp_frame_path in temp_frame_paths: - temp_frame = cv2.imread(temp_frame_path) - result = process_frame(None, temp_frame) - cv2.imwrite(temp_frame_path, result) - if update: - update() - - -def process_image(source_path: str, target_path: str, output_path: str) -> 
None: - target_frame = cv2.imread(target_path) - result = process_frame(None, target_frame) - cv2.imwrite(output_path, result) - - -def process_video(source_path: str, temp_frame_paths: List[str]) -> None: - roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames) diff --git a/spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_utils.py b/spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_utils.py deleted file mode 100644 index 0480671bb17281d61ce02bce6373a5ccec89fece..0000000000000000000000000000000000000000 --- a/spaces/Arnx/MusicGenXvAKN/tests/data/test_audio_utils.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import julius -import torch -import pytest - -from audiocraft.data.audio_utils import ( - _clip_wav, - convert_audio_channels, - convert_audio, - normalize_audio -) -from ..common_utils import get_batch_white_noise - - -class TestConvertAudioChannels: - - def test_convert_audio_channels_downmix(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=2) - assert list(mixed.shape) == [b, 2, t] - - def test_convert_audio_channels_nochange(self): - b, c, t = 2, 3, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=c) - assert list(mixed.shape) == list(audio.shape) - - def test_convert_audio_channels_upmix(self): - b, c, t = 2, 1, 100 - audio = get_batch_white_noise(b, c, t) - mixed = convert_audio_channels(audio, channels=3) - assert list(mixed.shape) == [b, 3, t] - - def test_convert_audio_channels_upmix_error(self): - b, c, t = 2, 2, 100 - audio = get_batch_white_noise(b, c, t) - with pytest.raises(ValueError): - convert_audio_channels(audio, channels=3) - - -class TestConvertAudio: - - def test_convert_audio_channels_downmix(self): - b, c, dur = 2, 3, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2) - assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]] - - def test_convert_audio_channels_upmix(self): - b, c, dur = 2, 1, 4. - sr = 128 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3) - assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]] - - def test_convert_audio_upsample(self): - b, c, dur = 2, 1, 4. - sr = 2 - new_sr = 3 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - def test_convert_audio_resample(self): - b, c, dur = 2, 1, 4. - sr = 3 - new_sr = 2 - audio = get_batch_white_noise(b, c, int(sr * dur)) - out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c) - out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr) - assert torch.allclose(out, out_j) - - -class TestNormalizeAudio: - - def test_clip_wav(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - _clip_wav(audio) - assert audio.abs().max() <= 1 - - def test_normalize_audio_clip(self): - b, c, dur = 2, 1, 4. 
- sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='clip') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_rms(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='rms') - assert norm_audio.abs().max() <= 1 - - def test_normalize_audio_peak(self): - b, c, dur = 2, 1, 4. - sr = 3 - audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur)) - norm_audio = normalize_audio(audio, strategy='peak') - assert norm_audio.abs().max() <= 1 diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/diagram/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/diagram/__init__.py deleted file mode 100644 index 1506d66bf4e93afb60ad46c23f234b31c46b3a7e..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pyparsing/diagram/__init__.py +++ /dev/null @@ -1,642 +0,0 @@ -import railroad -from pip._vendor import pyparsing -import typing -from typing import ( - List, - NamedTuple, - Generic, - TypeVar, - Dict, - Callable, - Set, - Iterable, -) -from jinja2 import Template -from io import StringIO -import inspect - - -jinja2_template_source = """\ - - - - {% if not head %} - - {% else %} - {{ head | safe }} - {% endif %} - - -{{ body | safe }} -{% for diagram in diagrams %} -
-        <h1 class="railroad-heading">{{ diagram.title }}</h1>
-        <div class="railroad-description">{{ diagram.text }}</div>
-        <div class="railroad-svg">
-            {{ diagram.svg }}
-        </div>
        -{% endfor %} - - -""" - -template = Template(jinja2_template_source) - -# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet -NamedDiagram = NamedTuple( - "NamedDiagram", - [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], -) -""" -A simple structure for associating a name with a railroad diagram -""" - -T = TypeVar("T") - - -class EachItem(railroad.Group): - """ - Custom railroad item to compose a: - - Group containing a - - OneOrMore containing a - - Choice of the elements in the Each - with the group label indicating that all must be matched - """ - - all_label = "[ALL]" - - def __init__(self, *items): - choice_item = railroad.Choice(len(items) - 1, *items) - one_or_more_item = railroad.OneOrMore(item=choice_item) - super().__init__(one_or_more_item, label=self.all_label) - - -class AnnotatedItem(railroad.Group): - """ - Simple subclass of Group that creates an annotation label - """ - - def __init__(self, label: str, item): - super().__init__(item=item, label="[{}]".format(label) if label else label) - - -class EditablePartial(Generic[T]): - """ - Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been - constructed. - """ - - # We need this here because the railroad constructors actually transform the data, so can't be called until the - # entire tree is assembled - - def __init__(self, func: Callable[..., T], args: list, kwargs: dict): - self.func = func - self.args = args - self.kwargs = kwargs - - @classmethod - def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": - """ - If you call this function in the same way that you would call the constructor, it will store the arguments - as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) - """ - return EditablePartial(func=func, args=list(args), kwargs=kwargs) - - @property - def name(self): - return self.kwargs["name"] - - def __call__(self) -> T: - """ - Evaluate the partial and return the result - """ - args = self.args.copy() - kwargs = self.kwargs.copy() - - # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
- # args=['list', 'of', 'things']) - arg_spec = inspect.getfullargspec(self.func) - if arg_spec.varargs in self.kwargs: - args += kwargs.pop(arg_spec.varargs) - - return self.func(*args, **kwargs) - - -def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: - """ - Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams - :params kwargs: kwargs to be passed in to the template - """ - data = [] - for diagram in diagrams: - if diagram.diagram is None: - continue - io = StringIO() - diagram.diagram.writeSvg(io.write) - title = diagram.name - if diagram.index == 0: - title += " (root)" - data.append({"title": title, "text": "", "svg": io.getvalue()}) - - return template.render(diagrams=data, **kwargs) - - -def resolve_partial(partial: "EditablePartial[T]") -> T: - """ - Recursively resolves a collection of Partials into whatever type they are - """ - if isinstance(partial, EditablePartial): - partial.args = resolve_partial(partial.args) - partial.kwargs = resolve_partial(partial.kwargs) - return partial() - elif isinstance(partial, list): - return [resolve_partial(x) for x in partial] - elif isinstance(partial, dict): - return {key: resolve_partial(x) for key, x in partial.items()} - else: - return partial - - -def to_railroad( - element: pyparsing.ParserElement, - diagram_kwargs: typing.Optional[dict] = None, - vertical: int = 3, - show_results_names: bool = False, - show_groups: bool = False, -) -> List[NamedDiagram]: - """ - Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram - creation if you want to access the Railroad tree before it is converted to HTML - :param element: base element of the parser being diagrammed - :param diagram_kwargs: kwargs to pass to the Diagram() constructor - :param vertical: (optional) - int - limit at which number of alternatives should be - shown vertically instead of horizontally - :param show_results_names - bool to indicate whether results name annotations should be - included in the diagram - :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled - surrounding box - """ - # Convert the whole tree underneath the root - lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) - _to_diagram_element( - element, - lookup=lookup, - parent=None, - vertical=vertical, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - root_id = id(element) - # Convert the root if it hasn't been already - if root_id in lookup: - if not element.customName: - lookup[root_id].name = "" - lookup[root_id].mark_for_extraction(root_id, lookup, force=True) - - # Now that we're finished, we can convert from intermediate structures into Railroad elements - diags = list(lookup.diagrams.values()) - if len(diags) > 1: - # collapse out duplicate diags with the same name - seen = set() - deduped_diags = [] - for d in diags: - # don't extract SkipTo elements, they are uninformative as subdiagrams - if d.name == "...": - continue - if d.name is not None and d.name not in seen: - seen.add(d.name) - deduped_diags.append(d) - resolved = [resolve_partial(partial) for partial in deduped_diags] - else: - # special case - if just one diagram, always display it, even if - # it has no name - resolved = [resolve_partial(partial) for partial in diags] - return sorted(resolved, key=lambda diag: diag.index) - - -def _should_vertical( - specification: int, exprs: Iterable[pyparsing.ParserElement] -) -> bool: - """ - Returns true if we should return a 
vertical list of elements - """ - if specification is None: - return False - else: - return len(_visible_exprs(exprs)) >= specification - - -class ElementState: - """ - State recorded for an individual pyparsing Element - """ - - # Note: this should be a dataclass, but we have to support Python 3.5 - def __init__( - self, - element: pyparsing.ParserElement, - converted: EditablePartial, - parent: EditablePartial, - number: int, - name: str = None, - parent_index: typing.Optional[int] = None, - ): - #: The pyparsing element that this represents - self.element: pyparsing.ParserElement = element - #: The name of the element - self.name: typing.Optional[str] = name - #: The output Railroad element in an unconverted state - self.converted: EditablePartial = converted - #: The parent Railroad element, which we store so that we can extract this if it's duplicated - self.parent: EditablePartial = parent - #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram - self.number: int = number - #: The index of this inside its parent - self.parent_index: typing.Optional[int] = parent_index - #: If true, we should extract this out into a subdiagram - self.extract: bool = False - #: If true, all of this element's children have been filled out - self.complete: bool = False - - def mark_for_extraction( - self, el_id: int, state: "ConverterState", name: str = None, force: bool = False - ): - """ - Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram - :param el_id: id of the element - :param state: element/diagram state tracker - :param name: name to use for this element's text - :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the - root element when we know we're finished - """ - self.extract = True - - # Set the name - if not self.name: - if name: - # Allow forcing a custom name - self.name = name - elif self.element.customName: - self.name = self.element.customName - else: - self.name = "" - - # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children - # to be added - # Also, if this is just a string literal etc, don't bother extracting it - if force or (self.complete and _worth_extracting(self.element)): - state.extract_into_diagram(el_id) - - -class ConverterState: - """ - Stores some state that persists between recursions into the element tree - """ - - def __init__(self, diagram_kwargs: typing.Optional[dict] = None): - #: A dictionary mapping ParserElements to state relating to them - self._element_diagram_states: Dict[int, ElementState] = {} - #: A dictionary mapping ParserElement IDs to subdiagrams generated from them - self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} - #: The index of the next unnamed element - self.unnamed_index: int = 1 - #: The index of the next element. 
This is used for sorting - self.index: int = 0 - #: Shared kwargs that are used to customize the construction of diagrams - self.diagram_kwargs: dict = diagram_kwargs or {} - self.extracted_diagram_names: Set[str] = set() - - def __setitem__(self, key: int, value: ElementState): - self._element_diagram_states[key] = value - - def __getitem__(self, key: int) -> ElementState: - return self._element_diagram_states[key] - - def __delitem__(self, key: int): - del self._element_diagram_states[key] - - def __contains__(self, key: int): - return key in self._element_diagram_states - - def generate_unnamed(self) -> int: - """ - Generate a number used in the name of an otherwise unnamed diagram - """ - self.unnamed_index += 1 - return self.unnamed_index - - def generate_index(self) -> int: - """ - Generate a number used to index a diagram - """ - self.index += 1 - return self.index - - def extract_into_diagram(self, el_id: int): - """ - Used when we encounter the same token twice in the same tree. When this - happens, we replace all instances of that token with a terminal, and - create a new subdiagram for the token - """ - position = self[el_id] - - # Replace the original definition of this element with a regular block - if position.parent: - ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) - if "item" in position.parent.kwargs: - position.parent.kwargs["item"] = ret - elif "items" in position.parent.kwargs: - position.parent.kwargs["items"][position.parent_index] = ret - - # If the element we're extracting is a group, skip to its content but keep the title - if position.converted.func == railroad.Group: - content = position.converted.kwargs["item"] - else: - content = position.converted - - self.diagrams[el_id] = EditablePartial.from_call( - NamedDiagram, - name=position.name, - diagram=EditablePartial.from_call( - railroad.Diagram, content, **self.diagram_kwargs - ), - index=position.number, - ) - - del self[el_id] - - -def _worth_extracting(element: pyparsing.ParserElement) -> bool: - """ - Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children - themselves have children, then its complex enough to extract - """ - children = element.recurse() - return any(child.recurse() for child in children) - - -def _apply_diagram_item_enhancements(fn): - """ - decorator to ensure enhancements to a diagram item (such as results name annotations) - get applied on return from _to_diagram_element (we do this since there are several - returns in _to_diagram_element) - """ - - def _inner( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, - ) -> typing.Optional[EditablePartial]: - - ret = fn( - element, - parent, - lookup, - vertical, - index, - name_hint, - show_results_names, - show_groups, - ) - - # apply annotation for results name, if present - if show_results_names and ret is not None: - element_results_name = element.resultsName - if element_results_name: - # add "*" to indicate if this is a "list all results" name - element_results_name += "" if element.modalResults else "*" - ret = EditablePartial.from_call( - railroad.Group, item=ret, label=element_results_name - ) - - return ret - - return _inner - - -def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): - non_diagramming_exprs = ( - pyparsing.ParseElementEnhance, - pyparsing.PositionToken, - pyparsing.And._ErrorStop, - ) - return [ - e - for e in exprs - if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) - ] - - -@_apply_diagram_item_enhancements -def _to_diagram_element( - element: pyparsing.ParserElement, - parent: typing.Optional[EditablePartial], - lookup: ConverterState = None, - vertical: int = None, - index: int = 0, - name_hint: str = None, - show_results_names: bool = False, - show_groups: bool = False, -) -> typing.Optional[EditablePartial]: - """ - Recursively converts a PyParsing Element to a railroad Element - :param lookup: The shared converter state that keeps track of useful things - :param index: The index of this element within the parent - :param parent: The parent of this element in the output tree - :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), - it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never - do so - :param name_hint: If provided, this will override the generated name - :param show_results_names: bool flag indicating whether to add annotations for results names - :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed - :param show_groups: bool flag indicating whether to show groups using bounding box - """ - exprs = element.recurse() - name = name_hint or element.customName or element.__class__.__name__ - - # Python's id() is used to provide a unique identifier for elements - el_id = id(element) - - element_results_name = element.resultsName - - # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram - if not element.customName: - if isinstance( - element, - ( - # pyparsing.TokenConverter, - # pyparsing.Forward, - pyparsing.Located, - ), - ): - # However, if this element has a useful custom name, and its child does not, we can pass it on to the child - if exprs: - if not exprs[0].customName: - propagated_name = name - else: - propagated_name = None - - return _to_diagram_element( - element.expr, - parent=parent, - lookup=lookup, - vertical=vertical, - index=index, - name_hint=propagated_name, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # If the element isn't worth extracting, we always treat it as the first time we say it - if _worth_extracting(element): - if el_id in lookup: - # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, - # so we have to extract it into a new diagram. - looked_up = lookup[el_id] - looked_up.mark_for_extraction(el_id, lookup, name=name_hint) - ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) - return ret - - elif el_id in lookup.diagrams: - # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we - # just put in a marker element that refers to the sub-diagram - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - return ret - - # Recursively convert child elements - # Here we find the most relevant Railroad element for matching pyparsing Element - # We use ``items=[]`` here to hold the place for where the child elements will go once created - if isinstance(element, pyparsing.And): - # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat - # (all will have the same name, and resultsName) - if not exprs: - return None - if len(set((e.name, e.resultsName) for e in exprs)) == 1: - ret = EditablePartial.from_call( - railroad.OneOrMore, item="", repeat=str(len(exprs)) - ) - elif _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Stack, items=[]) - else: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): - if not exprs: - return None - if _should_vertical(vertical, exprs): - ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) - else: - ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) - elif isinstance(element, pyparsing.Each): - if not exprs: - return None - ret = EditablePartial.from_call(EachItem, items=[]) - elif isinstance(element, pyparsing.NotAny): - ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") - elif isinstance(element, pyparsing.FollowedBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") - elif isinstance(element, pyparsing.PrecededBy): - ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") - elif isinstance(element, pyparsing.Group): - if show_groups: - ret = EditablePartial.from_call(AnnotatedItem, label="", item="") - else: - ret = EditablePartial.from_call(railroad.Group, label="", item="") - elif isinstance(element, pyparsing.TokenConverter): - ret = EditablePartial.from_call( - AnnotatedItem, label=type(element).__name__.lower(), item="" - ) - elif isinstance(element, pyparsing.Opt): - ret = EditablePartial.from_call(railroad.Optional, item="") - elif isinstance(element, pyparsing.OneOrMore): - ret = EditablePartial.from_call(railroad.OneOrMore, item="") - elif isinstance(element, pyparsing.ZeroOrMore): - ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") - elif isinstance(element, pyparsing.Group): - ret = EditablePartial.from_call( - railroad.Group, item=None, label=element_results_name - ) - elif isinstance(element, pyparsing.Empty) and not element.customName: - # Skip unnamed "Empty" elements - ret = None - elif len(exprs) > 1: - ret = EditablePartial.from_call(railroad.Sequence, items=[]) - elif len(exprs) > 0 and not element_results_name: - ret = EditablePartial.from_call(railroad.Group, item="", label=name) - else: - terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) - ret = terminal - - if ret is None: - return - - # Indicate this element's position in the tree so we can extract it if necessary - lookup[el_id] = ElementState( - element=element, - converted=ret, - parent=parent, - parent_index=index, - number=lookup.generate_index(), - ) - if element.customName: - lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) - - i = 0 - for expr in exprs: - # Add a placeholder index in case we have to extract the child before we even add it to the parent - if "items" in ret.kwargs: - ret.kwargs["items"].insert(i, None) - - item = _to_diagram_element( - expr, - parent=ret, - lookup=lookup, - vertical=vertical, - index=i, - show_results_names=show_results_names, - show_groups=show_groups, - ) - - # Some elements don't need to be shown in the diagram - if item is not None: - if "item" in ret.kwargs: - ret.kwargs["item"] = item - elif "items" in ret.kwargs: - # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal - ret.kwargs["items"][i] = item - i += 1 - elif "items" in ret.kwargs: - # If we're supposed to skip this element, remove it from the parent - del ret.kwargs["items"][i] - - # If all this items children are none, skip this item - if ret and ( - ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) - or ("item" in ret.kwargs and ret.kwargs["item"] is None) - ): - ret = EditablePartial.from_call(railroad.Terminal, name) - - # Mark this element as "complete", ie it has all of its children - if el_id in lookup: - lookup[el_id].complete = True - - if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: - lookup.extract_into_diagram(el_id) - if ret is not None: - ret = EditablePartial.from_call( - railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] - ) - - return ret diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/json.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/json.py deleted file mode 100644 index ea94493f21e6f5583469d882d08203381ee31117..0000000000000000000000000000000000000000 --- 
a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/json.py +++ /dev/null @@ -1,140 +0,0 @@ -from pathlib import Path -from json import loads, dumps -from typing import Any, Callable, Optional, Union - -from .text import Text -from .highlighter import JSONHighlighter, NullHighlighter - - -class JSON: - """A renderable which pretty prints JSON. - - Args: - json (str): JSON encoded data. - indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2. - highlight (bool, optional): Enable highlighting. Defaults to True. - skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. - ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. - check_circular (bool, optional): Check for circular references. Defaults to True. - allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. - default (Callable, optional): A callable that converts values that can not be encoded - in to something that can be JSON encoded. Defaults to None. - sort_keys (bool, optional): Sort dictionary keys. Defaults to False. - """ - - def __init__( - self, - json: str, - indent: Union[None, int, str] = 2, - highlight: bool = True, - skip_keys: bool = False, - ensure_ascii: bool = False, - check_circular: bool = True, - allow_nan: bool = True, - default: Optional[Callable[[Any], Any]] = None, - sort_keys: bool = False, - ) -> None: - data = loads(json) - json = dumps( - data, - indent=indent, - skipkeys=skip_keys, - ensure_ascii=ensure_ascii, - check_circular=check_circular, - allow_nan=allow_nan, - default=default, - sort_keys=sort_keys, - ) - highlighter = JSONHighlighter() if highlight else NullHighlighter() - self.text = highlighter(json) - self.text.no_wrap = True - self.text.overflow = None - - @classmethod - def from_data( - cls, - data: Any, - indent: Union[None, int, str] = 2, - highlight: bool = True, - skip_keys: bool = False, - ensure_ascii: bool = False, - check_circular: bool = True, - allow_nan: bool = True, - default: Optional[Callable[[Any], Any]] = None, - sort_keys: bool = False, - ) -> "JSON": - """Encodes a JSON object from arbitrary data. - - Args: - data (Any): An object that may be encoded in to JSON - indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2. - highlight (bool, optional): Enable highlighting. Defaults to True. - default (Callable, optional): Optional callable which will be called for objects that cannot be serialized. Defaults to None. - skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False. - ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. - check_circular (bool, optional): Check for circular references. Defaults to True. - allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. - default (Callable, optional): A callable that converts values that can not be encoded - in to something that can be JSON encoded. Defaults to None. - sort_keys (bool, optional): Sort dictionary keys. Defaults to False. - - Returns: - JSON: New JSON object from the given data. 
- """ - json_instance: "JSON" = cls.__new__(cls) - json = dumps( - data, - indent=indent, - skipkeys=skip_keys, - ensure_ascii=ensure_ascii, - check_circular=check_circular, - allow_nan=allow_nan, - default=default, - sort_keys=sort_keys, - ) - highlighter = JSONHighlighter() if highlight else NullHighlighter() - json_instance.text = highlighter(json) - json_instance.text.no_wrap = True - json_instance.text.overflow = None - return json_instance - - def __rich__(self) -> Text: - return self.text - - -if __name__ == "__main__": - - import argparse - import sys - - parser = argparse.ArgumentParser(description="Pretty print json") - parser.add_argument( - "path", - metavar="PATH", - help="path to file, or - for stdin", - ) - parser.add_argument( - "-i", - "--indent", - metavar="SPACES", - type=int, - help="Number of spaces in an indent", - default=2, - ) - args = parser.parse_args() - - from pip._vendor.rich.console import Console - - console = Console() - error_console = Console(stderr=True) - - try: - if args.path == "-": - json_data = sys.stdin.read() - else: - json_data = Path(args.path).read_text() - except Exception as error: - error_console.print(f"Unable to read {args.path!r}; {error}") - sys.exit(-1) - - console.print(JSON(json_data, indent=args.indent), soft_wrap=True) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/losses.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/losses.py deleted file mode 100644 index cf4d5e9b165659bd9dc5d80939ea1384da7b11ef..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/layers/losses.py +++ /dev/null @@ -1,133 +0,0 @@ -import math -import torch - - -def diou_loss( - boxes1: torch.Tensor, - boxes2: torch.Tensor, - reduction: str = "none", - eps: float = 1e-7, -) -> torch.Tensor: - """ - Distance Intersection over Union Loss (Zhaohui Zheng et. al) - https://arxiv.org/abs/1911.08287 - Args: - boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). - reduction: 'none' | 'mean' | 'sum' - 'none': No reduction will be applied to the output. - 'mean': The output will be averaged. - 'sum': The output will be summed. - eps (float): small number to prevent division by zero - """ - - x1, y1, x2, y2 = boxes1.unbind(dim=-1) - x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) - - # TODO: use torch._assert_async() when pytorch 1.8 support is dropped - assert (x2 >= x1).all(), "bad box: x1 larger than x2" - assert (y2 >= y1).all(), "bad box: y1 larger than y2" - - # Intersection keypoints - xkis1 = torch.max(x1, x1g) - ykis1 = torch.max(y1, y1g) - xkis2 = torch.min(x2, x2g) - ykis2 = torch.min(y2, y2g) - - intsct = torch.zeros_like(x1) - mask = (ykis2 > ykis1) & (xkis2 > xkis1) - intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) - union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps - iou = intsct / union - - # smallest enclosing box - xc1 = torch.min(x1, x1g) - yc1 = torch.min(y1, y1g) - xc2 = torch.max(x2, x2g) - yc2 = torch.max(y2, y2g) - diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps - - # centers of boxes - x_p = (x2 + x1) / 2 - y_p = (y2 + y1) / 2 - x_g = (x1g + x2g) / 2 - y_g = (y1g + y2g) / 2 - distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) - - # Eqn. 
(7) - loss = 1 - iou + (distance / diag_len) - if reduction == "mean": - loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() - elif reduction == "sum": - loss = loss.sum() - - return loss - - -def ciou_loss( - boxes1: torch.Tensor, - boxes2: torch.Tensor, - reduction: str = "none", - eps: float = 1e-7, -) -> torch.Tensor: - """ - Complete Intersection over Union Loss (Zhaohui Zheng et. al) - https://arxiv.org/abs/1911.08287 - Args: - boxes1, boxes2 (Tensor): box locations in XYXY format, shape (N, 4) or (4,). - reduction: 'none' | 'mean' | 'sum' - 'none': No reduction will be applied to the output. - 'mean': The output will be averaged. - 'sum': The output will be summed. - eps (float): small number to prevent division by zero - """ - - x1, y1, x2, y2 = boxes1.unbind(dim=-1) - x1g, y1g, x2g, y2g = boxes2.unbind(dim=-1) - - # TODO: use torch._assert_async() when pytorch 1.8 support is dropped - assert (x2 >= x1).all(), "bad box: x1 larger than x2" - assert (y2 >= y1).all(), "bad box: y1 larger than y2" - - # Intersection keypoints - xkis1 = torch.max(x1, x1g) - ykis1 = torch.max(y1, y1g) - xkis2 = torch.min(x2, x2g) - ykis2 = torch.min(y2, y2g) - - intsct = torch.zeros_like(x1) - mask = (ykis2 > ykis1) & (xkis2 > xkis1) - intsct[mask] = (xkis2[mask] - xkis1[mask]) * (ykis2[mask] - ykis1[mask]) - union = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g) - intsct + eps - iou = intsct / union - - # smallest enclosing box - xc1 = torch.min(x1, x1g) - yc1 = torch.min(y1, y1g) - xc2 = torch.max(x2, x2g) - yc2 = torch.max(y2, y2g) - diag_len = ((xc2 - xc1) ** 2) + ((yc2 - yc1) ** 2) + eps - - # centers of boxes - x_p = (x2 + x1) / 2 - y_p = (y2 + y1) / 2 - x_g = (x1g + x2g) / 2 - y_g = (y1g + y2g) / 2 - distance = ((x_p - x_g) ** 2) + ((y_p - y_g) ** 2) - - # width and height of boxes - w_pred = x2 - x1 - h_pred = y2 - y1 - w_gt = x2g - x1g - h_gt = y2g - y1g - v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) - with torch.no_grad(): - alpha = v / (1 - iou + v + eps) - - # Eqn. (10) - loss = 1 - iou + (distance / diag_len) + alpha * v - if reduction == "mean": - loss = loss.mean() if loss.numel() > 0 else 0.0 * loss.sum() - elif reduction == "sum": - loss = loss.sum() - - return loss diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py deleted file mode 100644 index 4aa319fc7e614f6a7a8ece7a45c177211c03012d..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/layers/test_deformable.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import numpy as np -import unittest -import torch - -from detectron2.layers import DeformConv, ModulatedDeformConv -from detectron2.utils.env import TORCH_VERSION - - -@unittest.skipIf( - TORCH_VERSION == (1, 8) and torch.cuda.is_available(), - "This test fails under cuda11 + torch1.8.", -) -class DeformableTest(unittest.TestCase): - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_forward_output(self): - device = torch.device("cuda") - N, C, H, W = shape = 1, 1, 5, 5 - kernel_size = 3 - padding = 1 - - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) - """ - 0 1 2 3 4 - 5 6 7 8 9 - 10 11 12 13 14 - 15 16 17 18 19 - 20 21 22 23 24 - """ - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) - - # Test DCN v1 - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) - output = deform(inputs, offset) - output = output.detach().cpu().numpy() - deform_results = np.array( - [ - [30, 41.25, 48.75, 45, 28.75], - [62.25, 81, 90, 80.25, 50.25], - [99.75, 126, 135, 117.75, 72.75], - [105, 131.25, 138.75, 120, 73.75], - [71.75, 89.25, 93.75, 80.75, 49.5], - ] - ) - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) - - # Test DCN v2 - mask_channels = kernel_size * kernel_size - mask = torch.full((N, mask_channels, H, W), 0.5, dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( - device - ) - modulate_deform.weight = deform.weight - output = modulate_deform(inputs, offset, mask) - output = output.detach().cpu().numpy() - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten() * 0.5)) - - def test_forward_output_on_cpu(self): - device = torch.device("cpu") - N, C, H, W = shape = 1, 1, 5, 5 - kernel_size = 3 - padding = 1 - - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape).to(device) - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32).to(device) - - # Test DCN v1 on cpu - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - deform.weight = torch.nn.Parameter(torch.ones_like(deform.weight)) - output = deform(inputs, offset) - output = output.detach().cpu().numpy() - deform_results = np.array( - [ - [30, 41.25, 48.75, 45, 28.75], - [62.25, 81, 90, 80.25, 50.25], - [99.75, 126, 135, 117.75, 72.75], - [105, 131.25, 138.75, 120, 73.75], - [71.75, 89.25, 93.75, 80.75, 49.5], - ] - ) - self.assertTrue(np.allclose(output.flatten(), deform_results.flatten())) - - @unittest.skipIf(not torch.cuda.is_available(), "This test requires gpu access") - def test_forward_output_on_cpu_equals_output_on_gpu(self): - N, C, H, W = shape = 2, 4, 10, 10 - kernel_size = 3 - padding = 1 - - for groups in [1, 2]: - inputs = torch.arange(np.prod(shape), dtype=torch.float32).reshape(*shape) - offset_channels = kernel_size * kernel_size * 2 - offset = torch.full((N, offset_channels, H, W), 0.5, dtype=torch.float32) - - deform_gpu = DeformConv( - C, C, kernel_size=kernel_size, padding=padding, groups=groups - ).to("cuda") - deform_gpu.weight = torch.nn.Parameter(torch.ones_like(deform_gpu.weight)) - output_gpu = deform_gpu(inputs.to("cuda"), offset.to("cuda")).detach().cpu().numpy() - - deform_cpu = DeformConv( - C, C, kernel_size=kernel_size, 
padding=padding, groups=groups - ).to("cpu") - deform_cpu.weight = torch.nn.Parameter(torch.ones_like(deform_cpu.weight)) - output_cpu = deform_cpu(inputs.to("cpu"), offset.to("cpu")).detach().numpy() - - self.assertTrue(np.allclose(output_gpu.flatten(), output_cpu.flatten())) - - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_small_input(self): - device = torch.device("cuda") - for kernel_size in [3, 5]: - padding = kernel_size // 2 - N, C, H, W = shape = (1, 1, kernel_size - 1, kernel_size - 1) - - inputs = torch.rand(shape).to(device) # input size is smaller than kernel size - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - output = deform(inputs, offset) - self.assertTrue(output.shape == inputs.shape) - - mask_channels = kernel_size * kernel_size - mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv( - C, C, kernel_size, padding=padding, bias=False - ).to(device) - output = modulate_deform(inputs, offset, mask) - self.assertTrue(output.shape == inputs.shape) - - @unittest.skipIf(not torch.cuda.is_available(), "Deformable not supported for cpu") - def test_raise_exception(self): - device = torch.device("cuda") - N, C, H, W = shape = 1, 1, 3, 3 - kernel_size = 3 - padding = 1 - - inputs = torch.rand(shape, dtype=torch.float32).to(device) - offset_channels = kernel_size * kernel_size # This is wrong channels for offset - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - deform = DeformConv(C, C, kernel_size=kernel_size, padding=padding).to(device) - self.assertRaises(RuntimeError, deform, inputs, offset) - - offset_channels = kernel_size * kernel_size * 2 - offset = torch.randn((N, offset_channels, H, W), dtype=torch.float32).to(device) - mask_channels = kernel_size * kernel_size * 2 # This is wrong channels for mask - mask = torch.ones((N, mask_channels, H, W), dtype=torch.float32).to(device) - modulate_deform = ModulatedDeformConv(C, C, kernel_size, padding=padding, bias=False).to( - device - ) - self.assertRaises(RuntimeError, modulate_deform, inputs, offset, mask) - - def test_repr(self): - module = DeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) - correct_string = ( - "DeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " - "stride=(1, 1), padding=(1, 1), dilation=(1, 1), " - "groups=1, deformable_groups=2, bias=False)" - ) - self.assertEqual(repr(module), correct_string) - - module = ModulatedDeformConv(3, 10, kernel_size=3, padding=1, deformable_groups=2) - correct_string = ( - "ModulatedDeformConv(in_channels=3, out_channels=10, kernel_size=(3, 3), " - "stride=1, padding=1, dilation=1, groups=1, deformable_groups=2, bias=True)" - ) - self.assertEqual(repr(module), correct_string) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Bart92/RVC_HF/train/utils.py b/spaces/Bart92/RVC_HF/train/utils.py deleted file mode 100644 index aae833b08acc24b848aa70114fd9b7aad8b1a6ad..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/train/utils.py +++ /dev/null @@ -1,500 +0,0 @@ -import os, traceback -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, 
level=logging.DEBUG) -logger = logging - - -def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - - ################## - def go(model, bkey): - saved_state_dict = checkpoint_dict[bkey] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): # 模型需要的shape - try: - new_state_dict[k] = saved_state_dict[k] - if saved_state_dict[k].shape != state_dict[k].shape: - print( - "shape-%s-mismatch|need-%s|get-%s" - % (k, state_dict[k].shape, saved_state_dict[k].shape) - ) # - raise KeyError - except: - # logger.info(traceback.format_exc()) - logger.info("%s is not in the checkpoint" % k) # pretrain缺失的 - new_state_dict[k] = v # 模型自带的随机值 - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - - go(combd, "combd") - go(sbd, "sbd") - ############# - logger.info("Loaded model weights") - - iteration = checkpoint_dict["iteration"] - learning_rate = checkpoint_dict["learning_rate"] - if ( - optimizer is not None and load_opt == 1 - ): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch - # try: - optimizer.load_state_dict(checkpoint_dict["optimizer"]) - # except: - # traceback.print_exc() - logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -# def load_checkpoint(checkpoint_path, model, optimizer=None): -# assert os.path.isfile(checkpoint_path) -# checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') -# iteration = checkpoint_dict['iteration'] -# learning_rate = checkpoint_dict['learning_rate'] -# if optimizer is not None: -# optimizer.load_state_dict(checkpoint_dict['optimizer']) -# # print(1111) -# saved_state_dict = checkpoint_dict['model'] -# # print(1111) -# -# if hasattr(model, 'module'): -# state_dict = model.module.state_dict() -# else: -# state_dict = model.state_dict() -# new_state_dict= {} -# for k, v in state_dict.items(): -# try: -# new_state_dict[k] = saved_state_dict[k] -# except: -# logger.info("%s is not in the checkpoint" % k) -# new_state_dict[k] = v -# if hasattr(model, 'module'): -# model.module.load_state_dict(new_state_dict) -# else: -# model.load_state_dict(new_state_dict) -# logger.info("Loaded checkpoint '{}' (epoch {})" .format( -# checkpoint_path, iteration)) -# return model, optimizer, learning_rate, iteration -def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - - saved_state_dict = checkpoint_dict["model"] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): # 模型需要的shape - try: - new_state_dict[k] = saved_state_dict[k] - if saved_state_dict[k].shape != state_dict[k].shape: - print( - "shape-%s-mismatch|need-%s|get-%s" - % (k, state_dict[k].shape, saved_state_dict[k].shape) - ) # - raise KeyError - except: - # logger.info(traceback.format_exc()) - logger.info("%s is not in the checkpoint" % k) # pretrain缺失的 - new_state_dict[k] = v # 模型自带的随机值 - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, 
strict=False) - logger.info("Loaded model weights") - - iteration = checkpoint_dict["iteration"] - learning_rate = checkpoint_dict["learning_rate"] - if ( - optimizer is not None and load_opt == 1 - ): ###加载不了,如果是空的的话,重新初始化,可能还会影响lr时间表的更新,因此在train文件最外围catch - # try: - optimizer.load_state_dict(checkpoint_dict["optimizer"]) - # except: - # traceback.print_exc() - logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info( - "Saving model and optimizer state at epoch {} to {}".format( - iteration, checkpoint_path - ) - ) - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save( - { - "model": state_dict, - "iteration": iteration, - "optimizer": optimizer.state_dict(), - "learning_rate": learning_rate, - }, - checkpoint_path, - ) - - -def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path): - logger.info( - "Saving model and optimizer state at epoch {} to {}".format( - iteration, checkpoint_path - ) - ) - if hasattr(combd, "module"): - state_dict_combd = combd.module.state_dict() - else: - state_dict_combd = combd.state_dict() - if hasattr(sbd, "module"): - state_dict_sbd = sbd.module.state_dict() - else: - state_dict_sbd = sbd.state_dict() - torch.save( - { - "combd": state_dict_combd, - "sbd": state_dict_sbd, - "iteration": iteration, - "optimizer": optimizer.state_dict(), - "learning_rate": learning_rate, - }, - checkpoint_path, - ) - - -def summarize( - writer, - global_step, - scalars={}, - histograms={}, - images={}, - audios={}, - audio_sampling_rate=22050, -): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats="HWC") - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow( - alignment.transpose(), aspect="auto", origin="lower", interpolation="none" - ) - fig.colorbar(im, ax=ax) - xlabel = "Decoder timestep" - if 
info is not None: - xlabel += "\n\n" + info - plt.xlabel(xlabel) - plt.ylabel("Encoder timestep") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - filepaths_and_text = [item for item in filepaths_and_text if len(item) == 5] # ensure there are 5 items. - return filepaths_and_text - - -def get_hparams(init=True): - """ - todo: - 结尾七人组: - 保存频率、总epoch done - bs done - pretrainG、pretrainD done - 卡号:os.en["CUDA_VISIBLE_DEVICES"] done - if_latest done - 模型:if_f0 done - 采样率:自动选择config done - 是否缓存数据集进GPU:if_cache_data_in_gpu done - - -m: - 自动决定training_files路径,改掉train_nsf_load_pretrain.py里的hps.data.training_files done - -c不要了 - """ - parser = argparse.ArgumentParser() - # parser.add_argument('-c', '--config', type=str, default="configs/40k.json",help='JSON file for configuration') - parser.add_argument( - "-se", - "--save_every_epoch", - type=int, - required=True, - help="checkpoint save frequency (epoch)", - ) - parser.add_argument( - "-te", "--total_epoch", type=int, required=True, help="total_epoch" - ) - parser.add_argument( - "-pg", "--pretrainG", type=str, default="", help="Pretrained Discriminator path" - ) - parser.add_argument( - "-pd", "--pretrainD", type=str, default="", help="Pretrained Generator path" - ) - parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -") - parser.add_argument( - "-bs", "--batch_size", type=int, required=True, help="batch size" - ) - parser.add_argument( - "-e", "--experiment_dir", type=str, required=True, help="experiment dir" - ) # -m - parser.add_argument( - "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k" - ) - parser.add_argument( - "-sw", - "--save_every_weights", - type=str, - default="0", - help="save the extracted model in weights directory when saving checkpoints", - ) - parser.add_argument( - "-v", "--version", type=str, required=True, help="model version" - ) - parser.add_argument( - "-f0", - "--if_f0", - type=int, - required=True, - help="use f0 as one of the inputs of the model, 1 or 0", - ) - parser.add_argument( - "-l", - "--if_latest", - type=int, - required=True, - help="if only save the latest G/D pth file, 1 or 0", - ) - parser.add_argument( - "-c", - "--if_cache_data_in_gpu", - type=int, - required=True, - help="if caching the dataset in GPU memory, 1 or 0", - ) - parser.add_argument( - "-li", "--log_interval", type=int, required=True, help="log interval" - ) - - args = parser.parse_args() - name = args.experiment_dir - experiment_dir = os.path.join("./logs", args.experiment_dir) - - if not os.path.exists(experiment_dir): - os.makedirs(experiment_dir) - - if args.version == "v1" or args.sample_rate == "40k": - config_path = "configs/%s.json" % args.sample_rate - else: - config_path = "configs/%s_v2.json" % args.sample_rate - config_save_path = os.path.join(experiment_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = 
HParams(**config) - hparams.model_dir = hparams.experiment_dir = experiment_dir - hparams.save_every_epoch = args.save_every_epoch - hparams.name = name - hparams.total_epoch = args.total_epoch - hparams.pretrainG = args.pretrainG - hparams.pretrainD = args.pretrainD - hparams.version = args.version - hparams.gpus = args.gpus - hparams.train.batch_size = args.batch_size - hparams.sample_rate = args.sample_rate - hparams.if_f0 = args.if_f0 - hparams.if_latest = args.if_latest - hparams.save_every_weights = args.save_every_weights - hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu - hparams.data.training_files = "%s/filelist.txt" % experiment_dir - - hparams.train.log_interval = args.log_interval - - # Update log_interval in the 'train' section of the config dictionary - config["train"]["log_interval"] = args.log_interval - - # Save the updated config back to the config_save_path - with open(config_save_path, "w") as f: - json.dump(config, f, indent=4) - - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn( - "{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - ) - ) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn( - "git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8] - ) - ) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/Benson/text-generation/Examples/Descargar Dummy Mp4 Video.md b/spaces/Benson/text-generation/Examples/Descargar Dummy Mp4 Video.md deleted file mode 100644 index d299500a82bf6675b607f7ae5d105b9a31b0bf87..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Dummy Mp4 Video.md +++ /dev/null @@ -1,79 +0,0 @@ -
        -

        How to Download a Dummy MP4 Video for Testing Purposes

        -

        Do you need a dummy mp4 video to test your video-related features and functions? If so, you are not alone. Many developers, designers, testers, and users need sample videos to verify the performance, quality, compatibility, and functionality of their video applications, websites, software, and devices. In this article, we show you how to download dummy mp4 videos from different websites and how to use them for various testing purposes.

        -

        What is a dummy mp4 video and why use it?

        -

        A dummy mp4 video is a sample video file with different resolutions and sizes

        -

        A dummy mp4 video is a sample video file that comes in different resolutions and sizes. For example, you can find dummy mp4 videos with resolutions ranging from 144p to 1080p and sizes ranging from a few KBs to several MBs. A dummy mp4 video usually has generic content that is not tied to any specific topic or theme: a simple animation, a color pattern, a text message, or a random clip.
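        If you would rather generate this kind of generic clip locally than download one, FFmpeg's synthetic test source produces exactly that. The sketch below is one possible approach, not something prescribed by this article: it assumes the `ffmpeg` binary is installed and on your PATH, and the file name and parameters are only illustrative.

```python
import subprocess

def make_dummy_mp4(path="dummy_640x360.mp4", seconds=5, size="640x360", fps=30):
    """Generate a generic test-pattern mp4 using FFmpeg's synthetic 'testsrc' input."""
    cmd = [
        "ffmpeg", "-y",                       # overwrite the output file if it exists
        "-f", "lavfi",                        # read from a libavfilter virtual input
        "-i", f"testsrc=duration={seconds}:size={size}:rate={fps}",
        "-pix_fmt", "yuv420p",                # widely compatible pixel format
        path,
    ]
    subprocess.run(cmd, check=True)

if __name__ == "__main__":
    make_dummy_mp4()
```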

        -



        Download: https://bltlly.com/2v6KpX



        -

        It is useful for testing video playback, design, and development on various devices and platforms

        -

        A dummy mp4 video is useful for testing many aspects of video playback, design, and development on different devices and platforms. For example, you can use one to test how your video application or website handles different resolutions, aspect ratios, buffering speeds, and compatibility issues. You can also use one to test how your video editing, encoding, transcoding, compression, and conversion tools and software behave with different formats and qualities. A dummy mp4 video can help you identify and fix problems or bugs in your video-related features and functions.

        -

        How to find dummy mp4 videos online?

        -

        There are many websites that offer free sample videos for download

        - -

        Some examples are Sample-Videos.com, Learning Container, and Gist

        -

        Some examples of websites that offer free sample videos for download are Sample-Videos.com, Learning Container, and Gist. These websites host a variety of sample videos in different formats, resolutions, sizes, and durations. You can download them directly from the websites or use their links to fetch them from other sources. The table below lists some of the sample videos available on these websites:

| Website | Format | Resolution | Size | Duration |
| --- | --- | --- | --- | --- |
| Sample-Videos.com | MP4 | 144p, 240p, 360p, 480p, 720p, 1080p | 0.1 MB, 0.3 MB, 1.2 MB, 2.4 MB, 6.1 MB, 13 MB | 30 seconds |
| Learning Container | MP4 | 1280x720, 1920x1080, 3840x2160 | 5.8 MB, 12.9 MB, 52.4 MB | 5 seconds |
| Gist | MP4 | 640x360, 1280x720, 1920x1080 | 1.6 MB, 5.7 MB, 12.8 MB | 10 seconds |

        How to download dummy mp4 videos from different websites?

        -

        The steps vary depending on the website, but generally involve copying the video URL and pasting it into a download tool

        -

        The steps for downloading a dummy mp4 video differ from site to site, but they generally involve copying the video URL and pasting it into a download tool. A download tool is a piece of software or a website that lets you download videos from various sources by entering their URLs. Some examples of download tools are SaveFrom.net, Y2mate.com, and OnlineVideoConverter.com. These are the common steps for downloading a dummy mp4 video with a download tool (a minimal scripted equivalent follows the list):

        -
          -
        1. Go to the website that offers the sample video you want and copy its URL.
        2. Go to the download tool's website and paste the URL into the input box.
        3. The download tool processes the URL and generates a download link for the video.
        4. Click the download link and save the video file to the location you want.
        5. Open the video file with a media player and check that it plays as expected.
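        For automated testing it is often easier to skip the interactive tool and fetch a sample file directly by URL. The snippet below is a rough sketch of that approach rather than a documented workflow from any of the sites above; the URL shown is a placeholder, not a guaranteed live link, and the `requests` package is assumed to be installed.

```python
import requests

# Placeholder URL: substitute a real sample-video link from one of the sites above.
SAMPLE_URL = "https://example.com/samples/dummy_720p.mp4"

def download_sample(url: str, out_path: str = "dummy_720p.mp4") -> str:
    """Stream a sample mp4 to disk so large files do not have to fit in memory."""
    with requests.get(url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(out_path, "wb") as fh:
            for chunk in resp.iter_content(chunk_size=1 << 20):  # 1 MiB chunks
                fh.write(chunk)
    return out_path

if __name__ == "__main__":
    print("saved to", download_sample(SAMPLE_URL))
```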
        -

        Some websites let you choose the format and quality of the video, while others provide predefined options

        -

        Some websites that offer sample videos let you choose the format and quality before downloading. For example, Sample-Videos.com lets you pick from different resolutions and sizes of mp4 videos, Learning Container lets you pick from different resolutions of mp4 videos, and Gist lets you pick from different resolutions and formats.

        -

        Other websites provide predefined options for downloading sample videos. For example, SaveFrom.net lists the formats and qualities available for each video URL you enter, Y2mate.com lists the formats and sizes available, and OnlineVideoConverter.com lists the formats available.

        -

        How to use a dummy mp4 video for testing purposes?

        -

        A dummy mp4 video can be used to test video playback, resolution, aspect ratio, buffering, and compatibility on various devices and platforms

        -

        A dummy mp4 video can be used to test many aspects of video playback on different devices and platforms. For example, you can use it to check how your video application or website handles different resolutions, aspect ratios, buffering speeds, and compatibility issues across browsers, operating systems, and devices. You can also use it to check how your video player or device renders different qualities and formats of video.
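        Before wiring a downloaded sample into a test suite, a quick sanity check of its basic properties is useful. The sketch below uses OpenCV (`opencv-python`), which is an assumption on my part rather than anything the article requires; the file name matches the download sketch earlier and is otherwise arbitrary.

```python
import cv2

def probe_video(path: str = "dummy_720p.mp4") -> dict:
    """Return basic playback properties of a local video file."""
    cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        raise RuntimeError(f"could not open {path}")
    info = {
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "frames": int(cap.get(cv2.CAP_PROP_FRAME_COUNT)),
    }
    info["duration_s"] = info["frames"] / info["fps"] if info["fps"] else None
    cap.release()
    return info

if __name__ == "__main__":
    print(probe_video())
```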

        -

        A dummy mp4 video can also be used to test video editing, encoding, transcoding, compression, and conversion tools and software

        - -

        Conclusion

        -

        A dummy mp4 video is a useful resource for testing video-related features and functions. It can be downloaded easily from various websites in a few simple steps, and it can be used for many purposes such as video design, development, editing, and optimization. By using dummy mp4 videos, you can make sure that your video applications, websites, software, and devices work smoothly and efficiently.

        -

        Frequently asked questions

        -

        What are some common video formats besides mp4?

        -

        Some common video formats besides mp4 are AVI, WMV, MOV, MKV, FLV, WEBM, and MPEG. Each format has its own advantages and disadvantages in terms of quality, compatibility, and file size.

        -

        -

        What are some popular video streaming websites that support the mp4 format?

        -

        Some popular video streaming websites that support the mp4 format are YouTube, Vimeo, Dailymotion, Facebook, and Instagram. These websites let users upload and watch videos in mp4 format.

        -

        What are some advantages and disadvantages of the mp4 format?

        -

        Some advantages of the mp4 format are its high quality at a relatively small file size, its wide compatibility, and its support for multiple audio and video streams. Some disadvantages are that it may not support certain codecs or features, it can become corrupted fairly easily, and it can carry licensing issues.

        -

        How to convert other video formats to mp4?

        -

        To convert other video formats to mp4, you can use online or offline video converter tools or software. Online converter tools are websites that let you upload your video file and choose the output format and quality. Offline converter software consists of programs you install on your computer and use to convert the file locally. Some examples of online converter tools are CloudConvert.com, Online-Convert.com, and Zamzar.com. Some examples of offline converter software are HandBrake, VLC Media Player, and Freemake Video Converter.
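        Another common offline option, not named in the list above, is to drive FFmpeg from a script. The sketch below assumes `ffmpeg` is installed and that H.264/AAC output is acceptable; the file names are placeholders.

```python
import subprocess

def convert_to_mp4(src: str = "input.avi", dst: str = "output.mp4") -> None:
    """Re-encode an arbitrary input file to an H.264/AAC mp4."""
    subprocess.run(
        ["ffmpeg", "-y", "-i", src,
         "-c:v", "libx264",   # widely supported video codec
         "-c:a", "aac",       # widely supported audio codec
         dst],
        check=True,
    )

if __name__ == "__main__":
    convert_to_mp4()
```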

        - -

        To reduce the file size of mp4 videos, you can use online or offline video compressor tools or software. Online compressor tools are websites that let you upload your video file and choose the output size and quality. Offline compressor software consists of programs you install on your computer and use to compress the file locally. Some examples of online compressor tools are Compressify.io, YouCompress.com, and Clideo.com. Some examples of offline compressor software are WinX Video Converter, Free Video Compressor, and Any Video Converter.
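        If FFmpeg is already available, the same scripted approach also covers compression by raising the constant rate factor (a higher CRF gives a smaller file at lower quality). This is a sketch with assumed file names and settings, not a recommendation of specific parameters.

```python
import subprocess

def compress_mp4(src: str = "dummy_720p.mp4",
                 dst: str = "dummy_720p_small.mp4",
                 crf: int = 28) -> None:
    """Shrink an mp4 by re-encoding with a higher CRF (typical range 18-28, higher = smaller)."""
    subprocess.run(
        ["ffmpeg", "-y", "-i", src,
         "-c:v", "libx264", "-crf", str(crf), "-preset", "medium",
         "-c:a", "copy",      # keep the original audio stream untouched
         dst],
        check=True,
    )

if __name__ == "__main__":
    compress_mp4()
```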

        -
        -
        \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/cells.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/cells.py deleted file mode 100644 index 9354f9e3140999702ec8c140636c511d71c340b2..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/cells.py +++ /dev/null @@ -1,154 +0,0 @@ -import re -from functools import lru_cache -from typing import Callable, List - -from ._cell_widths import CELL_WIDTHS - -# Regex to match sequence of the most common character ranges -_is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0\u02ff\u0370-\u0482]*$").match - - -@lru_cache(4096) -def cached_cell_len(text: str) -> int: - """Get the number of cells required to display text. - - This method always caches, which may use up a lot of memory. It is recommended to use - `cell_len` over this method. - - Args: - text (str): Text to display. - - Returns: - int: Get the number of cells required to display text. - """ - _get_size = get_character_cell_size - total_size = sum(_get_size(character) for character in text) - return total_size - - -def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int: - """Get the number of cells required to display text. - - Args: - text (str): Text to display. - - Returns: - int: Get the number of cells required to display text. - """ - if len(text) < 512: - return _cell_len(text) - _get_size = get_character_cell_size - total_size = sum(_get_size(character) for character in text) - return total_size - - -@lru_cache(maxsize=4096) -def get_character_cell_size(character: str) -> int: - """Get the cell size of a character. - - Args: - character (str): A single character. - - Returns: - int: Number of cells (0, 1 or 2) occupied by that character. - """ - return _get_codepoint_cell_size(ord(character)) - - -@lru_cache(maxsize=4096) -def _get_codepoint_cell_size(codepoint: int) -> int: - """Get the cell size of a character. - - Args: - codepoint (int): Codepoint of a character. - - Returns: - int: Number of cells (0, 1 or 2) occupied by that character. 
- """ - - _table = CELL_WIDTHS - lower_bound = 0 - upper_bound = len(_table) - 1 - index = (lower_bound + upper_bound) // 2 - while True: - start, end, width = _table[index] - if codepoint < start: - upper_bound = index - 1 - elif codepoint > end: - lower_bound = index + 1 - else: - return 0 if width == -1 else width - if upper_bound < lower_bound: - break - index = (lower_bound + upper_bound) // 2 - return 1 - - -def set_cell_size(text: str, total: int) -> str: - """Set the length of a string to fit within given number of cells.""" - - if _is_single_cell_widths(text): - size = len(text) - if size < total: - return text + " " * (total - size) - return text[:total] - - if total <= 0: - return "" - cell_size = cell_len(text) - if cell_size == total: - return text - if cell_size < total: - return text + " " * (total - cell_size) - - start = 0 - end = len(text) - - # Binary search until we find the right size - while True: - pos = (start + end) // 2 - before = text[: pos + 1] - before_len = cell_len(before) - if before_len == total + 1 and cell_len(before[-1]) == 2: - return before[:-1] + " " - if before_len == total: - return before - if before_len > total: - end = pos - else: - start = pos - - -# TODO: This is inefficient -# TODO: This might not work with CWJ type characters -def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]: - """Break text in to equal (cell) length strings, returning the characters in reverse - order""" - _get_character_cell_size = get_character_cell_size - characters = [ - (character, _get_character_cell_size(character)) for character in text - ] - total_size = position - lines: List[List[str]] = [[]] - append = lines[-1].append - - for character, size in reversed(characters): - if total_size + size > max_size: - lines.append([character]) - append = lines[-1].append - total_size = size - else: - total_size += size - append(character) - - return ["".join(line) for line in lines] - - -if __name__ == "__main__": # pragma: no cover - - print(get_character_cell_size("😽")) - for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8): - print(line) - for n in range(80, 1, -1): - print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|") - print("x" * n) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_scripts.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_scripts.py deleted file mode 100644 index 2cc5d1e09c09b6c674d47a26c5ebc6163705ecce..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/build_scripts.py +++ /dev/null @@ -1,173 +0,0 @@ -"""distutils.command.build_scripts - -Implements the Distutils 'build_scripts' command.""" - -import os -import re -from stat import ST_MODE -from distutils import sysconfig -from distutils.core import Command -from distutils.dep_util import newer -from distutils.util import convert_path -from distutils import log -import tokenize - -shebang_pattern = re.compile('^#!.*python[0-9.]*([ \t].*)?$') -""" -Pattern matching a Python interpreter indicated in first line of a script. -""" - -# for Setuptools compatibility -first_line_re = shebang_pattern - - -class build_scripts(Command): - - description = "\"build\" scripts (copy and fixup #! 
line)" - - user_options = [ - ('build-dir=', 'd', "directory to \"build\" (copy) to"), - ('force', 'f', "forcibly build everything (ignore file timestamps"), - ('executable=', 'e', "specify final destination interpreter path"), - ] - - boolean_options = ['force'] - - def initialize_options(self): - self.build_dir = None - self.scripts = None - self.force = None - self.executable = None - - def finalize_options(self): - self.set_undefined_options( - 'build', - ('build_scripts', 'build_dir'), - ('force', 'force'), - ('executable', 'executable'), - ) - self.scripts = self.distribution.scripts - - def get_source_files(self): - return self.scripts - - def run(self): - if not self.scripts: - return - self.copy_scripts() - - def copy_scripts(self): - """ - Copy each script listed in ``self.scripts``. - - If a script is marked as a Python script (first line matches - 'shebang_pattern', i.e. starts with ``#!`` and contains - "python"), then adjust in the copy the first line to refer to - the current Python interpreter. - """ - self.mkpath(self.build_dir) - outfiles = [] - updated_files = [] - for script in self.scripts: - self._copy_script(script, outfiles, updated_files) - - self._change_modes(outfiles) - - return outfiles, updated_files - - def _copy_script(self, script, outfiles, updated_files): # noqa: C901 - shebang_match = None - script = convert_path(script) - outfile = os.path.join(self.build_dir, os.path.basename(script)) - outfiles.append(outfile) - - if not self.force and not newer(script, outfile): - log.debug("not copying %s (up-to-date)", script) - return - - # Always open the file, but ignore failures in dry-run mode - # in order to attempt to copy directly. - try: - f = tokenize.open(script) - except OSError: - if not self.dry_run: - raise - f = None - else: - first_line = f.readline() - if not first_line: - self.warn("%s is an empty file (skipping)" % script) - return - - shebang_match = shebang_pattern.match(first_line) - - updated_files.append(outfile) - if shebang_match: - log.info("copying and adjusting %s -> %s", script, self.build_dir) - if not self.dry_run: - if not sysconfig.python_build: - executable = self.executable - else: - executable = os.path.join( - sysconfig.get_config_var("BINDIR"), - "python%s%s" - % ( - sysconfig.get_config_var("VERSION"), - sysconfig.get_config_var("EXE"), - ), - ) - post_interp = shebang_match.group(1) or '' - shebang = "#!" + executable + post_interp + "\n" - self._validate_shebang(shebang, f.encoding) - with open(outfile, "w", encoding=f.encoding) as outf: - outf.write(shebang) - outf.writelines(f.readlines()) - if f: - f.close() - else: - if f: - f.close() - self.copy_file(script, outfile) - - def _change_modes(self, outfiles): - if os.name != 'posix': - return - - for file in outfiles: - self._change_mode(file) - - def _change_mode(self, file): - if self.dry_run: - log.info("changing mode of %s", file) - return - - oldmode = os.stat(file)[ST_MODE] & 0o7777 - newmode = (oldmode | 0o555) & 0o7777 - if newmode != oldmode: - log.info("changing mode of %s from %o to %o", file, oldmode, newmode) - os.chmod(file, newmode) - - @staticmethod - def _validate_shebang(shebang, encoding): - # Python parser starts to read a script using UTF-8 until - # it gets a #coding:xxx cookie. The shebang has to be the - # first line of a file, the #coding:xxx cookie cannot be - # written before. So the shebang has to be encodable to - # UTF-8. 
- try: - shebang.encode('utf-8') - except UnicodeEncodeError: - raise ValueError( - "The shebang ({!r}) is not encodable " "to utf-8".format(shebang) - ) - - # If the script is encoded to a custom encoding (use a - # #coding:xxx cookie), the shebang has to be encodable to - # the script encoding too. - try: - shebang.encode(encoding) - except UnicodeEncodeError: - raise ValueError( - "The shebang ({!r}) is not encodable " - "to the script encoding ({})".format(shebang, encoding) - ) diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_compat.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_compat.py deleted file mode 100644 index ef3136f8d2a13c3d251e146d8d754e21c3ed1c38..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_metadata/_compat.py +++ /dev/null @@ -1,71 +0,0 @@ -import sys -import platform - - -__all__ = ['install', 'NullFinder', 'Protocol'] - - -try: - from typing import Protocol -except ImportError: # pragma: no cover - from ..typing_extensions import Protocol # type: ignore - - -def install(cls): - """ - Class decorator for installation on sys.meta_path. - - Adds the backport DistributionFinder to sys.meta_path and - attempts to disable the finder functionality of the stdlib - DistributionFinder. - """ - sys.meta_path.append(cls()) - disable_stdlib_finder() - return cls - - -def disable_stdlib_finder(): - """ - Give the backport primacy for discovering path-based distributions - by monkey-patching the stdlib O_O. - - See #91 for more background for rationale on this sketchy - behavior. - """ - - def matches(finder): - return getattr( - finder, '__module__', None - ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions') - - for finder in filter(matches, sys.meta_path): # pragma: nocover - del finder.find_distributions - - -class NullFinder: - """ - A "Finder" (aka "MetaClassFinder") that never finds any modules, - but may find distributions. - """ - - @staticmethod - def find_spec(*args, **kwargs): - return None - - # In Python 2, the import system requires finders - # to have a find_module() method, but this usage - # is deprecated in Python 3 in favor of find_spec(). - # For the purposes of this finder (i.e. being present - # on sys.meta_path but having no other import - # system functionality), the two methods are identical. - find_module = find_spec - - -def pypy_partial(val): - """ - Adjust for variable stacklevel on partial under PyPy. - - Workaround for #327. - """ - is_pypy = platform.python_implementation() == 'PyPy' - return val + is_pypy diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_musllinux.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_musllinux.py deleted file mode 100644 index 8ac3059ba3c246b9a5a6fb8d14936bb07777191e..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/packaging/_musllinux.py +++ /dev/null @@ -1,136 +0,0 @@ -"""PEP 656 support. - -This module implements logic to detect if the currently running Python is -linked against musl, and what musl version is used. 
-""" - -import contextlib -import functools -import operator -import os -import re -import struct -import subprocess -import sys -from typing import IO, Iterator, NamedTuple, Optional, Tuple - - -def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: - return struct.unpack(fmt, f.read(struct.calcsize(fmt))) - - -def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: - """Detect musl libc location by parsing the Python executable. - - Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca - ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html - """ - f.seek(0) - try: - ident = _read_unpacked(f, "16B") - except struct.error: - return None - if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. - return None - f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. - - try: - # e_fmt: Format for program header. - # p_fmt: Format for section header. - # p_idx: Indexes to find p_type, p_offset, and p_filesz. - e_fmt, p_fmt, p_idx = { - 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. - 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. - }[ident[4]] - except KeyError: - return None - else: - p_get = operator.itemgetter(*p_idx) - - # Find the interpreter section and return its content. - try: - _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) - except struct.error: - return None - for i in range(e_phnum + 1): - f.seek(e_phoff + e_phentsize * i) - try: - p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) - except struct.error: - return None - if p_type != 3: # Not PT_INTERP. - continue - f.seek(p_offset) - interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") - if "musl" not in interpreter: - return None - return interpreter - return None - - -class _MuslVersion(NamedTuple): - major: int - minor: int - - -def _parse_musl_version(output: str) -> Optional[_MuslVersion]: - lines = [n for n in (n.strip() for n in output.splitlines()) if n] - if len(lines) < 2 or lines[0][:4] != "musl": - return None - m = re.match(r"Version (\d+)\.(\d+)", lines[1]) - if not m: - return None - return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) - - -@functools.lru_cache() -def _get_musl_version(executable: str) -> Optional[_MuslVersion]: - """Detect currently-running musl runtime version. - - This is done by checking the specified executable's dynamic linking - information, and invoking the loader to parse its output for a version - string. If the loader is musl, the output would be something like:: - - musl libc (x86_64) - Version 1.2.2 - Dynamic Program Loader - """ - with contextlib.ExitStack() as stack: - try: - f = stack.enter_context(open(executable, "rb")) - except OSError: - return None - ld = _parse_ld_musl_from_elf(f) - if not ld: - return None - proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) - return _parse_musl_version(proc.stderr) - - -def platform_tags(arch: str) -> Iterator[str]: - """Generate musllinux tags compatible to the current platform. - - :param arch: Should be the part of platform tag after the ``linux_`` - prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a - prerequisite for the current platform to be musllinux-compatible. - - :returns: An iterator of compatible musllinux tags. - """ - sys_musl = _get_musl_version(sys.executable) - if sys_musl is None: # Python not dynamically linked against musl. 
- return - for minor in range(sys_musl.minor, -1, -1): - yield f"musllinux_{sys_musl.major}_{minor}_{arch}" - - -if __name__ == "__main__": # pragma: no cover - import sysconfig - - plat = sysconfig.get_platform() - assert plat.startswith("linux-"), "not linux" - - print("plat:", plat) - print("musl:", _get_musl_version(sys.executable)) - print("tags:", end=" ") - for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): - print(t, end="\n ") diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/contrib/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/BorisovMaksim/denoising/denoisers/demucs.py b/spaces/BorisovMaksim/denoising/denoisers/demucs.py deleted file mode 100644 index 023a8d24c10cfaa753642693d998c4b071844825..0000000000000000000000000000000000000000 --- a/spaces/BorisovMaksim/denoising/denoisers/demucs.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch -from torch.nn.functional import pad -from utils import pad_cut_batch_audio -import torch.nn as nn - - -class Encoder(torch.nn.Module): - def __init__(self, in_channels, out_channels, cfg): - super(Encoder, self).__init__() - - self.conv1 = torch.nn.Conv1d(in_channels=in_channels, out_channels=out_channels, - kernel_size=cfg['conv1']['kernel_size'], - stride=cfg['conv1']['stride']) - self.relu1 = torch.nn.ReLU() - self.conv2 = torch.nn.Conv1d(in_channels=out_channels, out_channels=2 * out_channels, - kernel_size=cfg['conv2']['kernel_size'], - stride=cfg['conv2']['stride']) - self.glu = torch.nn.GLU(dim=-2) - - def forward(self, x): - x = self.relu1(self.conv1(x)) - if x.shape[-1] % 2 == 1: - x = pad(x, (0, 1)) - x = self.glu(self.conv2(x)) - return x - - -class Decoder(torch.nn.Module): - def __init__(self, in_channels, out_channels, cfg, is_last=False): - super(Decoder, self).__init__() - self.is_last = is_last - self.conv1 = torch.nn.Conv1d(in_channels=in_channels, out_channels=2 * in_channels, - kernel_size=cfg['conv1']['kernel_size'], - stride=cfg['conv1']['stride']) - self.glu = torch.nn.GLU(dim=-2) - self.conv2 = torch.nn.ConvTranspose1d(in_channels=in_channels, out_channels=out_channels, - kernel_size=cfg['conv2']['kernel_size'], - stride=cfg['conv2']['stride']) - self.relu = torch.nn.ReLU() - - def forward(self, x): - x = self.glu(self.conv1(x)) - x = self.conv2(x) - if not self.is_last: - x = self.relu(x) - return x - - -class Demucs(torch.nn.Module): - def __init__(self, cfg): - super(Demucs, self).__init__() - self.L = cfg['L'] - - encoders = [Encoder(in_channels=1, out_channels=cfg['H'], cfg=cfg['encoder'])] - decoders = [Decoder(in_channels=cfg['H'], out_channels=1, cfg=cfg['decoder'], is_last=True)] - for i in range(self.L - 1): - encoders.append(Encoder(in_channels=(2 ** i) * cfg['H'], - out_channels=(2 ** (i + 1)) * cfg['H'], - cfg=cfg['encoder'])) - decoders.append(Decoder(in_channels=(2 ** (i + 1)) * cfg['H'], - out_channels=(2 ** i) * cfg['H'], - cfg=cfg['decoder'])) - self.encoders = nn.ModuleList(encoders) - self.decoders = nn.ModuleList(decoders) - - self.lstm = torch.nn.LSTM( - input_size=(2 ** (self.L - 1)) * cfg['H'], - hidden_size=(2 ** (self.L - 1)) * cfg['H'], num_layers=2, batch_first=True) - - def forward(self, x): - outs = [x] - for i in range(self.L): - out = self.encoders[i](outs[-1]) - outs.append(out) - model_input = outs.pop(0) - - x, _ = self.lstm(outs[-1].permute(0, 2, 1)) - x = x.permute(0, 2, 1) - - for i in 
reversed(range(self.L)): - decoder = self.decoders[i] - x = pad_cut_batch_audio(x, outs[i].shape) - x = decoder(x + outs[i]) - x = pad_cut_batch_audio(x, model_input.shape) - return x - - def predict(self, wav): - with torch.no_grad(): - wav_reshaped = wav.reshape((1,1,-1)) - prediction = self.forward(wav_reshaped) - return prediction[0] - diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/download.sh b/spaces/CVPR/Dual-Key_Backdoor_Attacks/download.sh deleted file mode 100644 index ce8b6cb594ff6d3e540d9ca43998a4fcf951d59c..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/download.sh +++ /dev/null @@ -1,49 +0,0 @@ -## Script for downloading data - -# GloVe Vectors -# wget -P data/ http://nlp.stanford.edu/data/glove.6B.zip -wget -P data/ http://downloads.cs.stanford.edu/nlp/data/glove.6B.zip -unzip data/glove.6B.zip -d data/glove -rm data/glove.6B.zip - -# Questions -wget -P data/clean https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Train_mscoco.zip -unzip data/clean/v2_Questions_Train_mscoco.zip -d data/clean -rm data/clean/v2_Questions_Train_mscoco.zip - -wget -P data/clean https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Val_mscoco.zip -unzip data/clean/v2_Questions_Val_mscoco.zip -d data/clean -rm data/clean/v2_Questions_Val_mscoco.zip - -wget -P data/clean https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Questions_Test_mscoco.zip -unzip data/clean/v2_Questions_Test_mscoco.zip -d data/clean -rm data/clean/v2_Questions_Test_mscoco.zip - -# Annotations -wget -P data/clean https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Train_mscoco.zip -unzip data/clean/v2_Annotations_Train_mscoco.zip -d data/clean -rm data/clean/v2_Annotations_Train_mscoco.zip - -wget -P data/clean https://s3.amazonaws.com/cvmlp/vqa/mscoco/vqa/v2_Annotations_Val_mscoco.zip -unzip data/clean/v2_Annotations_Val_mscoco.zip -d data/clean -rm data/clean/v2_Annotations_Val_mscoco.zip - -# Images -wget -P data/clean http://images.cocodataset.org/zips/train2014.zip -unzip -q data/clean/train2014.zip -d data/clean -rm data/clean/train2014.zip - -wget -P data/clean http://images.cocodataset.org/zips/val2014.zip -unzip -q data/clean/val2014.zip -d data/clean -rm data/clean/val2014.zip - -wget -P data/clean http://images.cocodataset.org/zips/test2015.zip -unzip -q data/clean/test2015.zip -d data/clean -rm data/clean/test2015.zip - - -# Detectors -wget -P detectors/ https://dl.fbaipublicfiles.com/grid-feats-vqa/R-50/R-50.pth -wget -P detectors/ https://dl.fbaipublicfiles.com/grid-feats-vqa/X-101/X-101.pth -wget -P detectors/ https://dl.fbaipublicfiles.com/grid-feats-vqa/X-152/X-152.pth -wget -P detectors/ https://dl.fbaipublicfiles.com/grid-feats-vqa/X-152pp/X-152pp.pth \ No newline at end of file diff --git a/spaces/CVPR/LIVE/pybind11/include/pybind11/detail/class.h b/spaces/CVPR/LIVE/pybind11/include/pybind11/detail/class.h deleted file mode 100644 index 8d36744f2736d79c6fb9c6d93a1ce44f89e3b60e..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/pybind11/include/pybind11/detail/class.h +++ /dev/null @@ -1,668 +0,0 @@ -/* - pybind11/detail/class.h: Python C API implementation details for py::class_ - - Copyright (c) 2017 Wenzel Jakob - - All rights reserved. Use of this source code is governed by a - BSD-style license that can be found in the LICENSE file. 
-*/ - -#pragma once - -#include "../attr.h" -#include "../options.h" - -PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) -PYBIND11_NAMESPACE_BEGIN(detail) - -#if PY_VERSION_HEX >= 0x03030000 && !defined(PYPY_VERSION) -# define PYBIND11_BUILTIN_QUALNAME -# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) -#else -// In pre-3.3 Python, we still set __qualname__ so that we can produce reliable function type -// signatures; in 3.3+ this macro expands to nothing: -# define PYBIND11_SET_OLDPY_QUALNAME(obj, nameobj) setattr((PyObject *) obj, "__qualname__", nameobj) -#endif - -inline PyTypeObject *type_incref(PyTypeObject *type) { - Py_INCREF(type); - return type; -} - -#if !defined(PYPY_VERSION) - -/// `pybind11_static_property.__get__()`: Always pass the class instead of the instance. -extern "C" inline PyObject *pybind11_static_get(PyObject *self, PyObject * /*ob*/, PyObject *cls) { - return PyProperty_Type.tp_descr_get(self, cls, cls); -} - -/// `pybind11_static_property.__set__()`: Just like the above `__get__()`. -extern "C" inline int pybind11_static_set(PyObject *self, PyObject *obj, PyObject *value) { - PyObject *cls = PyType_Check(obj) ? obj : (PyObject *) Py_TYPE(obj); - return PyProperty_Type.tp_descr_set(self, cls, value); -} - -/** A `static_property` is the same as a `property` but the `__get__()` and `__set__()` - methods are modified to always use the object type instead of a concrete instance. - Return value: New reference. */ -inline PyTypeObject *make_static_property_type() { - constexpr auto *name = "pybind11_static_property"; - auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); - if (!heap_type) - pybind11_fail("make_static_property_type(): error allocating type!"); - - heap_type->ht_name = name_obj.inc_ref().ptr(); -#ifdef PYBIND11_BUILTIN_QUALNAME - heap_type->ht_qualname = name_obj.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = name; - type->tp_base = type_incref(&PyProperty_Type); - type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - type->tp_descr_get = pybind11_static_get; - type->tp_descr_set = pybind11_static_set; - - if (PyType_Ready(type) < 0) - pybind11_fail("make_static_property_type(): failure in PyType_Ready()!"); - - setattr((PyObject *) type, "__module__", str("pybind11_builtins")); - PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); - - return type; -} - -#else // PYPY - -/** PyPy has some issues with the above C API, so we evaluate Python code instead. - This function will only be called once so performance isn't really a concern. - Return value: New reference. 
*/ -inline PyTypeObject *make_static_property_type() { - auto d = dict(); - PyObject *result = PyRun_String(R"(\ - class pybind11_static_property(property): - def __get__(self, obj, cls): - return property.__get__(self, cls, cls) - - def __set__(self, obj, value): - cls = obj if isinstance(obj, type) else type(obj) - property.__set__(self, cls, value) - )", Py_file_input, d.ptr(), d.ptr() - ); - if (result == nullptr) - throw error_already_set(); - Py_DECREF(result); - return (PyTypeObject *) d["pybind11_static_property"].cast().release().ptr(); -} - -#endif // PYPY - -/** Types with static properties need to handle `Type.static_prop = x` in a specific way. - By default, Python replaces the `static_property` itself, but for wrapped C++ types - we need to call `static_property.__set__()` in order to propagate the new value to - the underlying C++ data structure. */ -extern "C" inline int pybind11_meta_setattro(PyObject* obj, PyObject* name, PyObject* value) { - // Use `_PyType_Lookup()` instead of `PyObject_GetAttr()` in order to get the raw - // descriptor (`property`) instead of calling `tp_descr_get` (`property.__get__()`). - PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); - - // The following assignment combinations are possible: - // 1. `Type.static_prop = value` --> descr_set: `Type.static_prop.__set__(value)` - // 2. `Type.static_prop = other_static_prop` --> setattro: replace existing `static_prop` - // 3. `Type.regular_attribute = value` --> setattro: regular attribute assignment - const auto static_prop = (PyObject *) get_internals().static_property_type; - const auto call_descr_set = descr && PyObject_IsInstance(descr, static_prop) - && !PyObject_IsInstance(value, static_prop); - if (call_descr_set) { - // Call `static_property.__set__()` instead of replacing the `static_property`. -#if !defined(PYPY_VERSION) - return Py_TYPE(descr)->tp_descr_set(descr, obj, value); -#else - if (PyObject *result = PyObject_CallMethod(descr, "__set__", "OO", obj, value)) { - Py_DECREF(result); - return 0; - } else { - return -1; - } -#endif - } else { - // Replace existing attribute. - return PyType_Type.tp_setattro(obj, name, value); - } -} - -#if PY_MAJOR_VERSION >= 3 -/** - * Python 3's PyInstanceMethod_Type hides itself via its tp_descr_get, which prevents aliasing - * methods via cls.attr("m2") = cls.attr("m1"): instead the tp_descr_get returns a plain function, - * when called on a class, or a PyMethod, when called on an instance. Override that behaviour here - * to do a special case bypass for PyInstanceMethod_Types. - */ -extern "C" inline PyObject *pybind11_meta_getattro(PyObject *obj, PyObject *name) { - PyObject *descr = _PyType_Lookup((PyTypeObject *) obj, name); - if (descr && PyInstanceMethod_Check(descr)) { - Py_INCREF(descr); - return descr; - } - else { - return PyType_Type.tp_getattro(obj, name); - } -} -#endif - -/// metaclass `__call__` function that is used to create all pybind11 objects. 
-extern "C" inline PyObject *pybind11_meta_call(PyObject *type, PyObject *args, PyObject *kwargs) { - - // use the default metaclass call to create/initialize the object - PyObject *self = PyType_Type.tp_call(type, args, kwargs); - if (self == nullptr) { - return nullptr; - } - - // This must be a pybind11 instance - auto instance = reinterpret_cast(self); - - // Ensure that the base __init__ function(s) were called - for (const auto &vh : values_and_holders(instance)) { - if (!vh.holder_constructed()) { - PyErr_Format(PyExc_TypeError, "%.200s.__init__() must be called when overriding __init__", - vh.type->type->tp_name); - Py_DECREF(self); - return nullptr; - } - } - - return self; -} - -/** This metaclass is assigned by default to all pybind11 types and is required in order - for static properties to function correctly. Users may override this using `py::metaclass`. - Return value: New reference. */ -inline PyTypeObject* make_default_metaclass() { - constexpr auto *name = "pybind11_type"; - auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto heap_type = (PyHeapTypeObject *) PyType_Type.tp_alloc(&PyType_Type, 0); - if (!heap_type) - pybind11_fail("make_default_metaclass(): error allocating metaclass!"); - - heap_type->ht_name = name_obj.inc_ref().ptr(); -#ifdef PYBIND11_BUILTIN_QUALNAME - heap_type->ht_qualname = name_obj.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = name; - type->tp_base = type_incref(&PyType_Type); - type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - - type->tp_call = pybind11_meta_call; - - type->tp_setattro = pybind11_meta_setattro; -#if PY_MAJOR_VERSION >= 3 - type->tp_getattro = pybind11_meta_getattro; -#endif - - if (PyType_Ready(type) < 0) - pybind11_fail("make_default_metaclass(): failure in PyType_Ready()!"); - - setattr((PyObject *) type, "__module__", str("pybind11_builtins")); - PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); - - return type; -} - -/// For multiple inheritance types we need to recursively register/deregister base pointers for any -/// base classes with pointers that are difference from the instance value pointer so that we can -/// correctly recognize an offset base class pointer. This calls a function with any offset base ptrs. 
-inline void traverse_offset_bases(void *valueptr, const detail::type_info *tinfo, instance *self, - bool (*f)(void * /*parentptr*/, instance * /*self*/)) { - for (handle h : reinterpret_borrow(tinfo->type->tp_bases)) { - if (auto parent_tinfo = get_type_info((PyTypeObject *) h.ptr())) { - for (auto &c : parent_tinfo->implicit_casts) { - if (c.first == tinfo->cpptype) { - auto *parentptr = c.second(valueptr); - if (parentptr != valueptr) - f(parentptr, self); - traverse_offset_bases(parentptr, parent_tinfo, self, f); - break; - } - } - } - } -} - -inline bool register_instance_impl(void *ptr, instance *self) { - get_internals().registered_instances.emplace(ptr, self); - return true; // unused, but gives the same signature as the deregister func -} -inline bool deregister_instance_impl(void *ptr, instance *self) { - auto ®istered_instances = get_internals().registered_instances; - auto range = registered_instances.equal_range(ptr); - for (auto it = range.first; it != range.second; ++it) { - if (Py_TYPE(self) == Py_TYPE(it->second)) { - registered_instances.erase(it); - return true; - } - } - return false; -} - -inline void register_instance(instance *self, void *valptr, const type_info *tinfo) { - register_instance_impl(valptr, self); - if (!tinfo->simple_ancestors) - traverse_offset_bases(valptr, tinfo, self, register_instance_impl); -} - -inline bool deregister_instance(instance *self, void *valptr, const type_info *tinfo) { - bool ret = deregister_instance_impl(valptr, self); - if (!tinfo->simple_ancestors) - traverse_offset_bases(valptr, tinfo, self, deregister_instance_impl); - return ret; -} - -/// Instance creation function for all pybind11 types. It allocates the internal instance layout for -/// holding C++ objects and holders. Allocation is done lazily (the first time the instance is cast -/// to a reference or pointer), and initialization is done by an `__init__` function. -inline PyObject *make_new_instance(PyTypeObject *type) { -#if defined(PYPY_VERSION) - // PyPy gets tp_basicsize wrong (issue 2482) under multiple inheritance when the first inherited - // object is a a plain Python type (i.e. not derived from an extension type). Fix it. - ssize_t instance_size = static_cast(sizeof(instance)); - if (type->tp_basicsize < instance_size) { - type->tp_basicsize = instance_size; - } -#endif - PyObject *self = type->tp_alloc(type, 0); - auto inst = reinterpret_cast(self); - // Allocate the value/holder internals: - inst->allocate_layout(); - - inst->owned = true; - - return self; -} - -/// Instance creation function for all pybind11 types. It only allocates space for the -/// C++ object, but doesn't call the constructor -- an `__init__` function must do that. -extern "C" inline PyObject *pybind11_object_new(PyTypeObject *type, PyObject *, PyObject *) { - return make_new_instance(type); -} - -/// An `__init__` function constructs the C++ object. Users should provide at least one -/// of these using `py::init` or directly with `.def(__init__, ...)`. Otherwise, the -/// following default function will be used which simply throws an exception. 
-extern "C" inline int pybind11_object_init(PyObject *self, PyObject *, PyObject *) { - PyTypeObject *type = Py_TYPE(self); - std::string msg; -#if defined(PYPY_VERSION) - msg += handle((PyObject *) type).attr("__module__").cast() + "."; -#endif - msg += type->tp_name; - msg += ": No constructor defined!"; - PyErr_SetString(PyExc_TypeError, msg.c_str()); - return -1; -} - -inline void add_patient(PyObject *nurse, PyObject *patient) { - auto &internals = get_internals(); - auto instance = reinterpret_cast(nurse); - instance->has_patients = true; - Py_INCREF(patient); - internals.patients[nurse].push_back(patient); -} - -inline void clear_patients(PyObject *self) { - auto instance = reinterpret_cast(self); - auto &internals = get_internals(); - auto pos = internals.patients.find(self); - assert(pos != internals.patients.end()); - // Clearing the patients can cause more Python code to run, which - // can invalidate the iterator. Extract the vector of patients - // from the unordered_map first. - auto patients = std::move(pos->second); - internals.patients.erase(pos); - instance->has_patients = false; - for (PyObject *&patient : patients) - Py_CLEAR(patient); -} - -/// Clears all internal data from the instance and removes it from registered instances in -/// preparation for deallocation. -inline void clear_instance(PyObject *self) { - auto instance = reinterpret_cast(self); - - // Deallocate any values/holders, if present: - for (auto &v_h : values_and_holders(instance)) { - if (v_h) { - - // We have to deregister before we call dealloc because, for virtual MI types, we still - // need to be able to get the parent pointers. - if (v_h.instance_registered() && !deregister_instance(instance, v_h.value_ptr(), v_h.type)) - pybind11_fail("pybind11_object_dealloc(): Tried to deallocate unregistered instance!"); - - if (instance->owned || v_h.holder_constructed()) - v_h.type->dealloc(v_h); - } - } - // Deallocate the value/holder layout internals: - instance->deallocate_layout(); - - if (instance->weakrefs) - PyObject_ClearWeakRefs(self); - - PyObject **dict_ptr = _PyObject_GetDictPtr(self); - if (dict_ptr) - Py_CLEAR(*dict_ptr); - - if (instance->has_patients) - clear_patients(self); -} - -/// Instance destructor function for all pybind11 types. It calls `type_info.dealloc` -/// to destroy the C++ object itself, while the rest is Python bookkeeping. -extern "C" inline void pybind11_object_dealloc(PyObject *self) { - clear_instance(self); - - auto type = Py_TYPE(self); - type->tp_free(self); - -#if PY_VERSION_HEX < 0x03080000 - // `type->tp_dealloc != pybind11_object_dealloc` means that we're being called - // as part of a derived type's dealloc, in which case we're not allowed to decref - // the type here. For cross-module compatibility, we shouldn't compare directly - // with `pybind11_object_dealloc`, but with the common one stashed in internals. - auto pybind11_object_type = (PyTypeObject *) get_internals().instance_base; - if (type->tp_dealloc == pybind11_object_type->tp_dealloc) - Py_DECREF(type); -#else - // This was not needed before Python 3.8 (Python issue 35810) - // https://github.com/pybind/pybind11/issues/1946 - Py_DECREF(type); -#endif -} - -/** Create the type which can be used as a common base for all classes. This is - needed in order to satisfy Python's requirements for multiple inheritance. - Return value: New reference. 
*/ -inline PyObject *make_object_base_type(PyTypeObject *metaclass) { - constexpr auto *name = "pybind11_object"; - auto name_obj = reinterpret_steal(PYBIND11_FROM_STRING(name)); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); - if (!heap_type) - pybind11_fail("make_object_base_type(): error allocating type!"); - - heap_type->ht_name = name_obj.inc_ref().ptr(); -#ifdef PYBIND11_BUILTIN_QUALNAME - heap_type->ht_qualname = name_obj.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = name; - type->tp_base = type_incref(&PyBaseObject_Type); - type->tp_basicsize = static_cast(sizeof(instance)); - type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; - - type->tp_new = pybind11_object_new; - type->tp_init = pybind11_object_init; - type->tp_dealloc = pybind11_object_dealloc; - - /* Support weak references (needed for the keep_alive feature) */ - type->tp_weaklistoffset = offsetof(instance, weakrefs); - - if (PyType_Ready(type) < 0) - pybind11_fail("PyType_Ready failed in make_object_base_type():" + error_string()); - - setattr((PyObject *) type, "__module__", str("pybind11_builtins")); - PYBIND11_SET_OLDPY_QUALNAME(type, name_obj); - - assert(!PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); - return (PyObject *) heap_type; -} - -/// dynamic_attr: Support for `d = instance.__dict__`. -extern "C" inline PyObject *pybind11_get_dict(PyObject *self, void *) { - PyObject *&dict = *_PyObject_GetDictPtr(self); - if (!dict) - dict = PyDict_New(); - Py_XINCREF(dict); - return dict; -} - -/// dynamic_attr: Support for `instance.__dict__ = dict()`. -extern "C" inline int pybind11_set_dict(PyObject *self, PyObject *new_dict, void *) { - if (!PyDict_Check(new_dict)) { - PyErr_Format(PyExc_TypeError, "__dict__ must be set to a dictionary, not a '%.200s'", - Py_TYPE(new_dict)->tp_name); - return -1; - } - PyObject *&dict = *_PyObject_GetDictPtr(self); - Py_INCREF(new_dict); - Py_CLEAR(dict); - dict = new_dict; - return 0; -} - -/// dynamic_attr: Allow the garbage collector to traverse the internal instance `__dict__`. -extern "C" inline int pybind11_traverse(PyObject *self, visitproc visit, void *arg) { - PyObject *&dict = *_PyObject_GetDictPtr(self); - Py_VISIT(dict); - return 0; -} - -/// dynamic_attr: Allow the GC to clear the dictionary. -extern "C" inline int pybind11_clear(PyObject *self) { - PyObject *&dict = *_PyObject_GetDictPtr(self); - Py_CLEAR(dict); - return 0; -} - -/// Give instances of this type a `__dict__` and opt into garbage collection. 
-inline void enable_dynamic_attributes(PyHeapTypeObject *heap_type) { - auto type = &heap_type->ht_type; -#if defined(PYPY_VERSION) && (PYPY_VERSION_NUM < 0x06000000) - pybind11_fail(std::string(type->tp_name) + ": dynamic attributes are " - "currently not supported in " - "conjunction with PyPy!"); -#endif - type->tp_flags |= Py_TPFLAGS_HAVE_GC; - type->tp_dictoffset = type->tp_basicsize; // place dict at the end - type->tp_basicsize += (ssize_t)sizeof(PyObject *); // and allocate enough space for it - type->tp_traverse = pybind11_traverse; - type->tp_clear = pybind11_clear; - - static PyGetSetDef getset[] = { - {const_cast("__dict__"), pybind11_get_dict, pybind11_set_dict, nullptr, nullptr}, - {nullptr, nullptr, nullptr, nullptr, nullptr} - }; - type->tp_getset = getset; -} - -/// buffer_protocol: Fill in the view as specified by flags. -extern "C" inline int pybind11_getbuffer(PyObject *obj, Py_buffer *view, int flags) { - // Look for a `get_buffer` implementation in this type's info or any bases (following MRO). - type_info *tinfo = nullptr; - for (auto type : reinterpret_borrow(Py_TYPE(obj)->tp_mro)) { - tinfo = get_type_info((PyTypeObject *) type.ptr()); - if (tinfo && tinfo->get_buffer) - break; - } - if (view == nullptr || !tinfo || !tinfo->get_buffer) { - if (view) - view->obj = nullptr; - PyErr_SetString(PyExc_BufferError, "pybind11_getbuffer(): Internal error"); - return -1; - } - std::memset(view, 0, sizeof(Py_buffer)); - buffer_info *info = tinfo->get_buffer(obj, tinfo->get_buffer_data); - view->obj = obj; - view->ndim = 1; - view->internal = info; - view->buf = info->ptr; - view->itemsize = info->itemsize; - view->len = view->itemsize; - for (auto s : info->shape) - view->len *= s; - view->readonly = info->readonly; - if ((flags & PyBUF_WRITABLE) == PyBUF_WRITABLE && info->readonly) { - if (view) - view->obj = nullptr; - PyErr_SetString(PyExc_BufferError, "Writable buffer requested for readonly storage"); - return -1; - } - if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) - view->format = const_cast(info->format.c_str()); - if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) { - view->ndim = (int) info->ndim; - view->strides = &info->strides[0]; - view->shape = &info->shape[0]; - } - Py_INCREF(view->obj); - return 0; -} - -/// buffer_protocol: Release the resources of the buffer. -extern "C" inline void pybind11_releasebuffer(PyObject *, Py_buffer *view) { - delete (buffer_info *) view->internal; -} - -/// Give this type a buffer interface. -inline void enable_buffer_protocol(PyHeapTypeObject *heap_type) { - heap_type->ht_type.tp_as_buffer = &heap_type->as_buffer; -#if PY_MAJOR_VERSION < 3 - heap_type->ht_type.tp_flags |= Py_TPFLAGS_HAVE_NEWBUFFER; -#endif - - heap_type->as_buffer.bf_getbuffer = pybind11_getbuffer; - heap_type->as_buffer.bf_releasebuffer = pybind11_releasebuffer; -} - -/** Create a brand new Python type according to the `type_record` specification. - Return value: New reference. */ -inline PyObject* make_new_python_type(const type_record &rec) { - auto name = reinterpret_steal(PYBIND11_FROM_STRING(rec.name)); - - auto qualname = name; - if (rec.scope && !PyModule_Check(rec.scope.ptr()) && hasattr(rec.scope, "__qualname__")) { -#if PY_MAJOR_VERSION >= 3 - qualname = reinterpret_steal( - PyUnicode_FromFormat("%U.%U", rec.scope.attr("__qualname__").ptr(), name.ptr())); -#else - qualname = str(rec.scope.attr("__qualname__").cast() + "." 
+ rec.name); -#endif - } - - object module; - if (rec.scope) { - if (hasattr(rec.scope, "__module__")) - module = rec.scope.attr("__module__"); - else if (hasattr(rec.scope, "__name__")) - module = rec.scope.attr("__name__"); - } - - auto full_name = c_str( -#if !defined(PYPY_VERSION) - module ? str(module).cast() + "." + rec.name : -#endif - rec.name); - - char *tp_doc = nullptr; - if (rec.doc && options::show_user_defined_docstrings()) { - /* Allocate memory for docstring (using PyObject_MALLOC, since - Python will free this later on) */ - size_t size = strlen(rec.doc) + 1; - tp_doc = (char *) PyObject_MALLOC(size); - memcpy((void *) tp_doc, rec.doc, size); - } - - auto &internals = get_internals(); - auto bases = tuple(rec.bases); - auto base = (bases.size() == 0) ? internals.instance_base - : bases[0].ptr(); - - /* Danger zone: from now (and until PyType_Ready), make sure to - issue no Python C API calls which could potentially invoke the - garbage collector (the GC will call type_traverse(), which will in - turn find the newly constructed type in an invalid state) */ - auto metaclass = rec.metaclass.ptr() ? (PyTypeObject *) rec.metaclass.ptr() - : internals.default_metaclass; - - auto heap_type = (PyHeapTypeObject *) metaclass->tp_alloc(metaclass, 0); - if (!heap_type) - pybind11_fail(std::string(rec.name) + ": Unable to create type object!"); - - heap_type->ht_name = name.release().ptr(); -#ifdef PYBIND11_BUILTIN_QUALNAME - heap_type->ht_qualname = qualname.inc_ref().ptr(); -#endif - - auto type = &heap_type->ht_type; - type->tp_name = full_name; - type->tp_doc = tp_doc; - type->tp_base = type_incref((PyTypeObject *)base); - type->tp_basicsize = static_cast(sizeof(instance)); - if (bases.size() > 0) - type->tp_bases = bases.release().ptr(); - - /* Don't inherit base __init__ */ - type->tp_init = pybind11_object_init; - - /* Supported protocols */ - type->tp_as_number = &heap_type->as_number; - type->tp_as_sequence = &heap_type->as_sequence; - type->tp_as_mapping = &heap_type->as_mapping; -#if PY_VERSION_HEX >= 0x03050000 - type->tp_as_async = &heap_type->as_async; -#endif - - /* Flags */ - type->tp_flags |= Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HEAPTYPE; -#if PY_MAJOR_VERSION < 3 - type->tp_flags |= Py_TPFLAGS_CHECKTYPES; -#endif - if (!rec.is_final) - type->tp_flags |= Py_TPFLAGS_BASETYPE; - - if (rec.dynamic_attr) - enable_dynamic_attributes(heap_type); - - if (rec.buffer_protocol) - enable_buffer_protocol(heap_type); - - if (PyType_Ready(type) < 0) - pybind11_fail(std::string(rec.name) + ": PyType_Ready failed (" + error_string() + ")!"); - - assert(rec.dynamic_attr ? 
PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC) - : !PyType_HasFeature(type, Py_TPFLAGS_HAVE_GC)); - - /* Register type with the parent scope */ - if (rec.scope) - setattr(rec.scope, rec.name, (PyObject *) type); - else - Py_INCREF(type); // Keep it alive forever (reference leak) - - if (module) // Needed by pydoc - setattr((PyObject *) type, "__module__", module); - - PYBIND11_SET_OLDPY_QUALNAME(type, qualname); - - return (PyObject *) type; -} - -PYBIND11_NAMESPACE_END(detail) -PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/spaces/CVPR/LIVE/thrust/dependencies/cub/test/link_main.cpp b/spaces/CVPR/LIVE/thrust/dependencies/cub/test/link_main.cpp deleted file mode 100644 index ef677ee03b4febf543deed0867dd46e73b42e37d..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/dependencies/cub/test/link_main.cpp +++ /dev/null @@ -1,10 +0,0 @@ -#include - -extern void a(); -extern void b(); - -int main() -{ - printf("hello world\n"); - return 0; -} diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/assigners/region_assigner.py b/spaces/CVPR/WALT/mmdet/core/bbox/assigners/region_assigner.py deleted file mode 100644 index 2e8464b97c8d8f44488d7bb781ca2e733a258e55..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/core/bbox/assigners/region_assigner.py +++ /dev/null @@ -1,221 +0,0 @@ -import torch - -from mmdet.core import anchor_inside_flags -from ..builder import BBOX_ASSIGNERS -from .assign_result import AssignResult -from .base_assigner import BaseAssigner - - -def calc_region(bbox, ratio, stride, featmap_size=None): - """Calculate region of the box defined by the ratio, the ratio is from the - center of the box to every edge.""" - # project bbox on the feature - f_bbox = bbox / stride - x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) - y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) - x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) - y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) - if featmap_size is not None: - x1 = x1.clamp(min=0, max=featmap_size[1]) - y1 = y1.clamp(min=0, max=featmap_size[0]) - x2 = x2.clamp(min=0, max=featmap_size[1]) - y2 = y2.clamp(min=0, max=featmap_size[0]) - return (x1, y1, x2, y2) - - -def anchor_ctr_inside_region_flags(anchors, stride, region): - """Get the flag indicate whether anchor centers are inside regions.""" - x1, y1, x2, y2 = region - f_anchors = anchors / stride - x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 - y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 - flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) - return flags - - -@BBOX_ASSIGNERS.register_module() -class RegionAssigner(BaseAssigner): - """Assign a corresponding gt bbox or background to each bbox. - - Each proposals will be assigned with `-1`, `0`, or a positive integer - indicating the ground truth index. - - - -1: don't care - - 0: negative sample, no assigned gt - - positive integer: positive sample, index (1-based) of assigned gt - - Args: - center_ratio: ratio of the region in the center of the bbox to - define positive sample. - ignore_ratio: ratio of the region to define ignore samples. - """ - - def __init__(self, center_ratio=0.2, ignore_ratio=0.5): - self.center_ratio = center_ratio - self.ignore_ratio = ignore_ratio - - def assign(self, - mlvl_anchors, - mlvl_valid_flags, - gt_bboxes, - img_meta, - featmap_sizes, - anchor_scale, - anchor_strides, - gt_bboxes_ignore=None, - gt_labels=None, - allowed_border=0): - """Assign gt to anchors. 
- - This method assign a gt bbox to every bbox (proposal/anchor), each bbox - will be assigned with -1, 0, or a positive number. -1 means don't care, - 0 means negative sample, positive number is the index (1-based) of - assigned gt. - The assignment is done in following steps, the order matters. - - 1. Assign every anchor to 0 (negative) - For each gt_bboxes: - 2. Compute ignore flags based on ignore_region then - assign -1 to anchors w.r.t. ignore flags - 3. Compute pos flags based on center_region then - assign gt_bboxes to anchors w.r.t. pos flags - 4. Compute ignore flags based on adjacent anchor lvl then - assign -1 to anchors w.r.t. ignore flags - 5. Assign anchor outside of image to -1 - - Args: - mlvl_anchors (list[Tensor]): Multi level anchors. - mlvl_valid_flags (list[Tensor]): Multi level valid flags. - gt_bboxes (Tensor): Ground truth bboxes of image - img_meta (dict): Meta info of image. - featmap_sizes (list[Tensor]): Feature mapsize each level - anchor_scale (int): Scale of the anchor. - anchor_strides (list[int]): Stride of the anchor. - gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). - gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are - labelled as `ignored`, e.g., crowd boxes in COCO. - gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). - allowed_border (int, optional): The border to allow the valid - anchor. Defaults to 0. - - Returns: - :obj:`AssignResult`: The assign result. - """ - if gt_bboxes_ignore is not None: - raise NotImplementedError - - num_gts = gt_bboxes.shape[0] - num_bboxes = sum(x.shape[0] for x in mlvl_anchors) - - if num_gts == 0 or num_bboxes == 0: - # No ground truth or boxes, return empty assignment - max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) - assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), - dtype=torch.long) - if gt_labels is None: - assigned_labels = None - else: - assigned_labels = gt_bboxes.new_full((num_bboxes, ), - -1, - dtype=torch.long) - return AssignResult( - num_gts, - assigned_gt_inds, - max_overlaps, - labels=assigned_labels) - - num_lvls = len(mlvl_anchors) - r1 = (1 - self.center_ratio) / 2 - r2 = (1 - self.ignore_ratio) / 2 - - scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * - (gt_bboxes[:, 3] - gt_bboxes[:, 1])) - min_anchor_size = scale.new_full( - (1, ), float(anchor_scale * anchor_strides[0])) - target_lvls = torch.floor( - torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) - target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() - - # 1. assign 0 (negative) by default - mlvl_assigned_gt_inds = [] - mlvl_ignore_flags = [] - for lvl in range(num_lvls): - h, w = featmap_sizes[lvl] - assert h * w == mlvl_anchors[lvl].shape[0] - assigned_gt_inds = gt_bboxes.new_full((h * w, ), - 0, - dtype=torch.long) - ignore_flags = torch.zeros_like(assigned_gt_inds) - mlvl_assigned_gt_inds.append(assigned_gt_inds) - mlvl_ignore_flags.append(ignore_flags) - - for gt_id in range(num_gts): - lvl = target_lvls[gt_id].item() - featmap_size = featmap_sizes[lvl] - stride = anchor_strides[lvl] - anchors = mlvl_anchors[lvl] - gt_bbox = gt_bboxes[gt_id, :4] - - # Compute regions - ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) - ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) - - # 2. Assign -1 to ignore flags - ignore_flags = anchor_ctr_inside_region_flags( - anchors, stride, ignore_region) - mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 - - # 3. 
Assign gt_bboxes to pos flags - pos_flags = anchor_ctr_inside_region_flags(anchors, stride, - ctr_region) - mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 - - # 4. Assign -1 to ignore adjacent lvl - if lvl > 0: - d_lvl = lvl - 1 - d_anchors = mlvl_anchors[d_lvl] - d_featmap_size = featmap_sizes[d_lvl] - d_stride = anchor_strides[d_lvl] - d_ignore_region = calc_region(gt_bbox, r2, d_stride, - d_featmap_size) - ignore_flags = anchor_ctr_inside_region_flags( - d_anchors, d_stride, d_ignore_region) - mlvl_ignore_flags[d_lvl][ignore_flags] = 1 - if lvl < num_lvls - 1: - u_lvl = lvl + 1 - u_anchors = mlvl_anchors[u_lvl] - u_featmap_size = featmap_sizes[u_lvl] - u_stride = anchor_strides[u_lvl] - u_ignore_region = calc_region(gt_bbox, r2, u_stride, - u_featmap_size) - ignore_flags = anchor_ctr_inside_region_flags( - u_anchors, u_stride, u_ignore_region) - mlvl_ignore_flags[u_lvl][ignore_flags] = 1 - - # 4. (cont.) Assign -1 to ignore adjacent lvl - for lvl in range(num_lvls): - ignore_flags = mlvl_ignore_flags[lvl] - mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 - - # 5. Assign -1 to anchor outside of image - flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds) - flat_anchors = torch.cat(mlvl_anchors) - flat_valid_flags = torch.cat(mlvl_valid_flags) - assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] == - flat_valid_flags.shape[0]) - inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags, - img_meta['img_shape'], - allowed_border) - outside_flags = ~inside_flags - flat_assigned_gt_inds[outside_flags] = -1 - - if gt_labels is not None: - assigned_labels = torch.zeros_like(flat_assigned_gt_inds) - pos_flags = assigned_gt_inds > 0 - assigned_labels[pos_flags] = gt_labels[ - flat_assigned_gt_inds[pos_flags] - 1] - else: - assigned_labels = None - - return AssignResult( - num_gts, flat_assigned_gt_inds, None, labels=assigned_labels) diff --git a/spaces/CVPR/WALT/mmdet/datasets/deepfashion.py b/spaces/CVPR/WALT/mmdet/datasets/deepfashion.py deleted file mode 100644 index 1125376091f2d4ee6843ae4f2156b3b0453be369..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/datasets/deepfashion.py +++ /dev/null @@ -1,10 +0,0 @@ -from .builder import DATASETS -from .coco import CocoDataset - - -@DATASETS.register_module() -class DeepFashionDataset(CocoDataset): - - CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', - 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', - 'skin', 'face') diff --git a/spaces/CVPR/lama-example/bin/debug/analyze_overlapping_masks.sh b/spaces/CVPR/lama-example/bin/debug/analyze_overlapping_masks.sh deleted file mode 100644 index 4a4727b0129007d9b0eed3fc25780adb565965a2..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/bin/debug/analyze_overlapping_masks.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -BASEDIR="$(dirname $0)" - -# paths are valid for mml7 - -# select images -#ls /data/inpainting/work/data/train | shuf | head -2000 | xargs -n1 -I{} cp {} /data/inpainting/mask_analysis/src - -# generate masks -#"$BASEDIR/../gen_debug_mask_dataset.py" \ -# "$BASEDIR/../../configs/debug_mask_gen.yaml" \ -# "/data/inpainting/mask_analysis/src" \ -# "/data/inpainting/mask_analysis/generated" - -# predict -#"$BASEDIR/../predict.py" \ -# model.path="simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15/saved_checkpoint/r.suvorov_2021-04-30_14-41-12_train_simple_pix2pix2_gap_sdpl_novgg_large_b18_ffc075_batch8x15_epoch22-step-574999" \ -# indir="/data/inpainting/mask_analysis/generated" 
\ -# outdir="/data/inpainting/mask_analysis/predicted" \ -# dataset.img_suffix=.jpg \ -# +out_ext=.jpg - -# analyze good and bad samples -"$BASEDIR/../analyze_errors.py" \ - --only-report \ - --n-jobs 8 \ - "$BASEDIR/../../configs/analyze_mask_errors.yaml" \ - "/data/inpainting/mask_analysis/small/generated" \ - "/data/inpainting/mask_analysis/small/predicted" \ - "/data/inpainting/mask_analysis/small/report" diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/operator_generator/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/operator_generator/__init__.py deleted file mode 100644 index 462fd47df3500e282ef5948dbe8864c42ba5c61e..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/operator_generator/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -import random -from pathlib import Path -from typing import List - -from meme_generator import add_meme -from pil_utils import BuildImage - -img_dir = Path(__file__).parent / "images" - - -def operator_generator(images: List[BuildImage], texts: List[str], args): - img = images[0].convert("RGBA").circle().resize((80, 80)) - name = texts[0] if texts else "你好" - - frame = BuildImage.new("RGBA", (640, 640), (160, 160, 160)) - frame.paste(img, (20, 10), alpha=True) - frame.draw_text( - (120, 0, 620, 100), - f"{name},你的干员信息如下:", - fontsize=80, - fill="white", - stroke_fill="black", - stroke_ratio=0.1, - weight="bold", - allow_wrap=True, - lines_align="center", - ) - - rrange = BuildImage.open( - img_dir / f"1范围/范围101-25-{random.randint(0, 24):04d}.jpg" - ).resize_width(320) - frame.paste(rrange, (0, 100)) - rcharacteristic = BuildImage.open( - img_dir / f"2特性/特性202-25-{random.randint(0, 24):04d}.jpg" - ).resize_width(320) - frame.paste(rcharacteristic, (320, 100)) - rvalue = BuildImage.open( - img_dir / f"3基础数值/基础数值3031-{random.randint(0, 24):04d}.jpg" - ).resize_width(320) - frame.paste(rvalue, (0, 280)) - rtalent = BuildImage.open( - img_dir / f"4天赋/天赋404-25-{random.randint(0, 24):04d}.jpg" - ).resize_width(320) - frame.paste(rtalent, (320, 280)) - rskill = BuildImage.open( - img_dir / f"5技能/技能505-25-{random.randint(0, 24):04d}.jpg" - ).resize_width(320) - frame.paste(rskill, (0, 460)) - rspecail = BuildImage.open( - img_dir / f"6亮点毒点/亮点毒点606-{random.randint(0, 24):04d}.jpg" - ).resize_width(320) - frame.paste(rspecail, (320, 460)) - - return frame.save_jpg() - - -add_meme( - "operator_generator", - operator_generator, - min_images=1, - max_images=1, - max_texts=1, - keywords=["合成大干员"], -) diff --git a/spaces/CognitiveLabs/Research-Assistant/config/__init__.py b/spaces/CognitiveLabs/Research-Assistant/config/__init__.py deleted file mode 100644 index 704ba0f8fd18eac290da02e5b1d22d4be54b235f..0000000000000000000000000000000000000000 --- a/spaces/CognitiveLabs/Research-Assistant/config/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from config.config import Config, check_openai_api_key -from config.singleton import AbstractSingleton, Singleton - -__all__ = [ - "check_openai_api_key", - "AbstractSingleton", - "Config", - "Singleton", -] diff --git a/spaces/Cvandi/remake/inference_realesrgan.py b/spaces/Cvandi/remake/inference_realesrgan.py deleted file mode 100644 index 6d5ff4d188faaa16c0131be69a08fd22fb608f80..0000000000000000000000000000000000000000 --- a/spaces/Cvandi/remake/inference_realesrgan.py +++ /dev/null @@ -1,128 +0,0 @@ -import argparse -import cv2 -import glob -import os -from basicsr.archs.rrdbnet_arch import RRDBNet - -from realesrgan import RealESRGANer -from realesrgan.archs.srvgg_arch 
import SRVGGNetCompact - - -def main(): - """Inference demo for Real-ESRGAN. - """ - parser = argparse.ArgumentParser() - parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder') - parser.add_argument( - '-n', - '--model_name', - type=str, - default='RealESRGAN_x4plus', - help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus' - 'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2' - 'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4')) - parser.add_argument('-o', '--output', type=str, default='results', help='Output folder') - parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image') - parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored image') - parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing') - parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding') - parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border') - parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance face') - parser.add_argument('--half', action='store_true', help='Use half precision during inference') - parser.add_argument( - '--alpha_upsampler', - type=str, - default='realesrgan', - help='The upsampler for the alpha channels. Options: realesrgan | bicubic') - parser.add_argument( - '--ext', - type=str, - default='auto', - help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs') - args = parser.parse_args() - - # determine models according to model names - args.model_name = args.model_name.split('.')[0] - if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']: # x4 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4) - netscale = 4 - elif args.model_name in ['RealESRGAN_x4plus_anime_6B']: # x4 RRDBNet model with 6 blocks - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4) - netscale = 4 - elif args.model_name in ['RealESRGAN_x2plus']: # x2 RRDBNet model - model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2) - netscale = 2 - elif args.model_name in [ - 'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2' - ]: # x2 VGG-style model (XS size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu') - netscale = 2 - elif args.model_name in [ - 'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4' - ]: # x4 VGG-style model (XS size) - model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu') - netscale = 4 - - # determine model paths - model_path = os.path.join('.', args.model_name + '.pth') - if not os.path.isfile(model_path): - model_path = os.path.join('.', args.model_name + '.pth') - if not os.path.isfile(model_path): - raise ValueError(f'Model {args.model_name} does not exist.') - - # restorer - upsampler = RealESRGANer( - scale=netscale, - model_path=model_path, - model=model, - tile=args.tile, - tile_pad=args.tile_pad, - pre_pad=args.pre_pad, - half=args.half) - - if args.face_enhance: # Use GFPGAN for face enhancement - from gfpgan import GFPGANer - 
face_enhancer = GFPGANer( - model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth', - upscale=args.outscale, - arch='clean', - channel_multiplier=2, - bg_upsampler=upsampler) - os.makedirs(args.output, exist_ok=True) - - if os.path.isfile(args.input): - paths = [args.input] - else: - paths = sorted(glob.glob(os.path.join(args.input, '*'))) - - for idx, path in enumerate(paths): - imgname, extension = os.path.splitext(os.path.basename(path)) - print('Testing', idx, imgname) - - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if len(img.shape) == 3 and img.shape[2] == 4: - img_mode = 'RGBA' - else: - img_mode = None - - try: - if args.face_enhance: - _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True) - else: - output, _ = upsampler.enhance(img, outscale=args.outscale) - except RuntimeError as error: - print('Error', error) - print('If you encounter CUDA out of memory, try to set --tile with a smaller number.') - else: - if args.ext == 'auto': - extension = extension[1:] - else: - extension = args.ext - if img_mode == 'RGBA': # RGBA images should be saved in png format - extension = 'png' - save_path = os.path.join(args.output, f'{imgname}_{args.suffix}.{extension}') - cv2.imwrite(save_path, output) - - -if __name__ == '__main__': - main() diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/deform_conv_v2.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/deform_conv_v2.py deleted file mode 100644 index ce309848025dad326f9c891f3e6865095a571624..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/deform_conv_v2.py +++ /dev/null @@ -1,308 +0,0 @@ -from __future__ import absolute_import -from __future__ import print_function -from __future__ import division - -import math -import torch -from torch import nn -from torch.autograd import Function -from torch.nn.modules.utils import _pair -from torch.autograd.function import once_differentiable - -import maskrcnn_benchmark._C as _backend - - -class _DCNv2(Function): - @staticmethod - def forward(ctx, input, offset, mask, weight, bias, - stride, padding, dilation, deformable_groups): - ctx.stride = _pair(stride) - ctx.padding = _pair(padding) - ctx.dilation = _pair(dilation) - ctx.kernel_size = _pair(weight.shape[2:4]) - ctx.deformable_groups = deformable_groups - output = _backend.dcn_v2_forward(input, weight, bias, - offset, mask, - ctx.kernel_size[0], ctx.kernel_size[1], - ctx.stride[0], ctx.stride[1], - ctx.padding[0], ctx.padding[1], - ctx.dilation[0], ctx.dilation[1], - ctx.deformable_groups) - ctx.save_for_backward(input, offset, mask, weight, bias) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, offset, mask, weight, bias = ctx.saved_tensors - grad_input, grad_offset, grad_mask, grad_weight, grad_bias = \ - _backend.dcn_v2_backward(input, weight, - bias, - offset, mask, - grad_output, - ctx.kernel_size[0], ctx.kernel_size[1], - ctx.stride[0], ctx.stride[1], - ctx.padding[0], ctx.padding[1], - ctx.dilation[0], ctx.dilation[1], - ctx.deformable_groups) - - return grad_input, grad_offset, grad_mask, grad_weight, grad_bias,\ - None, None, None, None, - - -dcn_v2_conv = _DCNv2.apply - - -class DCNv2(nn.Module): - - def __init__(self, in_channels, out_channels, - kernel_size, stride, padding, dilation=1, deformable_groups=1): - super(DCNv2, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels 
- self.kernel_size = _pair(kernel_size) - self.stride = _pair(stride) - self.padding = _pair(padding) - self.dilation = _pair(dilation) - self.deformable_groups = deformable_groups - - self.weight = nn.Parameter(torch.Tensor( - out_channels, in_channels, *self.kernel_size)) - self.bias = nn.Parameter(torch.Tensor(out_channels)) - self.reset_parameters() - - def reset_parameters(self): - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. / math.sqrt(n) - self.weight.data.uniform_(-stdv, stdv) - self.bias.data.zero_() - - def forward(self, input, offset, mask): - assert 2 * self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ - offset.shape[1] - assert self.deformable_groups * self.kernel_size[0] * self.kernel_size[1] == \ - mask.shape[1] - return dcn_v2_conv(input, offset, mask, - self.weight, - self.bias, - self.stride, - self.padding, - self.dilation, - self.deformable_groups) - - -class DCN(DCNv2): - - def __init__(self, in_channels, out_channels, - kernel_size, stride, padding=0, - dilation=1, deformable_groups=2, - groups=None, bias=True): - """ - groups and bias are two dummy args which have no effect - """ - super(DCN, self).__init__(in_channels, out_channels, - kernel_size, stride, padding, dilation, deformable_groups) - - channels_ = self.deformable_groups * 3 * self.kernel_size[0] * self.kernel_size[1] - self.conv_offset_mask = nn.Conv2d(self.in_channels, - channels_, - kernel_size=self.kernel_size, - stride=self.stride, - padding=self.padding, - bias=True) - self.init_offset() - - def init_offset(self): - self.conv_offset_mask.weight.data.zero_() - self.conv_offset_mask.bias.data.zero_() - - def forward(self, input): - out = self.conv_offset_mask(input) - o1, o2, mask = torch.chunk(out, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - return dcn_v2_conv(input, offset, mask, - self.weight, self.bias, - self.stride, - self.padding, - self.dilation, - self.deformable_groups) - - - -class _DCNv2Pooling(Function): - @staticmethod - def forward(ctx, input, rois, offset, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0): - ctx.spatial_scale = spatial_scale - ctx.no_trans = int(no_trans) - ctx.output_dim = output_dim - ctx.group_size = group_size - ctx.pooled_size = pooled_size - ctx.part_size = pooled_size if part_size is None else part_size - ctx.sample_per_part = sample_per_part - ctx.trans_std = trans_std - - output, output_count = \ - _backend.dcn_v2_psroi_pooling_forward(input, rois, offset, - ctx.no_trans, ctx.spatial_scale, - ctx.output_dim, ctx.group_size, - ctx.pooled_size, ctx.part_size, - ctx.sample_per_part, ctx.trans_std) - ctx.save_for_backward(input, rois, offset, output_count) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - input, rois, offset, output_count = ctx.saved_tensors - grad_input, grad_offset = \ - _backend.dcn_v2_psroi_pooling_backward(grad_output, - input, - rois, - offset, - output_count, - ctx.no_trans, - ctx.spatial_scale, - ctx.output_dim, - ctx.group_size, - ctx.pooled_size, - ctx.part_size, - ctx.sample_per_part, - ctx.trans_std) - - return grad_input, None, grad_offset, \ - None, None, None, None, None, None, None, None - - -dcn_v2_pooling = _DCNv2Pooling.apply - - -class DCNv2Pooling(nn.Module): - - def __init__(self, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0): - 
super(DCNv2Pooling, self).__init__() - self.spatial_scale = spatial_scale - self.pooled_size = pooled_size - self.output_dim = output_dim - self.no_trans = no_trans - self.group_size = group_size - self.part_size = pooled_size if part_size is None else part_size - self.sample_per_part = sample_per_part - self.trans_std = trans_std - - def forward(self, input, rois, offset): - assert input.shape[1] == self.output_dim - if self.no_trans: - offset = input.new() - return dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - self.no_trans, - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) - - -class DCNPooling(DCNv2Pooling): - - def __init__(self, - spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size=1, - part_size=None, - sample_per_part=4, - trans_std=.0, - deform_fc_dim=1024): - # don't support non square pooling - pooled_size = pooled_size[0] - super(DCNPooling, self).__init__(spatial_scale, - pooled_size, - output_dim, - no_trans, - group_size, - part_size, - sample_per_part, - trans_std) - - self.deform_fc_dim = deform_fc_dim - - if not no_trans: - self.offset_mask_fc = nn.Sequential( - nn.Linear(self.pooled_size * self.pooled_size * - self.output_dim, self.deform_fc_dim), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_dim, self.deform_fc_dim), - nn.ReLU(inplace=True), - nn.Linear(self.deform_fc_dim, self.pooled_size * - self.pooled_size * 3) - ) - self.offset_mask_fc[4].weight.data.zero_() - self.offset_mask_fc[4].bias.data.zero_() - - def forward(self, input, rois, debug=False): - offset = input.new() - - if not self.no_trans: - - # do roi_align first - n = rois.shape[0] - roi = dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - True, # no trans - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) - - # build mask and offset - offset_mask = self.offset_mask_fc(roi.view(n, -1)) - offset_mask = offset_mask.view( - n, 3, self.pooled_size, self.pooled_size) - o1, o2, mask = torch.chunk(offset_mask, 3, dim=1) - offset = torch.cat((o1, o2), dim=1) - mask = torch.sigmoid(mask) - - # do pooling with offset and mask - return dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - self.no_trans, - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) * mask - # only roi_align - return dcn_v2_pooling(input, rois, offset, - self.spatial_scale, - self.pooled_size, - self.output_dim, - self.no_trans, - self.group_size, - self.part_size, - self.sample_per_part, - self.trans_std) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/client_proto.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/client_proto.py deleted file mode 100644 index 3041157d61d78fe285fe2f688a4a8d5b75c5412d..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiohttp/client_proto.py +++ /dev/null @@ -1,251 +0,0 @@ -import asyncio -from contextlib import suppress -from typing import Any, Optional, Tuple - -from .base_protocol import BaseProtocol -from .client_exceptions import ( - ClientOSError, - ClientPayloadError, - ServerDisconnectedError, - ServerTimeoutError, -) -from .helpers import BaseTimerContext -from .http import HttpResponseParser, RawResponseMessage -from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader - - -class ResponseHandler(BaseProtocol, DataQueue[Tuple[RawResponseMessage, 
StreamReader]]): - """Helper class to adapt between Protocol and StreamReader.""" - - def __init__(self, loop: asyncio.AbstractEventLoop) -> None: - BaseProtocol.__init__(self, loop=loop) - DataQueue.__init__(self, loop) - - self._should_close = False - - self._payload: Optional[StreamReader] = None - self._skip_payload = False - self._payload_parser = None - - self._timer = None - - self._tail = b"" - self._upgraded = False - self._parser: Optional[HttpResponseParser] = None - - self._read_timeout: Optional[float] = None - self._read_timeout_handle: Optional[asyncio.TimerHandle] = None - - @property - def upgraded(self) -> bool: - return self._upgraded - - @property - def should_close(self) -> bool: - if self._payload is not None and not self._payload.is_eof() or self._upgraded: - return True - - return ( - self._should_close - or self._upgraded - or self.exception() is not None - or self._payload_parser is not None - or len(self) > 0 - or bool(self._tail) - ) - - def force_close(self) -> None: - self._should_close = True - - def close(self) -> None: - transport = self.transport - if transport is not None: - transport.close() - self.transport = None - self._payload = None - self._drop_timeout() - - def is_connected(self) -> bool: - return self.transport is not None and not self.transport.is_closing() - - def connection_lost(self, exc: Optional[BaseException]) -> None: - self._drop_timeout() - - if self._payload_parser is not None: - with suppress(Exception): - self._payload_parser.feed_eof() - - uncompleted = None - if self._parser is not None: - try: - uncompleted = self._parser.feed_eof() - except Exception: - if self._payload is not None: - self._payload.set_exception( - ClientPayloadError("Response payload is not completed") - ) - - if not self.is_eof(): - if isinstance(exc, OSError): - exc = ClientOSError(*exc.args) - if exc is None: - exc = ServerDisconnectedError(uncompleted) - # assigns self._should_close to True as side effect, - # we do it anyway below - self.set_exception(exc) - - self._should_close = True - self._parser = None - self._payload = None - self._payload_parser = None - self._reading_paused = False - - super().connection_lost(exc) - - def eof_received(self) -> None: - # should call parser.feed_eof() most likely - self._drop_timeout() - - def pause_reading(self) -> None: - super().pause_reading() - self._drop_timeout() - - def resume_reading(self) -> None: - super().resume_reading() - self._reschedule_timeout() - - def set_exception(self, exc: BaseException) -> None: - self._should_close = True - self._drop_timeout() - super().set_exception(exc) - - def set_parser(self, parser: Any, payload: Any) -> None: - # TODO: actual types are: - # parser: WebSocketReader - # payload: FlowControlDataQueue - # but they are not generi enough - # Need an ABC for both types - self._payload = payload - self._payload_parser = parser - - self._drop_timeout() - - if self._tail: - data, self._tail = self._tail, b"" - self.data_received(data) - - def set_response_params( - self, - *, - timer: Optional[BaseTimerContext] = None, - skip_payload: bool = False, - read_until_eof: bool = False, - auto_decompress: bool = True, - read_timeout: Optional[float] = None, - read_bufsize: int = 2**16, - ) -> None: - self._skip_payload = skip_payload - - self._read_timeout = read_timeout - self._reschedule_timeout() - - self._parser = HttpResponseParser( - self, - self._loop, - read_bufsize, - timer=timer, - payload_exception=ClientPayloadError, - response_with_body=not skip_payload, - 
read_until_eof=read_until_eof, - auto_decompress=auto_decompress, - ) - - if self._tail: - data, self._tail = self._tail, b"" - self.data_received(data) - - def _drop_timeout(self) -> None: - if self._read_timeout_handle is not None: - self._read_timeout_handle.cancel() - self._read_timeout_handle = None - - def _reschedule_timeout(self) -> None: - timeout = self._read_timeout - if self._read_timeout_handle is not None: - self._read_timeout_handle.cancel() - - if timeout: - self._read_timeout_handle = self._loop.call_later( - timeout, self._on_read_timeout - ) - else: - self._read_timeout_handle = None - - def _on_read_timeout(self) -> None: - exc = ServerTimeoutError("Timeout on reading data from socket") - self.set_exception(exc) - if self._payload is not None: - self._payload.set_exception(exc) - - def data_received(self, data: bytes) -> None: - self._reschedule_timeout() - - if not data: - return - - # custom payload parser - if self._payload_parser is not None: - eof, tail = self._payload_parser.feed_data(data) - if eof: - self._payload = None - self._payload_parser = None - - if tail: - self.data_received(tail) - return - else: - if self._upgraded or self._parser is None: - # i.e. websocket connection, websocket parser is not set yet - self._tail += data - else: - # parse http messages - try: - messages, upgraded, tail = self._parser.feed_data(data) - except BaseException as exc: - if self.transport is not None: - # connection.release() could be called BEFORE - # data_received(), the transport is already - # closed in this case - self.transport.close() - # should_close is True after the call - self.set_exception(exc) - return - - self._upgraded = upgraded - - payload: Optional[StreamReader] = None - for message, payload in messages: - if message.should_close: - self._should_close = True - - self._payload = payload - - if self._skip_payload or message.code in (204, 304): - self.feed_data((message, EMPTY_PAYLOAD), 0) - else: - self.feed_data((message, payload), 0) - if payload is not None: - # new message(s) was processed - # register timeout handler unsubscribing - # either on end-of-stream or immediately for - # EMPTY_PAYLOAD - if payload is not EMPTY_PAYLOAD: - payload.on_eof(self._drop_timeout) - else: - self._drop_timeout() - - if tail: - if upgraded: - self.data_received(tail) - else: - self._tail = tail diff --git a/spaces/DarwinAnim8or/convert-to-safet/app.py b/spaces/DarwinAnim8or/convert-to-safet/app.py deleted file mode 100644 index b402f855442846cf208e7ee14924d244487f8980..0000000000000000000000000000000000000000 --- a/spaces/DarwinAnim8or/convert-to-safet/app.py +++ /dev/null @@ -1,96 +0,0 @@ -import csv -from datetime import datetime -import os -from typing import Optional -import gradio as gr - -from convert import convert -from huggingface_hub import HfApi, Repository - - -DATASET_REPO_URL = "https://huggingface.co/datasets/safetensors/conversions" -DATA_FILENAME = "data.csv" -DATA_FILE = os.path.join("data", DATA_FILENAME) - -HF_TOKEN = os.environ.get("HF_TOKEN") - -repo: Optional[Repository] = None -# TODO -if False and HF_TOKEN: - repo = Repository(local_dir="data", clone_from=DATASET_REPO_URL, token=HF_TOKEN) - - -def run(token: str, model_id: str) -> str: - if token == "" or model_id == "": - return """ - ### Invalid input 🐞 - - Please fill a token and model_id. 
- """ - try: - api = HfApi(token=token) - is_private = api.model_info(repo_id=model_id).private - print("is_private", is_private) - - commit_info = convert(api=api, model_id=model_id) - print("[commit_info]", commit_info) - - # save in a (public) dataset: - # TODO False because of LFS bug. - if False and repo is not None and not is_private: - repo.git_pull(rebase=True) - print("pulled") - with open(DATA_FILE, "a") as csvfile: - writer = csv.DictWriter( - csvfile, fieldnames=["model_id", "pr_url", "time"] - ) - writer.writerow( - { - "model_id": model_id, - "pr_url": commit_info.pr_url, - "time": str(datetime.now()), - } - ) - commit_url = repo.push_to_hub() - print("[dataset]", commit_url) - - return f""" - ### Success 🔥 - - Yay! This model was successfully converted and a PR was open using your token, here: - - [{commit_info.pr_url}]({commit_info.pr_url}) - """ - except Exception as e: - return f""" - ### Error 😢😢😢 - - {e} - """ - - -DESCRIPTION = """ -The steps are the following: - -- Paste a read-access token from hf.co/settings/tokens. Read access is enough given that we will open a PR against the source repo. -- Input a model id from the Hub -- Click "Submit" -- That's it! You'll get feedback if it works or not, and if it worked, you'll get the URL of the opened PR 🔥 - -⚠️ For now only `pytorch_model.bin` files are supported but we'll extend in the future. -""" - -demo = gr.Interface( - title="Convert any model to Safetensors and open a PR", - description=DESCRIPTION, - allow_flagging="never", - article="Check out the [Safetensors repo on GitHub](https://github.com/huggingface/safetensors)", - inputs=[ - gr.Text(max_lines=1, label="your_hf_token"), - gr.Text(max_lines=1, label="model_id"), - ], - outputs=[gr.Markdown(label="output")], - fn=run, -).queue(max_size=10, concurrency_count=1) - -demo.launch(show_api=True) diff --git a/spaces/Detomo/ai-comic-generation/src/app/interface/panel/bubble.tsx b/spaces/Detomo/ai-comic-generation/src/app/interface/panel/bubble.tsx deleted file mode 100644 index dad1498e68f6ba79b2fec29fe528657b80b09098..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/app/interface/panel/bubble.tsx +++ /dev/null @@ -1,45 +0,0 @@ -import { ReactNode } from "react" - -import { cn } from "@/lib/utils" - -export function Bubble({ - children, - className -}: { - children?: ReactNode - className?: string -}) { - - if (!children) { - return null - } - - return ( -
-        [nested wrapper element markup lost in extraction]
-          {children}
-        [closing element markup lost in extraction]
        - ) -} \ No newline at end of file diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/Inference.py b/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/Inference.py deleted file mode 100644 index a292787c88a370b15b4f0d633ac27bb5bed2b510..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/Inference.py +++ /dev/null @@ -1,106 +0,0 @@ - - -from manipulate import Manipulator -import tensorflow as tf -import numpy as np -import torch -import clip -from MapTS import GetBoundary,GetDt - -class StyleCLIP(): - - def __init__(self,dataset_name='ffhq'): - print('load clip') - device = "cuda" if torch.cuda.is_available() else "cpu" - self.model, preprocess = clip.load("ViT-B/32", device=device) - self.LoadData(dataset_name) - - def LoadData(self, dataset_name): - tf.keras.backend.clear_session() - M=Manipulator(dataset_name=dataset_name) - np.set_printoptions(suppress=True) - fs3=np.load('./npy/'+dataset_name+'/fs3.npy') - - self.M=M - self.fs3=fs3 - - w_plus=np.load('./data/'+dataset_name+'/w_plus.npy') - self.M.dlatents=M.W2S(w_plus) - - if dataset_name=='ffhq': - self.c_threshold=20 - else: - self.c_threshold=100 - self.SetInitP() - - def SetInitP(self): - self.M.alpha=[3] - self.M.num_images=1 - - self.target='' - self.neutral='' - self.GetDt2() - img_index=0 - self.M.dlatent_tmp=[tmp[img_index:(img_index+1)] for tmp in self.M.dlatents] - - - def GetDt2(self): - classnames=[self.target,self.neutral] - dt=GetDt(classnames,self.model) - - self.dt=dt - num_cs=[] - betas=np.arange(0.1,0.3,0.01) - for i in range(len(betas)): - boundary_tmp2,num_c=GetBoundary(self.fs3,self.dt,self.M,threshold=betas[i]) - print(betas[i]) - num_cs.append(num_c) - - num_cs=np.array(num_cs) - select=num_cs>self.c_threshold - - if sum(select)==0: - self.beta=0.1 - else: - self.beta=betas[select][-1] - - - def GetCode(self): - boundary_tmp2,num_c=GetBoundary(self.fs3,self.dt,self.M,threshold=self.beta) - codes=self.M.MSCode(self.M.dlatent_tmp,boundary_tmp2) - return codes - - def GetImg(self): - - codes=self.GetCode() - out=self.M.GenerateImg(codes) - img=out[0,0] - return img - - - - -#%% -if __name__ == "__main__": - style_clip=StyleCLIP() - self=style_clip - - - - - - - - - - - - - - - - - - - - diff --git a/spaces/ECCV2022/bytetrack/yolox/motdt_tracker/basetrack.py b/spaces/ECCV2022/bytetrack/yolox/motdt_tracker/basetrack.py deleted file mode 100644 index 88b16eea14d1cc6f238b7c56becd6e754fbea55c..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/yolox/motdt_tracker/basetrack.py +++ /dev/null @@ -1,56 +0,0 @@ -import numpy as np -from collections import OrderedDict - - -class TrackState(object): - New = 0 - Tracked = 1 - Lost = 2 - Removed = 3 - Replaced = 4 - - -class BaseTrack(object): - _count = 0 - - track_id = 0 - is_activated = False - state = TrackState.New - - history = OrderedDict() - features = [] - curr_feature = None - score = 0 - start_frame = 0 - frame_id = 0 - time_since_update = 0 - - # multi-camera - location = (np.inf, np.inf) - - @property - def end_frame(self): - return self.frame_id - - @staticmethod - def next_id(): - BaseTrack._count += 1 - return BaseTrack._count - - def activate(self, *args): - raise NotImplementedError - - def predict(self): - raise NotImplementedError - - def update(self, *args, **kwargs): - raise NotImplementedError - - def mark_lost(self): - self.state = TrackState.Lost - - def mark_removed(self): - self.state = 
TrackState.Removed - - def mark_replaced(self): - self.state = TrackState.Replaced diff --git a/spaces/ECCV2022/bytetrack/yolox/utils/demo_utils.py b/spaces/ECCV2022/bytetrack/yolox/utils/demo_utils.py deleted file mode 100644 index 093443cd568a2b0421fa707eb8fda97ec154b142..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/yolox/utils/demo_utils.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -# Copyright (c) 2014-2021 Megvii Inc. All rights reserved. - -import numpy as np - -import os - -__all__ = ["mkdir", "nms", "multiclass_nms", "demo_postprocess"] - - -def mkdir(path): - if not os.path.exists(path): - os.makedirs(path) - - -def nms(boxes, scores, nms_thr): - """Single class NMS implemented in Numpy.""" - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = scores.argsort()[::-1] - - keep = [] - while order.size > 0: - i = order[0] - keep.append(i) - xx1 = np.maximum(x1[i], x1[order[1:]]) - yy1 = np.maximum(y1[i], y1[order[1:]]) - xx2 = np.minimum(x2[i], x2[order[1:]]) - yy2 = np.minimum(y2[i], y2[order[1:]]) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - ovr = inter / (areas[i] + areas[order[1:]] - inter) - - inds = np.where(ovr <= nms_thr)[0] - order = order[inds + 1] - - return keep - - -def multiclass_nms(boxes, scores, nms_thr, score_thr): - """Multiclass NMS implemented in Numpy""" - final_dets = [] - num_classes = scores.shape[1] - for cls_ind in range(num_classes): - cls_scores = scores[:, cls_ind] - valid_score_mask = cls_scores > score_thr - if valid_score_mask.sum() == 0: - continue - else: - valid_scores = cls_scores[valid_score_mask] - valid_boxes = boxes[valid_score_mask] - keep = nms(valid_boxes, valid_scores, nms_thr) - if len(keep) > 0: - cls_inds = np.ones((len(keep), 1)) * cls_ind - dets = np.concatenate( - [valid_boxes[keep], valid_scores[keep, None], cls_inds], 1 - ) - final_dets.append(dets) - if len(final_dets) == 0: - return None - return np.concatenate(final_dets, 0) - - -def demo_postprocess(outputs, img_size, p6=False): - - grids = [] - expanded_strides = [] - - if not p6: - strides = [8, 16, 32] - else: - strides = [8, 16, 32, 64] - - hsizes = [img_size[0] // stride for stride in strides] - wsizes = [img_size[1] // stride for stride in strides] - - for hsize, wsize, stride in zip(hsizes, wsizes, strides): - xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) - grid = np.stack((xv, yv), 2).reshape(1, -1, 2) - grids.append(grid) - shape = grid.shape[:2] - expanded_strides.append(np.full((*shape, 1), stride)) - - grids = np.concatenate(grids, 1) - expanded_strides = np.concatenate(expanded_strides, 1) - outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides - outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides - - return outputs diff --git a/spaces/FantasticGNU/AnomalyGPT/utils/logger.py b/spaces/FantasticGNU/AnomalyGPT/utils/logger.py deleted file mode 100644 index 847c1c7a2f50f310cd5daf96b928838c1c293525..0000000000000000000000000000000000000000 --- a/spaces/FantasticGNU/AnomalyGPT/utils/logger.py +++ /dev/null @@ -1,127 +0,0 @@ -import logging -import torch.distributed as dist - -logger_initialized = {} - -def get_root_logger(log_file=None, log_level=logging.INFO, name='main'): - """Get root logger and add a keyword filter to it. - The logger will be initialized if it has not been initialized. By default a - StreamHandler will be added. 
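Returning briefly to the NumPy NMS helpers in demo_utils.py above: a tiny worked check makes the IoU-threshold behaviour concrete. The sketch assumes the `nms` function from that file is in scope; the box coordinates and scores are made-up example values.

```python
# Two heavily overlapping boxes and one distant box; with an IoU threshold of
# 0.5 the lower-scoring overlapping box is suppressed.
import numpy as np

boxes = np.array(
    [
        [0, 0, 10, 10],    # box A
        [1, 1, 11, 11],    # box B, overlaps A with IoU ~0.70
        [50, 50, 60, 60],  # box C, no overlap with A or B
    ],
    dtype=np.float32,
)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)

keep = nms(boxes, scores, nms_thr=0.5)  # nms() as defined in demo_utils.py above
print(keep)  # expected: [0, 2] -- A and C survive, B is suppressed
```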
If `log_file` is specified, a FileHandler will - also be added. The name of the root logger is the top-level package name, - e.g., "mmdet3d". - Args: - log_file (str, optional): File path of log. Defaults to None. - log_level (int, optional): The level of logger. - Defaults to logging.INFO. - name (str, optional): The name of the root logger, also used as a - filter keyword. Defaults to 'mmdet3d'. - Returns: - :obj:`logging.Logger`: The obtained logger - """ - logger = get_logger(name=name, log_file=log_file, log_level=log_level) - # add a logging filter - logging_filter = logging.Filter(name) - logging_filter.filter = lambda record: record.find(name) != -1 - - return logger - - -def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): - """Initialize and get a logger by name. - If the logger has not been initialized, this method will initialize the - logger by adding one or two handlers, otherwise the initialized logger will - be directly returned. During initialization, a StreamHandler will always be - added. If `log_file` is specified and the process rank is 0, a FileHandler - will also be added. - Args: - name (str): Logger name. - log_file (str | None): The log filename. If specified, a FileHandler - will be added to the logger. - log_level (int): The logger level. Note that only the process of - rank 0 is affected, and other processes will set the level to - "Error" thus be silent most of the time. - file_mode (str): The file mode used in opening log file. - Defaults to 'w'. - Returns: - logging.Logger: The expected logger. - """ - logger = logging.getLogger(name) - if name in logger_initialized: - return logger - # handle hierarchical names - # e.g., logger "a" is initialized, then logger "a.b" will skip the - # initialization since it is a child of "a". - for logger_name in logger_initialized: - if name.startswith(logger_name): - return logger - - # handle duplicate logs to the console - # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) - # to the root logger. As logger.propagate is True by default, this root - # level handler causes logging messages from rank>0 processes to - # unexpectedly show up on the console, creating much unwanted clutter. - # To fix this issue, we set the root logger's StreamHandler, if any, to log - # at the ERROR level. - for handler in logger.root.handlers: - if type(handler) is logging.StreamHandler: - handler.setLevel(logging.ERROR) - - stream_handler = logging.StreamHandler() - handlers = [stream_handler] - - if dist.is_available() and dist.is_initialized(): - rank = dist.get_rank() - else: - rank = 0 - - # only rank 0 will add a FileHandler - if rank == 0 and log_file is not None: - # Here, the default behaviour of the official logger is 'a'. Thus, we - # provide an interface to change the file mode to the default - # behaviour. - file_handler = logging.FileHandler(log_file, file_mode) - handlers.append(file_handler) - - formatter = logging.Formatter( - '%(asctime)s - %(name)s - %(levelname)s - %(message)s') - for handler in handlers: - handler.setFormatter(formatter) - handler.setLevel(log_level) - logger.addHandler(handler) - - if rank == 0: - logger.setLevel(log_level) - else: - logger.setLevel(logging.ERROR) - - logger_initialized[name] = True - - - return logger - - -def print_log(msg, logger=None, level=logging.INFO): - """Print a log message. - Args: - msg (str): The message to be logged. - logger (logging.Logger | str | None): The logger to be used. 
- Some special loggers are: - - "silent": no message will be printed. - - other str: the logger obtained with `get_root_logger(logger)`. - - None: The `print()` method will be used to print log messages. - level (int): Logging level. Only available when `logger` is a Logger - object or "root". - """ - if logger is None: - print(msg) - elif isinstance(logger, logging.Logger): - logger.log(level, msg) - elif logger == 'silent': - pass - elif isinstance(logger, str): - _logger = get_logger(logger) - _logger.log(level, msg) - else: - raise TypeError( - 'logger should be either a logging.Logger object, str, ' - f'"silent" or None, but got {type(logger)}') \ No newline at end of file diff --git a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Forefront.py b/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Forefront.py deleted file mode 100644 index e7e89831cc4ec6dc37ea094d9828a7582e981ff1..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/Forefront.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import json -import requests -from ...typing import sha256, Dict, get_type_hints - -url = 'https://forefront.com' -model = ['gpt-3.5-turbo'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: list, stream: bool, **kwargs): - json_data = { - 'text': messages[-1]['content'], - 'action': 'noauth', - 'id': '', - 'parentId': '', - 'workspaceId': '', - 'messagePersona': '607e41fe-95be-497e-8e97-010a59b2e2c0', - 'model': 'gpt-4', - 'messages': messages[:-1] if len(messages) > 1 else [], - 'internetMode': 'auto' - } - response = requests.post( 'https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat', - json=json_data, stream=True) - for token in response.iter_lines(): - if b'delta' in token: - token = json.loads(token.decode().split('data: ')[1])['delta'] - yield (token) -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) \ No newline at end of file diff --git a/spaces/FluxWaveCorp/Ghostwriter-Bloom/README.md b/spaces/FluxWaveCorp/Ghostwriter-Bloom/README.md deleted file mode 100644 index 0bbb150ba637ef1a193ddee675b4085e98996240..0000000000000000000000000000000000000000 --- a/spaces/FluxWaveCorp/Ghostwriter-Bloom/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Ghostwriter Bloom -emoji: 🔥 -colorFrom: indigo -colorTo: yellow -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GT4SD/regression_transformer/README.md b/spaces/GT4SD/regression_transformer/README.md deleted file mode 100644 index da16808450fd7ff61e669465e501da439ba2460f..0000000000000000000000000000000000000000 --- a/spaces/GT4SD/regression_transformer/README.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: Regression Transformer -emoji: 💡 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.46.0 -app_file: app.py -pinned: false -python_version: 3.8.13 -pypi_version: 20.2.4 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gptmixcliport2_smaller.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gptmixcliport2_smaller.sh deleted file mode 100644 
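One more aside, on the Forefront provider above: its streaming loop consumes server-sent-event style lines, keeps only those carrying a `delta` payload, and yields the decoded token. The sketch below reproduces that parsing step in isolation; the `fake_stream` byte lines stand in for `response.iter_lines()` and are illustrative values only.

```python
# Stand-alone sketch of the 'data: {...}' delta-token extraction used above.
import json


def iter_delta_tokens(lines):
    for raw in lines:
        if b"delta" in raw:
            payload = json.loads(raw.decode().split("data: ")[1])
            yield payload["delta"]


fake_stream = [
    b'data: {"delta": "Hello"}',
    b"",  # keep-alive / blank line, skipped
    b'data: {"delta": " world"}',
]
print("".join(iter_delta_tokens(fake_stream)))  # -> "Hello world"
```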
index a8ff83c815320f7eaca262aef4c08c3fecabac1c..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train10_gptmixcliport2_smaller.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -#SBATCH -c 10 -#SBATCH -n 1 -#SBATCH -o logs/%j.out -#SBATCH --exclusive -STEPS=${1-'50000'} - - -sh scripts/traintest_scripts/train_test_multi_task_goal_smaller.sh data \ - "[put-block-in-bowl,align-box-corner,color-sorted-container-stack,color-sorted-block-race,Four-corner-pyramid-challenge,triangle-block-arrangement,sort-and-stack-clr-blocks,color-coordinated-sphere-insertion,rainbow-stack,align-pair-colored-blocks-along-line,vertical-insertion-blocks,stack-blocks-in-container]" \ - "[put-block-in-bowl,align-box-corner]" \ - gpt5_mixcliport3_task $STEPS diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/datasets/pascal_voc12.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/datasets/pascal_voc12.py deleted file mode 100644 index ba1d42d0c5781f56dc177d860d856bb34adce555..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/_base_/datasets/pascal_voc12.py +++ /dev/null @@ -1,57 +0,0 @@ -# dataset settings -dataset_type = 'PascalVOCDataset' -data_root = 'data/VOCdevkit/VOC2012' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (512, 512) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations'), - dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), - dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), - dict(type='RandomFlip', prob=0.5), - dict(type='PhotoMetricDistortion'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_semantic_seg']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(2048, 512), - # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=4, - workers_per_gpu=4, - train=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/train.txt', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - data_root=data_root, - img_dir='JPEGImages', - ann_dir='SegmentationClass', - split='ImageSets/Segmentation/val.txt', - pipeline=test_pipeline)) diff --git a/spaces/Gradio-Themes/theme_builder/run.py b/spaces/Gradio-Themes/theme_builder/run.py deleted file mode 100644 index 3d089dbf28154b49b3c02ce781294542015c31e4..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Themes/theme_builder/run.py +++ /dev/null @@ -1,6 +0,0 @@ -import gradio as gr - -demo = gr.themes.builder - -if __name__ == "__main__": - demo() \ No newline at end of file diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/hubert/pretrain_hubert.py b/spaces/HaloMaster/chinesesummary/fengshen/examples/hubert/pretrain_hubert.py deleted file mode 100644 index 
6506364b9498c5b994c085e1a5342082283ef62b..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/hubert/pretrain_hubert.py +++ /dev/null @@ -1,287 +0,0 @@ -import fengshen.data.hubert.hubert_dataset as datasets -from fengshen.data.universal_datamodule import UniversalDataModule -from transformers import HubertConfig, HubertModel -# from transformers.models.hubert.modeling_hubert import _compute_mask_indices -import argparse -from fairseq.data import Dictionary -from pytorch_lightning import ( - LightningModule, - Trainer, - loggers, -) -from pytorch_lightning.callbacks import LearningRateMonitor -import torch -import os -import torch.nn.functional as F -import torch.nn as nn - - -class LabelEncoder(object): - def __init__(self, dictionary: Dictionary): - self.dictionary = dictionary - - def __call__(self, label: str): - return self.dictionary.encode_line( - label, - append_eos=False, - add_if_not_exist=False, - ) - - -class HubertPretrainDataLoader(): - def __init__(self, args): - self.cfg = args - self.dictionaries = self.load_dictionaries() - self.load_datasets = {} - - # TODO 改成HuggingFace Tokenizer - def load_dictionaries(self): - label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir - dictionaries = [ - Dictionary.load(f"{label_dir}/dict.{label}.txt") - for label in self.cfg.labels - ] - return dictionaries - - def get_label_dir(self): - if self.cfg.label_dir is None: - return self.cfg.data - return self.cfg.label_dir - - @property - def datasets(self): - return self.load_datasets - - def load_dataset(self, split: str, **kwargs): - manifest = f"{self.cfg.data}/{split}.tsv" - dicts = self.dictionaries - pad_list = [dict.pad() for dict in dicts] - eos_list = [dict.eos() for dict in dicts] - procs = [LabelEncoder(dict) for dict in dicts] - paths = [f"{self.get_label_dir()}/{split}.{lb}" for lb in self.cfg.labels] - - # hubert v1: pad_audio=True, random_crop=False; - self.load_datasets[split] = datasets.HubertDataset( - manifest, - sample_rate=self.cfg.sample_rate, - label_paths=paths, - label_rates=self.cfg.label_rate, - pad_list=pad_list, - eos_list=eos_list, - label_processors=procs, - max_keep_sample_size=self.cfg.max_keep_size, - min_keep_sample_size=self.cfg.min_sample_size, - max_sample_size=self.cfg.max_sample_size, - pad_audio=self.cfg.pad_audio, - normalize=self.cfg.normalize, - store_labels=False, - random_crop=self.cfg.random_crop, - single_target=self.cfg.single_target, - ) - - -def perpare_data(args): - loader = HubertPretrainDataLoader(args) - loader.load_dataset('train') - loader.load_dataset('valid') - return loader - - -class HubertLightning(LightningModule): - @staticmethod - def add_module_specific_args(parent_parser): - parser = parent_parser.add_argument_group('HuBert Lightning') - parser.add_argument('--pred_masked_weight', type=float, default=1.0) - parser.add_argument('--logit_temp', type=float, default=1.0) - parser.add_argument('--loss_weights', type=float, nargs='+') - # parser.add_argument('--mask_prob', type=float, default=0.65) - # parser.add_argument('--mask_length', type=int, default=10) - # parser.add_argument('--mask_selection', type=str, default='static', - # choice=["static", "uniform", "normal", "poisson"]) - # parser.add_argument('--mask_other', type=float, default=0) - # parser.add_argument('--no_mask_overlap', type=bool, default=False) - # parser.add_argument('--mask_min_space', type=int, default=1) - return parent_parser - - def __init__(self, args, loader, ** kwargs) -> None: - 
super().__init__() - self.save_hyperparameters(args) - config = HubertConfig.from_pretrained(args.model_path) - self.config = config - self.model = HubertModel(config=config) - self.num_classes = [len(d) for d in loader.dictionaries] - self.label_embs_concat = nn.Parameter( - torch.FloatTensor(sum(self.num_classes), self.config.conv_dim[-1] // 2) - ) - self.final_proj = nn.Linear( - self.config.hidden_size, self.config.conv_dim[-1] // 2 * len(loader.dictionaries) - ) - nn.init.uniform_(self.label_embs_concat) - - def setup(self, stage) -> None: - if stage == 'fit': - train_loader = self.trainer._data_connector._train_dataloader_source.dataloader() - - # Calculate total steps - if self.trainer.max_epochs > 0: - world_size = self.trainer.world_size - tb_size = self.hparams.train_batchsize * max(1, world_size) - ab_size = self.trainer.accumulate_grad_batches - self.total_steps = (len(train_loader.dataset) * - self.trainer.max_epochs // tb_size) // ab_size - else: - self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches - - print('Total steps: {}' .format(self.total_steps)) - - def configure_optimizers(self): - from fengshen.models.model_utils import configure_optimizers - return configure_optimizers(self) - - def compute_nce(self, x, pos, negs): - neg_is_pos = (pos == negs).all(-1) - pos = pos.unsqueeze(0) - targets = torch.cat([pos, negs], dim=0) - - logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x) - logits /= self.hparams.logit_temp - if neg_is_pos.any(): - logits[1:][neg_is_pos] = float("-inf") - logits = logits.transpose(0, 1) # (num_x, num_cls+1) - return logits - - def forward(self, **batch): - - target_list = batch['target_list'] - padding_mask = batch['net_input']['padding_mask'] - input_values = batch['net_input']['source'] - output = self.model(input_values=input_values, - attention_mask=padding_mask, - target_list=target_list, - mask_time_indices=None, - return_dict=False) - - def compute_pred(proj_x, target, label_embs): - # compute logits for the i-th label set - y = torch.index_select(label_embs, 0, target.long()) - negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1) - # proj_x: (S, D) - # y: (S, D) - # negs: (Neg, S, D) - return self.compute_nce(proj_x, y, negs) - - label_embs_list = self.label_embs_concat.split(self.num_classes, 0) - - x, extra_losses, target_list, mask_indices, padding_mask = output[ - 0], output[-4], output[-3], output[-2], output[-1] - - masked_indices = torch.logical_and(~padding_mask, mask_indices) - proj_x_m = self.final_proj(x[masked_indices]) - proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1) - logp_m_list = [ - compute_pred(proj_x_m, t[masked_indices], label_embs_list[i]) - for i, (proj_x_m, t) in enumerate(zip(proj_x_m_list, target_list)) - ] - - targ_m_list = [x.new_zeros(x.size(0), dtype=torch.long) for x in logp_m_list] - - loss = 0.0 - loss_m_list = [] - - for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)): - loss_m = F.cross_entropy(logp_m, targ_m) - loss_m_list.append(loss_m) - self.log(f"loss_m_{i}", loss_m.detach().item()) - - loss += self.hparams.pred_masked_weight * sum(loss_m_list) - - loss_weights = self.hparams.loss_weights - if loss_weights is not None: - if torch.is_tensor(extra_losses): - extra_losses = [extra_losses] - names = ['extra'] - if len(loss_weights) == 1 and len(extra_losses) != 1: - loss_weights = [loss_weights[0]] * len(extra_losses) - assert len(extra_losses) == len( - loss_weights - ), f"{len(extra_losses)}, {len(loss_weights)}" 
- for p, n, coef in zip(extra_losses, names, loss_weights): - if coef != 0 and p is not None: - p = coef * p.float() - loss += p - self.log(f"loss_{n}", p.item()) - - return {'loss': loss} - - def training_step(self, batch, batch_idx): - output = self(**batch) - self.log('train_loss', output['loss']) - return output - - def comput_metrix(self, logits, labels): - y_pred = torch.argmax(logits, dim=-1) - y_pred = y_pred.view(size=(-1,)) - y_true = labels.view(size=(-1,)).float() - corr = torch.eq(y_pred, y_true) - acc = torch.sum(corr.float()) / y_true.size()[0] - return acc - - def validation_step(self, batch, batch_idx): - output = self(**batch) - # self.log('val_loss', output.loss, sync_dist=True) - # acc = self.comput_metrix(output.logits, batch['labels']) - # self.log('val_acc', acc, sync_dist=True) - return output - - def on_save_checkpoint(self, checkpoint) -> None: - # Save the current loop info in the mid of epoch - # if you lightning <= 1.6.0 uncomment the line below - # checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict() - if self.trainer.global_rank == 0: - self.model.save_pretrained(os.path.join( - self.trainer.checkpoint_callback.dirpath, - 'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step))) - - def on_load_checkpoint(self, checkpoint) -> None: - global_step_offset = checkpoint["global_step"] - if 'global_samples' in checkpoint: - self.consumed_samples = checkpoint['global_samples'] - self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset - - -if __name__ == '__main__': - args_parser = argparse.ArgumentParser() - from fengshen.utils import UniversalCheckpoint - from fengshen.models.model_utils import add_module_args - args_parser = add_module_args(args_parser) - args_parser = datasets.add_data_specific_args(args_parser) - args_parser = UniversalDataModule.add_data_specific_args(args_parser) - args_parser = Trainer.add_argparse_args(args_parser) - args_parser = HubertLightning.add_module_specific_args(args_parser) - args_parser = UniversalCheckpoint.add_argparse_args(args_parser) - args_parser.add_argument('--ckpt_path', type=str, ) - args = args_parser.parse_args() - - data_module = UniversalDataModule(args=args, tokenizer=None, collate_fn=None) - data_loader = perpare_data(args) - data_module.datasets = data_loader.datasets - module = HubertLightning(args, loader=data_loader) - - lr_monitor = LearningRateMonitor(logging_interval='step') - logger = loggers.TensorBoardLogger(save_dir=os.path.join( - args.default_root_dir, 'logs/'), - name=os.path.basename(os.path.dirname(args.model_path))) - checkpoint_callback = UniversalCheckpoint(args).callbacks - - if args.ckpt_path is not None and \ - not os.path.exists(args.ckpt_path): - print('--------warning no checkpoint found--------, remove args') - args.ckpt_path = None - - trainer = Trainer.from_argparse_args(args, - logger=logger, - callbacks=[ - lr_monitor, - checkpoint_callback]) - - trainer.fit(module, data_module, ckpt_path=args.ckpt_path) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/encoders/space_tokenizer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/encoders/space_tokenizer.py deleted file mode 100644 index 925ad41b7c1aee6738c63938c36bd3ee16dca812..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/encoders/space_tokenizer.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
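A short aside on `compute_nce()` in the HuBERT Lightning module above: it scores each masked frame against its positive label embedding and a set of negatives via cosine similarity, divides by a temperature, and masks out any negative that duplicates the positive, so that class 0 is always the target for cross-entropy. The sketch below replays that computation with random tensors; all shapes and values are illustrative.

```python
# Illustrative replay of the noise-contrastive logit computation above.
import torch

logit_temp = 0.1
x = torch.randn(4, 16)          # (num_frames, dim) projected masked features
pos = torch.randn(4, 16)        # (num_frames, dim) positive label embeddings
negs = torch.randn(8, 4, 16)    # (num_negatives, num_frames, dim)

neg_is_pos = (pos == negs).all(-1)                     # (num_negatives, num_frames)
targets = torch.cat([pos.unsqueeze(0), negs], dim=0)   # (1 + num_negatives, num_frames, dim)

logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1)  # (1 + num_negs, num_frames)
logits = logits / logit_temp
if neg_is_pos.any():
    logits[1:][neg_is_pos] = float("-inf")  # never score a duplicate of the positive as a negative
logits = logits.transpose(0, 1)             # (num_frames, 1 + num_negatives)

# The positive is always class 0, so the cross-entropy target is all zeros.
target_idx = logits.new_zeros(logits.size(0), dtype=torch.long)
loss = torch.nn.functional.cross_entropy(logits, target_idx)
print(loss.item())
```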
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import re - -from fairseq.data.encoders import register_tokenizer -from fairseq.dataclass import FairseqDataclass - - -@register_tokenizer("space", dataclass=FairseqDataclass) -class SpaceTokenizer(object): - def __init__(self, *unused): - self.space_tok = re.compile(r"\s+") - - def encode(self, x: str) -> str: - return self.space_tok.sub(" ", x) - - def decode(self, x: str) -> str: - return x diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/roberta/hub_interface.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/roberta/hub_interface.py deleted file mode 100644 index ba298d63ba5da2a5b2f1a44d0384a6b249277ef4..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/roberta/hub_interface.py +++ /dev/null @@ -1,235 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.data import encoders - - -class RobertaHubInterface(nn.Module): - """A simple PyTorch Hub interface to RoBERTa. - - Usage: https://github.com/pytorch/fairseq/tree/main/examples/roberta - """ - - def __init__(self, cfg, task, model): - super().__init__() - self.cfg = cfg - self.task = task - self.model = model - - self.bpe = encoders.build_bpe(cfg.bpe) - - # this is useful for determining the device - self.register_buffer("_float_tensor", torch.tensor([0], dtype=torch.float)) - - @property - def device(self): - return self._float_tensor.device - - def encode( - self, sentence: str, *addl_sentences, no_separator=False - ) -> torch.LongTensor: - """ - BPE-encode a sentence (or multiple sentences). - - Every sequence begins with a beginning-of-sentence (``) symbol. - Every sentence ends with an end-of-sentence (``) and we use an - extra end-of-sentence (``) as a separator. - - Example (single sentence): ` a b c ` - Example (sentence pair): ` d e f 1 2 3 ` - - The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE - requires leading spaces. 
For example:: - - >>> roberta.encode('Hello world').tolist() - [0, 31414, 232, 2] - >>> roberta.encode(' world').tolist() - [0, 232, 2] - >>> roberta.encode('world').tolist() - [0, 8331, 2] - """ - bpe_sentence = " " + self.bpe.encode(sentence) + " " - for s in addl_sentences: - bpe_sentence += " " if not no_separator else "" - bpe_sentence += " " + self.bpe.encode(s) + " " - tokens = self.task.source_dictionary.encode_line( - bpe_sentence, append_eos=False, add_if_not_exist=False - ) - return tokens.long() - - def decode(self, tokens: torch.LongTensor): - assert tokens.dim() == 1 - tokens = tokens.numpy() - if tokens[0] == self.task.source_dictionary.bos(): - tokens = tokens[1:] # remove - eos_mask = tokens == self.task.source_dictionary.eos() - doc_mask = eos_mask[1:] & eos_mask[:-1] - sentences = np.split(tokens, doc_mask.nonzero()[0] + 1) - sentences = [ - self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences - ] - if len(sentences) == 1: - return sentences[0] - return sentences - - def extract_features( - self, tokens: torch.LongTensor, return_all_hiddens: bool = False - ) -> torch.Tensor: - if tokens.dim() == 1: - tokens = tokens.unsqueeze(0) - if tokens.size(-1) > self.model.max_positions(): - raise ValueError( - "tokens exceeds maximum length: {} > {}".format( - tokens.size(-1), self.model.max_positions() - ) - ) - features, extra = self.model( - tokens.to(device=self.device), - features_only=True, - return_all_hiddens=return_all_hiddens, - ) - if return_all_hiddens: - # convert from T x B x C -> B x T x C - inner_states = extra["inner_states"] - return [inner_state.transpose(0, 1) for inner_state in inner_states] - else: - return features # just the last layer's features - - def register_classification_head( - self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs - ): - self.model.register_classification_head( - name, num_classes=num_classes, embedding_size=embedding_size, **kwargs - ) - - def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False): - features = self.extract_features(tokens.to(device=self.device)) - logits = self.model.classification_heads[head](features) - if return_logits: - return logits - return F.log_softmax(logits, dim=-1) - - def extract_features_aligned_to_words( - self, sentence: str, return_all_hiddens: bool = False - ) -> torch.Tensor: - """Extract RoBERTa features, aligned to spaCy's word-level tokenizer.""" - from fairseq.models.roberta import alignment_utils - from spacy.tokens import Doc - - nlp = alignment_utils.spacy_nlp() - tokenizer = alignment_utils.spacy_tokenizer() - - # tokenize both with GPT-2 BPE and spaCy - bpe_toks = self.encode(sentence) - spacy_toks = tokenizer(sentence) - spacy_toks_ws = [t.text_with_ws for t in tokenizer(sentence)] - alignment = alignment_utils.align_bpe_to_words(self, bpe_toks, spacy_toks_ws) - - # extract features and align them - features = self.extract_features( - bpe_toks, return_all_hiddens=return_all_hiddens - ) - features = features.squeeze(0) - aligned_feats = alignment_utils.align_features_to_words( - self, features, alignment - ) - - # wrap in spaCy Doc - doc = Doc( - nlp.vocab, - words=[""] + [x.text for x in spacy_toks] + [""], - spaces=[True] - + [x.endswith(" ") for x in spacy_toks_ws[:-1]] - + [True, False], - ) - assert len(doc) == aligned_feats.size(0) - doc.user_token_hooks["vector"] = lambda token: aligned_feats[token.i] - return doc - - def fill_mask(self, masked_input: str, topk: int = 5): - masked_token = "" - assert ( - 
masked_token in masked_input and masked_input.count(masked_token) == 1 - ), "Please add one {0} token for the input, eg: 'He is a {0} guy'".format( - masked_token - ) - - text_spans = masked_input.split(masked_token) - text_spans_bpe = ( - (" {0} ".format(masked_token)) - .join([self.bpe.encode(text_span.rstrip()) for text_span in text_spans]) - .strip() - ) - tokens = self.task.source_dictionary.encode_line( - " " + text_spans_bpe + " ", - append_eos=False, - add_if_not_exist=False, - ) - - masked_index = (tokens == self.task.mask_idx).nonzero(as_tuple=False) - if tokens.dim() == 1: - tokens = tokens.unsqueeze(0) - - with utils.model_eval(self.model): - features, extra = self.model( - tokens.long().to(device=self.device), - features_only=False, - return_all_hiddens=False, - ) - logits = features[0, masked_index, :].squeeze() - prob = logits.softmax(dim=0) - values, index = prob.topk(k=topk, dim=0) - topk_predicted_token_bpe = self.task.source_dictionary.string(index) - - topk_filled_outputs = [] - for index, predicted_token_bpe in enumerate( - topk_predicted_token_bpe.split(" ") - ): - predicted_token = self.bpe.decode(predicted_token_bpe) - # Quick hack to fix https://github.com/pytorch/fairseq/issues/1306 - if predicted_token_bpe.startswith("\u2581"): - predicted_token = " " + predicted_token - if " {0}".format(masked_token) in masked_input: - topk_filled_outputs.append( - ( - masked_input.replace( - " {0}".format(masked_token), predicted_token - ), - values[index].item(), - predicted_token, - ) - ) - else: - topk_filled_outputs.append( - ( - masked_input.replace(masked_token, predicted_token), - values[index].item(), - predicted_token, - ) - ) - return topk_filled_outputs - - def disambiguate_pronoun(self, sentence: str) -> bool: - """ - Usage:: - - >>> disambiguate_pronoun('The _trophy_ would not fit in the brown suitcase because [it] was too big.') - True - - >>> disambiguate_pronoun('The trophy would not fit in the brown suitcase because [it] was too big.') - 'The trophy' - """ - assert hasattr( - self.task, "disambiguate_pronoun" - ), "roberta.disambiguate_pronoun() requires a model trained with the WSC task." - with utils.model_eval(self.model): - return self.task.disambiguate_pronoun( - self.model, sentence, use_cuda=self.device.type == "cuda" - ) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/scripts/count_docs.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/scripts/count_docs.py deleted file mode 100644 index 58d85af85e91377a34dbd01f7674436152fd08e8..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/scripts/count_docs.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Count the number of documents and average number of lines and tokens per -document in a large file. Documents should be separated by a single empty line. 
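The blank-line convention described in this docstring is easy to see on a tiny in-memory sample before the file-streaming implementation that follows. The sketch below uses made-up lines in place of the large input file and mirrors the counting logic of the script.

```python
# Tiny illustration of the "documents separated by a single empty line" convention.
import numpy as np

lines = [
    "first doc line one",
    "first doc line two",
    "",                     # blank line -> new document
    "second doc only line",
]

num_docs, num_lines, num_toks = 1, [], []
lines_in_doc, toks_in_doc = 0, 0
for line in lines:
    if len(line.strip()) == 0:
        num_docs += 1
        num_lines.append(lines_in_doc)
        num_toks.append(toks_in_doc)
        lines_in_doc, toks_in_doc = 0, 0
    else:
        lines_in_doc += 1
        toks_in_doc += len(line.split())

print(num_docs)             # 2
print(np.mean(num_lines))   # 2.0 lines in the first (closed) document
```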
-""" - -import argparse -import gzip -import sys - -import numpy as np - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("input") - parser.add_argument("--gzip", action="store_true") - args = parser.parse_args() - - def gopen(): - if args.gzip: - return gzip.open(args.input, "r") - else: - return open(args.input, "r", encoding="utf-8") - - num_lines = [] - num_toks = [] - with gopen() as h: - num_docs = 1 - num_lines_in_doc = 0 - num_toks_in_doc = 0 - for i, line in enumerate(h): - if len(line.strip()) == 0: # empty line indicates new document - num_docs += 1 - num_lines.append(num_lines_in_doc) - num_toks.append(num_toks_in_doc) - num_lines_in_doc = 0 - num_toks_in_doc = 0 - else: - num_lines_in_doc += 1 - num_toks_in_doc += len(line.rstrip().split()) - if i % 1000000 == 0: - print(i, file=sys.stderr, end="", flush=True) - elif i % 100000 == 0: - print(".", file=sys.stderr, end="", flush=True) - print(file=sys.stderr, flush=True) - - print("found {} docs".format(num_docs)) - print("average num lines per doc: {}".format(np.mean(num_lines))) - print("average num toks per doc: {}".format(np.mean(num_toks))) - - -if __name__ == "__main__": - main() diff --git a/spaces/HighCWu/GFPGAN-1.3/experiments/pretrained_models/README.md b/spaces/HighCWu/GFPGAN-1.3/experiments/pretrained_models/README.md deleted file mode 100644 index 3401a5ca9b393e0033f58c5af8905961565826d9..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/GFPGAN-1.3/experiments/pretrained_models/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Pre-trained Models and Other Data - -Download pre-trained models and other data. Put them in this folder. - -1. [Pretrained StyleGAN2 model: StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/StyleGAN2_512_Cmul1_FFHQ_B12G4_scratch_800k.pth) -1. [Component locations of FFHQ: FFHQ_eye_mouth_landmarks_512.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/FFHQ_eye_mouth_landmarks_512.pth) -1. [A simple ArcFace model: arcface_resnet18.pth](https://github.com/TencentARC/GFPGAN/releases/download/v0.1.0/arcface_resnet18.pth) diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/serializing.py b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/serializing.py deleted file mode 100644 index aa5a571ab779194af6b183c4cc145c54dabbc271..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/serializing.py +++ /dev/null @@ -1,208 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from pathlib import Path -from typing import Any, Dict - -from gradio import processing_utils, utils - - -class Serializable(ABC): - @abstractmethod - def serialize( - self, x: Any, load_dir: str | Path = "", encryption_key: bytes | None = None - ): - """ - Convert data from human-readable format to serialized format for a browser. - """ - pass - - @abstractmethod - def deserialize( - self, - x: Any, - save_dir: str | Path | None = None, - encryption_key: bytes | None = None, - ): - """ - Convert data from serialized format for a browser to human-readable format. - """ - pass - - -class SimpleSerializable(Serializable): - def serialize( - self, x: Any, load_dir: str | Path = "", encryption_key: bytes | None = None - ) -> Any: - """ - Convert data from human-readable format to serialized format. For SimpleSerializable components, this is a no-op. 
- Parameters: - x: Input data to serialize - load_dir: Ignored - encryption_key: Ignored - """ - return x - - def deserialize( - self, - x: Any, - save_dir: str | Path | None = None, - encryption_key: bytes | None = None, - ): - """ - Convert data from serialized format to human-readable format. For SimpleSerializable components, this is a no-op. - Parameters: - x: Input data to deserialize - save_dir: Ignored - encryption_key: Ignored - """ - return x - - -class ImgSerializable(Serializable): - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - encryption_key: bytes | None = None, - ) -> str | None: - """ - Convert from human-friendly version of a file (string filepath) to a seralized - representation (base64). - Parameters: - x: String path to file to serialize - load_dir: Path to directory containing x - encryption_key: Used to encrypt the file - """ - if x is None or x == "": - return None - return processing_utils.encode_url_or_file_to_base64( - Path(load_dir) / x, encryption_key=encryption_key - ) - - def deserialize( - self, - x: str | None, - save_dir: str | Path | None = None, - encryption_key: bytes | None = None, - ) -> str | None: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). Optionally, save the file to the directory specified by save_dir - Parameters: - x: Base64 representation of image to deserialize into a string filepath - save_dir: Path to directory to save the deserialized image to - encryption_key: Used to decrypt the file - """ - if x is None or x == "": - return None - file = processing_utils.decode_base64_to_file( - x, dir=save_dir, encryption_key=encryption_key - ) - return file.name - - -class FileSerializable(Serializable): - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - encryption_key: bytes | None = None, - ) -> Dict | None: - """ - Convert from human-friendly version of a file (string filepath) to a - seralized representation (base64) - Parameters: - x: String path to file to serialize - load_dir: Path to directory containing x - encryption_key: Used to encrypt the file - """ - if x is None or x == "": - return None - filename = Path(load_dir) / x - return { - "name": filename, - "data": processing_utils.encode_url_or_file_to_base64( - filename, encryption_key=encryption_key - ), - "orig_name": Path(filename).name, - "is_file": False, - } - - def deserialize( - self, - x: str | Dict | None, - save_dir: Path | str | None = None, - encryption_key: bytes | None = None, - ) -> str | None: - """ - Convert from serialized representation of a file (base64) to a human-friendly - version (string filepath). 
Optionally, save the file to the directory specified by `save_dir` - Parameters: - x: Base64 representation of file to deserialize into a string filepath - save_dir: Path to directory to save the deserialized file to - encryption_key: Used to decrypt the file - """ - if x is None: - return None - if isinstance(save_dir, Path): - save_dir = str(save_dir) - if isinstance(x, str): - file_name = processing_utils.decode_base64_to_file( - x, dir=save_dir, encryption_key=encryption_key - ).name - elif isinstance(x, dict): - if x.get("is_file", False): - if utils.validate_url(x["name"]): - file_name = x["name"] - else: - file_name = processing_utils.create_tmp_copy_of_file( - x["name"], dir=save_dir - ).name - else: - file_name = processing_utils.decode_base64_to_file( - x["data"], dir=save_dir, encryption_key=encryption_key - ).name - else: - raise ValueError( - f"A FileSerializable component cannot only deserialize a string or a dict, not a: {type(x)}" - ) - return file_name - - -class JSONSerializable(Serializable): - def serialize( - self, - x: str | None, - load_dir: str | Path = "", - encryption_key: bytes | None = None, - ) -> Dict | None: - """ - Convert from a a human-friendly version (string path to json file) to a - serialized representation (json string) - Parameters: - x: String path to json file to read to get json string - load_dir: Path to directory containing x - encryption_key: Ignored - """ - if x is None or x == "": - return None - return processing_utils.file_to_json(Path(load_dir) / x) - - def deserialize( - self, - x: str | Dict, - save_dir: str | Path | None = None, - encryption_key: bytes | None = None, - ) -> str | None: - """ - Convert from serialized representation (json string) to a human-friendly - version (string path to json file). 
Optionally, save the file to the directory specified by `save_dir` - Parameters: - x: Json string - save_dir: Path to save the deserialized json file to - encryption_key: Ignored - """ - if x is None: - return None - return processing_utils.dict_or_str_to_json_file(x, dir=save_dir).name diff --git a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.164edf37.js b/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.164edf37.js deleted file mode 100644 index acd918a57d762720f7d388cabf14d7956b21ee62..0000000000000000000000000000000000000000 --- a/spaces/HighCWu/anime-colorization-with-hint/gradio-modified/gradio/templates/frontend/assets/index.164edf37.js +++ /dev/null @@ -1,4 +0,0 @@ -import{S as fe,i as ce,s as de,e as E,b as y,d as D,f as C,a8 as ye,l as F,n as H,A as ge,a as j,x as X,K as Ae,I as P,t as _e,h as he,a3 as rt,B as at,g as T,O as x,c as Z,m as G,L as $,j as V,k as Y,o as Q,F as qe,aa as N,w as ve,D as ie,ab as se,ac as ue,E as oe,R as it,T as st,U as ut,V as ot}from"./index.396f4a72.js";import{U as ft}from"./Upload.5d0148e8.js";import{d as ct}from"./dsv.7fe76a93.js";var De=Object.prototype.hasOwnProperty;function ee(n,e){var t,l;if(n===e)return!0;if(n&&e&&(t=n.constructor)===e.constructor){if(t===Date)return n.getTime()===e.getTime();if(t===RegExp)return n.toString()===e.toString();if(t===Array){if((l=n.length)===e.length)for(;l--&&ee(n[l],e[l]););return l===-1}if(!t||typeof n=="object"){l=0;for(t in n)if(De.call(n,t)&&++l&&!De.call(e,t)||!(t in e)||!ee(n[t],e[t]))return!1;return Object.keys(e).length===l}}return n!==n&&e!==e}function Le(n){let e,t,l;return{c(){e=E("input"),y(e,"class","absolute outline-none inset-2 bg-transparent border-0 translate-x-px flex-1 "),y(e,"tabindex","-1"),D(e,"translate-x-px",!n[3]),D(e,"font-bold",n[3])},m(a,i){C(a,e,i),ye(e,n[0]),n[8](e),t||(l=[F(e,"input",n[7]),F(e,"keydown",n[6]),F(e,"blur",ht)],t=!0)},p(a,i){i&1&&e.value!==a[0]&&ye(e,a[0]),i&8&&D(e,"translate-x-px",!a[3]),i&8&&D(e,"font-bold",a[3])},d(a){a&&H(e),n[8](null),t=!1,ge(l)}}}function dt(n){let e;return{c(){e=_e(n[0])},m(t,l){C(t,e,l)},p(t,l){l&1&&he(e,t[0])},d(t){t&&H(e)}}}function gt(n){let e,t;return{c(){e=new rt(!1),t=at(),e.a=t},m(l,a){e.m(n[0],l,a),C(l,t,a)},p(l,a){a&1&&e.p(l[0])},d(l){l&&H(t),l&&e.d()}}}function _t(n){let e,t,l,a,i=n[2]&&Le(n);function _(s,g){return s[4]==="markdown"||s[4]==="html"?gt:dt}let h=_(n),o=h(n);return{c(){i&&i.c(),e=j(),t=E("span"),o.c(),y(t,"tabindex","-1"),y(t,"role","button"),y(t,"class","p-2 outline-none border-0 flex-1"),D(t,"opacity-0",n[2]),D(t,"pointer-events-none",n[2])},m(s,g){i&&i.m(s,g),C(s,e,g),C(s,t,g),o.m(t,null),l||(a=F(t,"dblclick",n[5]),l=!0)},p(s,[g]){s[2]?i?i.p(s,g):(i=Le(s),i.c(),i.m(e.parentNode,e)):i&&(i.d(1),i=null),h===(h=_(s))&&o?o.p(s,g):(o.d(1),o=h(s),o&&(o.c(),o.m(t,null))),g&4&&D(t,"opacity-0",s[2]),g&4&&D(t,"pointer-events-none",s[2])},i:X,o:X,d(s){i&&i.d(s),s&&H(e),s&&H(t),o.d(),l=!1,a()}}}const ht=({currentTarget:n})=>n.setAttribute("tabindex","-1");function bt(n,e,t){let{edit:l}=e,{value:a=""}=e,{el:i}=e,{header:_=!1}=e,{datatype:h="str"}=e;function o(c){Ae.call(this,n,c)}function s(c){Ae.call(this,n,c)}function g(){a=this.value,t(0,a)}function w(c){P[c?"unshift":"push"](()=>{i=c,t(1,i)})}return n.$$set=c=>{"edit"in c&&t(2,l=c.edit),"value"in c&&t(0,a=c.value),"el"in c&&t(1,i=c.el),"header"in c&&t(3,_=c.header),"datatype"in c&&t(4,h=c.datatype)},[a,i,l,_,h,o,s,g,w]}class ze extends 
fe{constructor(e){super(),ce(this,e,bt,_t,de,{edit:2,value:0,el:1,header:3,datatype:4})}}function Me(n,e,t){const l=n.slice();return l[52]=e[t],l[54]=t,l}function Te(n,e,t){const l=n.slice();return l[55]=e[t].value,l[56]=e[t].id,l[57]=e,l[58]=t,l}function Ee(n,e,t){const l=n.slice();return l[55]=e[t].value,l[56]=e[t].id,l[59]=e,l[54]=t,l}function Be(n){let e,t;return{c(){e=E("p"),t=_e(n[1]),y(e,"class","text-gray-600 text-[0.855rem] mb-2 block dark:text-gray-200 relative z-40")},m(l,a){C(l,e,a),T(e,t)},p(l,a){a[0]&2&&he(t,l[1])},d(l){l&&H(e)}}}function Re(n){let e,t;return{c(){e=E("caption"),t=_e(n[1]),y(e,"class","sr-only")},m(l,a){C(l,e,a),T(e,t)},p(l,a){a[0]&2&&he(t,l[1])},d(l){l&&H(e)}}}function Ce(n,e){let t,l,a,i,_,h,o,s,g,w,c,b=e[56],f,v,O;function u(S){e[30](S,e[56])}function p(){return e[31](e[56])}let M={value:e[55],edit:e[13]===e[56],header:!0};e[10][e[56]].input!==void 0&&(M.el=e[10][e[56]].input),a=new ze({props:M}),P.push(()=>x(a,"el",u)),a.$on("keydown",e[21]),a.$on("dblclick",p);function m(){return e[32](e[54])}const R=()=>e[33](t,b),z=()=>e[33](null,b);return{key:n,first:null,c(){t=E("th"),l=E("div"),Z(a.$$.fragment),_=j(),h=E("div"),o=ve("svg"),s=ve("path"),w=j(),y(s,"d","M4.49999 0L8.3971 6.75H0.602875L4.49999 0Z"),y(o,"width","1em"),y(o,"height","1em"),y(o,"class","fill-current text-[10px]"),y(o,"viewBox","0 0 9 7"),y(o,"fill","none"),y(o,"xmlns","http://www.w3.org/2000/svg"),y(h,"class",g="flex flex-none items-center justify-center p-2 cursor-pointer leading-snug transform transition-all "+(e[12]!==e[54]?"text-gray-200 hover:text-gray-500":"text-orange-500")+" "+(e[12]===e[54]&&e[11]==="des"?"-scale-y-[1]":"")),D(h,"text-gray-200",e[12]!==e[54]),y(l,"class","min-h-[2.3rem] flex outline-none"),y(t,"class","p-0 relative focus-within:ring-1 ring-orange-500 ring-inset outline-none"),y(t,"aria-sort",c=e[15](e[55],e[12],e[11])),D(t,"bg-orange-50",e[13]===e[56]),D(t,"dark:bg-transparent",e[13]===e[56]),D(t,"rounded-tl-lg",e[54]===0),D(t,"rounded-tr-lg",e[54]===e[8].length-1),this.first=t},m(S,B){C(S,t,B),T(t,l),G(a,l,null),T(l,_),T(l,h),T(h,o),T(o,s),T(t,w),R(),f=!0,v||(O=F(h,"click",m),v=!0)},p(S,B){e=S;const I={};B[0]&256&&(I.value=e[55]),B[0]&8448&&(I.edit=e[13]===e[56]),!i&&B[0]&1280&&(i=!0,I.el=e[10][e[56]].input,$(()=>i=!1)),a.$set(I),(!f||B[0]&6400&&g!==(g="flex flex-none items-center justify-center p-2 cursor-pointer leading-snug transform transition-all "+(e[12]!==e[54]?"text-gray-200 hover:text-gray-500":"text-orange-500")+" "+(e[12]===e[54]&&e[11]==="des"?"-scale-y-[1]":"")))&&y(h,"class",g),B[0]&6400&&D(h,"text-gray-200",e[12]!==e[54]),(!f||B[0]&6400&&c!==(c=e[15](e[55],e[12],e[11])))&&y(t,"aria-sort",c),b!==e[56]&&(z(),b=e[56],R()),B[0]&8448&&D(t,"bg-orange-50",e[13]===e[56]),B[0]&8448&&D(t,"dark:bg-transparent",e[13]===e[56]),B[0]&256&&D(t,"rounded-tl-lg",e[54]===0),B[0]&256&&D(t,"rounded-tr-lg",e[54]===e[8].length-1)},i(S){f||(V(a.$$.fragment,S),f=!0)},o(S){Y(a.$$.fragment,S),f=!1},d(S){S&&H(t),Q(a),z(),v=!1,O()}}}function He(n,e){let t,l,a,i,_,h=e[56],o,s,g;function w(m){e[34](m,e[55],e[57],e[58])}function c(m){e[35](m,e[56])}let b={edit:e[6]===e[56],datatype:Array.isArray(e[0])?e[0][e[58]]:e[0]};e[55]!==void 0&&(b.value=e[55]),e[10][e[56]].input!==void 0&&(b.el=e[10][e[56]].input),a=new ze({props:b}),P.push(()=>x(a,"value",w)),P.push(()=>x(a,"el",c));const f=()=>e[36](t,h),v=()=>e[36](null,h);function O(){return e[37](e[56])}function u(){return e[38](e[56])}function p(){return e[39](e[56])}function M(...m){return 
e[40](e[54],e[58],e[56],...m)}return{key:n,first:null,c(){t=E("td"),l=E("div"),Z(a.$$.fragment),y(l,"class","min-h-[2.3rem] h-full outline-none flex items-center"),D(l,"border-transparent",e[7]!==e[56]),y(t,"tabindex","0"),y(t,"class","outline-none focus-within:ring-1 ring-orange-500 ring-inset focus-within:bg-orange-50 dark:focus-within:bg-gray-800 group-last:first:rounded-bl-lg group-last:last:rounded-br-lg relative"),this.first=t},m(m,R){C(m,t,R),T(t,l),G(a,l,null),f(),o=!0,s||(g=[F(t,"touchstart",O,{passive:!0}),F(t,"click",u),F(t,"dblclick",p),F(t,"keydown",M)],s=!0)},p(m,R){e=m;const z={};R[0]&576&&(z.edit=e[6]===e[56]),R[0]&513&&(z.datatype=Array.isArray(e[0])?e[0][e[58]]:e[0]),!i&&R[0]&512&&(i=!0,z.value=e[55],$(()=>i=!1)),!_&&R[0]&1536&&(_=!0,z.el=e[10][e[56]].input,$(()=>_=!1)),a.$set(z),R[0]&640&&D(l,"border-transparent",e[7]!==e[56]),h!==e[56]&&(v(),h=e[56],f())},i(m){o||(V(a.$$.fragment,m),o=!0)},o(m){Y(a.$$.fragment,m),o=!1},d(m){m&&H(t),Q(a),v(),s=!1,ge(g)}}}function Oe(n,e){let t,l=[],a=new Map,i,_,h=e[52];const o=s=>s[56];for(let s=0;su[56];for(let u=0;uu[52];for(let u=0;u