diff --git a/spaces/0x90e/ESRGAN-MANGA/inference_manga_v2.py b/spaces/0x90e/ESRGAN-MANGA/inference_manga_v2.py deleted file mode 100644 index cd2bf3461031e152dcf9b9735a6ddaead63664b7..0000000000000000000000000000000000000000 --- a/spaces/0x90e/ESRGAN-MANGA/inference_manga_v2.py +++ /dev/null @@ -1,46 +0,0 @@ -import sys -import cv2 -import numpy as np -import torch -import ESRGAN.architecture as arch -from ESRGANer import ESRGANer - -def is_cuda(): - if torch.cuda.is_available(): - return True - else: - return False - -model_path = 'models/4x_eula_digimanga_bw_v2_nc1_307k.pth' -OUTPUT_PATH = sys.argv[1] -device = torch.device('cuda' if is_cuda() else 'cpu') - -model = arch.RRDB_Net(1, 1, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv') - -if is_cuda(): - print("Using GPU ๐Ÿฅถ") - model.load_state_dict(torch.load(model_path), strict=True) -else: - print("Using CPU ๐Ÿ˜’") - model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=True) - -model.eval() - -for k, v in model.named_parameters(): - v.requires_grad = False -model = model.to(device) - -# Read image -img = cv2.imread(OUTPUT_PATH, cv2.IMREAD_GRAYSCALE) -img = img * 1.0 / 255 -img = torch.from_numpy(img[np.newaxis, :, :]).float() -img_LR = img.unsqueeze(0) -img_LR = img_LR.to(device) - -upsampler = ESRGANer(model=model) -output = upsampler.enhance(img_LR) - -output = output.squeeze(dim=0).float().cpu().clamp_(0, 1).numpy() -output = np.transpose(output, (1, 2, 0)) -output = (output * 255.0).round() -cv2.imwrite(OUTPUT_PATH, output, [int(cv2.IMWRITE_PNG_COMPRESSION), 5]) diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/__init__.py deleted file mode 100644 index 11847fb5e5bf54caaf181e8bbe9e88b01f971a7c..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/you/__init__.py +++ /dev/null @@ -1,127 +0,0 @@ -import json -import re -from typing import Optional, List, Dict, Any -from uuid import uuid4 - -from fake_useragent import UserAgent -from pydantic import BaseModel -from requests import RequestException -from retrying import retry -from tls_client import Session -from tls_client.response import Response - - -class YouResponse(BaseModel): - text: Optional[str] = None - links: List[str] = [] - extra: Dict[str, Any] = {} - - -class Completion: - @staticmethod - def create( - prompt: str, - page: int = 1, - count: int = 10, - safe_search: str = 'Moderate', - on_shopping_page: bool = False, - mkt: str = '', - response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches', - domain: str = 'youchat', - query_trace_id: str = None, - chat: list = None, - include_links: bool = False, - detailed: bool = False, - debug: bool = False, - proxy: Optional[str] = None, - ) -> YouResponse: - if chat is None: - chat = [] - - proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else {} - - client = Session(client_identifier='chrome_108') - client.headers = Completion.__get_headers() - client.proxies = proxies - - params = { - 'q': prompt, - 'page': page, - 'count': count, - 'safeSearch': safe_search, - 'onShoppingPage': on_shopping_page, - 'mkt': mkt, - 'responseFilter': response_filter, - 'domain': domain, - 'queryTraceId': str(uuid4()) if query_trace_id is None else query_trace_id, - 'chat': str(chat), # {'question':'','answer':' ''} - } - - try: - response = Completion.__make_request(client, params) - except Exception: - 
return Completion.__get_failure_response() - - if debug: - print('\n\n------------------\n\n') - print(response.text) - print('\n\n------------------\n\n') - - you_chat_serp_results = re.search( - r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text - ).group() - third_party_search_results = re.search( - r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text - ).group() - # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0] - - text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text)) - - extra = { - 'youChatSerpResults': json.loads(you_chat_serp_results), - # 'slots' : loads(slots) - } - - response = YouResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"')) - if include_links: - response.links = json.loads(third_party_search_results)['search']['third_party_search_results'] - - if detailed: - response.extra = extra - - return response - - @staticmethod - def __get_headers() -> dict: - return { - 'authority': 'you.com', - 'accept': 'text/event-stream', - 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control': 'no-cache', - 'referer': 'https://you.com/search?q=who+are+you&tbm=youchat', - 'sec-ch-ua': '"Not_A Brand";v="99", "Google Chrome";v="109", "Chromium";v="109"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-origin', - 'cookie': f'safesearch_guest=Moderate; uuid_guest={str(uuid4())}', - 'user-agent': UserAgent().random, - } - - @staticmethod - def __get_failure_response() -> YouResponse: - return YouResponse(text='Unable to fetch the response, Please try again.') - - @staticmethod - @retry( - wait_fixed=5000, - stop_max_attempt_number=5, - retry_on_exception=lambda e: isinstance(e, RequestException), - ) - def __make_request(client: Session, params: dict) -> Response: - response = client.get(f'https://you.com/api/streamingSearch', params=params) - if 'youChatToken' not in response.text: - print('retry') - raise RequestException('Unable to get the response from server') - return response diff --git a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/README.md b/spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/README.md deleted file mode 100644 index 67e8645ced188f048308ad80accee8ef900ef6ef..0000000000000000000000000000000000000000 --- a/spaces/101-5/gpt4free/g4f/.v1/unfinished/bard/README.md +++ /dev/null @@ -1,2 +0,0 @@ -to do: -- code refractoring \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Download and Print 3D Maps with 3D Map Generator for Free.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Download and Print 3D Maps with 3D Map Generator for Free.md deleted file mode 100644 index 6e05d4ef3b2719df7756a928e74d7000af53b923..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Create Download and Print 3D Maps with 3D Map Generator for Free.md +++ /dev/null @@ -1,44 +0,0 @@ -
-

How to Create Stunning 3D Maps for Free with 3D Map Generator

- -

Have you ever wanted to create realistic 3D maps of any place on earth, without any special skills or software? Maybe you need a 3D map for a game, a presentation, a website, or a 3D print. Or maybe you just want to have fun and explore the world in 3D.

-

- -

If so, you're in luck. In this article, we'll show you how to use 3D Map Generator, a Photoshop plugin that lets you generate 3D maps from heightmaps. You can download it for free and use it to create amazing 3D maps in minutes.

- -

What is 3D Map Generator?

- -

3D Map Generator is a Photoshop plugin that allows you to create 3D maps from heightmaps. A heightmap is a grayscale image that represents the elevation of the terrain. The darker the pixel, the lower the elevation. The lighter the pixel, the higher the elevation.
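
    To make the idea concrete, here is a minimal Python sketch (separate from the plugin itself) of how a grayscale heightmap maps pixel brightness to elevation. The file name and the maximum elevation are assumed example values:

    import cv2
    import numpy as np

    # Assumed example input: any grayscale heightmap image.
    heightmap = cv2.imread("heightmap.png", cv2.IMREAD_GRAYSCALE)

    # Map pixel brightness (0 = black, 255 = white) to elevation.
    # The 1000 m maximum is an assumed scale for illustration only.
    max_elevation_m = 1000.0
    elevation = heightmap.astype(np.float32) / 255.0 * max_elevation_m

    print("lowest point:", elevation.min(), "m")
    print("highest point:", elevation.max(), "m")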

- -

With 3D Map Generator, you can easily convert any heightmap into a 3D map with realistic textures, lighting, and shadows. You can also customize your map with various tools and settings, such as water level, snow cover, vegetation, roads, buildings, and more.

- -

3D Map Generator works with Photoshop CC-2014 and newer, on PC or Mac. You can download it for free from Graphicriver. The free version has some limitations, such as the maximum map size (1000 x 1000 pixels) and the number of textures (10). If you want to unlock more features and options, you can upgrade to the pro version.

- -

How to Use 3D Map Generator?

- -

Using 3D Map Generator is very easy and intuitive. Here are the basic steps to create your own 3D map:

-

- -
    -
  1. Download and install 3D Map Generator

    First, you need to download and install 3D Map Generator on your computer. You can get it from Graphicriver. After downloading the ZIP file, extract it and run the installer, then follow the instructions to install the plugin in Photoshop.

    - -
  2. Open Photoshop and create a new document

    Next, open Photoshop and create a new document with the size of your desired map. For example, if you want to create a map with a resolution of 1000 x 1000 pixels, create a document with that size. Make sure the color mode is RGB and the background is white.

    - -
  3. Load a heightmap

    Now you need to load a heightmap into your document. You can use any heightmap that you have or find online. There are many websites that offer free heightmaps of different places on earth, such as Maps 3D or 3D-Mapper. You can also create your own heightmap with World Machine, software that lets you generate realistic terrains.
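
    If you just want a quick heightmap to experiment with, and none of these tools are at hand, a rough synthetic one can be generated in Python by blurring random noise. This is only a stand-in for testing, not a real-world elevation model:

    import cv2
    import numpy as np

    # Smooth random noise so it resembles rolling terrain.
    rng = np.random.default_rng(0)
    noise = rng.random((1000, 1000)).astype(np.float32)
    heightmap = cv2.GaussianBlur(noise, (0, 0), sigmaX=25)

    # Stretch to the full 0-255 grayscale range and save as a PNG.
    heightmap = cv2.normalize(heightmap, None, 0, 255, cv2.NORM_MINMAX)
    cv2.imwrite("test_heightmap.png", heightmap.astype(np.uint8))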

    - -

    To load a heightmap into your document, go to File > Place Embedded and select the heightmap image file. Resize and position it to fit your document. Then press Enter to place it.

    - -
  4. Run 3D Map Generator

    Now it's time to run 3D Map Generator and turn your heightmap into a 3D map. Go to Window > Extensions > 3D Map Generator - Terrain. A new panel will appear on your screen with various options and tools.

    - -

    The first thing you need to do is click on the Generate button at the top of the panel. This will create a 3D map based on your heightmap. You can see the result in a new window that pops up.

    - -
  5. Customize your map

    Now you can customize your map with various tools and settings in the panel. You can change the water level, snow cover, vegetation density, road width, building height, and more. You can also add labels, icons, logos, or text to your map.

    - -

    To use these tools and settings, simply select them in the panel and adjust their values until the map looks the way you want.

    -
    -
    \ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fastgsm S3g 1.0.0.42 Free Download ((NEW)).md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fastgsm S3g 1.0.0.42 Free Download ((NEW)).md deleted file mode 100644 index d53bf6f4b354fe73415cce30f4dfb89558e8e7a0..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fastgsm S3g 1.0.0.42 Free Download ((NEW)).md +++ /dev/null @@ -1,187 +0,0 @@ -
    -

    What is Fastgsm s3g 1.0.0.42 and why do you need it?

    -

    If you own a Samsung phone that is locked to a specific network carrier, you may have encountered some limitations or inconveniences when using your device.

    -

    For example, you may not be able to use your phone with a different SIM card from another carrier, which can be frustrating if you travel abroad frequently or want to switch to a cheaper or better plan.

    -

    -

    Or you may have to pay a hefty fee or wait for a long time to get your phone unlocked by your carrier, which can be annoying if you want to sell your phone or give it away.

    -

    Fortunately, there is a solution that can help you unlock your Samsung phone quickly, easily, and affordably.

    -

    That solution is called Fastgsm s3g 1.0.0.42.

    Fastgsm s3g 1.0.0.42 is a software program that allows you to unlock your Samsung phone from any network carrier in the world, using a simple USB cable and a computer.

    -

    It works by generating an unlock code for your phone model and network, which you can enter on your phone screen to remove the network lock permanently.

    -

    Fastgsm s3g 1.0.0.42 is compatible with most Samsung phone models, including the popular Galaxy S, Note, and A series, as well as older models like the E, J, and C series.

    -

    By using Fastgsm s3g 1.0.0.42 to unlock your Samsung phone, you can enjoy the following benefits:

    - Use your phone with any SIM card from any carrier, at home or abroad.
    - Save money by avoiding carrier unlocking fees and expensive roaming charges.
    - Save time: the unlock takes minutes instead of days of waiting on your carrier.
    - Skip the hassle when selling or giving away your phone, since an unlocked phone works for any new owner.

    As you can see, Fastgsm s3g 1.0.0.42 is a powerful and convenient tool that can help you unlock your Samsung phone with ease and confidence.

    -

    But how do you get it and use it? That's what we will show you in the next sections of this article.

    -

    -

    How to download and install Fastgsm s3g 1.0.0.42 for free?

    -

    If you want to use Fastgsm s3g 1.0.0.42 to unlock your Samsung phone, you need to download and install it on your computer first.

    -

    Here are the steps you need to follow:

    -

    Requirements

    -

    Before you download and install Fastgsm s3g 1.0.0.42, make sure you have the following requirements:

    - A Windows computer (the software is distributed as a Windows .exe installer).
    - A compatible Samsung phone (check the supported models on the website).
    - A USB cable to connect your phone to the computer.

    Download

    -

    To download Fastgsm s3g 1.0.0.42 for free, you need to visit the official Fastgsm website.

    -

    You will see a download button on the homepage that will direct you to a page where you can choose your Samsung phone model from a drop-down menu.

    -

    Select your phone model and click on the download button again to start downloading the software file.

    -

    The file name will be something like fastgsms3g-1-0-0-42.exe, and the file size will be around 10 MB.

    -

    Once the download is complete, you need to verify the file integrity and security before installing it.

    -

    You can do this by checking the file properties and comparing the file hash with the one provided on the website.

    -

    The file hash is a unique code that identifies the file and ensures that it has not been tampered with or corrupted during the download process.

    -

    To check the file hash, you can use a free online hash calculator, or compute the hash locally (a short sketch follows below).

    -

    Simply upload the file or enter its URL, and select the SHA-256 algorithm from the options.

    -

    The tool will generate a hash code for the file and display it on the screen.

    -

    You need to compare this hash code with the one provided on the website, which should be something like d9f5c7f8f9b4c8e6f7d6e9c8f7e6d9f5c7f8f9b4c8e6f7d6e9c8f7e6d9f5c7f8.

    -

    If the hash codes match, it means that the file is authentic and safe to install.

    -

    If the hash codes do not match, it means that the file is corrupted or malicious, and you should delete it and download it again from a different source.
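
    If you prefer not to upload the file to an online tool, the SHA-256 hash can also be computed locally. Here is a small Python sketch; the file name is the assumed example from the download step above:

    import hashlib

    # Assumed example file name from the download step.
    path = "fastgsms3g-1-0-0-42.exe"

    sha256 = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in chunks so large files need not fit in memory.
        for chunk in iter(lambda: f.read(8192), b""):
            sha256.update(chunk)

    # Compare this value against the hash published on the website.
    print(sha256.hexdigest())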

    -

    Install

    -

    To install Fastgsm s3g 1.0.0.42 on your computer, you need to follow these steps:

    -
      -
    1. Double-click on the downloaded file to launch the installation wizard.
    2. Click on the Next button to proceed with the installation.
    3. Read and accept the license agreement, and click on the Next button again.
    4. Choose the destination folder where you want to install the software, and click on the Next button.
    5. Click on the Install button to start the installation process.
    6. Wait for a few minutes until the installation is complete, and click on the Finish button to exit the wizard.
    -

    Congratulations! You have successfully installed Fastgsm s3g 1.0.0.42 on your computer.

    -

    You can now use it to unlock your Samsung phone in a matter of minutes.

    -

    How to use Fastgsm s3g 1.0.0.42 to unlock your Samsung phone?

    -

    Now that you have downloaded and installed Fastgsm s3g 1.0.0.42 on your computer, you are ready to use it to unlock your Samsung phone.

    -

    Here are the steps you need to follow:

    -

    Connect

    -

    The first step is to connect your Samsung phone to your computer using a USB cable.

    -

    Make sure that your phone is turned on and has enough battery power.

    -

    You also need to enable USB debugging mode on your phone, which allows your computer to communicate with your phone and access its data.

    -

    To enable USB debugging mode, you need to do the following:

    - Open Settings and go to About phone (or About device).
    - Tap Build number seven times to enable Developer options (the exact menu names vary slightly between Samsung models).
    - Go back to Settings, open Developer options, and turn on USB debugging.

    You have now enabled USB debugging mode on your phone.
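
    As an optional sanity check (not part of Fastgsm s3g 1.0.0.42 itself), you can confirm that the computer actually sees the phone over USB with Android's adb tool, assuming the Android platform-tools are installed:

    import subprocess

    # Requires adb (Android platform-tools) to be on the PATH.
    # A phone with USB debugging enabled should be listed as "device".
    result = subprocess.run(["adb", "devices"], capture_output=True, text=True)
    print(result.stdout)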

    -

    Detect

    -

    The next step is to launch Fastgsm s3g 1.0.0.42 on your computer and let it detect your phone model and network lock status automatically.

    -

    To do this, you need to do the following:

    - Launch Fastgsm s3g 1.0.0.42 from your desktop or Start menu.
    - Wait a few moments while the program scans the USB connection for your phone.
    - Check that your phone model and network lock status appear on the screen.

    You have now detected your phone model and network lock status using Fastgsm s3g 1.0.0.42.

    -

    Unlock

    -


    The final step is to select the unlock option and enter the unlock code provided by Fastgsm s3g 1.0.0.42, and confirm the unlock success message on your phone screen.

    -

    To do this, you need to do the following:

    - Select the unlock option in the program and wait for it to generate the unlock code for your phone.
    - Enter the unlock code on your phone when prompted.
    - Confirm that the unlock success message appears on your phone screen.

    You have now unlocked your Samsung phone using Fastgsm s3g 1.0.0.42.

    -

    You can now remove the USB cable from your phone and computer, and restart your phone.

    -

    You can also insert a different SIM card from another carrier and check if your phone works normally with it.

    -

    You should see a signal strength indicator and a network name on your phone screen, indicating that your phone is unlocked and ready to use with any SIM card.

    -

    How to troubleshoot common issues with Fastgsm s3g 1.0.0.42?

    -

    Although Fastgsm s3g 1.0.0.42 is designed to be easy and reliable to use, you may encounter some issues or problems when using it to unlock your Samsung phone.

    -

    Here are some common error messages or problems that may occur, and how to solve them:

    -

    Invalid unlock code

    -

    If you enter the unlock code provided by Fastgsm s3g 1.0.0.42 on your phone, but it says that it is invalid or incorrect, there are several possible reasons:

    - -

    To solve this problem, you can try the following solutions:

    - -

    Connection failure

    -

    If you connect your Samsung phone to your computer using a USB cable, but Fastgsm s3g 1.0.0.42 does not detect it or fails to communicate with it, there are several possible reasons:

    - -

    To solve this problem, you can try the following solutions:

    - -

    Device not supported

    -

    If you launch Fastgsm s3g 1.0.0.42 on your computer and select your Samsung phone from the list of devices, but it says that it is not supported by the software, there are several possible reasons:

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_nms_rotated.py b/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_nms_rotated.py deleted file mode 100644 index 50717571dcf47b6e216a2a434a44e3cb081a0cd9..0000000000000000000000000000000000000000 --- a/spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/tests/test_nms_rotated.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from __future__ import absolute_import, division, print_function, unicode_literals -import unittest -import torch -from torchvision import ops - -from detectron2.layers import batched_nms, batched_nms_rotated, nms_rotated - - -class TestNMSRotated(unittest.TestCase): - def reference_horizontal_nms(self, boxes, scores, iou_threshold): - """ - Args: - box_scores (N, 5): boxes in corner-form and probabilities. - (Note here 5 == 4 + 1, i.e., 4-dim horizontal box + 1-dim prob) - iou_threshold: intersection over union threshold. - Returns: - picked: a list of indexes of the kept boxes - """ - picked = [] - _, indexes = scores.sort(descending=True) - while len(indexes) > 0: - current = indexes[0] - picked.append(current.item()) - if len(indexes) == 1: - break - current_box = boxes[current, :] - indexes = indexes[1:] - rest_boxes = boxes[indexes, :] - iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1) - indexes = indexes[iou <= iou_threshold] - - return torch.as_tensor(picked) - - def _create_tensors(self, N): - boxes = torch.rand(N, 4) * 100 - # Note: the implementation of this function in torchvision is: - # boxes[:, 2:] += torch.rand(N, 2) * 100 - # but it does not guarantee non-negative widths/heights constraints: - # boxes[:, 2] >= boxes[:, 0] and boxes[:, 3] >= boxes[:, 1]: - boxes[:, 2:] += boxes[:, :2] - scores = torch.rand(N) - return boxes, scores - - def test_batched_nms_rotated_0_degree_cpu(self): - # torch.manual_seed(0) - N = 2000 - num_classes = 50 - boxes, scores = self._create_tensors(N) - idxs = torch.randint(0, num_classes, (N,)) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" - for iou in [0.2, 0.5, 0.8]: - backup = boxes.clone() - keep_ref = batched_nms(boxes, scores, idxs, iou) - assert torch.allclose(boxes, backup), "boxes modified by batched_nms" - backup = rotated_boxes.clone() - keep = batched_nms_rotated(rotated_boxes, scores, idxs, iou) - assert torch.allclose( - rotated_boxes, backup - ), "rotated_boxes modified by batched_nms_rotated" - assert torch.equal(keep, keep_ref), err_msg.format(iou) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_batched_nms_rotated_0_degree_cuda(self): - # torch.manual_seed(0) - N = 2000 - num_classes = 50 - boxes, scores = self._create_tensors(N) - idxs = torch.randint(0, num_classes, (N,)) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - err_msg = "Rotated NMS with 0 degree is incompatible with horizontal NMS for IoU={}" - for iou in [0.2, 0.5, 0.8]: - backup = boxes.clone() - keep_ref = 
batched_nms(boxes.cuda(), scores.cuda(), idxs, iou) - assert torch.allclose(boxes, backup), "boxes modified by batched_nms" - backup = rotated_boxes.clone() - keep = batched_nms_rotated(rotated_boxes.cuda(), scores.cuda(), idxs, iou) - assert torch.allclose( - rotated_boxes, backup - ), "rotated_boxes modified by batched_nms_rotated" - assert torch.equal(keep, keep_ref), err_msg.format(iou) - - def test_nms_rotated_0_degree_cpu(self): - N = 1000 - boxes, scores = self._create_tensors(N) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" - for iou in [0.5]: - keep_ref = self.reference_horizontal_nms(boxes, scores, iou) - keep = nms_rotated(rotated_boxes, scores, iou) - assert torch.equal(keep, keep_ref), err_msg.format(iou) - - def test_nms_rotated_90_degrees_cpu(self): - N = 1000 - boxes, scores = self._create_tensors(N) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - # Note for rotated_boxes[:, 2] and rotated_boxes[:, 3]: - # widths and heights are intentionally swapped here for 90 degrees case - # so that the reference horizontal nms could be used - rotated_boxes[:, 2] = boxes[:, 3] - boxes[:, 1] - rotated_boxes[:, 3] = boxes[:, 2] - boxes[:, 0] - - rotated_boxes[:, 4] = torch.ones(N) * 90 - err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" - for iou in [0.2, 0.5, 0.8]: - keep_ref = self.reference_horizontal_nms(boxes, scores, iou) - keep = nms_rotated(rotated_boxes, scores, iou) - assert torch.equal(keep, keep_ref), err_msg.format(iou) - - def test_nms_rotated_180_degrees_cpu(self): - N = 1000 - boxes, scores = self._create_tensors(N) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - rotated_boxes[:, 4] = torch.ones(N) * 180 - err_msg = "Rotated NMS incompatible between CPU and reference implementation for IoU={}" - for iou in [0.2, 0.5, 0.8]: - keep_ref = self.reference_horizontal_nms(boxes, scores, iou) - keep = nms_rotated(rotated_boxes, scores, iou) - assert torch.equal(keep, keep_ref), err_msg.format(iou) - - @unittest.skipIf(not torch.cuda.is_available(), "CUDA not available") - def test_nms_rotated_0_degree_cuda(self): - N = 1000 - boxes, scores = self._create_tensors(N) - rotated_boxes = torch.zeros(N, 5) - rotated_boxes[:, 0] = (boxes[:, 0] + boxes[:, 2]) / 2.0 - rotated_boxes[:, 1] = (boxes[:, 1] + boxes[:, 3]) / 2.0 - rotated_boxes[:, 2] = boxes[:, 2] - boxes[:, 0] - rotated_boxes[:, 3] = boxes[:, 3] - boxes[:, 1] - err_msg = "Rotated NMS incompatible between CPU and CUDA for IoU={}" - - for iou in [0.2, 0.5, 0.8]: - r_cpu = nms_rotated(rotated_boxes, scores, iou) - r_cuda = nms_rotated(rotated_boxes.cuda(), scores.cuda(), iou) - - assert torch.equal(r_cpu, r_cuda.cpu()), err_msg.format(iou) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildCompilerTargets.cmake b/spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildCompilerTargets.cmake deleted file mode 100644 index 
86016059da2e4e1bc637f6c7f55a4500c96a5c91..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/dependencies/cub/cmake/CubBuildCompilerTargets.cmake +++ /dev/null @@ -1,102 +0,0 @@ -# -# This file defines the `cub_build_compiler_targets()` function, which -# creates the following interface targets: -# -# cub.compiler_interface -# - Interface target providing compiler-specific options needed to build -# Thrust's tests, examples, etc. - -function(cub_build_compiler_targets) - set(cxx_compile_definitions) - set(cxx_compile_options) - - if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}") - # TODO Enable /Wall - append_option_if_available("/WX" cxx_compile_options) - - # Disabled loss-of-data conversion warnings. - # TODO Re-enable. - append_option_if_available("/wd4244" cxx_compile_options) - append_option_if_available("/wd4267" cxx_compile_options) - - # Suppress numeric conversion-to-bool warnings. - # TODO Re-enable. - append_option_if_available("/wd4800" cxx_compile_options) - - # Disable warning about applying unary operator- to unsigned type. - append_option_if_available("/wd4146" cxx_compile_options) - - # Some tests require /bigobj to fit everything into their object files: - append_option_if_available("/bigobj" cxx_compile_options) - else() - append_option_if_available("-Werror" cxx_compile_options) - append_option_if_available("-Wall" cxx_compile_options) - append_option_if_available("-Wextra" cxx_compile_options) - append_option_if_available("-Winit-self" cxx_compile_options) - append_option_if_available("-Woverloaded-virtual" cxx_compile_options) - append_option_if_available("-Wcast-qual" cxx_compile_options) - append_option_if_available("-Wno-cast-align" cxx_compile_options) - append_option_if_available("-Wno-long-long" cxx_compile_options) - append_option_if_available("-Wno-variadic-macros" cxx_compile_options) - append_option_if_available("-Wno-unused-function" cxx_compile_options) - append_option_if_available("-Wno-unused-variable" cxx_compile_options) - - # CUB uses deprecated texture functions (cudaBindTexture, etc). These - # need to be replaced, but silence the warnings for now. - append_option_if_available("-Wno-deprecated-declarations" cxx_compile_options) - endif() - - if ("GNU" STREQUAL "${CMAKE_CXX_COMPILER_ID}") - if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 4.5) - # This isn't available until GCC 4.3, and misfires on TMP code until - # GCC 4.5. - append_option_if_available("-Wlogical-op" cxx_compile_options) - endif() - - if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 7.3) - # GCC 7.3 complains about name mangling changes due to `noexcept` - # becoming part of the type system; we don't care. - append_option_if_available("-Wno-noexcept-type" cxx_compile_options) - endif() - endif() - - if (("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}") OR - ("XL" STREQUAL "${CMAKE_CXX_COMPILER_ID}")) - # xlC and Clang warn about unused parameters in uninstantiated templates. - # This causes xlC to choke on the OMP backend, which is mostly #ifdef'd out - # (and thus has unused parameters) when you aren't using it. - append_option_if_available("-Wno-unused-parameters" cxx_compile_options) - endif() - - if ("Clang" STREQUAL "${CMAKE_CXX_COMPILER_ID}") - # -Wunneeded-internal-declaration misfires in the unit test framework - # on older versions of Clang. 
- append_option_if_available("-Wno-unneeded-internal-declaration" cxx_compile_options) - endif() - - add_library(cub.compiler_interface INTERFACE) - - foreach (cxx_option IN LISTS cxx_compile_options) - target_compile_options(cub.compiler_interface INTERFACE - $<$:${cxx_option}> - # Only use -Xcompiler with NVCC, not Feta. - # - # CMake can't split genexs, so this can't be formatted better :( - # This is: - # if (using CUDA and CUDA_COMPILER is NVCC) add -Xcompiler=opt: - $<$,$>:-Xcompiler=${cxx_option}> - ) - endforeach() - - # Add these for both CUDA and CXX targets: - target_compile_definitions(cub.compiler_interface INTERFACE - ${cxx_compile_definitions} - ) - - # Promote warnings and display diagnostic numbers for nvcc: - target_compile_options(cub.compiler_interface INTERFACE - # If using CUDA w/ NVCC... - $<$,$>:-Xcudafe=--display_error_number> - $<$,$>:-Xcudafe=--promote_warnings> - ) -endfunction() diff --git a/spaces/CVPR/MonoScene/monoscene/__init__.py b/spaces/CVPR/MonoScene/monoscene/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/CVPR/WALT/mmdet/models/detectors/cascade_rcnn.py b/spaces/CVPR/WALT/mmdet/models/detectors/cascade_rcnn.py deleted file mode 100644 index d873dceb7e4efdf8d1e7d282badfe9b7118426b9..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/detectors/cascade_rcnn.py +++ /dev/null @@ -1,46 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class CascadeRCNN(TwoStageDetector): - r"""Implementation of `Cascade R-CNN: Delving into High Quality Object - Detection `_""" - - def __init__(self, - backbone, - neck=None, - rpn_head=None, - roi_head=None, - train_cfg=None, - test_cfg=None, - pretrained=None): - super(CascadeRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) - - def show_result(self, data, result, **kwargs): - """Show prediction results of the detector. - - Args: - data (str or np.ndarray): Image filename or loaded image. - result (Tensor or tuple): The results to draw over `img` - bbox_result or (bbox_result, segm_result). - - Returns: - np.ndarray: The image with bboxes drawn on it. - """ - if self.with_mask: - ms_bbox_result, ms_segm_result = result - if isinstance(ms_bbox_result, dict): - result = (ms_bbox_result['ensemble'], - ms_segm_result['ensemble']) - else: - if isinstance(result, dict): - result = result['ensemble'] - return super(CascadeRCNN, self).show_result(data, result, **kwargs) diff --git a/spaces/CVPR/WALT/mmdet/models/necks/fpg.py b/spaces/CVPR/WALT/mmdet/models/necks/fpg.py deleted file mode 100644 index c8e0d163ccf8cef6211530ba6c1b4d558ff6403f..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/mmdet/models/necks/fpg.py +++ /dev/null @@ -1,398 +0,0 @@ -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import ConvModule, caffe2_xavier_init, constant_init, is_norm - -from ..builder import NECKS - - -class Transition(nn.Module): - """Base class for transition. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. 
- """ - - def __init__(self, in_channels, out_channels): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - - def forward(x): - pass - - -class UpInterpolationConv(Transition): - """A transition used for up-sampling. - - Up-sample the input by interpolation then refines the feature by - a convolution layer. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - scale_factor (int): Up-sampling factor. Default: 2. - mode (int): Interpolation mode. Default: nearest. - align_corners (bool): Whether align corners when interpolation. - Default: None. - kernel_size (int): Kernel size for the conv. Default: 3. - """ - - def __init__(self, - in_channels, - out_channels, - scale_factor=2, - mode='nearest', - align_corners=None, - kernel_size=3, - **kwargs): - super().__init__(in_channels, out_channels) - self.mode = mode - self.scale_factor = scale_factor - self.align_corners = align_corners - self.conv = ConvModule( - in_channels, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - **kwargs) - - def forward(self, x): - x = F.interpolate( - x, - scale_factor=self.scale_factor, - mode=self.mode, - align_corners=self.align_corners) - x = self.conv(x) - return x - - -class LastConv(Transition): - """A transition used for refining the output of the last stage. - - Args: - in_channels (int): Number of input channels. - out_channels (int): Number of output channels. - num_inputs (int): Number of inputs of the FPN features. - kernel_size (int): Kernel size for the conv. Default: 3. - """ - - def __init__(self, - in_channels, - out_channels, - num_inputs, - kernel_size=3, - **kwargs): - super().__init__(in_channels, out_channels) - self.num_inputs = num_inputs - self.conv_out = ConvModule( - in_channels, - out_channels, - kernel_size, - padding=(kernel_size - 1) // 2, - **kwargs) - - def forward(self, inputs): - assert len(inputs) == self.num_inputs - return self.conv_out(inputs[-1]) - - -@NECKS.register_module() -class FPG(nn.Module): - """FPG. - - Implementation of `Feature Pyramid Grids (FPG) - `_. - This implementation only gives the basic structure stated in the paper. - But users can implement different type of transitions to fully explore the - the potential power of the structure of FPG. - - Args: - in_channels (int): Number of input channels (feature maps of all levels - should have the same channels). - out_channels (int): Number of output channels (used at each scale) - num_outs (int): Number of output scales. - stack_times (int): The number of times the pyramid architecture will - be stacked. - paths (list[str]): Specify the path order of each stack level. - Each element in the list should be either 'bu' (bottom-up) or - 'td' (top-down). - inter_channels (int): Number of inter channels. - same_up_trans (dict): Transition that goes down at the same stage. - same_down_trans (dict): Transition that goes up at the same stage. - across_lateral_trans (dict): Across-pathway same-stage - across_down_trans (dict): Across-pathway bottom-up connection. - across_up_trans (dict): Across-pathway top-down connection. - across_skip_trans (dict): Across-pathway skip connection. - output_trans (dict): Transition that trans the output of the - last stage. - start_level (int): Index of the start input backbone level used to - build the feature pyramid. Default: 0. - end_level (int): Index of the end input backbone level (exclusive) to - build the feature pyramid. Default: -1, which means the last level. 
- add_extra_convs (bool): It decides whether to add conv - layers on top of the original feature maps. Default to False. - If True, its actual mode is specified by `extra_convs_on_inputs`. - norm_cfg (dict): Config dict for normalization layer. Default: None. - """ - - transition_types = { - 'conv': ConvModule, - 'interpolation_conv': UpInterpolationConv, - 'last_conv': LastConv, - } - - def __init__(self, - in_channels, - out_channels, - num_outs, - stack_times, - paths, - inter_channels=None, - same_down_trans=None, - same_up_trans=dict( - type='conv', kernel_size=3, stride=2, padding=1), - across_lateral_trans=dict(type='conv', kernel_size=1), - across_down_trans=dict(type='conv', kernel_size=3), - across_up_trans=None, - across_skip_trans=dict(type='identity'), - output_trans=dict(type='last_conv', kernel_size=3), - start_level=0, - end_level=-1, - add_extra_convs=False, - norm_cfg=None, - skip_inds=None): - super(FPG, self).__init__() - assert isinstance(in_channels, list) - self.in_channels = in_channels - self.out_channels = out_channels - self.num_ins = len(in_channels) - self.num_outs = num_outs - if inter_channels is None: - self.inter_channels = [out_channels for _ in range(num_outs)] - elif isinstance(inter_channels, int): - self.inter_channels = [inter_channels for _ in range(num_outs)] - else: - assert isinstance(inter_channels, list) - assert len(inter_channels) == num_outs - self.inter_channels = inter_channels - self.stack_times = stack_times - self.paths = paths - assert isinstance(paths, list) and len(paths) == stack_times - for d in paths: - assert d in ('bu', 'td') - - self.same_down_trans = same_down_trans - self.same_up_trans = same_up_trans - self.across_lateral_trans = across_lateral_trans - self.across_down_trans = across_down_trans - self.across_up_trans = across_up_trans - self.output_trans = output_trans - self.across_skip_trans = across_skip_trans - - self.with_bias = norm_cfg is None - # skip inds must be specified if across skip trans is not None - if self.across_skip_trans is not None: - skip_inds is not None - self.skip_inds = skip_inds - assert len(self.skip_inds[0]) <= self.stack_times - - if end_level == -1: - self.backbone_end_level = self.num_ins - assert num_outs >= self.num_ins - start_level - else: - # if end_level < inputs, no extra level is allowed - self.backbone_end_level = end_level - assert end_level <= len(in_channels) - assert num_outs == end_level - start_level - self.start_level = start_level - self.end_level = end_level - self.add_extra_convs = add_extra_convs - - # build lateral 1x1 convs to reduce channels - self.lateral_convs = nn.ModuleList() - for i in range(self.start_level, self.backbone_end_level): - l_conv = nn.Conv2d(self.in_channels[i], - self.inter_channels[i - self.start_level], 1) - self.lateral_convs.append(l_conv) - - extra_levels = num_outs - self.backbone_end_level + self.start_level - self.extra_downsamples = nn.ModuleList() - for i in range(extra_levels): - if self.add_extra_convs: - fpn_idx = self.backbone_end_level - self.start_level + i - extra_conv = nn.Conv2d( - self.inter_channels[fpn_idx - 1], - self.inter_channels[fpn_idx], - 3, - stride=2, - padding=1) - self.extra_downsamples.append(extra_conv) - else: - self.extra_downsamples.append(nn.MaxPool2d(1, stride=2)) - - self.fpn_transitions = nn.ModuleList() # stack times - for s in range(self.stack_times): - stage_trans = nn.ModuleList() # num of feature levels - for i in range(self.num_outs): - # same, across_lateral, across_down, across_up - trans = 
nn.ModuleDict() - if s in self.skip_inds[i]: - stage_trans.append(trans) - continue - # build same-stage down trans (used in bottom-up paths) - if i == 0 or self.same_up_trans is None: - same_up_trans = None - else: - same_up_trans = self.build_trans( - self.same_up_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['same_up'] = same_up_trans - # build same-stage up trans (used in top-down paths) - if i == self.num_outs - 1 or self.same_down_trans is None: - same_down_trans = None - else: - same_down_trans = self.build_trans( - self.same_down_trans, self.inter_channels[i + 1], - self.inter_channels[i]) - trans['same_down'] = same_down_trans - # build across lateral trans - across_lateral_trans = self.build_trans( - self.across_lateral_trans, self.inter_channels[i], - self.inter_channels[i]) - trans['across_lateral'] = across_lateral_trans - # build across down trans - if i == self.num_outs - 1 or self.across_down_trans is None: - across_down_trans = None - else: - across_down_trans = self.build_trans( - self.across_down_trans, self.inter_channels[i + 1], - self.inter_channels[i]) - trans['across_down'] = across_down_trans - # build across up trans - if i == 0 or self.across_up_trans is None: - across_up_trans = None - else: - across_up_trans = self.build_trans( - self.across_up_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['across_up'] = across_up_trans - if self.across_skip_trans is None: - across_skip_trans = None - else: - across_skip_trans = self.build_trans( - self.across_skip_trans, self.inter_channels[i - 1], - self.inter_channels[i]) - trans['across_skip'] = across_skip_trans - # build across_skip trans - stage_trans.append(trans) - self.fpn_transitions.append(stage_trans) - - self.output_transition = nn.ModuleList() # output levels - for i in range(self.num_outs): - trans = self.build_trans( - self.output_trans, - self.inter_channels[i], - self.out_channels, - num_inputs=self.stack_times + 1) - self.output_transition.append(trans) - - self.relu = nn.ReLU(inplace=True) - - def build_trans(self, cfg, in_channels, out_channels, **extra_args): - cfg_ = cfg.copy() - trans_type = cfg_.pop('type') - trans_cls = self.transition_types[trans_type] - return trans_cls(in_channels, out_channels, **cfg_, **extra_args) - - def init_weights(self): - for m in self.modules(): - if isinstance(m, nn.Conv2d): - caffe2_xavier_init(m) - elif is_norm(m): - constant_init(m, 1.0) - - def fuse(self, fuse_dict): - out = None - for item in fuse_dict.values(): - if item is not None: - if out is None: - out = item - else: - out = out + item - return out - - def forward(self, inputs): - assert len(inputs) == len(self.in_channels) - - # build all levels from original feature maps - feats = [ - lateral_conv(inputs[i + self.start_level]) - for i, lateral_conv in enumerate(self.lateral_convs) - ] - for downsample in self.extra_downsamples: - feats.append(downsample(feats[-1])) - - outs = [feats] - - for i in range(self.stack_times): - current_outs = outs[-1] - next_outs = [] - direction = self.paths[i] - for j in range(self.num_outs): - if i in self.skip_inds[j]: - next_outs.append(outs[-1][j]) - continue - # feature level - if direction == 'td': - lvl = self.num_outs - j - 1 - else: - lvl = j - # get transitions - if direction == 'td': - same_trans = self.fpn_transitions[i][lvl]['same_down'] - else: - same_trans = self.fpn_transitions[i][lvl]['same_up'] - across_lateral_trans = self.fpn_transitions[i][lvl][ - 'across_lateral'] - across_down_trans = 
self.fpn_transitions[i][lvl]['across_down'] - across_up_trans = self.fpn_transitions[i][lvl]['across_up'] - across_skip_trans = self.fpn_transitions[i][lvl]['across_skip'] - # init output - to_fuse = dict( - same=None, lateral=None, across_up=None, across_down=None) - # same downsample/upsample - if same_trans is not None: - to_fuse['same'] = same_trans(next_outs[-1]) - # across lateral - if across_lateral_trans is not None: - to_fuse['lateral'] = across_lateral_trans( - current_outs[lvl]) - # across downsample - if lvl > 0 and across_up_trans is not None: - to_fuse['across_up'] = across_up_trans(current_outs[lvl - - 1]) - # across upsample - if (lvl < self.num_outs - 1 and across_down_trans is not None): - to_fuse['across_down'] = across_down_trans( - current_outs[lvl + 1]) - if across_skip_trans is not None: - to_fuse['across_skip'] = across_skip_trans(outs[0][lvl]) - x = self.fuse(to_fuse) - next_outs.append(x) - - if direction == 'td': - outs.append(next_outs[::-1]) - else: - outs.append(next_outs) - - # output trans - final_outs = [] - for i in range(self.num_outs): - lvl_out_list = [] - for s in range(len(outs)): - lvl_out_list.append(outs[s][i]) - lvl_out = self.output_transition[i](lvl_out_list) - final_outs.append(lvl_out) - - return final_outs diff --git a/spaces/Celestinian/Prompt-Generator/app.py b/spaces/Celestinian/Prompt-Generator/app.py deleted file mode 100644 index c1f9a15ee446469a601d3607f40214a07a5736ed..0000000000000000000000000000000000000000 --- a/spaces/Celestinian/Prompt-Generator/app.py +++ /dev/null @@ -1,34 +0,0 @@ -from transformers import AutoTokenizer, AutoModelForCausalLM, GPT2LMHeadModel -import gradio as gr -import torch -import git - -device = "cuda" if torch.cuda.is_available() else "cpu" - -tokenizer = AutoTokenizer.from_pretrained("Celestinian/PromptGPT") -model = AutoModelForCausalLM.from_pretrained("Celestinian/PromptGPT") - -def generate_text(prompt, max_length, do_sample, temperature, top_k, top_p): - formatted_prompt = "\n" + prompt - if not ',' in prompt: - formatted_prompt += ',' - prompt = tokenizer(formatted_prompt, return_tensors='pt') - prompt = {key: value.to(device) for key, value in prompt.items()} - out = model.generate(**prompt, max_length=max_length, do_sample=do_sample, temperature=temperature, - no_repeat_ngram_size=3, top_k=top_k, top_p=top_p) - output = tokenizer.decode(out[0]) - clean_output = output.replace('\n', '\n') - print(clean_output) - return clean_output - -input_text = gr.inputs.Textbox(lines=5, label="Input Text") -max_length = gr.inputs.Slider(minimum=10, maximum=100, default=30, label="Max Length") -do_sample = gr.inputs.Checkbox(default=True, label="Do Sample") -temperature = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=0.1, default=0.4, label="Temperature") -top_k = gr.inputs.Slider(minimum=1, maximum=100, step=1, default=50, label="Top K") -top_p = gr.inputs.Slider(minimum=0.1, maximum=1.0, step=1, default=0.2, label="Top P") - -output_text = gr.outputs.Textbox(label="Generated Text") - -gr.Interface(generate_text, inputs=[input_text, max_length, do_sample, temperature, top_k, top_p], - outputs=output_text).launch() \ No newline at end of file diff --git a/spaces/CguCsie/README/README.md b/spaces/CguCsie/README/README.md deleted file mode 100644 index 23ef7753bf58e74f702fc2f87387a4c8b93dc310..0000000000000000000000000000000000000000 --- a/spaces/CguCsie/README/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: README_ryExp001 -emoji: ๐Ÿš€ -colorFrom: indigo -colorTo: yellow -sdk: static -pinned: false 
-license: openrail ---- - -Edit this `README.md` markdown file to author your organization card ๐Ÿ”ฅ \ No newline at end of file diff --git a/spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/__init__.py b/spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/__init__.py deleted file mode 100644 index 49e32c9a128aeadc2044c362ff27f6a43f6d7815..0000000000000000000000000000000000000000 --- a/spaces/CjangCjengh/Sanskrit-TTS/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -from numpy import zeros, int32, float32 -from torch import from_numpy - -from .core import maximum_path_jit - -def maximum_path(neg_cent, mask): - """ numba optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(float32) - path = zeros(neg_cent.shape, dtype=int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(int32) - maximum_path_jit(path, neg_cent, t_t_max, t_s_max) - return from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/__init__.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/common/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/MacRoman.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/MacRoman.py deleted file mode 100644 index ba8bf14ef7de1cf76248a2bbd1a98bc8bf36cc5e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/encodings/MacRoman.py +++ /dev/null @@ -1,258 +0,0 @@ -MacRoman = [ - "NUL", - "Eth", - "eth", - "Lslash", - "lslash", - "Scaron", - "scaron", - "Yacute", - "yacute", - "HT", - "LF", - "Thorn", - "thorn", - "CR", - "Zcaron", - "zcaron", - "DLE", - "DC1", - "DC2", - "DC3", - "DC4", - "onehalf", - "onequarter", - "onesuperior", - "threequarters", - "threesuperior", - "twosuperior", - "brokenbar", - "minus", - "multiply", - "RS", - "US", - "space", - "exclam", - "quotedbl", - "numbersign", - "dollar", - "percent", - "ampersand", - "quotesingle", - "parenleft", - "parenright", - "asterisk", - "plus", - "comma", - "hyphen", - "period", - "slash", - "zero", - "one", - "two", - "three", - "four", - "five", - "six", - "seven", - "eight", - "nine", - "colon", - "semicolon", - "less", - "equal", - "greater", - "question", - "at", - "A", - "B", - "C", - "D", - "E", - "F", - "G", - "H", - "I", - "J", - "K", - "L", - "M", - "N", - "O", - "P", - "Q", - "R", - "S", - "T", - "U", - "V", - "W", - "X", - "Y", - "Z", - "bracketleft", - "backslash", - "bracketright", - "asciicircum", - "underscore", - "grave", - "a", - "b", - "c", - "d", - "e", - "f", - "g", - "h", - "i", - "j", - "k", - "l", - "m", - "n", - "o", - "p", - "q", - "r", - "s", - "t", - "u", - "v", - "w", - "x", - "y", - "z", - "braceleft", - "bar", - "braceright", - "asciitilde", - "DEL", - "Adieresis", - "Aring", - "Ccedilla", - "Eacute", - "Ntilde", - "Odieresis", - "Udieresis", - "aacute", - "agrave", - "acircumflex", - "adieresis", - "atilde", - "aring", - "ccedilla", - "eacute", - "egrave", - "ecircumflex", - "edieresis", - "iacute", - "igrave", - "icircumflex", - "idieresis", - "ntilde", - "oacute", - "ograve", - "ocircumflex", - "odieresis", - "otilde", - "uacute", - "ugrave", - "ucircumflex", - "udieresis", - "dagger", - "degree", - "cent", 
- "sterling", - "section", - "bullet", - "paragraph", - "germandbls", - "registered", - "copyright", - "trademark", - "acute", - "dieresis", - "notequal", - "AE", - "Oslash", - "infinity", - "plusminus", - "lessequal", - "greaterequal", - "yen", - "mu", - "partialdiff", - "summation", - "product", - "pi", - "integral", - "ordfeminine", - "ordmasculine", - "Omega", - "ae", - "oslash", - "questiondown", - "exclamdown", - "logicalnot", - "radical", - "florin", - "approxequal", - "Delta", - "guillemotleft", - "guillemotright", - "ellipsis", - "nbspace", - "Agrave", - "Atilde", - "Otilde", - "OE", - "oe", - "endash", - "emdash", - "quotedblleft", - "quotedblright", - "quoteleft", - "quoteright", - "divide", - "lozenge", - "ydieresis", - "Ydieresis", - "fraction", - "currency", - "guilsinglleft", - "guilsinglright", - "fi", - "fl", - "daggerdbl", - "periodcentered", - "quotesinglbase", - "quotedblbase", - "perthousand", - "Acircumflex", - "Ecircumflex", - "Aacute", - "Edieresis", - "Egrave", - "Iacute", - "Icircumflex", - "Idieresis", - "Igrave", - "Oacute", - "Ocircumflex", - "apple", - "Ograve", - "Uacute", - "Ucircumflex", - "Ugrave", - "dotlessi", - "circumflex", - "tilde", - "macron", - "breve", - "dotaccent", - "ring", - "cedilla", - "hungarumlaut", - "ogonek", - "caron", -] diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/frozenlist/__init__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/frozenlist/__init__.py deleted file mode 100644 index 152356588d3e619bddb7e2ecd76b147a4e55a96c..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/frozenlist/__init__.py +++ /dev/null @@ -1,95 +0,0 @@ -import os -import sys -import types -from collections.abc import MutableSequence -from functools import total_ordering -from typing import Type - -__version__ = "1.4.0" - -__all__ = ("FrozenList", "PyFrozenList") # type: Tuple[str, ...] 
- - -NO_EXTENSIONS = bool(os.environ.get("FROZENLIST_NO_EXTENSIONS")) # type: bool - - -@total_ordering -class FrozenList(MutableSequence): - __slots__ = ("_frozen", "_items") - - if sys.version_info >= (3, 9): - __class_getitem__ = classmethod(types.GenericAlias) - else: - - @classmethod - def __class_getitem__(cls: Type["FrozenList"]) -> Type["FrozenList"]: - return cls - - def __init__(self, items=None): - self._frozen = False - if items is not None: - items = list(items) - else: - items = [] - self._items = items - - @property - def frozen(self): - return self._frozen - - def freeze(self): - self._frozen = True - - def __getitem__(self, index): - return self._items[index] - - def __setitem__(self, index, value): - if self._frozen: - raise RuntimeError("Cannot modify frozen list.") - self._items[index] = value - - def __delitem__(self, index): - if self._frozen: - raise RuntimeError("Cannot modify frozen list.") - del self._items[index] - - def __len__(self): - return self._items.__len__() - - def __iter__(self): - return self._items.__iter__() - - def __reversed__(self): - return self._items.__reversed__() - - def __eq__(self, other): - return list(self) == other - - def __le__(self, other): - return list(self) <= other - - def insert(self, pos, item): - if self._frozen: - raise RuntimeError("Cannot modify frozen list.") - self._items.insert(pos, item) - - def __repr__(self): - return f"" - - def __hash__(self): - if self._frozen: - return hash(tuple(self)) - else: - raise RuntimeError("Cannot hash unfrozen list.") - - -PyFrozenList = FrozenList - - -try: - from ._frozenlist import FrozenList as CFrozenList # type: ignore - - if not NO_EXTENSIONS: # pragma: no cover - FrozenList = CFrozenList # type: ignore -except ImportError: # pragma: no cover - pass diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_receivebuffer.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_receivebuffer.py deleted file mode 100644 index e5c4e08a56f5081e87103f38b4add6ce1b730204..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_receivebuffer.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -import sys -from typing import List, Optional, Union - -__all__ = ["ReceiveBuffer"] - - -# Operations we want to support: -# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable), -# or wait until there is one -# - read at-most-N bytes -# Goals: -# - on average, do this fast -# - worst case, do this in O(n) where n is the number of bytes processed -# Plan: -# - store bytearray, offset, how far we've searched for a separator token -# - use the how-far-we've-searched data to avoid rescanning -# - while doing a stream of uninterrupted processing, advance offset instead -# of constantly copying -# WARNING: -# - I haven't benchmarked or profiled any of this yet. -# -# Note that starting in Python 3.4, deleting the initial n bytes from a -# bytearray is amortized O(n), thanks to some excellent work by Antoine -# Martin: -# -# https://bugs.python.org/issue19087 -# -# This means that if we only supported 3.4+, we could get rid of the code here -# involving self._start and self.compress, because it's doing exactly the same -# thing that bytearray now does internally. -# -# BUT unfortunately, we still support 2.7, and reading short segments out of a -# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually -# delete this code. 
Yet: -# -# https://pythonclock.org/ -# -# (Two things to double-check first though: make sure PyPy also has the -# optimization, and benchmark to make sure it's a win, since we do have a -# slightly clever thing where we delay calling compress() until we've -# processed a whole event, which could in theory be slightly more efficient -# than the internal bytearray support.) -blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE) - - -class ReceiveBuffer: - def __init__(self) -> None: - self._data = bytearray() - self._next_line_search = 0 - self._multiple_lines_search = 0 - - def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer": - self._data += byteslike - return self - - def __bool__(self) -> bool: - return bool(len(self)) - - def __len__(self) -> int: - return len(self._data) - - # for @property unprocessed_data - def __bytes__(self) -> bytes: - return bytes(self._data) - - def _extract(self, count: int) -> bytearray: - # extracting an initial slice of the data buffer and return it - out = self._data[:count] - del self._data[:count] - - self._next_line_search = 0 - self._multiple_lines_search = 0 - - return out - - def maybe_extract_at_most(self, count: int) -> Optional[bytearray]: - """ - Extract a fixed number of bytes from the buffer. - """ - out = self._data[:count] - if not out: - return None - - return self._extract(count) - - def maybe_extract_next_line(self) -> Optional[bytearray]: - """ - Extract the first line, if it is completed in the buffer. - """ - # Only search in buffer space that we've not already looked at. - search_start_index = max(0, self._next_line_search - 1) - partial_idx = self._data.find(b"\r\n", search_start_index) - - if partial_idx == -1: - self._next_line_search = len(self._data) - return None - - # + 2 is to compensate len(b"\r\n") - idx = partial_idx + 2 - - return self._extract(idx) - - def maybe_extract_lines(self) -> Optional[List[bytearray]]: - """ - Extract everything up to the first blank line, and return a list of lines. - """ - # Handle the case where we have an immediate empty line. - if self._data[:1] == b"\n": - self._extract(1) - return [] - - if self._data[:2] == b"\r\n": - self._extract(2) - return [] - - # Only search in buffer space that we've not already looked at. - match = blank_line_regex.search(self._data, self._multiple_lines_search) - if match is None: - self._multiple_lines_search = max(0, len(self._data) - 2) - return None - - # Truncate the buffer and return it. - idx = match.span(0)[-1] - out = self._extract(idx) - lines = out.split(b"\n") - - for line in lines: - if line.endswith(b"\r"): - del line[-1] - - assert lines[-2] == lines[-1] == b"" - - del lines[-2:] - - return lines - - # In theory we should wait until `\r\n` before starting to validate - # incoming data. However it's interesting to detect (very) invalid data - # early given they might not even contain `\r\n` at all (hence only - # timeout will get rid of them). - # This is not a 100% effective detection but more of a cheap sanity check - # allowing for early abort in some useful cases. - # This is especially interesting when peer is messing up with HTTPS and - # sent us a TLS stream where we were expecting plain HTTP given all - # versions of TLS so far start handshake with a 0x16 message type code. 
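-    # For example, a TLS handshake record starts with byte 0x16, which is
-    # below 0x21 (b"!") and so fails the printable-start check below, letting
-    # the caller abort early instead of waiting for a timeout.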
- def is_next_line_obviously_invalid_request_line(self) -> bool: - try: - # HTTP header line must not contain non-printable characters - # and should not start with a space - return self._data[0] < 0x21 - except IndexError: - return False diff --git a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/other/criterion.py b/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/other/criterion.py deleted file mode 100644 index 04d0db3913b5dc36afb91798d3d1a33fde63dcb1..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/3D-Room-Layout-Estimation_LGT-Net/models/other/criterion.py +++ /dev/null @@ -1,72 +0,0 @@ -""" -@date: 2021/7/19 -@description: -""" -import torch -import loss - -from utils.misc import tensor2np - - -def build_criterion(config, logger): - criterion = {} - device = config.TRAIN.DEVICE - - for k in config.TRAIN.CRITERION.keys(): - sc = config.TRAIN.CRITERION[k] - if sc.WEIGHT is None or float(sc.WEIGHT) == 0: - continue - criterion[sc.NAME] = { - 'loss': getattr(loss, sc.LOSS)(), - 'weight': float(sc.WEIGHT), - 'sub_weights': sc.WEIGHTS, - 'need_all': sc.NEED_ALL - } - - criterion[sc.NAME]['loss'] = criterion[sc.NAME]['loss'].to(device) - if config.AMP_OPT_LEVEL != "O0" and 'cuda' in device: - criterion[sc.NAME]['loss'] = criterion[sc.NAME]['loss'].type(torch.float16) - - # logger.info(f"Build criterion:{sc.WEIGHT}_{sc.NAME}_{sc.LOSS}_{sc.WEIGHTS}") - return criterion - - -def calc_criterion(criterion, gt, dt, epoch_loss_d): - loss = None - postfix_d = {} - for k in criterion.keys(): - if criterion[k]['need_all']: - single_loss = criterion[k]['loss'](gt, dt) - ws_loss = None - for i, sub_weight in enumerate(criterion[k]['sub_weights']): - if sub_weight == 0: - continue - if ws_loss is None: - ws_loss = single_loss[i] * sub_weight - else: - ws_loss = ws_loss + single_loss[i] * sub_weight - single_loss = ws_loss if ws_loss is not None else single_loss - else: - assert k in gt.keys(), "ground label is None:" + k - assert k in dt.keys(), "detection key is None:" + k - if k == 'ratio' and gt[k].shape[-1] != dt[k].shape[-1]: - gt[k] = gt[k].repeat(1, dt[k].shape[-1]) - single_loss = criterion[k]['loss'](gt[k], dt[k]) - - postfix_d[k] = tensor2np(single_loss) - if k not in epoch_loss_d.keys(): - epoch_loss_d[k] = [] - epoch_loss_d[k].append(postfix_d[k]) - - single_loss = single_loss * criterion[k]['weight'] - if loss is None: - loss = single_loss - else: - loss = loss + single_loss - - k = 'loss' - postfix_d[k] = tensor2np(loss) - if k not in epoch_loss_d.keys(): - epoch_loss_d[k] = [] - epoch_loss_d[k].append(postfix_d[k]) - return loss, postfix_d, epoch_loss_d diff --git a/spaces/Detomo/ai-comic-generation/src/app/layouts/index.tsx b/spaces/Detomo/ai-comic-generation/src/app/layouts/index.tsx deleted file mode 100644 index 4553783fbfdd3636bf311ddc8661ea13e585b61a..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/app/layouts/index.tsx +++ /dev/null @@ -1,287 +0,0 @@ -"use client" - -import { Panel } from "@/app/interface/panel" -import { pick } from "@/lib/pick" -import { Grid } from "@/app/interface/grid" - -export function Layout0() { - return ( - -
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
-  )
-}
-
-export function Layout1() {
-  return (
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
-  )
-}
-
-export function Layout2_todo() {
-  return (
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
-  )
-}
-
-export function Layout3_todo() {
-  return (
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
-  )
-}
-
-export function Layout4_todo() {
-  return (
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
-  )
-}
-
-
-export function Layout2() {
-  return (
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
-  )
-}
-
-export function Layout3() {
-  return (
-      {/* <Grid> of <Panel> elements; original JSX markup not recoverable */}
    - ) -} - -// export const layouts = { Layout1, Layout2_todo, Layout3_todo, Layout4_todo, Layout2, Layout3 } -export const allLayouts = { - random: <>, - Layout0, - Layout1, - Layout2, - Layout3 -} - -export const allLayoutLabels = { - random: "Random layout", - Layout0: "Layout 0", - Layout1: "Layout 1", - Layout2: "Layout 2", - Layout3: "Layout 3", -} - -export type LayoutName = keyof typeof allLayouts - -export const defaultLayout: LayoutName = "Layout1" - -export type LayoutCategory = "square" | "fluid" - -export const nonRandomLayouts = Object.keys(allLayouts).filter(layout => layout !== "random") - -export const getRandomLayoutName = (): LayoutName => { - return pick(nonRandomLayouts) as LayoutName -} - -export function getRandomLayoutNames(): LayoutName[] { - return nonRandomLayouts.sort(() => Math.random() - 0.5) as LayoutName[] -} - diff --git a/spaces/Dinoking/Guccio-AI-Designer/netdissect/zdataset.py b/spaces/Dinoking/Guccio-AI-Designer/netdissect/zdataset.py deleted file mode 100644 index eb085d83d676fa1e4b1f1b053dc6f1ba2ff35381..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/netdissect/zdataset.py +++ /dev/null @@ -1,41 +0,0 @@ -import os, torch, numpy -from torch.utils.data import TensorDataset - -def z_dataset_for_model(model, size=100, seed=1): - return TensorDataset(z_sample_for_model(model, size, seed)) - -def z_sample_for_model(model, size=100, seed=1): - # If the model is marked with an input shape, use it. - if hasattr(model, 'input_shape'): - sample = standard_z_sample(size, model.input_shape[1], seed=seed).view( - (size,) + model.input_shape[1:]) - return sample - # Examine first conv in model to determine input feature size. - first_layer = [c for c in model.modules() - if isinstance(c, (torch.nn.Conv2d, torch.nn.ConvTranspose2d, - torch.nn.Linear))][0] - # 4d input if convolutional, 2d input if first layer is linear. - if isinstance(first_layer, (torch.nn.Conv2d, torch.nn.ConvTranspose2d)): - sample = standard_z_sample( - size, first_layer.in_channels, seed=seed)[:,:,None,None] - else: - sample = standard_z_sample( - size, first_layer.in_features, seed=seed) - return sample - -def standard_z_sample(size, depth, seed=1, device=None): - ''' - Generate a standard set of random Z as a (size, z_dimension) tensor. - With the same random seed, it always returns the same z (e.g., - the first one is always the same regardless of the size.) - ''' - # Use numpy RandomState since it can be done deterministically - # without affecting global state - rng = numpy.random.RandomState(seed) - result = torch.from_numpy( - rng.standard_normal(size * depth) - .reshape(size, depth)).float() - if device is not None: - result = result.to(device) - return result - diff --git a/spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/filtered_lrelu.h b/spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/filtered_lrelu.h deleted file mode 100644 index 524c804122a2582e20e2e4e9c49267e1a1b6db60..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/torch_utils/ops/filtered_lrelu.h +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -// -// NVIDIA CORPORATION and its licensors retain all intellectual property -// and proprietary rights in and to this software, related documentation -// and any modifications thereto. 
Any use, reproduction, disclosure or -// distribution of this software and related documentation without an express -// license agreement from NVIDIA CORPORATION is strictly prohibited. - -#include - -//------------------------------------------------------------------------ -// CUDA kernel parameters. - -struct filtered_lrelu_kernel_params -{ - // These parameters decide which kernel to use. - int up; // upsampling ratio (1, 2, 4) - int down; // downsampling ratio (1, 2, 4) - int2 fuShape; // [size, 1] | [size, size] - int2 fdShape; // [size, 1] | [size, size] - - int _dummy; // Alignment. - - // Rest of the parameters. - const void* x; // Input tensor. - void* y; // Output tensor. - const void* b; // Bias tensor. - unsigned char* s; // Sign tensor in/out. NULL if unused. - const float* fu; // Upsampling filter. - const float* fd; // Downsampling filter. - - int2 pad0; // Left/top padding. - float gain; // Additional gain factor. - float slope; // Leaky ReLU slope on negative side. - float clamp; // Clamp after nonlinearity. - int flip; // Filter kernel flip for gradient computation. - - int tilesXdim; // Original number of horizontal output tiles. - int tilesXrep; // Number of horizontal tiles per CTA. - int blockZofs; // Block z offset to support large minibatch, channel dimensions. - - int4 xShape; // [width, height, channel, batch] - int4 yShape; // [width, height, channel, batch] - int2 sShape; // [width, height] - width is in bytes. Contiguous. Zeros if unused. - int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. - int swLimit; // Active width of sign tensor in bytes. - - longlong4 xStride; // Strides of all tensors except signs, same component order as shapes. - longlong4 yStride; // - int64_t bStride; // - longlong3 fuStride; // - longlong3 fdStride; // -}; - -struct filtered_lrelu_act_kernel_params -{ - void* x; // Input/output, modified in-place. - unsigned char* s; // Sign tensor in/out. NULL if unused. - - float gain; // Additional gain factor. - float slope; // Leaky ReLU slope on negative side. - float clamp; // Clamp after nonlinearity. - - int4 xShape; // [width, height, channel, batch] - longlong4 xStride; // Input/output tensor strides, same order as in shape. - int2 sShape; // [width, height] - width is in elements. Contiguous. Zeros if unused. - int2 sOfs; // [ofs_x, ofs_y] - offset between upsampled data and sign tensor. -}; - -//------------------------------------------------------------------------ -// CUDA kernel specialization. - -struct filtered_lrelu_kernel_spec -{ - void* setup; // Function for filter kernel setup. - void* exec; // Function for main operation. - int2 tileOut; // Width/height of launch tile. - int numWarps; // Number of warps per thread block, determines launch block size. - int xrep; // For processing multiple horizontal tiles per thread block. - int dynamicSharedKB; // How much dynamic shared memory the exec kernel wants. -}; - -//------------------------------------------------------------------------ -// CUDA kernel selection. 
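-// The host side is expected to call choose_filtered_lrelu_kernel() to obtain
-// the specialization (tile size, warp count, shared-memory budget) matching
-// the given parameters; copy_filters() presumably stages the filter taps
-// (e.g. into constant memory) ahead of the launch.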
- -template filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB); -template void* choose_filtered_lrelu_act_kernel(void); -template cudaError_t copy_filters(cudaStream_t stream); - -//------------------------------------------------------------------------ \ No newline at end of file diff --git a/spaces/DragGan/DragGan/stylegan_human/torch_utils/training_stats.py b/spaces/DragGan/DragGan/stylegan_human/torch_utils/training_stats.py deleted file mode 100644 index 3eb94d95286d8aeffe40ad32ca667e53b4622c4f..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/torch_utils/training_stats.py +++ /dev/null @@ -1,270 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. - -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Facilities for reporting and collecting training statistics across -multiple processes and devices. The interface is designed to minimize -synchronization overhead as well as the amount of boilerplate in user -code.""" - -import re -import numpy as np -import torch -import dnnlib - -from . import misc - -#---------------------------------------------------------------------------- - -_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares] -_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction. -_counter_dtype = torch.float64 # Data type to use for the internal counters. -_rank = 0 # Rank of the current process. -_sync_device = None # Device to use for multiprocess communication. None = single-process. -_sync_called = False # Has _sync() been called yet? -_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor -_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor - -#---------------------------------------------------------------------------- - -def init_multiprocessing(rank, sync_device): - r"""Initializes `torch_utils.training_stats` for collecting statistics - across multiple processes. - - This function must be called after - `torch.distributed.init_process_group()` and before `Collector.update()`. - The call is not necessary if multi-process collection is not needed. - - Args: - rank: Rank of the current process. - sync_device: PyTorch device to use for inter-process - communication, or None to disable multi-process - collection. Typically `torch.device('cuda', rank)`. - """ - global _rank, _sync_device - assert not _sync_called - _rank = rank - _sync_device = sync_device - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def report(name, value): - r"""Broadcasts the given set of scalars to all interested instances of - `Collector`, across device and process boundaries. - - This function is expected to be extremely cheap and can be safely - called from anywhere in the training loop, loss function, or inside a - `torch.nn.Module`. - - Warning: The current implementation expects the set of unique names to - be consistent across processes. 
Please make sure that `report()` is - called at least once for each unique name by each process, and in the - same order. If a given process has no scalars to broadcast, it can do - `report(name, [])` (empty list). - - Args: - name: Arbitrary string specifying the name of the statistic. - Averages are accumulated separately for each unique name. - value: Arbitrary set of scalars. Can be a list, tuple, - NumPy array, PyTorch tensor, or Python scalar. - - Returns: - The same `value` that was passed in. - """ - if name not in _counters: - _counters[name] = dict() - - elems = torch.as_tensor(value) - if elems.numel() == 0: - return value - - elems = elems.detach().flatten().to(_reduce_dtype) - moments = torch.stack([ - torch.ones_like(elems).sum(), - elems.sum(), - elems.square().sum(), - ]) - assert moments.ndim == 1 and moments.shape[0] == _num_moments - moments = moments.to(_counter_dtype) - - device = moments.device - if device not in _counters[name]: - _counters[name][device] = torch.zeros_like(moments) - _counters[name][device].add_(moments) - return value - -#---------------------------------------------------------------------------- - -def report0(name, value): - r"""Broadcasts the given set of scalars by the first process (`rank = 0`), - but ignores any scalars provided by the other processes. - See `report()` for further details. - """ - report(name, value if _rank == 0 else []) - return value - -#---------------------------------------------------------------------------- - -class Collector: - r"""Collects the scalars broadcasted by `report()` and `report0()` and - computes their long-term averages (mean and standard deviation) over - user-defined periods of time. - - The averages are first collected into internal counters that are not - directly visible to the user. They are then copied to the user-visible - state as a result of calling `update()` and can then be queried using - `mean()`, `std()`, `as_dict()`, etc. Calling `update()` also resets the - internal counters for the next round, so that the user-visible state - effectively reflects averages collected between the last two calls to - `update()`. - - Args: - regex: Regular expression defining which statistics to - collect. The default is to collect everything. - keep_previous: Whether to retain the previous averages if no - scalars were collected on a given round - (default: True). - """ - def __init__(self, regex='.*', keep_previous=True): - self._regex = re.compile(regex) - self._keep_previous = keep_previous - self._cumulative = dict() - self._moments = dict() - self.update() - self._moments.clear() - - def names(self): - r"""Returns the names of all statistics broadcasted so far that - match the regular expression specified at construction time. - """ - return [name for name in _counters if self._regex.fullmatch(name)] - - def update(self): - r"""Copies current values of the internal counters to the - user-visible state and resets them for the next round. - - If `keep_previous=True` was specified at construction time, the - operation is skipped for statistics that have received no scalars - since the last update, retaining their previous averages. - - This method performs a number of GPU-to-CPU transfers and one - `torch.distributed.all_reduce()`. It is intended to be called - periodically in the main training loop, typically once every - N training steps. 
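-
-        Example (illustrative sketch):
-
-            collector = training_stats.Collector(regex='Loss/.*')
-            for step in range(num_steps):
-                ...  # training code elsewhere calls training_stats.report('Loss/G', g_loss)
-                if step % 100 == 0:
-                    collector.update()
-                    print(collector.mean('Loss/G'))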
- """ - if not self._keep_previous: - self._moments.clear() - for name, cumulative in _sync(self.names()): - if name not in self._cumulative: - self._cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) - delta = cumulative - self._cumulative[name] - self._cumulative[name].copy_(cumulative) - if float(delta[0]) != 0: - self._moments[name] = delta - - def _get_delta(self, name): - r"""Returns the raw moments that were accumulated for the given - statistic between the last two calls to `update()`, or zero if - no scalars were collected. - """ - assert self._regex.fullmatch(name) - if name not in self._moments: - self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype) - return self._moments[name] - - def num(self, name): - r"""Returns the number of scalars that were accumulated for the given - statistic between the last two calls to `update()`, or zero if - no scalars were collected. - """ - delta = self._get_delta(name) - return int(delta[0]) - - def mean(self, name): - r"""Returns the mean of the scalars that were accumulated for the - given statistic between the last two calls to `update()`, or NaN if - no scalars were collected. - """ - delta = self._get_delta(name) - if int(delta[0]) == 0: - return float('nan') - return float(delta[1] / delta[0]) - - def std(self, name): - r"""Returns the standard deviation of the scalars that were - accumulated for the given statistic between the last two calls to - `update()`, or NaN if no scalars were collected. - """ - delta = self._get_delta(name) - if int(delta[0]) == 0 or not np.isfinite(float(delta[1])): - return float('nan') - if int(delta[0]) == 1: - return float(0) - mean = float(delta[1] / delta[0]) - raw_var = float(delta[2] / delta[0]) - return np.sqrt(max(raw_var - np.square(mean), 0)) - - def as_dict(self): - r"""Returns the averages accumulated between the last two calls to - `update()` as an `dnnlib.EasyDict`. The contents are as follows: - - dnnlib.EasyDict( - NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT), - ... - ) - """ - stats = dnnlib.EasyDict() - for name in self.names(): - stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name)) - return stats - - def __getitem__(self, name): - r"""Convenience getter. - `collector[name]` is a synonym for `collector.mean(name)`. - """ - return self.mean(name) - -#---------------------------------------------------------------------------- - -def _sync(names): - r"""Synchronize the global cumulative counters across devices and - processes. Called internally by `Collector.update()`. - """ - if len(names) == 0: - return [] - global _sync_called - _sync_called = True - - # Collect deltas within current rank. - deltas = [] - device = _sync_device if _sync_device is not None else torch.device('cpu') - for name in names: - delta = torch.zeros([_num_moments], dtype=_counter_dtype, device=device) - for counter in _counters[name].values(): - delta.add_(counter.to(device)) - counter.copy_(torch.zeros_like(counter)) - deltas.append(delta) - deltas = torch.stack(deltas) - - # Sum deltas across ranks. - if _sync_device is not None: - torch.distributed.all_reduce(deltas) - - # Update cumulative values. - deltas = deltas.cpu() - for idx, name in enumerate(names): - if name not in _cumulative: - _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype) - _cumulative[name].add_(deltas[idx]) - - # Return name-value pairs. 
- return [(name, _cumulative[name]) for name in names] - -#---------------------------------------------------------------------------- diff --git a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/include/STrack.h b/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/include/STrack.h deleted file mode 100644 index 752cbefa8f7f7f4f0aff08e0e28ff036afe7d61a..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/deploy/ncnn/cpp/include/STrack.h +++ /dev/null @@ -1,50 +0,0 @@ -#pragma once - -#include -#include "kalmanFilter.h" - -using namespace cv; -using namespace std; - -enum TrackState { New = 0, Tracked, Lost, Removed }; - -class STrack -{ -public: - STrack(vector tlwh_, float score); - ~STrack(); - - vector static tlbr_to_tlwh(vector &tlbr); - void static multi_predict(vector &stracks, byte_kalman::KalmanFilter &kalman_filter); - void static_tlwh(); - void static_tlbr(); - vector tlwh_to_xyah(vector tlwh_tmp); - vector to_xyah(); - void mark_lost(); - void mark_removed(); - int next_id(); - int end_frame(); - - void activate(byte_kalman::KalmanFilter &kalman_filter, int frame_id); - void re_activate(STrack &new_track, int frame_id, bool new_id = false); - void update(STrack &new_track, int frame_id); - -public: - bool is_activated; - int track_id; - int state; - - vector _tlwh; - vector tlwh; - vector tlbr; - int frame_id; - int tracklet_len; - int start_frame; - - KAL_MEAN mean; - KAL_COVA covariance; - float score; - -private: - byte_kalman::KalmanFilter kalman_filter; -}; \ No newline at end of file diff --git a/spaces/EronSamez/RVC_HFmeu/infer/modules/train/train.py b/spaces/EronSamez/RVC_HFmeu/infer/modules/train/train.py deleted file mode 100644 index 550bef391444c9b6c0d8c44ae3a3809b3ade4218..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/modules/train/train.py +++ /dev/null @@ -1,723 +0,0 @@ -import os -import sys -import logging - -logger = logging.getLogger(__name__) - -now_dir = os.getcwd() -sys.path.append(os.path.join(now_dir)) - -import datetime - -from infer.lib.train import utils - -hps = utils.get_hparams() -os.environ["CUDA_VISIBLE_DEVICES"] = hps.gpus.replace("-", ",") -n_gpus = len(hps.gpus.split("-")) -from random import randint, shuffle - -import torch -try: - import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import - if torch.xpu.is_available(): - from infer.modules.ipex import ipex_init - from infer.modules.ipex.gradscaler import gradscaler_init - from torch.xpu.amp import autocast - GradScaler = gradscaler_init() - ipex_init() - else: - from torch.cuda.amp import GradScaler, autocast -except Exception: - from torch.cuda.amp import GradScaler, autocast - -torch.backends.cudnn.deterministic = False -torch.backends.cudnn.benchmark = False -from time import sleep -from time import time as ttime - -import torch.distributed as dist -import torch.multiprocessing as mp - -from torch.nn import functional as F -from torch.nn.parallel import DistributedDataParallel as DDP -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter - -from infer.lib.infer_pack import commons -from infer.lib.train.data_utils import ( - DistributedBucketSampler, - TextAudioCollate, - TextAudioCollateMultiNSFsid, - TextAudioLoader, - TextAudioLoaderMultiNSFsid, -) - -if hps.version == "v1": - from infer.lib.infer_pack.models import MultiPeriodDiscriminator - from infer.lib.infer_pack.models import SynthesizerTrnMs256NSFsid as RVC_Model_f0 - from infer.lib.infer_pack.models import ( - 
SynthesizerTrnMs256NSFsid_nono as RVC_Model_nof0, - ) -else: - from infer.lib.infer_pack.models import ( - SynthesizerTrnMs768NSFsid as RVC_Model_f0, - SynthesizerTrnMs768NSFsid_nono as RVC_Model_nof0, - MultiPeriodDiscriminatorV2 as MultiPeriodDiscriminator, - ) - -from infer.lib.train.losses import ( - discriminator_loss, - feature_loss, - generator_loss, - kl_loss, -) -from infer.lib.train.mel_processing import mel_spectrogram_torch, spec_to_mel_torch -from infer.lib.train.process_ckpt import savee - -global_step = 0 -import csv - -class EpochRecorder: - def __init__(self): - self.last_time = ttime() - - def record(self): - now_time = ttime() - elapsed_time = now_time - self.last_time - self.last_time = now_time - elapsed_time_str = str(datetime.timedelta(seconds=elapsed_time)) - current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") - return f"[{current_time}] | ({elapsed_time_str})" - -def reset_stop_flag(): - with open("csvdb/stop.csv", "w+", newline="") as STOPCSVwrite: - csv_writer = csv.writer(STOPCSVwrite, delimiter=",") - csv_writer.writerow(["False"]) - -def create_model(hps, model_f0, model_nof0): - filter_length_adjusted = hps.data.filter_length // 2 + 1 - segment_size_adjusted = hps.train.segment_size // hps.data.hop_length - is_half = hps.train.fp16_run - sr = hps.sample_rate - - model = model_f0 if hps.if_f0 == 1 else model_nof0 - - return model( - filter_length_adjusted, - segment_size_adjusted, - **hps.model, - is_half=is_half, - sr=sr - ) - -def move_model_to_cuda_if_available(model, rank): - if torch.cuda.is_available(): - return model.cuda(rank) - else: - return model - -def create_optimizer(model, hps): - return torch.optim.AdamW( - model.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - -def create_ddp_model(model, rank): - if torch.cuda.is_available(): - return DDP(model, device_ids=[rank]) - else: - return DDP(model) - -def create_dataset(hps, if_f0=True): - return TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) if if_f0 else TextAudioLoader(hps.data.training_files, hps.data) - -def create_sampler(dataset, batch_size, n_gpus, rank): - return DistributedBucketSampler( - dataset, - batch_size * n_gpus, - # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s - [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - -def set_collate_fn(if_f0=True): - return TextAudioCollateMultiNSFsid() if if_f0 else TextAudioCollate() - - -def main(): - n_gpus = torch.cuda.device_count() - - if torch.cuda.is_available() == False and torch.backends.mps.is_available() == True: - n_gpus = 1 - if n_gpus < 1: - # patch to unblock people without gpus. there is probably a better way. 
- logger.warn("NO GPU DETECTED: falling back to CPU - this may take a while") - n_gpus = 1 - os.environ["MASTER_ADDR"] = "localhost" - os.environ["MASTER_PORT"] = str(randint(20000, 55555)) - children = [] - for i in range(n_gpus): - subproc = mp.Process( - target=run, - args=( - i, - n_gpus, - hps, - ), - ) - children.append(subproc) - subproc.start() - - for i in range(n_gpus): - children[i].join() - - -def run(rank, n_gpus, hps): - global global_step - if rank == 0: - logger = utils.get_logger(hps.model_dir) - logger.info(hps) - # utils.check_git_hash(hps.model_dir) - writer = SummaryWriter(log_dir=hps.model_dir) - writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) - - dist.init_process_group( - backend="gloo", init_method="env://", world_size=n_gpus, rank=rank - ) - torch.manual_seed(hps.train.seed) - if torch.cuda.is_available(): - torch.cuda.set_device(rank) - - if hps.if_f0 == 1: - train_dataset = TextAudioLoaderMultiNSFsid(hps.data.training_files, hps.data) - else: - train_dataset = TextAudioLoader(hps.data.training_files, hps.data) - train_sampler = DistributedBucketSampler( - train_dataset, - hps.train.batch_size * n_gpus, - # [100, 200, 300, 400, 500, 600, 700, 800, 900, 1000, 1200,1400], # 16s - [100, 200, 300, 400, 500, 600, 700, 800, 900], # 16s - num_replicas=n_gpus, - rank=rank, - shuffle=True, - ) - # It is possible that dataloader's workers are out of shared memory. Please try to raise your shared memory limit. - # num_workers=8 -> num_workers=4 - if hps.if_f0 == 1: - collate_fn = TextAudioCollateMultiNSFsid() - else: - collate_fn = TextAudioCollate() - train_loader = DataLoader( - train_dataset, - num_workers=4, - shuffle=False, - pin_memory=True, - collate_fn=collate_fn, - batch_sampler=train_sampler, - persistent_workers=True, - prefetch_factor=8, - ) - if hps.if_f0 == 1: - net_g = RVC_Model_f0( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model, - is_half=hps.train.fp16_run, - sr=hps.sample_rate, - ) - else: - net_g = RVC_Model_nof0( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model, - is_half=hps.train.fp16_run, - ) - if torch.cuda.is_available(): - net_g = net_g.cuda(rank) - net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm) - if torch.cuda.is_available(): - net_d = net_d.cuda(rank) - optim_g = torch.optim.AdamW( - net_g.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - optim_d = torch.optim.AdamW( - net_d.parameters(), - hps.train.learning_rate, - betas=hps.train.betas, - eps=hps.train.eps, - ) - # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) - # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) - if hasattr(torch, "xpu") and torch.xpu.is_available(): - pass - elif torch.cuda.is_available(): - net_g = DDP(net_g, device_ids=[rank]) - net_d = DDP(net_d, device_ids=[rank]) - else: - net_g = DDP(net_g) - net_d = DDP(net_d) - - try: # ๅฆ‚ๆžœ่ƒฝๅŠ ่ฝฝ่‡ชๅŠจresume - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d - ) # DๅคšๅŠๅŠ ่ฝฝๆฒกไบ‹ - if rank == 0: - logger.info("loaded D") - # _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g,load_opt=0) - _, _, _, epoch_str = utils.load_checkpoint( - utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g - ) - global_step = (epoch_str - 1) * len(train_loader) - # epoch_str = 1 - 
# global_step = 0 - except: # ๅฆ‚ๆžœ้ฆ–ๆฌกไธ่ƒฝๅŠ ่ฝฝ๏ผŒๅŠ ่ฝฝpretrain - # traceback.print_exc() - epoch_str = 1 - global_step = 0 - if hps.pretrainG != "": - if rank == 0: - logger.info("loaded pretrained %s" % (hps.pretrainG)) - if hasattr(net_g, "module"): - logger.info( - net_g.module.load_state_dict( - torch.load(hps.pretrainG, map_location="cpu")["model"] - ) - ) ##ๆต‹่ฏ•ไธๅŠ ่ฝฝไผ˜ๅŒ–ๅ™จ - else: - logger.info( - net_g.load_state_dict( - torch.load(hps.pretrainG, map_location="cpu")["model"] - ) - ) ##ๆต‹่ฏ•ไธๅŠ ่ฝฝไผ˜ๅŒ–ๅ™จ - if hps.pretrainD != "": - if rank == 0: - logger.info("loaded pretrained %s" % (hps.pretrainD)) - if hasattr(net_d, "module"): - logger.info( - net_d.module.load_state_dict( - torch.load(hps.pretrainD, map_location="cpu")["model"] - ) - ) - else: - logger.info( - net_d.load_state_dict( - torch.load(hps.pretrainD, map_location="cpu")["model"] - ) - ) - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR( - optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR( - optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 - ) - - scaler = GradScaler(enabled=hps.train.fp16_run) - - cache = [] - for epoch in range(epoch_str, hps.train.epochs + 1): - if rank == 0: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - logger, - [writer, writer_eval], - cache, - ) - else: - train_and_evaluate( - rank, - epoch, - hps, - [net_g, net_d], - [optim_g, optim_d], - [scheduler_g, scheduler_d], - scaler, - [train_loader, None], - None, - None, - cache, - ) - scheduler_g.step() - scheduler_d.step() - - -def train_and_evaluate( - rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, cache -): - net_g, net_d = nets - optim_g, optim_d = optims - train_loader, eval_loader = loaders - if writers is not None: - writer, writer_eval = writers - - train_loader.batch_sampler.set_epoch(epoch) - global global_step - - net_g.train() - net_d.train() - - # Prepare data iterator - if hps.if_cache_data_in_gpu == True: - # Use Cache - data_iterator = cache - if cache == []: - # Make new cache - for batch_idx, info in enumerate(train_loader): - # Unpack - if hps.if_f0 == 1: - ( - phone, - phone_lengths, - pitch, - pitchf, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ) = info - else: - ( - phone, - phone_lengths, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ) = info - # Load on CUDA - if torch.cuda.is_available(): - phone = phone.cuda(rank, non_blocking=True) - phone_lengths = phone_lengths.cuda(rank, non_blocking=True) - if hps.if_f0 == 1: - pitch = pitch.cuda(rank, non_blocking=True) - pitchf = pitchf.cuda(rank, non_blocking=True) - sid = sid.cuda(rank, non_blocking=True) - spec = spec.cuda(rank, non_blocking=True) - spec_lengths = spec_lengths.cuda(rank, non_blocking=True) - wave = wave.cuda(rank, non_blocking=True) - wave_lengths = wave_lengths.cuda(rank, non_blocking=True) - # Cache on list - if hps.if_f0 == 1: - cache.append( - ( - batch_idx, - ( - phone, - phone_lengths, - pitch, - pitchf, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ), - ) - ) - else: - cache.append( - ( - batch_idx, - ( - phone, - phone_lengths, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ), - ) - ) - else: - # Load shuffled cache - shuffle(cache) - else: - # Loader - data_iterator = enumerate(train_loader) - - # Run steps - epoch_recorder = EpochRecorder() - for batch_idx, info in 
data_iterator: - # Data - ## Unpack - if hps.if_f0 == 1: - ( - phone, - phone_lengths, - pitch, - pitchf, - spec, - spec_lengths, - wave, - wave_lengths, - sid, - ) = info - else: - phone, phone_lengths, spec, spec_lengths, wave, wave_lengths, sid = info - ## Load on CUDA - if (hps.if_cache_data_in_gpu == False) and torch.cuda.is_available(): - phone = phone.cuda(rank, non_blocking=True) - phone_lengths = phone_lengths.cuda(rank, non_blocking=True) - if hps.if_f0 == 1: - pitch = pitch.cuda(rank, non_blocking=True) - pitchf = pitchf.cuda(rank, non_blocking=True) - sid = sid.cuda(rank, non_blocking=True) - spec = spec.cuda(rank, non_blocking=True) - spec_lengths = spec_lengths.cuda(rank, non_blocking=True) - wave = wave.cuda(rank, non_blocking=True) - # wave_lengths = wave_lengths.cuda(rank, non_blocking=True) - - # Calculate - with autocast(enabled=hps.train.fp16_run): - if hps.if_f0 == 1: - ( - y_hat, - ids_slice, - x_mask, - z_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - ) = net_g(phone, phone_lengths, pitch, pitchf, spec, spec_lengths, sid) - else: - ( - y_hat, - ids_slice, - x_mask, - z_mask, - (z, z_p, m_p, logs_p, m_q, logs_q), - ) = net_g(phone, phone_lengths, spec, spec_lengths, sid) - mel = spec_to_mel_torch( - spec, - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - y_mel = commons.slice_segments( - mel, ids_slice, hps.train.segment_size // hps.data.hop_length - ) - with autocast(enabled=False): - y_hat_mel = mel_spectrogram_torch( - y_hat.float().squeeze(1), - hps.data.filter_length, - hps.data.n_mel_channels, - hps.data.sampling_rate, - hps.data.hop_length, - hps.data.win_length, - hps.data.mel_fmin, - hps.data.mel_fmax, - ) - if hps.train.fp16_run == True: - y_hat_mel = y_hat_mel.half() - wave = commons.slice_segments( - wave, ids_slice * hps.data.hop_length, hps.train.segment_size - ) # slice - - # Discriminator - y_d_hat_r, y_d_hat_g, _, _ = net_d(wave, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( - y_d_hat_r, y_d_hat_g - ) - optim_d.zero_grad() - scaler.scale(loss_disc).backward() - scaler.unscale_(optim_d) - grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) - scaler.step(optim_d) - - with autocast(enabled=hps.train.fp16_run): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(wave, y_hat) - with autocast(enabled=False): - loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl - loss_fm = feature_loss(fmap_r, fmap_g) - loss_gen, losses_gen = generator_loss(y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_kl - optim_g.zero_grad() - scaler.scale(loss_gen_all).backward() - scaler.unscale_(optim_g) - grad_norm_g = commons.clip_grad_value_(net_g.parameters(), None) - scaler.step(optim_g) - scaler.update() - - if rank == 0: - if global_step % hps.train.log_interval == 0: - lr = optim_g.param_groups[0]["lr"] - logger.info( - "Train Epoch: {} [{:.0f}%]".format( - epoch, 100.0 * batch_idx / len(train_loader) - ) - ) - # Amor For Tensorboard display - if loss_mel > 75: - loss_mel = 75 - if loss_kl > 9: - loss_kl = 9 - - logger.info([global_step, lr]) - logger.info( - f"loss_disc={loss_disc:.3f}, loss_gen={loss_gen:.3f}, loss_fm={loss_fm:.3f},loss_mel={loss_mel:.3f}, loss_kl={loss_kl:.3f}" - ) - scalar_dict = { - "loss/g/total": loss_gen_all, - "loss/d/total": loss_disc, - "learning_rate": lr, - "grad_norm_d": grad_norm_d, - 
"grad_norm_g": grad_norm_g, - } - scalar_dict.update( - { - "loss/g/fm": loss_fm, - "loss/g/mel": loss_mel, - "loss/g/kl": loss_kl, - } - ) - - scalar_dict.update( - {"loss/g/{}".format(i): v for i, v in enumerate(losses_gen)} - ) - scalar_dict.update( - {"loss/d_r/{}".format(i): v for i, v in enumerate(losses_disc_r)} - ) - scalar_dict.update( - {"loss/d_g/{}".format(i): v for i, v in enumerate(losses_disc_g)} - ) - image_dict = { - "slice/mel_org": utils.plot_spectrogram_to_numpy( - y_mel[0].data.cpu().numpy() - ), - "slice/mel_gen": utils.plot_spectrogram_to_numpy( - y_hat_mel[0].data.cpu().numpy() - ), - "all/mel": utils.plot_spectrogram_to_numpy( - mel[0].data.cpu().numpy() - ), - } - utils.summarize( - writer=writer, - global_step=global_step, - images=image_dict, - scalars=scalar_dict, - ) - global_step += 1 - # /Run steps - - if epoch % hps.save_every_epoch == 0 and rank == 0: - if hps.if_latest == 0: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(global_step)), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(global_step)), - ) - else: - utils.save_checkpoint( - net_g, - optim_g, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "G_{}.pth".format(2333333)), - ) - utils.save_checkpoint( - net_d, - optim_d, - hps.train.learning_rate, - epoch, - os.path.join(hps.model_dir, "D_{}.pth".format(2333333)), - ) - if rank == 0 and hps.save_every_weights == "1": - if hasattr(net_g, "module"): - ckpt = net_g.module.state_dict() - else: - ckpt = net_g.state_dict() - logger.info( - "saving ckpt %s_e%s:%s" - % ( - hps.name, - epoch, - savee( - ckpt, - hps.sample_rate, - hps.if_f0, - hps.name + "_e%s_s%s" % (epoch, global_step), - epoch, - hps.version, - hps, - ), - ) - ) - - stopbtn = False - try: - with open("csvdb/stop.csv", 'r') as csv_file: - stopbtn_str = next(csv.reader(csv_file), [None])[0] - if stopbtn_str is not None: stopbtn = stopbtn_str.lower() == 'true' - except (ValueError, TypeError, FileNotFoundError, IndexError) as e: - print(f"Handling exception: {e}") - stopbtn = False - - if stopbtn: - logger.info("Stop Button was pressed. The program is closed.") - ckpt = net_g.module.state_dict() if hasattr(net_g, "module") else net_g.state_dict() - logger.info( - "saving final ckpt:%s" - % ( - savee( - ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps - ) - ) - ) - sleep(1) - reset_stop_flag() - os._exit(2333333) - - if rank == 0: - logger.info("====> Epoch: {} {}".format(epoch, epoch_recorder.record())) - if epoch >= hps.total_epoch and rank == 0: - logger.info("Training is done. 
The program is closed.") - - if hasattr(net_g, "module"): - ckpt = net_g.module.state_dict() - else: - ckpt = net_g.state_dict() - logger.info( - "saving final ckpt:%s" - % ( - savee( - ckpt, hps.sample_rate, hps.if_f0, hps.name, epoch, hps.version, hps - ) - ) - ) - sleep(1) - os._exit(2333333) - - -if __name__ == "__main__": - torch.multiprocessing.set_start_method("spawn") - main() diff --git a/spaces/EronSamez/RVC_HFmeu/lib/infer_pack/transforms.py b/spaces/EronSamez/RVC_HFmeu/lib/infer_pack/transforms.py deleted file mode 100644 index a11f799e023864ff7082c1f49c0cc18351a13b47..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/lib/infer_pack/transforms.py +++ /dev/null @@ -1,209 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = {"tails": tails, "tail_bound": tail_bound} - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 - - -def unconstrained_rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails="linear", - tail_bound=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == "linear": - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError("{} tails are not implemented.".format(tails)) - - ( - outputs[inside_interval_mask], - logabsdet[inside_interval_mask], - ) = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, - right=tail_bound, - bottom=-tail_bound, - top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - ) - - return outputs, logabsdet - - -def rational_quadratic_spline( - inputs, - unnormalized_widths, - unnormalized_heights, - 
unnormalized_derivatives, - inverse=False, - left=0.0, - right=1.0, - bottom=0.0, - top=1.0, - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE, -): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError("Input to a transform is not within its domain") - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError("Minimal bin width too large for the number of bins") - if min_bin_height * num_bins > 1.0: - raise ValueError("Minimal bin height too large for the number of bins") - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) + input_heights * (input_delta - input_derivatives) - b = input_heights * input_derivatives - (inputs - input_cumheights) * ( - input_derivatives + input_derivatives_plus_one - 2 * input_delta - ) - c = -input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - derivative_numerator = input_delta.pow(2) * ( - input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * ( - input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta - ) - denominator = input_delta + ( - (input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta - ) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * ( - 
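-            # numerator of the closed-form derivative of the monotonic
-            # rational-quadratic map (cf. Durkan et al., "Neural Spline
-            # Flows", 2019):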
input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2) - ) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/FrankZxShen/so-vits-svc-models-pcr/vdecoder/hifigan/env.py b/spaces/FrankZxShen/so-vits-svc-models-pcr/vdecoder/hifigan/env.py deleted file mode 100644 index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/so-vits-svc-models-pcr/vdecoder/hifigan/env.py +++ /dev/null @@ -1,15 +0,0 @@ -import os -import shutil - - -class AttrDict(dict): - def __init__(self, *args, **kwargs): - super(AttrDict, self).__init__(*args, **kwargs) - self.__dict__ = self - - -def build_env(config, config_name, path): - t_path = os.path.join(path, config_name) - if config != t_path: - os.makedirs(path, exist_ok=True) - shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/shanghainese.py b/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/shanghainese.py deleted file mode 100644 index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/text/shanghainese.py +++ /dev/null @@ -1,64 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'แด‡'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'แด‡f'), - ('G', 'dส‘i'), - ('H', 'แด‡tษ•สฐ'), - ('I', 'แด€i'), - ('J', 'dส‘แด‡'), - ('K', 'kสฐแด‡'), - ('L', 'แด‡l'), - ('M', 'แด‡m'), - ('N', 'แด‡n'), - ('O', 'o'), - ('P', 'pสฐi'), - ('Q', 'kสฐiu'), - ('R', 'แด€l'), - ('S', 'แด‡s'), - ('T', 'tสฐi'), - ('U', 'ษฆiu'), - ('V', 'vi'), - ('W', 'dแด€bษคliu'), - ('X', 'แด‡ks'), - ('Y', 'uแด€i'), - ('Z', 'zแด‡') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('ไธ€ๅ','ๅ').replace('ไบŒๅ', 'ๅปฟ').replace('ไบŒ', 'ไธค') - return re.sub(r'((?:^|[^ไธ‰ๅ››ไบ”ๅ…ญไธƒๅ…ซไน])ๅ|ๅปฟ)ไธค', r'\1ไบŒ', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def shanghainese_to_ipa(text): - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[ใ€๏ผ›๏ผš]', '๏ผŒ', text) - text = re.sub(r'\s*๏ผŒ\s*', ', ', text) - text = re.sub(r'\s*ใ€‚\s*', '. ', text) - text = re.sub(r'\s*๏ผŸ\s*', '? ', text) - text = re.sub(r'\s*๏ผ\s*', '! 
', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/FridaZuley/RVC_HFKawaii/tools/torchgate/__init__.py b/spaces/FridaZuley/RVC_HFKawaii/tools/torchgate/__init__.py deleted file mode 100644 index b4a12675828dceb6e6270f9439cdf98ea28ea96d..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/tools/torchgate/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -TorchGating is a PyTorch-based implementation of Spectral Gating -================================================ -Author: Asaf Zorea - -Contents --------- -torchgate imports all the functions from PyTorch, and in addition provides: - TorchGating --- A PyTorch module that applies a spectral gate to an input signal - -""" -from .torchgate import TorchGate diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_ball_stacking.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_ball_stacking.py deleted file mode 100644 index 4790fe09a8429f1625d0b8ef19f062808f9f26f9..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_ball_stacking.py +++ /dev/null @@ -1,66 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils -import pybullet as p - -class ColorCoordinatedBallStacking(Task): - """Stack balls on top of the corresponding colored containers in a specific color sequence.""" - - def __init__(self): - super().__init__() - self.max_steps = 10 - self.lang_template = "stack the balls on top of the corresponding colored containers in the sequence blue, yellow, green, red" - self.task_completed_desc = "done stacking balls." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Define the color sequence - color_sequence = ['blue', 'yellow', 'green', 'red'] - - # Add containers. - container_size = (0.12, 0.12, 0.12) - container_urdf = 'container/container-template.urdf' - container_poses = [] - containers = [] - for color in color_sequence: - container_pose = self.get_random_pose(env, container_size) - container_id = env.add_object(container_urdf, container_pose, color=utils.COLORS[color]) - container_poses.append(container_pose) - containers.append(container_id) - - # Add balls. - ball_size = (0.04, 0.04, 0.04) - ball_urdf = 'ball/ball-template.urdf' - balls = [] - for color in color_sequence: - ball_pose = self.get_random_pose(env, ball_size) - ball_id = env.add_object(ball_urdf, ball_pose, color=utils.COLORS[color]) - balls.append(ball_id) - - # Goal: each ball is stacked on top of the corresponding colored container in the color sequence. - for i in range(len(balls)): - self.add_goal(objs=[balls[i]], matches=np.ones((1, 1)), targ_poses=[container_poses[i]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1/len(balls), - language_goal=self.lang_template.format(obj=color_sequence[i])) - - # Add distractors. 
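-        # Six distractor objects (balls or containers in random colors) are
-        # added without goals; they only serve as scene clutter.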
- n_distractors = 0 - while n_distractors < 6: - is_ball = np.random.rand() > 0.5 - urdf = ball_urdf if is_ball else container_urdf - size = ball_size if is_ball else container_size - pose = self.get_random_pose(env, obj_size=size) - color = np.random.choice(list(utils.COLORS.keys())) - - obj_id = env.add_object(urdf, pose, color=utils.COLORS[color]) - n_distractors += 1 \ No newline at end of file diff --git a/spaces/Goutam982/RVC_V2_voice_clone/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/Goutam982/RVC_V2_voice_clone/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/Goutam982/RVC_V2_voice_clone/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py deleted file mode 100644 index eac05a64a22f28d597eb4c8b1c31351b52829056..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/retinanet/retinanet_x101_64x4d_fpn_2x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './retinanet_r50_fpn_2x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_64x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=64, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/point_rend.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/point_rend.py deleted file mode 100644 index 808ef2258ae88301d349db3aaa2711f223e5c971..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/detectors/point_rend.py +++ /dev/null @@ -1,29 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class PointRend(TwoStageDetector): - """PointRend: Image Segmentation as Rendering - - This detector is the implementation of - `PointRend `_. 
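-    The wrapper itself only assembles the standard two-stage pipeline; the
-    point-based mask refinement is implemented by the ``roi_head`` supplied
-    via the config.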
- - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None): - super(PointRend, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py deleted file mode 100644 index 6b4cc571294fa45b4442c2bfeb9fda13a14fc5c2..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_769x769_40k_cityscapes.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './fcn_r50-d8_769x769_40k_cityscapes.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/analyze_logs.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/analyze_logs.py deleted file mode 100644 index c3a468b5542e3f041c255a1ab7368698416b2788..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/tools/analyze_logs.py +++ /dev/null @@ -1,123 +0,0 @@ -"""Modified from https://github.com/open- -mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py.""" -import argparse -import json -from collections import defaultdict - -import matplotlib.pyplot as plt -import seaborn as sns - - -def plot_curve(log_dicts, args): - if args.backend is not None: - plt.switch_backend(args.backend) - sns.set_style(args.style) - # if legend is None, use {filename}_{key} as legend - legend = args.legend - if legend is None: - legend = [] - for json_log in args.json_logs: - for metric in args.keys: - legend.append(f'{json_log}_{metric}') - assert len(legend) == (len(args.json_logs) * len(args.keys)) - metrics = args.keys - - num_metrics = len(metrics) - for i, log_dict in enumerate(log_dicts): - epochs = list(log_dict.keys()) - for j, metric in enumerate(metrics): - print(f'plot curve of {args.json_logs[i]}, metric is {metric}') - plot_epochs = [] - plot_iters = [] - plot_values = [] - for epoch in epochs: - epoch_logs = log_dict[epoch] - if metric not in epoch_logs.keys(): - continue - if metric in ['mIoU', 'mAcc', 'aAcc']: - plot_epochs.append(epoch) - plot_values.append(epoch_logs[metric][0]) - else: - for idx in range(len(epoch_logs[metric])): - plot_iters.append(epoch_logs['iter'][idx]) - plot_values.append(epoch_logs[metric][idx]) - ax = plt.gca() - label = legend[i * num_metrics + j] - if metric in ['mIoU', 'mAcc', 'aAcc']: - ax.set_xticks(plot_epochs) - plt.xlabel('epoch') - plt.plot(plot_epochs, plot_values, label=label, marker='o') - else: - plt.xlabel('iter') - plt.plot(plot_iters, plot_values, label=label, linewidth=0.5) - plt.legend() - if args.title is not None: - plt.title(args.title) - if args.out is None: - plt.show() - else: - print(f'save curve to: {args.out}') - plt.savefig(args.out) - plt.cla() - - -def parse_args(): - parser = argparse.ArgumentParser(description='Analyze Json Log') - parser.add_argument( - 'json_logs', - type=str, - nargs='+', - help='path of train log in json format') - parser.add_argument( - '--keys', - type=str, - nargs='+', - default=['mIoU'], - help='the metric that you want to plot') - parser.add_argument('--title', type=str, help='title of figure') - parser.add_argument( - '--legend', - type=str, - nargs='+', - default=None, - help='legend of 
each plot') - parser.add_argument( - '--backend', type=str, default=None, help='backend of plt') - parser.add_argument( - '--style', type=str, default='dark', help='style of plt') - parser.add_argument('--out', type=str, default=None) - args = parser.parse_args() - return args - - -def load_json_logs(json_logs): - # load and convert json_logs to log_dict, key is epoch, value is a sub dict - # keys of sub dict is different metrics - # value of sub dict is a list of corresponding values of all iterations - log_dicts = [dict() for _ in json_logs] - for json_log, log_dict in zip(json_logs, log_dicts): - with open(json_log, 'r') as log_file: - for line in log_file: - log = json.loads(line.strip()) - # skip lines without `epoch` field - if 'epoch' not in log: - continue - epoch = log.pop('epoch') - if epoch not in log_dict: - log_dict[epoch] = defaultdict(list) - for k, v in log.items(): - log_dict[epoch][k].append(v) - return log_dicts - - -def main(): - args = parse_args() - json_logs = args.json_logs - for json_log in json_logs: - assert json_log.endswith('.json') - log_dicts = load_json_logs(json_logs) - plot_curve(log_dicts, args) - - -if __name__ == '__main__': - main() diff --git a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/conv.py b/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/conv.py deleted file mode 100644 index d115cbf8729b642ed78608bd00a4d0fd5afae6fd..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/AudioCraft_Plus/audiocraft/modules/conv.py +++ /dev/null @@ -1,243 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import math -import typing as tp -import warnings - -import torch -from torch import nn -from torch.nn import functional as F -from torch.nn.utils import spectral_norm, weight_norm - - -CONV_NORMALIZATIONS = frozenset(['none', 'weight_norm', 'spectral_norm', - 'time_group_norm']) - - -def apply_parametrization_norm(module: nn.Module, norm: str = 'none'): - assert norm in CONV_NORMALIZATIONS - if norm == 'weight_norm': - return weight_norm(module) - elif norm == 'spectral_norm': - return spectral_norm(module) - else: - # We already check was in CONV_NORMALIZATION, so any other choice - # doesn't need reparametrization. - return module - - -def get_norm_module(module: nn.Module, causal: bool = False, norm: str = 'none', **norm_kwargs): - """Return the proper normalization module. If causal is True, this will ensure the returned - module is causal, or return an error if the normalization doesn't support causal evaluation. - """ - assert norm in CONV_NORMALIZATIONS - if norm == 'time_group_norm': - if causal: - raise ValueError("GroupNorm doesn't support causal evaluation.") - assert isinstance(module, nn.modules.conv._ConvNd) - return nn.GroupNorm(1, module.out_channels, **norm_kwargs) - else: - return nn.Identity() - - -def get_extra_padding_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, - padding_total: int = 0) -> int: - """See `pad_for_conv1d`.""" - length = x.shape[-1] - n_frames = (length - kernel_size + padding_total) / stride + 1 - ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - padding_total) - return ideal_length - length - - -def pad_for_conv1d(x: torch.Tensor, kernel_size: int, stride: int, padding_total: int = 0): - """Pad for a convolution to make sure that the last window is full. - Extra padding is added at the end. 
This is required to ensure that we can rebuild - an output of the same length, as otherwise, even with padding, some time steps - might get removed. - For instance, with total padding = 4, kernel size = 4, stride = 2: - 0 0 1 2 3 4 5 0 0 # (0s are padding) - 1 2 3 # (output frames of a convolution, last 0 is never used) - 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding) - 1 2 3 4 # once you removed padding, we are missing one time step ! - """ - extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) - return F.pad(x, (0, extra_padding)) - - -def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.): - """Tiny wrapper around F.pad, just to allow for reflect padding on small input. - If this is the case, we insert extra 0 padding to the right before the reflection happen. - """ - length = x.shape[-1] - padding_left, padding_right = paddings - assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) - if mode == 'reflect': - max_pad = max(padding_left, padding_right) - extra_pad = 0 - if length <= max_pad: - extra_pad = max_pad - length + 1 - x = F.pad(x, (0, extra_pad)) - padded = F.pad(x, paddings, mode, value) - end = padded.shape[-1] - extra_pad - return padded[..., :end] - else: - return F.pad(x, paddings, mode, value) - - -def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]): - """Remove padding from x, handling properly zero padding. Only for 1d!""" - padding_left, padding_right = paddings - assert padding_left >= 0 and padding_right >= 0, (padding_left, padding_right) - assert (padding_left + padding_right) <= x.shape[-1] - end = x.shape[-1] - padding_right - return x[..., padding_left: end] - - -class NormConv1d(nn.Module): - """Wrapper around Conv1d and normalization applied to this conv - to provide a uniform interface across normalization approaches. - """ - def __init__(self, *args, causal: bool = False, norm: str = 'none', - norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): - super().__init__() - self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm) - self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs) - self.norm_type = norm - - def forward(self, x): - x = self.conv(x) - x = self.norm(x) - return x - - -class NormConv2d(nn.Module): - """Wrapper around Conv2d and normalization applied to this conv - to provide a uniform interface across normalization approaches. - """ - def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): - super().__init__() - self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm) - self.norm = get_norm_module(self.conv, causal=False, norm=norm, **norm_kwargs) - self.norm_type = norm - - def forward(self, x): - x = self.conv(x) - x = self.norm(x) - return x - - -class NormConvTranspose1d(nn.Module): - """Wrapper around ConvTranspose1d and normalization applied to this conv - to provide a uniform interface across normalization approaches. 
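# Illustrative sketch of the pad1d() helper defined above: PyTorch's reflect
# padding requires pad < input length, so for short inputs pad1d zero-extends
# on the right first, reflects, then trims the helper zeros. The snippet
# replicates that logic inline with plain F.pad calls.
import torch
import torch.nn.functional as F

x = torch.arange(3.).view(1, 1, 3)   # length 3, but we want (4, 4) padding
extra = 4 - 3 + 1                    # max_pad - length + 1, as in pad1d
x_ext = F.pad(x, (0, extra))         # zero-extend right so reflect is legal
y = F.pad(x_ext, (4, 4), mode='reflect')
y = y[..., :y.shape[-1] - extra]     # drop the helper zeros again
print(y.shape)                       # torch.Size([1, 1, 11]) == 3 + 4 + 4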
- """ - def __init__(self, *args, causal: bool = False, norm: str = 'none', - norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): - super().__init__() - self.convtr = apply_parametrization_norm(nn.ConvTranspose1d(*args, **kwargs), norm) - self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs) - self.norm_type = norm - - def forward(self, x): - x = self.convtr(x) - x = self.norm(x) - return x - - -class NormConvTranspose2d(nn.Module): - """Wrapper around ConvTranspose2d and normalization applied to this conv - to provide a uniform interface across normalization approaches. - """ - def __init__(self, *args, norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, **kwargs): - super().__init__() - self.convtr = apply_parametrization_norm(nn.ConvTranspose2d(*args, **kwargs), norm) - self.norm = get_norm_module(self.convtr, causal=False, norm=norm, **norm_kwargs) - - def forward(self, x): - x = self.convtr(x) - x = self.norm(x) - return x - - -class StreamableConv1d(nn.Module): - """Conv1d with some builtin handling of asymmetric or causal padding - and normalization. - """ - def __init__(self, in_channels: int, out_channels: int, - kernel_size: int, stride: int = 1, dilation: int = 1, - groups: int = 1, bias: bool = True, causal: bool = False, - norm: str = 'none', norm_kwargs: tp.Dict[str, tp.Any] = {}, - pad_mode: str = 'reflect'): - super().__init__() - # warn user on unusual setup between dilation and stride - if stride > 1 and dilation > 1: - warnings.warn("StreamableConv1d has been initialized with stride > 1 and dilation > 1" - f" (kernel_size={kernel_size} stride={stride}, dilation={dilation}).") - self.conv = NormConv1d(in_channels, out_channels, kernel_size, stride, - dilation=dilation, groups=groups, bias=bias, causal=causal, - norm=norm, norm_kwargs=norm_kwargs) - self.causal = causal - self.pad_mode = pad_mode - - def forward(self, x): - B, C, T = x.shape - kernel_size = self.conv.conv.kernel_size[0] - stride = self.conv.conv.stride[0] - dilation = self.conv.conv.dilation[0] - kernel_size = (kernel_size - 1) * dilation + 1 # effective kernel size with dilations - padding_total = kernel_size - stride - extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, padding_total) - if self.causal: - # Left padding for causal - x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode) - else: - # Asymmetric padding required for odd strides - padding_right = padding_total // 2 - padding_left = padding_total - padding_right - x = pad1d(x, (padding_left, padding_right + extra_padding), mode=self.pad_mode) - return self.conv(x) - - -class StreamableConvTranspose1d(nn.Module): - """ConvTranspose1d with some builtin handling of asymmetric or causal padding - and normalization. - """ - def __init__(self, in_channels: int, out_channels: int, - kernel_size: int, stride: int = 1, causal: bool = False, - norm: str = 'none', trim_right_ratio: float = 1., - norm_kwargs: tp.Dict[str, tp.Any] = {}): - super().__init__() - self.convtr = NormConvTranspose1d(in_channels, out_channels, kernel_size, stride, - causal=causal, norm=norm, norm_kwargs=norm_kwargs) - self.causal = causal - self.trim_right_ratio = trim_right_ratio - assert self.causal or self.trim_right_ratio == 1., \ - "`trim_right_ratio` != 1.0 only makes sense for causal convolutions" - assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1. 
- - def forward(self, x): - kernel_size = self.convtr.convtr.kernel_size[0] - stride = self.convtr.convtr.stride[0] - padding_total = kernel_size - stride - - y = self.convtr(x) - - # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be - # removed at the very end, when keeping only the right length for the output, - # as removing it here would require also passing the length at the matching layer - # in the encoder. - if self.causal: - # Trim the padding on the right according to the specified ratio - # if trim_right_ratio = 1.0, trim everything from right - padding_right = math.ceil(padding_total * self.trim_right_ratio) - padding_left = padding_total - padding_right - y = unpad1d(y, (padding_left, padding_right)) - else: - # Asymmetric padding required for odd strides - padding_right = padding_total // 2 - padding_left = padding_total - padding_right - y = unpad1d(y, (padding_left, padding_right)) - return y diff --git a/spaces/Greencapabara/OpenAI-whisper-with-upload.no-time-limit/app-shared.py b/spaces/Greencapabara/OpenAI-whisper-with-upload.no-time-limit/app-shared.py deleted file mode 100644 index eb8d9bdc3de88aa79bd0bac7e8e019d49ed97749..0000000000000000000000000000000000000000 --- a/spaces/Greencapabara/OpenAI-whisper-with-upload.no-time-limit/app-shared.py +++ /dev/null @@ -1,3 +0,0 @@ -# Run the app with no audio file restrictions -from app import createUi -createUi(-1, share=True) \ No newline at end of file diff --git a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/ner_zen2_large_ontonotes4.sh b/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/ner_zen2_large_ontonotes4.sh deleted file mode 100644 index f8bb41316b4cec4bb94fa36ac9bc39c9f3ce41f8..0000000000000000000000000000000000000000 --- a/spaces/HaloMaster/chinesesummary/fengshen/examples/zen2_finetune/ner_zen2_large_ontonotes4.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=zen2_large_ontonotes4 # create a short name for your job -#SBATCH --nodes=1 # node count -#SBATCH --ntasks=1 # total number of tasks across all nodes -#SBATCH --cpus-per-task=30 # cpu-cores per task (>1 if multi-threaded tasks) -#SBATCH --gres=gpu:1 # number of gpus per node -#SBATCH --mail-type=ALL # send email when job begins, ends or failed etc. -#SBATCH -o /cognitive_comp/ganruyi/experiments/ner_finetune/zen2_large_ontonotes4/%x-%j.log # output and error file name (%x=job name, %j=job id) - - -# export CUDA_VISIBLE_DEVICES='2' -export TORCH_EXTENSIONS_DIR=/cognitive_comp/ganruyi/tmp/torch_extendsions - -MODEL_NAME=zen2_large - -TASK=ontonotes4 - -ZERO_STAGE=1 -STRATEGY=deepspeed_stage_${ZERO_STAGE} - -ROOT_DIR=/cognitive_comp/ganruyi/experiments/ner_finetune/${MODEL_NAME}_${TASK} -if [ ! -d ${ROOT_DIR} ];then - mkdir -p ${ROOT_DIR} - echo ${ROOT_DIR} created!!!!!!!!!!!!!! -else - echo ${ROOT_DIR} exist!!!!!!!!!!!!!!! 
-fi - -DATA_DIR=/cognitive_comp/lujunyu/data_zh/NER_Aligned/OntoNotes4/ -PRETRAINED_MODEL_PATH=IDEA-CCNL/Erlangshen-ZEN2-345M-Chinese - -CHECKPOINT_PATH=${ROOT_DIR}/ckpt/ -OUTPUT_PATH=${ROOT_DIR}/predict.json - -DATA_ARGS="\ - --data_dir $DATA_DIR \ - --train_data train.char.bmes \ - --valid_data test.char.bmes \ - --test_data test.char.bmes \ - --train_batchsize 16 \ - --valid_batchsize 16 \ - --max_seq_length 256 \ - --task_name ontonotes4 \ - " - -MODEL_ARGS="\ - --learning_rate 3e-5 \ - --weight_decay 0.1 \ - --warmup_ratio 0.01 \ - --markup bioes \ - --middle_prefix M- \ - " - -MODEL_CHECKPOINT_ARGS="\ - --monitor val_f1 \ - --save_top_k 3 \ - --mode max \ - --every_n_train_steps 200 \ - --save_weights_only True \ - --dirpath $CHECKPOINT_PATH \ - --filename model-{epoch:02d}-{val_f1:.4f} \ - " - -TRAINER_ARGS="\ - --max_epochs 30 \ - --gpus 1 \ - --check_val_every_n_epoch 1 \ - --val_check_interval 200 \ - --default_root_dir $ROOT_DIR \ - " - - -options=" \ - --pretrained_model_path $PRETRAINED_MODEL_PATH \ - --vocab_file $PRETRAINED_MODEL_PATH/vocab.txt \ - --do_lower_case \ - --output_save_path $OUTPUT_PATH \ - $DATA_ARGS \ - $MODEL_ARGS \ - $MODEL_CHECKPOINT_ARGS \ - $TRAINER_ARGS \ -" -SCRIPT_PATH=/cognitive_comp/ganruyi/Fengshenbang-LM/fengshen/examples/zen2_finetune/fengshen_token_level_ft_task.py -/home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - -# SINGULARITY_PATH=/cognitive_comp/ganruyi/pytorch21_06_py3_docker_image_v2.sif -# python3 $SCRIPT_PATH $options -# source activate base -# singularity exec --nv -B /cognitive_comp/:/cognitive_comp/ $SINGULARITY_PATH /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options -# /home/ganruyi/anaconda3/bin/python $SCRIPT_PATH $options - diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py deleted file mode 100644 index ea8fae98e87e9f3e69bc51987703a6429eb0c92a..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py +++ /dev/null @@ -1,842 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Dict, List, Optional - -import math -import numpy as np - -import torch -import torch.nn.functional as F -from torch import Tensor - -from .noisy_channel_beam_search import NoisyChannelBeamSearch -from fairseq.sequence_generator import EnsembleModel - - -class NoisyChannelSequenceGenerator(object): - def __init__( - self, - combine_method, - tgt_dict, - src_dict=None, - beam_size=1, - max_len_a=0, - max_len_b=200, - min_len=1, - len_penalty=1.0, - unk_penalty=0.0, - retain_dropout=False, - temperature=1.0, - match_source_len=False, - no_repeat_ngram_size=0, - normalize_scores=True, - channel_models=None, - k2=10, - ch_weight=1.0, - channel_scoring_type='log_norm', - top_k_vocab=0, - lm_models=None, - lm_dict=None, - lm_weight=1.0, - normalize_lm_scores_by_tgt_len=False, - ): - """Generates translations of a given source sentence, - using beam search with noisy channel decoding. 
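# Illustrative sketch (plain floats stand in for tensors) of the
# "noisy_channel" score combination that combine_ch_lm() applies later in
# this file; the beam search then adds the direct-model score on top.
def combined_channel_score(ch_logprob, lm_logprob, src_len, tgt_len,
                           ch_weight=1.0, lm_weight=1.0,
                           normalize_lm_by_tgt_len=False):
    ch = ch_weight * ch_logprob
    lm = lm_weight * lm_logprob
    if normalize_lm_by_tgt_len:
        return ch / src_len + lm / tgt_len  # 1/s log P(S|T) + 1/t log P(T)
    return (ch + lm) / src_len              # both normalized by source length

print(combined_channel_score(-8.0, -12.0, src_len=10, tgt_len=8))  # -2.0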
- - Args: - combine_method (string, optional): Method to combine direct, LM and - channel model scores (default: None) - tgt_dict (~fairseq.data.Dictionary): target dictionary - src_dict (~fairseq.data.Dictionary): source dictionary - beam_size (int, optional): beam width (default: 1) - max_len_a/b (int, optional): generate sequences of maximum length - ax + b, where x is the source length - min_len (int, optional): the minimum length of the generated output - (not including end-of-sentence) - len_penalty (float, optional): length penalty, where <1.0 favors - shorter, >1.0 favors longer sentences (default: 1.0) - unk_penalty (float, optional): unknown word penalty, where <0 - produces more unks, >0 produces fewer (default: 0.0) - retain_dropout (bool, optional): use dropout when generating - (default: False) - temperature (float, optional): temperature, where values - >1.0 produce more uniform samples and values <1.0 produce - sharper samples (default: 1.0) - match_source_len (bool, optional): outputs should match the source - length (default: False) - no_repeat_ngram_size (int, optional): Size of n-grams that we avoid - repeating in the generation (default: 0) - normalize_scores (bool, optional): normalize scores by the length - of the output (default: True) - channel_models (List[~fairseq.models.FairseqModel]): ensemble of models - translating from the target to the source - k2 (int, optional): Top K2 candidates to score per beam at each step (default:10) - ch_weight (int, optional): Weight associated with the channel model score - assuming that the direct model score has weight 1.0 (default: 1.0) - channel_scoring_type (str, optional): String specifying how to score - the channel model (default: 'log_norm') - top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or - `'src_vocab_batched'`, then this parameter specifies the number of - most frequent tokens to include in the channel model output vocabulary, - in addition to the source tokens in the input batch (default: 0) - lm_models (List[~fairseq.models.FairseqModel]): ensemble of models - generating text in the target language - lm_dict (~fairseq.data.Dictionary): LM Model dictionary - lm_weight (int, optional): Weight associated with the LM model score - assuming that the direct model score has weight 1.0 (default: 1.0) - normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores - by the target length? 
By default, we normalize the combination of - LM and channel model scores by the source length - """ - self.pad = tgt_dict.pad() - self.unk = tgt_dict.unk() - self.eos = tgt_dict.eos() - self.vocab_size = len(tgt_dict) - self.beam_size = beam_size - # the max beam size is the dictionary size - 1, since we never select pad - self.beam_size = min(beam_size, self.vocab_size - 1) - self.max_len_a = max_len_a - self.max_len_b = max_len_b - self.min_len = min_len - self.normalize_scores = normalize_scores - self.len_penalty = len_penalty - self.unk_penalty = unk_penalty - self.retain_dropout = retain_dropout - self.temperature = temperature - self.match_source_len = match_source_len - self.no_repeat_ngram_size = no_repeat_ngram_size - self.channel_models = channel_models - self.src_dict = src_dict - self.tgt_dict = tgt_dict - self.combine_method = combine_method - self.k2 = k2 - self.ch_weight = ch_weight - self.channel_scoring_type = channel_scoring_type - self.top_k_vocab = top_k_vocab - self.lm_models = lm_models - self.lm_dict = lm_dict - self.lm_weight = lm_weight - self.log_softmax_fn = torch.nn.LogSoftmax(dim=1) - self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len - - self.share_tgt_dict = (self.lm_dict == self.tgt_dict) - self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict) - - self.ch_scoring_bsz = 3072 - - assert temperature > 0, '--temperature must be greater than 0' - - self.search = NoisyChannelBeamSearch(tgt_dict) - - @torch.no_grad() - def generate( - self, - models, - sample, - prefix_tokens=None, - bos_token=None, - **kwargs - ): - """Generate a batch of translations. - Args: - models (List[~fairseq.models.FairseqModel]): ensemble of models - sample (dict): batch - prefix_tokens (torch.LongTensor, optional): force decoder to begin - with these tokens - """ - model = EnsembleModel(models) - incremental_states = torch.jit.annotate( - List[Dict[str, Dict[str, Optional[Tensor]]]], - [ - torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) - for i in range(model.models_size) - ], - ) - if not self.retain_dropout: - model.eval() - - # model.forward normally channels prev_output_tokens into the decoder - # separately, but SequenceGenerator directly calls model.encoder - encoder_input = { - k: v for k, v in sample['net_input'].items() - if k != 'prev_output_tokens' - } - src_tokens = encoder_input['src_tokens'] - src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) - input_size = src_tokens.size() - # batch dimension goes first followed by source lengths - bsz = input_size[0] - src_len = input_size[1] - beam_size = self.beam_size - - if self.match_source_len: - max_len = src_lengths_no_eos.max().item() - else: - max_len = min( - int(self.max_len_a * src_len + self.max_len_b), - # exclude the EOS marker - model.max_decoder_positions() - 1, - ) - - # compute the encoder output for each beam - encoder_outs = model.forward_encoder(encoder_input) - new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) - new_order = new_order.to(src_tokens.device).long() - encoder_outs = model.reorder_encoder_out(encoder_outs, new_order) - - src_lengths = encoder_input['src_lengths'] - # initialize buffers - scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0) - lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0) - - scores_buf = scores.clone() - tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad) - tokens_buf = tokens.clone() - tokens[:, 0] = self.eos if bos_token is 
None else bos_token - - # reorder source tokens so they may be used as a reference in generating P(S|T) - src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index) - - src_tokens = src_tokens.repeat(1, beam_size).view(-1, src_len) - src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1) - - attn, attn_buf = None, None - nonpad_idxs = None - - # The cands_to_ignore indicates candidates that should be ignored. - # For example, suppose we're sampling and have already finalized 2/5 - # samples. Then the cands_to_ignore would mark 2 positions as being ignored, - # so that we only finalize the remaining 3 samples. - cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask - - # list of completed sentences - finalized = [[] for i in range(bsz)] - finished = [False for i in range(bsz)] - num_remaining_sent = bsz - - # number of candidate hypos per step - cand_size = 2 * beam_size # 2 x beam size in case half are EOS - - # offset arrays for converting between different indexing schemes - bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) - cand_offsets = torch.arange(0, cand_size).type_as(tokens) - - # helper function for allocating buffers on the fly - buffers = {} - - def buffer(name, type_of=tokens): # noqa - if name not in buffers: - buffers[name] = type_of.new() - return buffers[name] - - def is_finished(sent, step, unfin_idx): - """ - Check whether we've finished generation for a given sentence, by - comparing the worst score among finalized hypotheses to the best - possible score among unfinalized hypotheses. - """ - assert len(finalized[sent]) <= beam_size - if len(finalized[sent]) == beam_size: - return True - return False - - def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores): - """ - Finalize the given hypotheses at this step, while keeping the total - number of finalized hypotheses per sentence <= beam_size. - - Note: the input must be in the desired finalization order, so that - hypotheses that appear earlier in the input are preferred to those - that appear later. 
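# Worked example of the length normalization finalize_hypos() applies below:
# with normalize_scores=True and len_penalty=1.0, a summed log-probability of
# -6.0 for a hypothesis finalized at step=4 (five tokens) becomes
# -6.0 / (4 + 1) ** 1.0 = -1.2.
step, len_penalty = 4, 1.0
print(-6.0 / (step + 1) ** len_penalty)  # -1.2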
- - Args: - step: current time step - bbsz_idx: A vector of indices in the range [0, bsz*beam_size), - indicating which hypotheses to finalize - eos_scores: A vector of the same size as bbsz_idx containing - fw scores for each hypothesis - combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing - combined noisy channel scores for each hypothesis - """ - assert bbsz_idx.numel() == eos_scores.numel() - - # clone relevant token and attention tensors - tokens_clone = tokens.index_select(0, bbsz_idx) - tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS - assert not tokens_clone.eq(self.eos).any() - tokens_clone[:, step] = self.eos - attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None - - # compute scores per token position - pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] - pos_scores[:, step] = eos_scores - # convert from cumulative to per-position scores - pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] - - # normalize sentence-level scores - if self.normalize_scores: - combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty - - cum_unfin = [] - prev = 0 - for f in finished: - if f: - prev += 1 - else: - cum_unfin.append(prev) - - sents_seen = set() - for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())): - unfin_idx = idx // beam_size - sent = unfin_idx + cum_unfin[unfin_idx] - - sents_seen.add((sent, unfin_idx)) - - if self.match_source_len and step > src_lengths_no_eos[unfin_idx]: - score = -math.inf - - def get_hypo(): - - if attn_clone is not None: - # remove padding tokens from attn scores - hypo_attn = attn_clone[i][nonpad_idxs[sent]] - _, alignment = hypo_attn.max(dim=0) - else: - hypo_attn = None - alignment = None - - return { - 'tokens': tokens_clone[i], - 'score': score, - 'attention': hypo_attn, # src_len x tgt_len - 'alignment': alignment, - 'positional_scores': pos_scores[i], - } - - if len(finalized[sent]) < beam_size: - finalized[sent].append(get_hypo()) - - newly_finished = [] - for sent, unfin_idx in sents_seen: - # check termination conditions for this sentence - if not finished[sent] and is_finished(sent, step, unfin_idx): - finished[sent] = True - newly_finished.append(unfin_idx) - return newly_finished - - def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k): - """Rescore the top k hypothesis from each beam using noisy channel modeling - Returns: - new_fw_lprobs: the direct model probabilities after pruning the top k - new_ch_lm_lprobs: the combined channel and language model probabilities - new_lm_lprobs: the language model probabilities after pruning the top k - """ - with torch.no_grad(): - lprobs_size = lprobs.size() - if prefix_tokens is not None and step < prefix_tokens.size(1): - probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] - cand_scores = torch.gather( - probs_slice, dim=1, - index=prefix_tokens[:, step].view(-1, 1).data - ).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1) - cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1) - - # need to calculate and save fw and lm probs for prefix tokens - fw_top_k = cand_scores - fw_top_k_idx = cand_indices - k = 1 - else: - # take the top k best words for every sentence in batch*beam - fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k) - eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0] - 
ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0) - src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype) - - if self.combine_method != "lm_only": - temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1) - not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index - cur_tgt_size = step+2 - - # add eos to all candidate sentences except those that already end in eos - eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1) - eos_tokens[eos_idx] = self.tgt_dict.pad_index - - if step == 0: - channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1) - else: - # move eos from beginning to end of target sentence - channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1) - - ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size)) - ch_input_lengths[eos_idx] = cur_tgt_size-1 - if self.channel_scoring_type == "unnormalized": - ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) - ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) - del ch_encoder_output - ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:]) - ch_intermed_scores = ch_intermed_scores.float() - ch_intermed_scores *= not_padding.float() - ch_scores = torch.sum(ch_intermed_scores, dim=1) - elif self.channel_scoring_type == "k2_separate": - for k_idx in range(k): - k_eos_tokens = eos_tokens[k_idx::k, :] - if step == 0: - k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) - else: - # move eos from beginning to end of target sentence - k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) - k_ch_input_lengths = ch_input_lengths[k_idx::k] - k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens) - k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True) - k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2) - k_ch_intermed_scores *= not_padding.float() - ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1) - elif self.channel_scoring_type == "src_vocab": - ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) - ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) - - del ch_encoder_output - ch_lprobs = normalized_scores_with_batch_vocab( - channel_model.decoder, - ch_decoder_output, src_tokens, k, bsz, beam_size, - self.src_dict.pad_index, top_k=self.top_k_vocab) - ch_scores = torch.sum(ch_lprobs, dim=1) - elif self.channel_scoring_type == "src_vocab_batched": - ch_bsz_size = temp_src_tokens_full.shape[0] - ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz)) - for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)): - end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size) - temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :] - channel_input_batch = channel_input[start_idx:end_idx, :] - ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx] - ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch) - ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, 
encoder_out=ch_encoder_output_batch, features_only=True) - ch_lprobs_list[i] = normalized_scores_with_batch_vocab( - channel_model.decoder, - ch_decoder_output_batch, src_tokens, k, bsz, beam_size, - self.src_dict.pad_index, top_k=self.top_k_vocab, - start_idx=start_idx, end_idx=end_idx) - ch_lprobs = torch.cat(ch_lprobs_list, dim=0) - ch_scores = torch.sum(ch_lprobs, dim=1) - else: - ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full) - ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True) - ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1) - ch_intermed_scores *= not_padding.float() - ch_scores = torch.sum(ch_intermed_scores, dim=1) - - else: - cur_tgt_size = 0 - ch_scores = ch_scores.view(bsz*beam_size, k) - expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten() - - if self.share_tgt_dict: - lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k) - else: - new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm) - new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm) - lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k) - - lm_scores.add_(expanded_lm_prefix_scores) - ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size) - # initialize all as min value - new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) - new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) - new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) - new_fw_lprobs[:, self.pad] = -math.inf - new_ch_lm_lprobs[:, self.pad] = -math.inf - new_lm_lprobs[:, self.pad] = -math.inf - - new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k) - new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores) - new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k)) - return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs - - def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size): - if self.channel_scoring_type == "unnormalized": - ch_scores = self.log_softmax_fn( - ch_scores.view(-1, self.beam_size * self.k2) - ).view(ch_scores.shape) - ch_scores = ch_scores * self.ch_weight - lm_scores1 = lm_scores1 * self.lm_weight - - if combine_type == "lm_only": - # log P(T|S) + log P(T) - ch_scores = lm_scores1.view(ch_scores.size()) - elif combine_type == "noisy_channel": - # 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T) - if self.normalize_lm_scores_by_tgt_len: - ch_scores.div_(src_size) - lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size) - ch_scores.add_(lm_scores_norm) - # 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T) - else: - ch_scores.add_(lm_scores1.view(ch_scores.size())) - ch_scores.div_(src_size) - - return ch_scores - - if self.channel_models is not None: - channel_model = self.channel_models[0] # assume only one channel_model model - else: - channel_model = None - - lm = EnsembleModel(self.lm_models) - lm_incremental_states = torch.jit.annotate( - List[Dict[str, Dict[str, Optional[Tensor]]]], - [ - torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) - for i in range(lm.models_size) - ], - ) - - reorder_state = None - batch_idxs = None - for step in range(max_len + 1): # one extra step for EOS 
marker - # reorder decoder internal states based on the prev choice of beams - if reorder_state is not None: - if batch_idxs is not None: - # update beam indices to take into account removed sentences - corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) - reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) - model.reorder_incremental_state(incremental_states, reorder_state) - encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state) - - lm.reorder_incremental_state(lm_incremental_states, reorder_state) - - fw_lprobs, avg_attn_scores = model.forward_decoder( - tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature, - ) - - fw_lprobs[:, self.pad] = -math.inf # never select pad - fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty - fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2) - - # handle min and max length constraints - if step >= max_len: - fw_lprobs[:, :self.eos] = -math.inf - fw_lprobs[:, self.eos + 1:] = -math.inf - elif step < self.min_len: - fw_lprobs[:, self.eos] = -math.inf - - # handle prefix tokens (possibly with different lengths) - if prefix_tokens is not None and step < prefix_tokens.size(1): - prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) - prefix_mask = prefix_toks.ne(self.pad) - - prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) - fw_lprobs[prefix_mask] = -math.inf - fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_( - -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs - ) - - prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) - ch_lm_lprobs[prefix_mask] = -math.inf - ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_( - -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs - ) - - prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) - lm_lprobs[prefix_mask] = -math.inf - lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_( - -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs - ) - - # if prefix includes eos, then we should make sure tokens and - # scores are the same across all beams - eos_mask = prefix_toks.eq(self.eos) - if eos_mask.any(): - # validate that the first beam matches the prefix - first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1] - eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] - target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] - assert (first_beam == target_prefix).all() - - def replicate_first_beam(tensor, mask): - tensor = tensor.view(-1, beam_size, tensor.size(-1)) - tensor[mask] = tensor[mask][:, :1, :] - return tensor.view(-1, tensor.size(-1)) - - # copy tokens, scores and lprobs from the first beam to all beams - tokens = replicate_first_beam(tokens, eos_mask_batch_dim) - scores = replicate_first_beam(scores, eos_mask_batch_dim) - - fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim) - ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim) - lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim) - - if self.no_repeat_ngram_size > 0: - # for each beam and batch sentence, generate a list of previous ngrams - gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)] - for bbsz_idx in range(bsz * beam_size): - gen_tokens = tokens[bbsz_idx].tolist() - for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]): - 
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \ - gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]] - - # Record attention scores - if avg_attn_scores is not None: - if attn is None: - attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2) - attn_buf = attn.clone() - nonpad_idxs = src_tokens.ne(self.pad) - attn[:, :, step + 1].copy_(avg_attn_scores) - - scores = scores.type_as(fw_lprobs) - scores_buf = scores_buf.type_as(fw_lprobs) - - self.search.set_src_lengths(src_lengths_no_eos) - - if self.no_repeat_ngram_size > 0: - def calculate_banned_tokens(bbsz_idx): - # before decoding the next token, prevent decoding of ngrams that have already appeared - ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist()) - return gen_ngrams[bbsz_idx].get(ngram_index, []) - - if step + 2 - self.no_repeat_ngram_size >= 0: - # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet - banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)] - else: - banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)] - - for bbsz_idx in range(bsz * beam_size): - fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf - - combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step( - step, - fw_lprobs.view(bsz, -1, self.vocab_size), - scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size), - lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method - ) - - # cand_bbsz_idx contains beam indices for the top candidate - # hypotheses, with a range of values: [0, bsz*beam_size), - # and dimensions: [bsz, cand_size] - cand_bbsz_idx = cand_beams.add(bbsz_offsets) - - # finalize hypotheses that end in eos (except for candidates to be ignored) - eos_mask = cand_indices.eq(self.eos) - eos_mask[:, :beam_size] &= ~cands_to_ignore - - # only consider eos when it's among the top beam_size indices - eos_bbsz_idx = torch.masked_select( - cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] - ) - - finalized_sents = set() - if eos_bbsz_idx.numel() > 0: - eos_scores = torch.masked_select( - fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size] - ) - combined_noisy_channel_eos_scores = torch.masked_select( - combined_noisy_channel_scores[:, :beam_size], - mask=eos_mask[:, :beam_size], - ) - - # finalize hypo using channel model score - finalized_sents = finalize_hypos( - step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores) - - num_remaining_sent -= len(finalized_sents) - - assert num_remaining_sent >= 0 - if num_remaining_sent == 0: - break - - if len(finalized_sents) > 0: - new_bsz = bsz - len(finalized_sents) - - # construct batch_idxs which holds indices of batches to keep for the next pass - batch_mask = cand_indices.new_ones(bsz) - batch_mask[cand_indices.new(finalized_sents)] = 0 - batch_idxs = torch.nonzero(batch_mask).squeeze(-1) - - eos_mask = eos_mask[batch_idxs] - cand_beams = cand_beams[batch_idxs] - bbsz_offsets.resize_(new_bsz, 1) - cand_bbsz_idx = cand_beams.add(bbsz_offsets) - - lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs] - - fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs] - cand_indices = cand_indices[batch_idxs] - if prefix_tokens is not None: - prefix_tokens = prefix_tokens[batch_idxs] - src_lengths_no_eos = src_lengths_no_eos[batch_idxs] - cands_to_ignore = cands_to_ignore[batch_idxs] - - scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - scores_buf.resize_as_(scores) - 
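# Toy illustration of the batch-shrinking pattern in the surrounding lines:
# once some sentences finish, the flat (bsz * beam) buffers are re-indexed by
# the ids of the sentences that are still active.
import torch
bsz, beam = 4, 2
scores = torch.arange(bsz * beam).view(bsz, -1)  # one row per sentence
batch_mask = torch.tensor([1, 0, 1, 1])          # sentence 1 just finished
batch_idxs = torch.nonzero(batch_mask).squeeze(-1)
print(scores[batch_idxs].view(-1, beam).shape)   # torch.Size([3, 2])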
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - tokens_buf.resize_as_(tokens) - src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) - lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze() - - if attn is not None: - attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) - attn_buf.resize_as_(attn) - bsz = new_bsz - else: - batch_idxs = None - - # Set active_mask so that values > cand_size indicate eos or - # ignored hypos and values < cand_size indicate candidate - # active hypos. After this, the min values per row are the top - # candidate active hypos. - eos_mask[:, :beam_size] |= cands_to_ignore - active_mask = torch.add( - eos_mask.type_as(cand_offsets) * cand_size, - cand_offsets[: eos_mask.size(1)], - ) - - # get the top beam_size active hypotheses, which are just the hypos - # with the smallest values in active_mask - active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore') - torch.topk( - active_mask, k=beam_size, dim=1, largest=False, - out=(new_cands_to_ignore, active_hypos) - ) - - # update cands_to_ignore to ignore any finalized hypos - cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] - assert (~cands_to_ignore).any(dim=1).all() - - active_bbsz_idx = buffer('active_bbsz_idx') - torch.gather( - cand_bbsz_idx, dim=1, index=active_hypos, - out=active_bbsz_idx, - ) - active_scores = torch.gather( - fw_lprobs_top_k, dim=1, index=active_hypos, - out=scores[:, step].view(bsz, beam_size), - ) - - active_bbsz_idx = active_bbsz_idx.view(-1) - active_scores = active_scores.view(-1) - - # copy tokens and scores for active hypotheses - torch.index_select( - tokens[:, :step + 1], dim=0, index=active_bbsz_idx, - out=tokens_buf[:, :step + 1], - ) - torch.gather( - cand_indices, dim=1, index=active_hypos, - out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], - ) - if step > 0: - torch.index_select( - scores[:, :step], dim=0, index=active_bbsz_idx, - out=scores_buf[:, :step], - ) - torch.gather( - fw_lprobs_top_k, dim=1, index=active_hypos, - out=scores_buf.view(bsz, beam_size, -1)[:, :, step], - ) - torch.gather( - lm_lprobs_top_k, dim=1, index=active_hypos, - out=lm_prefix_scores.view(bsz, beam_size) - ) - - # copy attention for active hypotheses - if attn is not None: - torch.index_select( - attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, - out=attn_buf[:, :, :step + 2], - ) - - # swap buffers - tokens, tokens_buf = tokens_buf, tokens - scores, scores_buf = scores_buf, scores - if attn is not None: - attn, attn_buf = attn_buf, attn - - # reorder incremental state in decoder - reorder_state = active_bbsz_idx - - # sort by score descending - for sent in range(len(finalized)): - finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) - - return finalized - - -def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k): - with torch.no_grad(): - lm_lprobs, avg_attn_scores = model.forward_decoder( - input_tokens, encoder_outs=None, incremental_states=incremental_states, - ) - - lm_lprobs_size = lm_lprobs.size(0) - probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1) - - return probs_next_wrd - - -def make_dict2dict(old_dict, new_dict): - dict2dict_map = {} - for sym in old_dict.symbols: - 
dict2dict_map[old_dict.index(sym)] = new_dict.index(sym)
-    return dict2dict_map
-
-
-def dict2dict(tokens, dict2dict_map):
-    if tokens.device == torch.device('cpu'):
-        tokens_tmp = tokens
-    else:
-        tokens_tmp = tokens.cpu()
-    return tokens_tmp.map_(
-        tokens_tmp,
-        lambda _, val, dict2dict_map=dict2dict_map: dict2dict_map[float(val)]
-    ).to(tokens.device)
-
-
-def reorder_tokens(tokens, lengths, eos):
-    # reorder source tokens so they may be used as reference for P(S|T)
-    return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0)
-
-
-def reorder_all_tokens(tokens, lengths, eos):
-    # used to reorder src tokens from [<pad> ... <w1> <w2> .. <eos>] to [<eos> <w1> <w2> ... <pad> ...]
-    # so source tokens can be used to predict P(S|T)
-    return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)])
-
-
-def normalized_scores_with_batch_vocab(
-        model_decoder, features, target_ids, k, bsz, beam_size,
-        pad_idx, top_k=0, vocab_size_meter=None, start_idx=None,
-        end_idx=None, **kwargs):
-    """
-    Get normalized probabilities (or log probs) from a net's output
-    w.r.t. vocab consisting of target IDs in the batch
-    """
-    if model_decoder.adaptive_softmax is None:
-        weight = model_decoder.output_projection.weight
-        vocab_ids = torch.unique(
-            torch.cat(
-                (torch.unique(target_ids), torch.arange(top_k, device=target_ids.device))
-            )
-        )
-        id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids))))
-        mapped_target_ids = target_ids.cpu().apply_(
-            lambda x, id_map=id_map: id_map[x]
-        ).to(target_ids.device)
-        expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1)
-        if start_idx is not None and end_idx is not None:
-            expanded_target_ids = expanded_target_ids[start_idx:end_idx, :]
-        logits = F.linear(features, weight[vocab_ids, :])
-        log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32)
-        intermed_scores = torch.gather(
-            log_softmax[:, :-1, :],
-            2,
-            expanded_target_ids[:, 1:].unsqueeze(2),
-        ).squeeze()
-        not_padding = expanded_target_ids[:, 1:] != pad_idx
-        intermed_scores *= not_padding.float()
-        return intermed_scores
-    else:
-        raise ValueError("adaptive softmax doesn't work with " +
-                         "`normalized_scores_with_batch_vocab()`")
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/hubert/simple_kmeans/dump_km_label.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/hubert/simple_kmeans/dump_km_label.py
deleted file mode 100644
index 8871307804d3f1e5c7cc49061614c69df26ab1ee..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/examples/hubert/simple_kmeans/dump_km_label.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
- -import logging -import os -import sys - -import numpy as np - -import joblib -import torch -import tqdm - -logging.basicConfig( - format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", - datefmt="%Y-%m-%d %H:%M:%S", - level=os.environ.get("LOGLEVEL", "INFO").upper(), - stream=sys.stdout, -) -logger = logging.getLogger("dump_km_label") - - -class ApplyKmeans(object): - def __init__(self, km_path): - self.km_model = joblib.load(km_path) - self.C_np = self.km_model.cluster_centers_.transpose() - self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True) - - self.C = torch.from_numpy(self.C_np) - self.Cnorm = torch.from_numpy(self.Cnorm_np) - if torch.cuda.is_available(): - self.C = self.C.cuda() - self.Cnorm = self.Cnorm.cuda() - - def __call__(self, x): - if isinstance(x, torch.Tensor): - dist = ( - x.pow(2).sum(1, keepdim=True) - - 2 * torch.matmul(x, self.C) - + self.Cnorm - ) - return dist.argmin(dim=1).cpu().numpy() - else: - dist = ( - (x ** 2).sum(1, keepdims=True) - - 2 * np.matmul(x, self.C_np) - + self.Cnorm_np - ) - return np.argmin(dist, axis=1) - - -def get_feat_iterator(feat_dir, split, nshard, rank): - feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy" - leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len" - with open(leng_path, "r") as f: - lengs = [int(line.rstrip()) for line in f] - offsets = [0] + np.cumsum(lengs[:-1]).tolist() - - def iterate(): - feat = np.load(feat_path, mmap_mode="r") - assert feat.shape[0] == (offsets[-1] + lengs[-1]) - for offset, leng in zip(offsets, lengs): - yield feat[offset: offset + leng] - - return iterate, len(lengs) - - -def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir): - apply_kmeans = ApplyKmeans(km_path) - generator, num = get_feat_iterator(feat_dir, split, nshard, rank) - iterator = generator() - - lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km" - os.makedirs(lab_dir, exist_ok=True) - with open(lab_path, "w") as f: - for feat in tqdm.tqdm(iterator, total=num): - # feat = torch.from_numpy(feat).cuda() - lab = apply_kmeans(feat).tolist() - f.write(" ".join(map(str, lab)) + "\n") - logger.info("finished successfully") - - -if __name__ == "__main__": - import argparse - - parser = argparse.ArgumentParser() - parser.add_argument("feat_dir") - parser.add_argument("split") - parser.add_argument("km_path") - parser.add_argument("nshard", type=int) - parser.add_argument("rank", type=int) - parser.add_argument("lab_dir") - args = parser.parse_args() - logging.info(str(args)) - - dump_label(**vars(args)) diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/audio/data_cfg.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/audio/data_cfg.py deleted file mode 100644 index 95b403ad9c617afb5656131693c92b9cc3befd3b..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/data/audio/data_cfg.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
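# The ApplyKmeans class in the file above scores frames against cluster
# centers via the expansion ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2 (the
# ||x||^2 term is constant per row, so it does not change the argmin). A
# small standalone check that the expanded form matches brute-force distances:
import numpy as np
rng = np.random.default_rng(0)
x = rng.normal(size=(5, 8))              # 5 frames, 8-dim features
C = rng.normal(size=(8, 3))              # 3 cluster centers, stored transposed
Cnorm = (C ** 2).sum(0, keepdims=True)
dist = (x ** 2).sum(1, keepdims=True) - 2 * np.matmul(x, C) + Cnorm
brute = ((x[:, None, :] - C.T[None, :, :]) ** 2).sum(-1)
assert np.allclose(dist, brute)
print(np.argmin(dist, axis=1))           # cluster label per frame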
-
-from pathlib import Path
-from typing import Dict, Optional
-
-
-class S2TDataConfig(object):
-    """Wrapper class for data config YAML"""
-
-    def __init__(self, yaml_path: Path):
-        try:
-            import yaml
-        except ImportError:
-            # fail fast here rather than with a NameError on `yaml` below
-            raise ImportError("Please install PyYAML: pip install PyYAML")
-        self.config = {}
-        if yaml_path.is_file():
-            try:
-                with open(yaml_path) as f:
-                    self.config = yaml.load(f, Loader=yaml.FullLoader)
-            except Exception as e:
-                raise Exception(
-                    f"Failed to load config from {yaml_path.as_posix()}: {e}"
-                )
-        else:
-            raise FileNotFoundError(f"{yaml_path.as_posix()} not found")
-        self.root = yaml_path.parent
-
-    def _auto_convert_to_abs_path(self, x):
-        if isinstance(x, str):
-            if not Path(x).exists() and (self.root / x).exists():
-                return (self.root / x).as_posix()
-        elif isinstance(x, dict):
-            return {k: self._auto_convert_to_abs_path(v) for k, v in x.items()}
-        return x
-
-    @property
-    def vocab_filename(self):
-        """fairseq vocabulary file under data root"""
-        return self.config.get("vocab_filename", "dict.txt")
-
-    @property
-    def speaker_set_filename(self):
-        """speaker set file under data root"""
-        return self.config.get("speaker_set_filename", None)
-
-    @property
-    def shuffle(self) -> bool:
-        """Shuffle dataset samples before batching"""
-        return self.config.get("shuffle", False)
-
-    @property
-    def pre_tokenizer(self) -> Dict:
-        """Pre-tokenizer to apply before subword tokenization. Returns
-        a dictionary with `tokenizer` providing the tokenizer name and
-        the other items providing the tokenizer-specific arguments.
-        Tokenizers are defined in `fairseq.data.encoders.*`"""
-        tokenizer = self.config.get("pre_tokenizer", {"tokenizer": None})
-        return self._auto_convert_to_abs_path(tokenizer)
-
-    @property
-    def bpe_tokenizer(self) -> Dict:
-        """Subword tokenizer to apply after pre-tokenization. Returns
-        a dictionary with `bpe` providing the tokenizer name and
-        the other items providing the tokenizer-specific arguments.
-        Tokenizers are defined in `fairseq.data.encoders.*`"""
-        tokenizer = self.config.get("bpe_tokenizer", {"bpe": None})
-        return self._auto_convert_to_abs_path(tokenizer)
-
-    @property
-    def prepend_tgt_lang_tag(self) -> bool:
-        """Prepend target lang ID token as the target BOS (e.g. for to-many
-        multilingual setting). During inference, this requires `--prefix-size 1`
-        to force BOS to be a lang ID token."""
-        return self.config.get("prepend_tgt_lang_tag", False)
-
-    @property
-    def input_feat_per_channel(self):
-        """The dimension of input features (per audio channel)"""
-        return self.config.get("input_feat_per_channel", 80)
-
-    @property
-    def input_channels(self):
-        """The number of channels in the input audio"""
-        return self.config.get("input_channels", 1)
-
-    @property
-    def sample_rate(self):
-        return self.config.get("sample_rate", 16_000)
-
-    @property
-    def sampling_alpha(self):
-        """Hyper-parameter alpha = 1/T for temperature-based resampling.
-        (alpha = 1 for no resampling)"""
-        return self.config.get("sampling_alpha", 1.0)
-
-    @property
-    def use_audio_input(self):
-        """Needed by the dataset loader to see if the model requires
-        raw audio as inputs."""
-        return self.config.get("use_audio_input", False)
-
-    @property
-    def use_sample_rate(self):
-        """Needed by the dataset loader to see if the model requires
-        raw audio with a specific sample rate as inputs."""
-        return self.config.get("use_sample_rate", 16000)
-
-    @property
-    def audio_root(self):
-        """Audio paths in the manifest TSV can be relative and this provides
-        the root path. Set this to an empty string when using absolute paths."""
-        return self.config.get("audio_root", "")
-
-    def get_feature_transforms(self, split, is_train):
-        """Split-specific feature transforms. Allowing train set
-        wildcard `_train`, evaluation set wildcard `_eval` and general
-        wildcard `*` for matching."""
-        from copy import deepcopy
-
-        cfg = deepcopy(self.config)
-        _cur = cfg.get("transforms", {})
-        cur = _cur.get(split)
-        cur = _cur.get("_train") if cur is None and is_train else cur
-        cur = _cur.get("_eval") if cur is None and not is_train else cur
-        cur = _cur.get("*") if cur is None else cur
-        cfg["transforms"] = cur
-        return cfg
-
-    @property
-    def global_cmvn_stats_npz(self) -> Optional[str]:
-        path = self.config.get("global_cmvn", {}).get("stats_npz_path", None)
-        return self._auto_convert_to_abs_path(path)
-
-    @property
-    def vocoder(self) -> Optional[Dict[str, str]]:
-        return self.config.get("vocoder", None)
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/bart/hub_interface.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/bart/hub_interface.py
deleted file mode 100644
index 4d47d9751837c744b1d0d460117b78fcbeeb12d8..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/bart/hub_interface.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import copy
-import logging
-from typing import Dict, List
-
-import numpy as np
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-from fairseq import utils
-from fairseq.data import encoders
-from fairseq.hub_utils import GeneratorHubInterface
-from omegaconf import open_dict
-
-
-logger = logging.getLogger(__name__)
-
-
-class BARTHubInterface(GeneratorHubInterface):
-    """A simple PyTorch Hub interface to BART.
-
-    Usage: https://github.com/pytorch/fairseq/tree/main/examples/bart
-    """
-
-    def __init__(self, cfg, task, model):
-        super().__init__(cfg, task, [model])
-        self.model = self.models[0]
-
-    def encode(
-        self, sentence: str, *addl_sentences, no_separator=True
-    ) -> torch.LongTensor:
-        """
-        BPE-encode a sentence (or multiple sentences).
-
-        Every sequence begins with a beginning-of-sentence (`<s>`) symbol.
-        Every sentence ends with an end-of-sentence (`</s>`).
-
-        Example (single sentence): `<s> a b c </s>`
-        Example (sentence pair): `<s> d e f </s> 1 2 3 </s>`
-
-        The BPE encoding follows GPT-2. One subtle detail is that the GPT-2 BPE
-        requires leading spaces. For example::
-
-            >>> bart.encode('Hello world').tolist()
-            [0, 31414, 232, 2]
-            >>> bart.encode(' world').tolist()
-            [0, 232, 2]
-            >>> bart.encode('world').tolist()
-            [0, 8331, 2]
-        """
-        tokens = self.bpe.encode(sentence)
-        if len(tokens.split(" ")) > min(self.max_positions) - 2:
-            tokens = " ".join(tokens.split(" ")[: min(self.max_positions) - 2])
-        bpe_sentence = "<s> " + tokens + " </s>"
-        for s in addl_sentences:
-            bpe_sentence += " </s>" if not no_separator else ""
-            bpe_sentence += " " + self.bpe.encode(s) + " </s>"
-        tokens = self.task.source_dictionary.encode_line(bpe_sentence, append_eos=False)
-        return tokens.long()
-
-    def decode(self, tokens: torch.LongTensor):
-        assert tokens.dim() == 1
-        tokens = tokens.cpu().numpy()
-        if tokens[0] == self.task.source_dictionary.bos():
-            tokens = tokens[1:]  # remove <s>
-        eos_mask = tokens == self.task.source_dictionary.eos()
-        doc_mask = eos_mask[1:] & eos_mask[:-1]
-        sentences = np.split(tokens, doc_mask.nonzero()[0] + 1)
-        sentences = [
-            self.bpe.decode(self.task.source_dictionary.string(s)) for s in sentences
-        ]
-        if len(sentences) == 1:
-            return sentences[0]
-        return sentences
-
-    def _build_sample(self, src_tokens: List[torch.LongTensor]):
-        # assert torch.is_tensor(src_tokens)
-        dataset = self.task.build_dataset_for_inference(
-            src_tokens,
-            [x.numel() for x in src_tokens],
-        )
-        sample = dataset.collater(dataset)
-        sample = utils.apply_to_sample(lambda tensor: tensor.to(self.device), sample)
-        return sample
-
-    def generate(
-        self,
-        tokenized_sentences: List[torch.LongTensor],
-        *args,
-        inference_step_args=None,
-        skip_invalid_size_inputs=False,
-        **kwargs
-    ) -> List[List[Dict[str, torch.Tensor]]]:
-        inference_step_args = inference_step_args or {}
-        if "prefix_tokens" in inference_step_args:
-            raise NotImplementedError("prefix generation not implemented for BART")
-        res = []
-        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
-            src_tokens = batch['net_input']['src_tokens']
-            inference_step_args["prefix_tokens"] = src_tokens.new_full(
-                (src_tokens.size(0), 1), fill_value=self.task.source_dictionary.bos()
-            ).to(device=self.device)
-            results = super().generate(
-                src_tokens,
-                *args,
-                inference_step_args=inference_step_args,
-                skip_invalid_size_inputs=skip_invalid_size_inputs,
-                **kwargs
-            )
-            for id, hypos in zip(batch['id'].tolist(), results):
-                res.append((id, hypos))
-        res = [hypos for _, hypos in sorted(res, key=lambda x: x[0])]
-        return res
-
-    def extract_features(
-        self, tokens: torch.LongTensor, return_all_hiddens: bool = False
-    ) -> torch.Tensor:
-        if tokens.dim() == 1:
-            tokens = tokens.unsqueeze(0)
-        if tokens.size(-1) > min(self.model.max_positions()):
-            raise ValueError(
-                "tokens exceeds maximum length: {} > {}".format(
-                    tokens.size(-1), self.model.max_positions()
-                )
-            )
-        tokens = tokens.to(device=self.device)  # `.to()` is not in-place, so keep the result
-        prev_output_tokens = tokens.clone()
-
-        prev_output_tokens[:, 0] = tokens.gather(
-            1,
-            (tokens.ne(self.task.source_dictionary.pad()).sum(dim=1) - 1).unsqueeze(-1),
-        ).squeeze()
-
-        prev_output_tokens[:, 1:] = tokens[:, :-1]
-        features, extra = self.model(
-            src_tokens=tokens,
-            src_lengths=None,
-            prev_output_tokens=prev_output_tokens,
-            features_only=True,
-            return_all_hiddens=return_all_hiddens,
-        )
-        if return_all_hiddens:
-            # convert from T x B x C -> B x T x C
-            inner_states = extra["inner_states"]
-            return [inner_state.transpose(0, 1) for inner_state in inner_states]
-        else:
-            return features  # just the last layer's features
-
-    def register_classification_head(
-        self, name: str, num_classes: int = None, embedding_size: int = None, **kwargs
-    ):
-        self.model.register_classification_head(
-            name, num_classes=num_classes, embedding_size=embedding_size, **kwargs
-        )
-
-    def predict(self, head: str, tokens: torch.LongTensor, return_logits: bool = False):
-        if tokens.dim() == 1:
-            tokens = tokens.unsqueeze(0)
-        features = self.extract_features(tokens.to(device=self.device))
-        sentence_representation = features[
-            tokens.eq(self.task.source_dictionary.eos()), :
-        ].view(features.size(0), -1, features.size(-1))[:, -1, :]
-
-        logits = self.model.classification_heads[head](sentence_representation)
-        if return_logits:
-            return logits
-        return F.log_softmax(logits, dim=-1)
-
-    def fill_mask(
-        self,
-        masked_inputs: List[str],
-        topk: int = 5,
-        match_source_len: bool = True,
-        **generate_kwargs
-    ):
-        masked_token = '<mask>'
-        batch_tokens = []
-        for masked_input in masked_inputs:
-            assert masked_token in masked_input, \
-                "please add one {} token for the input".format(masked_token)
-
-            text_spans = masked_input.split(masked_token)
-            text_spans_bpe = (' {0} '.format(masked_token)).join(
-                [self.bpe.encode(text_span.rstrip()) for text_span in text_spans]
-            ).strip()
-            tokens = self.task.source_dictionary.encode_line(
-                '<s> ' + text_spans_bpe + ' </s>',
-                append_eos=False,
-                add_if_not_exist=False,
-            ).long()
-            batch_tokens.append(tokens)
-
-        # ensure beam size is at least as big as topk
-        generate_kwargs['beam'] = max(
-            topk,
-            generate_kwargs.get('beam', -1),
-        )
-        generate_kwargs['match_source_len'] = match_source_len
-        batch_hypos = self.generate(batch_tokens, **generate_kwargs)
-
-        return [
-            [(self.decode(hypo['tokens']), hypo['score']) for hypo in hypos[:topk]]
-            for hypos in batch_hypos
-        ]
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/same_pad.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/same_pad.py
deleted file mode 100644
index 4c04990ea6fdb291f162ee8ac3d17a92483daf8e..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/same_pad.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-
-from torch import nn
-
-
-class SamePad(nn.Module):
-    # Trims the extra trailing frames that "same" padding adds to a conv output.
-    def __init__(self, kernel_size, causal=False):
-        super().__init__()
-        if causal:
-            self.remove = kernel_size - 1
-        else:
-            self.remove = 1 if kernel_size % 2 == 0 else 0
-
-    def forward(self, x):
-        if self.remove > 0:
-            x = x[:, :, : -self.remove]
-        return x
diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/sparse_multihead_attention.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/sparse_multihead_attention.py
deleted file mode 100644
index 3cbd9d6785886e319aab0601517e27df733b6f97..0000000000000000000000000000000000000000
--- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/modules/sparse_multihead_attention.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-
-import torch
-
-from .multihead_attention import MultiheadAttention
-
-
-class SparseMultiheadAttention(MultiheadAttention):
-    """Sparse Multi-Headed Attention.
-
-    "Generating Long Sequences with Sparse Transformers".
Implements - fixed factorized self attention, where l=stride and c=expressivity. - A(1) includes all words in the stride window and A(2) takes a summary of c - words from the end of each stride window. - If is_bidirectional=False, we do not include any words past the current word, - as in the paper. - """ - - def __init__( - self, - embed_dim, - num_heads, - kdim=None, - vdim=None, - dropout=0.0, - bias=True, - add_bias_kv=False, - add_zero_attn=False, - self_attention=False, - encoder_decoder_attention=False, - stride=32, - expressivity=8, - is_bidirectional=True, - ): - - super().__init__( - embed_dim, - num_heads, - kdim, - vdim, - dropout, - bias, - add_bias_kv, - add_zero_attn, - self_attention, - encoder_decoder_attention, - ) - - self.is_bidirectional = is_bidirectional - self.stride = stride - self.expressivity = expressivity - assert self.stride > 0 and self.stride >= self.expressivity - - # Used for Ai(2) calculations - beginning of [l-c, l] range - def compute_checkpoint(self, word_index): - if word_index % self.stride == 0 and word_index != 0: - checkpoint_index = word_index - self.expressivity - else: - checkpoint_index = ( - math.floor(word_index / self.stride) * self.stride - + self.stride - - self.expressivity - ) - return checkpoint_index - - # Computes Ai(2) - def compute_subset_summaries(self, absolute_max): - checkpoint_index = self.compute_checkpoint(0) - subset_two = set() - while checkpoint_index <= absolute_max - 1: - summary = set( - range( - checkpoint_index, - min(checkpoint_index + self.expressivity + 1, absolute_max), - ) - ) - subset_two = subset_two.union(summary) - checkpoint_index = self.compute_checkpoint(checkpoint_index + self.stride) - return subset_two - - # Sparse Transformer Fixed Attention Pattern: https://arxiv.org/pdf/1904.10509.pdf - def compute_fixed_attention_subset(self, word_index, tgt_len): - # +1s account for range function; [min, max) -> [min, max] - if not self.is_bidirectional: - absolute_max = word_index + 1 - else: - absolute_max = tgt_len - - # Subset 1 - whole window - rounded_index = ( - math.floor((word_index + self.stride) / self.stride) * self.stride - ) - if word_index % self.stride == 0 and word_index != 0: - subset_one = set( - range(word_index - self.stride, min(absolute_max, word_index + 1)) - ) - else: - subset_one = set( - range( - max(0, rounded_index - self.stride), - min(absolute_max, rounded_index + 1), - ) - ) - - # Subset 2 - summary per window - # If bidirectional, subset 2 is the same for every index - subset_two = set() - if not self.is_bidirectional: - subset_two = self.compute_subset_summaries(absolute_max) - - return subset_one.union(subset_two) - - # Compute sparse mask - if bidirectional, can pre-compute and store - def buffered_sparse_mask(self, tensor, tgt_len, src_len): - assert tgt_len > self.stride - sparse_mask = torch.empty((tgt_len, src_len)).float().fill_(float("-inf")) - - # If bidirectional, subset 2 is the same for every index - subset_summaries = set() - if self.is_bidirectional: - subset_summaries = self.compute_subset_summaries(tgt_len) - - for i in range(tgt_len): - fixed_attention_subset = self.compute_fixed_attention_subset(i, tgt_len) - fixed_attention_subset = fixed_attention_subset.union(subset_summaries) - included_word_indices = torch.LongTensor(list(fixed_attention_subset)) - sparse_mask[i].index_fill_(0, included_word_indices, 0) - return sparse_mask.type_as(tensor) - - def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz): - sparse_mask = 
self.buffered_sparse_mask(attn_weights, tgt_len, src_len)
-        sparse_mask = sparse_mask.unsqueeze(0).expand(
-            bsz * self.num_heads, tgt_len, src_len
-        )
-        attn_weights += sparse_mask
diff --git a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/scripts/infer.sh b/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/scripts/infer.sh
deleted file mode 100644
index 653e8936fa463cde204fdc951411076d7ea432a2..0000000000000000000000000000000000000000
--- a/spaces/Harveenchadha/Hindi_TTS/vakyansh_tts/scripts/infer.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-glowdir=''
-hifidir=''
-device=''
-text=''
-wav=''
-
-python ../src/glow_tts/texttospeech.py -m $glowdir -g $hifidir -d $device -t $text -w $wav
diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md b/spaces/ICML2022/OFA/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md
deleted file mode 100644
index f3b5a413a27bbe2700da3f418460aa0a7c41abdd..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/speech_to_text/docs/simulst_mustc_example.md
+++ /dev/null
@@ -1,190 +0,0 @@
-# Simultaneous Speech Translation (SimulST) on MuST-C
-
-This is a tutorial on training and evaluating a transformer *wait-k* simultaneous model on the MuST-C English-German dataset, from [SimulMT to SimulST: Adapting Simultaneous Text Translation to End-to-End Simultaneous Speech Translation](https://www.aclweb.org/anthology/2020.aacl-main.58.pdf).
-
-[MuST-C](https://www.aclweb.org/anthology/N19-1202) is a multilingual speech-to-text translation corpus with 8-language translations of English TED talks.
-
-## Data Preparation
-This section introduces the data preparation for training and evaluation.
-If you only want to evaluate the model, please jump to [Inference & Evaluation](#inference--evaluation).
-
-[Download](https://ict.fbk.eu/must-c) and unpack MuST-C data to a path
-`${MUSTC_ROOT}/en-${TARGET_LANG_ID}`, then preprocess it with
-```bash
-# Additional Python packages for S2T data processing/model training
-pip install pandas torchaudio sentencepiece
-
-# Generate TSV manifests, features, vocabulary,
-# global cepstral mean and variance estimation,
-# and configuration for each language
-cd fairseq
-
-python examples/speech_to_text/prep_mustc_data.py \
-  --data-root ${MUSTC_ROOT} --task asr \
-  --vocab-type unigram --vocab-size 10000 \
-  --cmvn-type global
-
-python examples/speech_to_text/prep_mustc_data.py \
-  --data-root ${MUSTC_ROOT} --task st \
-  --vocab-type unigram --vocab-size 10000 \
-  --cmvn-type global
-```
-
-## ASR Pretraining
-We need a pretrained offline ASR model. Assume the save directory of the ASR model is `${ASR_SAVE_DIR}`.
-The following command (and the subsequent training commands in this tutorial) assumes training on 1 GPU (you can also train on 8 GPUs and remove the `--update-freq 8` option).
-```
-fairseq-train ${MUSTC_ROOT}/en-de \
-  --config-yaml config_asr.yaml --train-subset train_asr --valid-subset dev_asr \
-  --save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 100000 \
-  --task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
-  --arch convtransformer_espnet --optimizer adam --lr 0.0005 --lr-scheduler inverse_sqrt \
-  --warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8
-```
-A pretrained ASR checkpoint can be downloaded [here](https://dl.fbaipublicfiles.com/simultaneous_translation/must_c_v1_en_de_pretrained_asr).
-
-## Simultaneous Speech Translation Training
-
-### Wait-K with fixed pre-decision module
-Fixed pre-decision means that the model operates the simultaneous policy on the boundaries of fixed-size chunks.
-Here is an example with a fixed pre-decision ratio of 7 (a simultaneous decision is made every 7 encoder states) and
-a wait-3 policy. Assume the save directory is `${ST_SAVE_DIR}`.
-```bash
-fairseq-train ${MUSTC_ROOT}/en-de \
-  --config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \
-  --save-dir ${ST_SAVE_DIR} --num-workers 8 \
-  --optimizer adam --lr 0.0001 --lr-scheduler inverse_sqrt --clip-norm 10.0 \
-  --criterion label_smoothed_cross_entropy \
-  --warmup-updates 4000 --max-update 100000 --max-tokens 40000 --seed 2 \
-  --load-pretrained-encoder-from ${ASR_SAVE_DIR}/checkpoint_best.pt \
-  --task speech_to_text \
-  --arch convtransformer_simul_trans_espnet \
-  --simul-type waitk_fixed_pre_decision \
-  --waitk-lagging 3 \
-  --fixed-pre-decision-ratio 7 \
-  --update-freq 8
-```
-### Monotonic multihead attention with fixed pre-decision module
-```
-fairseq-train ${MUSTC_ROOT}/en-de \
-  --config-yaml config_st.yaml --train-subset train_st --valid-subset dev_st \
-  --save-dir ${ST_SAVE_DIR} --num-workers 8 \
-  --optimizer adam --lr 0.0001 --lr-scheduler inverse_sqrt --clip-norm 10.0 \
-  --warmup-updates 4000 --max-update 100000 --max-tokens 40000 --seed 2 \
-  --load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} \
-  --task speech_to_text \
-  --criterion latency_augmented_label_smoothed_cross_entropy \
-  --latency-weight-avg 0.1 \
-  --arch convtransformer_simul_trans_espnet \
-  --simul-type infinite_lookback_fixed_pre_decision \
-  --fixed-pre-decision-ratio 7 \
-  --update-freq 8
-```
-## Inference & Evaluation
-[SimulEval](https://github.com/facebookresearch/SimulEval) is used for evaluation.
-The following command runs the evaluation.
-
-```
-git clone https://github.com/facebookresearch/SimulEval.git
-cd SimulEval
-pip install -e .
-
-simuleval \
-  --agent ${FAIRSEQ}/examples/speech_to_text/simultaneous_translation/agents/fairseq_simul_st_agent.py \
-  --source ${SRC_LIST_OF_AUDIO} \
-  --target ${TGT_FILE} \
-  --data-bin ${MUSTC_ROOT}/en-de \
-  --config config_st.yaml \
-  --model-path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \
-  --output ${OUTPUT} \
-  --scores
-```
-
-The source file `${SRC_LIST_OF_AUDIO}` is a list of paths to audio files. Assuming your audio files are stored in `/home/user/data`,
-it should look like this:
-
-```bash
-/home/user/data/audio-1.wav
-/home/user/data/audio-2.wav
-```
-
-Each line of the target file `${TGT_FILE}` is the translation for the corresponding audio file.
-```bash
-Translation_1
-Translation_2
-```
-The evaluation runs on the original MuST-C segmentation.
-The following command will generate the wav list and text file for an evaluation set `${SPLIT}` (chosen from `dev`, `tst-COMMON` and `tst-HE`) in MuST-C to `${EVAL_DATA}`.
-```bash
-python ${FAIRSEQ}/examples/speech_to_text/seg_mustc_data.py \
-  --data-root ${MUSTC_ROOT} --lang de \
-  --split ${SPLIT} --task st \
-  --output ${EVAL_DATA}
-```
-
-The `--data-bin` and `--config` should be the same as in the previous section if you prepared the data from scratch.
-If you only want to run evaluation, a prepared data directory can be found [here](https://dl.fbaipublicfiles.com/simultaneous_translation/must_c_v1.0_en_de_databin.tgz). It contains
-- `spm_unigram10000_st.model`: a sentencepiece model binary.
-- `spm_unigram10000_st.txt`: the dictionary file generated by the sentencepiece model.
-- `gcmvn.npz`: the binary for global cepstral mean and variance.
-- `config_st.yaml`: the config yaml file. It looks like this.
-You will need to set the absolute paths for `sentencepiece_model` and `stats_npz_path` if the data directory is downloaded.
-```yaml
-bpe_tokenizer:
-  bpe: sentencepiece
-  sentencepiece_model: ABS_PATH_TO_SENTENCEPIECE_MODEL
-global_cmvn:
-  stats_npz_path: ABS_PATH_TO_GCMVN_FILE
-input_channels: 1
-input_feat_per_channel: 80
-sampling_alpha: 1.0
-specaugment:
-  freq_mask_F: 27
-  freq_mask_N: 1
-  time_mask_N: 1
-  time_mask_T: 100
-  time_mask_p: 1.0
-  time_wrap_W: 0
-transforms:
-  '*':
-  - global_cmvn
-  _train:
-  - global_cmvn
-  - specaugment
-vocab_filename: spm_unigram10000_st.txt
-```
-
-Notice that once `--data-bin` is set, `--config` is the base name of the config yaml, not the full path.
-
-Set `--model-path` to the model checkpoint.
-A pretrained checkpoint can be downloaded from [here](https://dl.fbaipublicfiles.com/simultaneous_translation/convtransformer_wait5_pre7), which is a wait-5 model with a pre-decision of 280 ms.
-
-The result of this model on `tst-COMMON` is:
-```bash
-{
-    "Quality": {
-        "BLEU": 13.94974229366959
-    },
-    "Latency": {
-        "AL": 1751.8031870037803,
-        "AL_CA": 2338.5911762796536,
-        "AP": 0.7931395378788959,
-        "AP_CA": 0.9405103863210942,
-        "DAL": 1987.7811616943081,
-        "DAL_CA": 2425.2751560926167
-    }
-}
-```
-
-If the `--output ${OUTPUT}` option is used, the detailed log and scores will be stored under the `${OUTPUT}` directory.
-
-
-The quality is measured by detokenized BLEU, so make sure that the predicted words sent to the server are detokenized.
-
-The latency metrics are
-* Average Proportion
-* Average Lagging
-* Differentiable Average Lagging
-
-Again, they are evaluated on detokenized text.
diff --git a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py b/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py
deleted file mode 100644
index 2e31c307bd67d10941150160c7fb8c9e085ac5d9..0000000000000000000000000000000000000000
--- a/spaces/ICML2022/OFA/fairseq/examples/wav2vec/unsupervised/scripts/g2p_wrd_to_phn.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python3 -u
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
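-
-# Illustrative usage (an assumption, not part of the original script): it reads
-# whitespace-separated words from stdin and writes one ARPAbet phone sequence
-# per input line; `--compact` strips the stress digits from vowel phones:
-#
-#   $ echo "hello world" | python g2p_wrd_to_phn.py
-#   HH AH0 L OW1 W ER1 L D
-#
-#   $ echo "hello world" | python g2p_wrd_to_phn.py --compact
-#   HH AH L OW W ER L D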
- -import argparse -import sys - -from g2p_en import G2p - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--compact", - action="store_true", - help="if set, compacts phones", - ) - args = parser.parse_args() - - compact = args.compact - - wrd_to_phn = {} - g2p = G2p() - for line in sys.stdin: - words = line.strip().split() - phones = [] - for w in words: - if w not in wrd_to_phn: - wrd_to_phn[w] = g2p(w) - if compact: - wrd_to_phn[w] = [ - p[:-1] if p[-1].isnumeric() else p for p in wrd_to_phn[w] - ] - phones.extend(wrd_to_phn[w]) - try: - print(" ".join(phones)) - except: - print(wrd_to_phn, words, phones, file=sys.stderr) - raise - - -if __name__ == "__main__": - main() diff --git a/spaces/Illumotion/Koboldcpp/include/CL/cl_layer.h b/spaces/Illumotion/Koboldcpp/include/CL/cl_layer.h deleted file mode 100644 index fe628bf8a8efcae83667e8781eb1d8816fb2f292..0000000000000000000000000000000000000000 --- a/spaces/Illumotion/Koboldcpp/include/CL/cl_layer.h +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (c) 2020 The Khronos Group Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * OpenCL is a trademark of Apple Inc. used under license by Khronos. - */ - -#ifndef OPENCL_CL_LAYER_H -#define OPENCL_CL_LAYER_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -typedef cl_uint cl_layer_info; -typedef cl_uint cl_layer_api_version; -#define CL_LAYER_API_VERSION 0x4240 -#define CL_LAYER_NAME 0x4241 -#define CL_LAYER_API_VERSION_100 100 - -extern CL_API_ENTRY cl_int CL_API_CALL -clGetLayerInfo(cl_layer_info param_name, - size_t param_value_size, - void *param_value, - size_t *param_value_size_ret); - -typedef cl_int -(CL_API_CALL *pfn_clGetLayerInfo)(cl_layer_info param_name, - size_t param_value_size, - void *param_value, - size_t *param_value_size_ret); - -extern CL_API_ENTRY cl_int CL_API_CALL -clInitLayer(cl_uint num_entries, - const cl_icd_dispatch *target_dispatch, - cl_uint *num_entries_ret, - const cl_icd_dispatch **layer_dispatch_ret); - -typedef cl_int -(CL_API_CALL *pfn_clInitLayer)(cl_uint num_entries, - const cl_icd_dispatch *target_dispatch, - cl_uint *num_entries_ret, - const cl_icd_dispatch **layer_dispatch_ret); - -#ifdef __cplusplus -} -#endif - -#endif /* OPENCL_CL_LAYER_H */ diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/build_sam.py b/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/build_sam.py deleted file mode 100644 index 37cd245124079e7cdd0d047ef9dde077db99efcc..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/segment-anything/segment_anything/build_sam.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
- -import torch - -from functools import partial - -from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer - - -def build_sam_vit_h(checkpoint=None): - return _build_sam( - encoder_embed_dim=1280, - encoder_depth=32, - encoder_num_heads=16, - encoder_global_attn_indexes=[7, 15, 23, 31], - checkpoint=checkpoint, - ) - - -build_sam = build_sam_vit_h - - -def build_sam_vit_l(checkpoint=None): - return _build_sam( - encoder_embed_dim=1024, - encoder_depth=24, - encoder_num_heads=16, - encoder_global_attn_indexes=[5, 11, 17, 23], - checkpoint=checkpoint, - ) - - -def build_sam_vit_b(checkpoint=None): - return _build_sam( - encoder_embed_dim=768, - encoder_depth=12, - encoder_num_heads=12, - encoder_global_attn_indexes=[2, 5, 8, 11], - checkpoint=checkpoint, - ) - - -sam_model_registry = { - "default": build_sam_vit_h, - "vit_h": build_sam_vit_h, - "vit_l": build_sam_vit_l, - "vit_b": build_sam_vit_b, -} - - -def _build_sam( - encoder_embed_dim, - encoder_depth, - encoder_num_heads, - encoder_global_attn_indexes, - checkpoint=None, -): - prompt_embed_dim = 256 - image_size = 1024 - vit_patch_size = 16 - image_embedding_size = image_size // vit_patch_size - sam = Sam( - image_encoder=ImageEncoderViT( - depth=encoder_depth, - embed_dim=encoder_embed_dim, - img_size=image_size, - mlp_ratio=4, - norm_layer=partial(torch.nn.LayerNorm, eps=1e-6), - num_heads=encoder_num_heads, - patch_size=vit_patch_size, - qkv_bias=True, - use_rel_pos=True, - global_attn_indexes=encoder_global_attn_indexes, - window_size=14, - out_chans=prompt_embed_dim, - ), - prompt_encoder=PromptEncoder( - embed_dim=prompt_embed_dim, - image_embedding_size=(image_embedding_size, image_embedding_size), - input_image_size=(image_size, image_size), - mask_in_chans=16, - ), - mask_decoder=MaskDecoder( - num_multimask_outputs=3, - transformer=TwoWayTransformer( - depth=2, - embedding_dim=prompt_embed_dim, - mlp_dim=2048, - num_heads=8, - ), - transformer_dim=prompt_embed_dim, - iou_head_depth=3, - iou_head_hidden_dim=256, - ), - pixel_mean=[123.675, 116.28, 103.53], - pixel_std=[58.395, 57.12, 57.375], - ) - sam.eval() - if checkpoint is not None: - with open(checkpoint, "rb") as f: - state_dict = torch.load(f) - sam.load_state_dict(state_dict) - return sam diff --git a/spaces/JFN/gpt2/app.py b/spaces/JFN/gpt2/app.py deleted file mode 100644 index 4205e03f91904065e1610f7e6c7b2f1de1771184..0000000000000000000000000000000000000000 --- a/spaces/JFN/gpt2/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/gpt2").launch() \ No newline at end of file diff --git a/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/__init__.py b/spaces/Jamel887/Rvc-tio887/lib/infer_pack/modules/F0Predictor/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Jamkonams/AutoGPT/Dockerfile b/spaces/Jamkonams/AutoGPT/Dockerfile deleted file mode 100644 index 8396154998f32a50d55c199a674b638d5cf7bda2..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -# Use an official Python base image from the Docker Hub -FROM python:3.10-slim - -# Install git -RUN apt-get -y update -RUN apt-get -y install git chromium-driver - -# Install Xvfb and other dependencies for headless browser testing -RUN apt-get update \ - && apt-get install -y wget gnupg2 libgtk-3-0 libdbus-glib-1-2 dbus-x11 xvfb ca-certificates - -# Install Firefox / Chromium -RUN 
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-key add - \
-    && echo "deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google-chrome.list \
-    && apt-get update \
-    && apt-get install -y chromium firefox-esr
-
-# Set environment variables
-ENV PIP_NO_CACHE_DIR=yes \
-    PYTHONUNBUFFERED=1 \
-    PYTHONDONTWRITEBYTECODE=1
-
-# Create a non-root user and set permissions
-RUN useradd --create-home appuser
-WORKDIR /home/appuser
-RUN chown appuser:appuser /home/appuser
-USER appuser
-
-# Copy the requirements.txt file and install the requirements
-COPY --chown=appuser:appuser requirements.txt .
-RUN sed -i '/Items below this point will not be included in the Docker Image/,$d' requirements.txt && \
-    pip install --no-cache-dir --user -r requirements.txt
-
-# Copy the application files
-COPY --chown=appuser:appuser autogpt/ ./autogpt
-
-# Set the entrypoint
-ENTRYPOINT ["python", "-m", "autogpt"]
diff --git a/spaces/Jeff2323/ai-comic-factory/src/app/interface/about/index.tsx b/spaces/Jeff2323/ai-comic-factory/src/app/interface/about/index.tsx
deleted file mode 100644
index 45ef9f30aff4e54048f3e2b274ec714ddb23f7ec..0000000000000000000000000000000000000000
--- a/spaces/Jeff2323/ai-comic-factory/src/app/interface/about/index.tsx
+++ /dev/null
@@ -1,46 +0,0 @@
-import { Button } from "@/components/ui/button"
-import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"
-import { useState } from "react"
-
-export function About() {
-  const [isOpen, setOpen] = useState(false)
-
-  // NOTE: the JSX tags in the markup below were lost when this file was
-  // extracted; the structure is reconstructed from the surviving text and the
-  // imports above, so the exact tags, props and button labels are assumptions.
-  return (
-    <Dialog open={isOpen} onOpenChange={setOpen}>
-      <DialogTrigger asChild>
-        <Button variant="outline">About</Button>
-      </DialogTrigger>
-      <DialogContent>
-        <DialogHeader>
-          <DialogTitle>The AI Comic Factory</DialogTitle>
-          <DialogDescription>
-            What is the AI Comic Factory?
-          </DialogDescription>
-        </DialogHeader>
-        <div>
-          <p>
-            The AI Comic Factory is a free and open-source application made to demonstrate the capabilities of AI models.
-          </p>
-          <p>
-            👉 The language model used to generate the descriptions of each panel is Llama-2 70b.
-          </p>
-          <p>
-            👉 The stable diffusion model used to generate the images is the base SDXL 1.0.
-          </p>
-          <p>
-            The code is public and can be deployed at home with some changes in the code. See the README for details about the architecture.
-          </p>
-          <p>
-            Do you want to create high-res image exports? Please check this tutorial.
-          </p>
-        </div>
-        <DialogFooter>
-          <Button onClick={() => setOpen(false)}>Got it</Button>
-        </DialogFooter>
-      </DialogContent>
-    </Dialog>
    - ) -} \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/steamship/plugin/outputs/train_plugin_output.py b/spaces/JeffJing/ZookChatBot/steamship/plugin/outputs/train_plugin_output.py deleted file mode 100644 index e9964e774786fd476d72c5a25bec282525cb70c6..0000000000000000000000000000000000000000 --- a/spaces/JeffJing/ZookChatBot/steamship/plugin/outputs/train_plugin_output.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import annotations - -from steamship.base.model import CamelModel - - -class TrainPluginOutput(CamelModel): - """ - This is the object produced by a completed trainable operation, stored as the `output` field of a `train` task. - """ - - # The PluginInstance ID being trained - plugin_instance_id: str = None - - # This should always represent the most recent snapshot of the model in Steamship - # It is the output of ModelCheckpoint.archive_path_in_steamship - archive_path: str = None - - # Arbitrary key-valued data to provide to the `run` method when this plugin is Run. - inference_params: dict = None - - # Arbitrary key-valued data to provide information about training status or training results. - training_progress: dict = None # For tracking the progress (e.g. 3 / 40 epochs completed) - training_results: dict = None # For tracking accuracy (e.g. f1=0.8) diff --git a/spaces/Joeythemonster/magic-diffusion/app.py b/spaces/Joeythemonster/magic-diffusion/app.py deleted file mode 100644 index c5d5180bf525be5cfc13c069ea6c60dee0af4cde..0000000000000000000000000000000000000000 --- a/spaces/Joeythemonster/magic-diffusion/app.py +++ /dev/null @@ -1,104 +0,0 @@ -import gradio as gr -import os -from share_btn import community_icon_html, loading_icon_html, share_js - -text_gen = gr.Interface.load(name="spaces/Gustavosta/MagicPrompt-Stable-Diffusion") -stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5") - -def get_images(prompt): - gallery_dir = stable_diffusion(prompt, fn_index=2) - sd_output = [os.path.join(gallery_dir, image) for image in os.listdir(gallery_dir)] - return sd_output, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True) - -def get_prompts(prompt_text): - return text_gen(prompt_text) - -css = ''' -.animate-spin { - animation: spin 1s linear infinite; -} -@keyframes spin { - from { - transform: rotate(0deg); - } - to { - transform: rotate(360deg); - } -} -#share-btn-container { - display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; -} -#share-btn { - all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; -} -#share-btn * { - all: unset; -} -#share-btn-container div:nth-child(-n+2){ - width: auto !important; - min-height: 0px !important; -} -#share-btn-container .wrap { - display: none !important; -} -a {text-decoration-line: underline;} -''' - -with gr.Blocks(css=css) as demo: - gr.HTML("""
-              <!-- NOTE: the original tags of this header block were lost when the
-                   file was extracted; the markup below is a reconstruction from the
-                   surviving text, so the tags and layout are assumptions. -->
-              <div style="text-align: center;">
-                <h1>Magic Diffusion 🪄</h1>
-                <p>
-                  This Space prettifies your prompt using MagicPrompt
-                  and then runs it through Stable Diffusion to create aesthetically pleasing images. Simply enter a few concepts and let it improve your prompt. You can then diffuse the prompt.
-                </p>
-              </div>
    """) - - with gr.Row(): - with gr.Column(): - input_text = gr.Textbox(label="Short text prompt", - lines=4, elem_id="input-text") - with gr.Row(): - see_prompts = gr.Button("Feed in your text!") - - with gr.Column(): - text_output = gr.Textbox( - label="Prettified text prompt", - lines=4, - elem_id="translated" - ) - with gr.Row(): - diffuse_btn = gr.Button(value="Diffuse the Prompt!") - with gr.Column(elem_id="generated-gallery"): - sd_output = gr.Gallery().style(grid=2, height="auto") - with gr.Group(elem_id="share-btn-container"): - community_icon = gr.HTML(community_icon_html, visible=False) - loading_icon = gr.HTML(loading_icon_html, visible=False) - share_button = gr.Button("Share to community", elem_id="share-btn", visible=False) - - see_prompts.click(get_prompts, - inputs = [input_text], - outputs = [ - text_output - ]) - diffuse_btn.click(get_images, - inputs = [ - text_output - ], - outputs = [sd_output, community_icon, loading_icon, share_button] - ) - share_button.click(None, [], [], _js=share_js) - - - -demo.launch(debug=True) \ No newline at end of file diff --git a/spaces/Kororinpa/Amadeus_Project/data_utils.py b/spaces/Kororinpa/Amadeus_Project/data_utils.py deleted file mode 100644 index 4855699d23d5dee36d4a12e875c7465265caac0f..0000000000000000000000000000000000000000 --- a/spaces/Kororinpa/Amadeus_Project/data_utils.py +++ /dev/null @@ -1,392 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import commons -from mel_processing import spectrogram_torch -from utils import load_wav_to_torch, load_filepaths_and_text -from text import text_to_sequence, cleaned_text_to_sequence - - -class TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_and_text) - self._filter() - - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - audiopath, text = audiopath_and_text[0], audiopath_and_text[1] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - return (text, spec, wav) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - - text_padded = 
torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths - - -"""Multi speaker version""" -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - def __init__(self, audiopaths_sid_text, hparams): - self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_sid_text) - self._filter() - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_sid_text_new = [] - lengths = [] - for audiopath, sid, text in self.audiopaths_sid_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_sid_text_new.append([audiopath, sid, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_sid_text = audiopaths_sid_text_new - self.lengths = lengths - - def get_audio_text_speaker_pair(self, audiopath_sid_text): - # separate filename, speaker_id and text - audiopath, sid, text = audiopath_sid_text[0], audiopath_sid_text[1], audiopath_sid_text[2] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - sid = self.get_sid(sid) - return (text, spec, wav, sid) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = 
text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def get_sid(self, sid): - sid = torch.LongTensor([int(sid)]) - return sid - - def __getitem__(self, index): - return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index]) - - def __len__(self): - return len(self.audiopaths_sid_text) - - -class TextAudioSpeakerCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text, audio and speaker identities - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized, sid] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - sid = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - sid[i] = row[3] - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, sid - - -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): - """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
- """ - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i+1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size diff --git a/spaces/KyanChen/FunSR/test_cnn_sr.py b/spaces/KyanChen/FunSR/test_cnn_sr.py deleted file mode 100644 index 4998928bda622b95bd41a2edc247f6de07a79b7f..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/test_cnn_sr.py +++ /dev/null @@ -1,204 +0,0 @@ -import argparse -import json -import os -import math -from functools import partial - -import cv2 -import numpy as np -import yaml -import torch -from einops import rearrange -from torch.utils.data import DataLoader -from tqdm import tqdm - -import datasets -import models -import utils - - -device = 'cuda:0' if torch.cuda.is_available() else 'cpu' - -def batched_predict(model, img, bsize): - with torch.no_grad(): - pred = model(img) - return pred - - -def eval_psnr(loader, class_names, model, - 
data_norm=None, eval_type=None, save_fig=False, - scale_ratio=1, save_path=None, verbose=False, crop_border=4, - cal_metrics=True,): - crop_border = int(crop_border) if crop_border else crop_border - print('crop border: ', crop_border) - model.eval() - - if data_norm is None: - data_norm = { - 'img': {'sub': [0], 'div': [1]}, - 'gt': {'sub': [0], 'div': [1]} - } - t = data_norm['img'] - img_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).to(device) - img_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).to(device) - t = data_norm['gt'] - gt_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).to(device) - gt_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).to(device) - - if eval_type is None: - metric_fn = [utils.calculate_psnr_pt, utils.calculate_ssim_pt] - elif eval_type == 'psnr+ssim': - metric_fn = [utils.calculate_psnr_pt, utils.calculate_ssim_pt] - elif eval_type.startswith('div2k'): - scale = int(eval_type.split('-')[1]) - metric_fn = partial(utils.calc_psnr, dataset='div2k', scale=scale) - elif eval_type.startswith('benchmark'): - scale = int(eval_type.split('-')[1]) - metric_fn = partial(utils.calc_psnr, dataset='benchmark', scale=scale) - else: - raise NotImplementedError - - val_res_psnr = utils.Averager(class_names) - val_res_ssim = utils.Averager(class_names) - - pbar = tqdm(loader, leave=False, desc='val') - for batch in pbar: - for k, v in batch.items(): - if torch.is_tensor(v): - batch[k] = v.to(device) - - img = (batch['img'] - img_sub) / img_div - with torch.no_grad(): - pred = model(img, batch['gt'].shape[-2:]) - if isinstance(pred, list): - pred = pred[-1] - pred = pred * gt_div + gt_sub - - if cal_metrics: - res_psnr = metric_fn[0]( - pred, - batch['gt'], - crop_border=crop_border - ) - res_ssim = metric_fn[1]( - pred, - batch['gt'], - crop_border=crop_border - ) - else: - res_psnr = torch.ones(len(pred)) - res_ssim = torch.ones(len(pred)) - - file_names = batch.get('filename', None) - if file_names is not None and save_fig: - for idx in range(len(batch['img'])): - ori_img = batch['img'][idx].cpu().numpy() * 255 - ori_img = np.clip(ori_img, a_min=0, a_max=255) - ori_img = ori_img.astype(np.uint8) - ori_img = rearrange(ori_img, 'C H W -> H W C') - - pred_img = pred[idx].cpu().numpy() * 255 - pred_img = np.clip(pred_img, a_min=0, a_max=255) - pred_img = pred_img.astype(np.uint8) - pred_img = rearrange(pred_img, 'C H W -> H W C') - - gt_img = batch['gt'][idx].cpu().numpy() * 255 - gt_img = np.clip(gt_img, a_min=0, a_max=255) - gt_img = gt_img.astype(np.uint8) - gt_img = rearrange(gt_img, 'C H W -> H W C') - - psnr = res_psnr[idx].cpu().numpy() - ssim = res_ssim[idx].cpu().numpy() - ori_file_name = f'{save_path}/{file_names[idx]}_Ori.png' - cv2.imwrite(ori_file_name, ori_img) - pred_file_name = f'{save_path}/{file_names[idx]}_{scale_ratio}X_{psnr:.2f}_{ssim:.4f}.png' - cv2.imwrite(pred_file_name, pred_img) - gt_file_name = f'{save_path}/{file_names[idx]}_GT.png' - cv2.imwrite(gt_file_name, gt_img) - - val_res_psnr.add(batch['class_name'], res_psnr) - val_res_ssim.add(batch['class_name'], res_ssim) - - if verbose: - pbar.set_description( - 'val psnr: {:.4f} ssim: {:.4f}'.format(val_res_psnr.item()['all'], val_res_ssim.item()['all'])) - - return val_res_psnr.item(), val_res_ssim.item() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--config', default='configs/test_CNN.yaml') - parser.add_argument('--model', default='checkpoints/EXP20220610_5/epoch-best.pth') - parser.add_argument('--scale_ratio', default=4, type=float) - 
parser.add_argument('--save_fig', default=False, type=bool)  # NB: argparse treats any non-empty string as True for type=bool flags
-    parser.add_argument('--save_path', default='tmp', type=str)
-    parser.add_argument('--cal_metrics', default=True, type=bool)
-    parser.add_argument('--return_class_metrics', default=False, type=bool)
-    parser.add_argument('--dataset_name', default='UC', type=str)
-    args = parser.parse_args()
-
-    with open(args.config, 'r') as f:
-        config = yaml.load(f, Loader=yaml.FullLoader)
-    root_split_file = {'UC':
-                           {
-                               'root_path': '/data/kyanchen/datasets/UC/256',
-                               'split_file': 'data_split/UC_split.json'
-                           },
-                       'AID':
-                           {
-                               'root_path': '/data/kyanchen/datasets/AID',
-                               'split_file': 'data_split/AID_split.json'
-                           }
-                       }
-    config['test_dataset']['dataset']['args']['root_path'] = root_split_file[args.dataset_name]['root_path']
-    config['test_dataset']['dataset']['args']['split_file'] = root_split_file[args.dataset_name]['split_file']
-
-    config['test_dataset']['wrapper']['args']['scale_ratio'] = args.scale_ratio
-
-    spec = config['test_dataset']
-    dataset = datasets.make(spec['dataset'])
-    dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})
-    loader = DataLoader(dataset, batch_size=spec['batch_size'], num_workers=0, pin_memory=True, shuffle=False,
-                        drop_last=False)
-    if not os.path.exists(args.model):
-        # fail explicitly if the checkpoint is missing
-        raise FileNotFoundError(args.model)
-    model_spec = torch.load(args.model)['model']
-    print(model_spec['args'])
-    model = models.make(model_spec, load_sd=True).to(device)
-
-    file_names = json.load(open(config['test_dataset']['dataset']['args']['split_file']))['test']
-    class_names = list(set([os.path.basename(os.path.dirname(x)) for x in file_names]))
-
-    crop_border = config['test_dataset']['wrapper']['args']['scale_ratio'] + 5
-    dataset_name = os.path.basename(config['test_dataset']['dataset']['args']['split_file']).split('_')[0]
-    max_scale = {'UC': 5, 'AID': 12}
-    if args.scale_ratio > max_scale[dataset_name]:
-        crop_border = int((args.scale_ratio - max_scale[dataset_name]) / 2 * 48)
-
-    if args.save_fig:
-        os.makedirs(args.save_path, exist_ok=True)
-
-    res = eval_psnr(
-        loader, class_names, model,
-        data_norm=config.get('data_norm'),
-        eval_type=config.get('eval_type'),
-        crop_border=crop_border,
-        verbose=True,
-        save_fig=args.save_fig,
-        scale_ratio=args.scale_ratio,
-        save_path=args.save_path,
-        cal_metrics=args.cal_metrics
-    )
-
-    if args.return_class_metrics:
-        keys = list(res[0].keys())
-        keys.sort()
-        print('psnr')
-        for k in keys:
-            print(f'{k}: {res[0][k]:0.2f}')
-        print('ssim')
-        for k in keys:
-            print(f'{k}: {res[1][k]:0.4f}')
-    print(f'psnr: {res[0]["all"]:0.2f}')
-    print(f'ssim: {res[1]["all"]:0.4f}')
diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
deleted file mode 100644
index 076c35843375c7aef5e58786d55ebacd281d54a3..0000000000000000000000000000000000000000
--- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from typing import Tuple
-
-import torch.nn as nn
-from mmcv.cnn import ConvModule
-from mmengine.model import BaseModule, ModuleList
-from torch import Tensor
-
-from mmdet.models.backbones.resnet import Bottleneck
-from mmdet.registry import MODELS
-from mmdet.utils import ConfigType, MultiConfig, OptConfigType, OptMultiConfig
-from .bbox_head import BBoxHead
-
-
-class BasicResBlock(BaseModule):
-    """Basic residual block.
- - This block is a little different from the block in the ResNet backbone. - The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock. - - Args: - in_channels (int): Channels of the input feature map. - out_channels (int): Channels of the output feature map. - conv_cfg (:obj:`ConfigDict` or dict, optional): The config dict - for convolution layers. - norm_cfg (:obj:`ConfigDict` or dict): The config dict for - normalization layers. - init_cfg (:obj:`ConfigDict` or dict or list[:obj:`ConfigDict` or \ - dict], optional): Initialization config dict. Defaults to None - """ - - def __init__(self, - in_channels: int, - out_channels: int, - conv_cfg: OptConfigType = None, - norm_cfg: ConfigType = dict(type='BN'), - init_cfg: OptMultiConfig = None) -> None: - super().__init__(init_cfg=init_cfg) - - # main path - self.conv1 = ConvModule( - in_channels, - in_channels, - kernel_size=3, - padding=1, - bias=False, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg) - self.conv2 = ConvModule( - in_channels, - out_channels, - kernel_size=1, - bias=False, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - # identity path - self.conv_identity = ConvModule( - in_channels, - out_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x: Tensor) -> Tensor: - """Forward function.""" - identity = x - - x = self.conv1(x) - x = self.conv2(x) - - identity = self.conv_identity(identity) - out = x + identity - - out = self.relu(out) - return out - - -@MODELS.register_module() -class DoubleConvFCBBoxHead(BBoxHead): - r"""Bbox head used in Double-Head R-CNN - - .. code-block:: none - - /-> cls - /-> shared convs -> - \-> reg - roi features - /-> cls - \-> shared fc -> - \-> reg - """ # noqa: W605 - - def __init__(self, - num_convs: int = 0, - num_fcs: int = 0, - conv_out_channels: int = 1024, - fc_out_channels: int = 1024, - conv_cfg: OptConfigType = None, - norm_cfg: ConfigType = dict(type='BN'), - init_cfg: MultiConfig = dict( - type='Normal', - override=[ - dict(type='Normal', name='fc_cls', std=0.01), - dict(type='Normal', name='fc_reg', std=0.001), - dict( - type='Xavier', - name='fc_branch', - distribution='uniform') - ]), - **kwargs) -> None: - kwargs.setdefault('with_avg_pool', True) - super().__init__(init_cfg=init_cfg, **kwargs) - assert self.with_avg_pool - assert num_convs > 0 - assert num_fcs > 0 - self.num_convs = num_convs - self.num_fcs = num_fcs - self.conv_out_channels = conv_out_channels - self.fc_out_channels = fc_out_channels - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - - # increase the channel of input features - self.res_block = BasicResBlock(self.in_channels, - self.conv_out_channels) - - # add conv heads - self.conv_branch = self._add_conv_branch() - # add fc heads - self.fc_branch = self._add_fc_branch() - - out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes - self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg) - - self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1) - self.relu = nn.ReLU() - - def _add_conv_branch(self) -> None: - """Add the fc branch which consists of a sequential of conv layers.""" - branch_convs = ModuleList() - for i in range(self.num_convs): - branch_convs.append( - Bottleneck( - inplanes=self.conv_out_channels, - planes=self.conv_out_channels // 4, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg)) - return branch_convs - - def _add_fc_branch(self) -> None: - """Add the fc branch which consists of a sequential 
of fc layers.""" - branch_fcs = ModuleList() - for i in range(self.num_fcs): - fc_in_channels = ( - self.in_channels * - self.roi_feat_area if i == 0 else self.fc_out_channels) - branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels)) - return branch_fcs - - def forward(self, x_cls: Tensor, x_reg: Tensor) -> Tuple[Tensor]: - """Forward features from the upstream network. - - Args: - x_cls (Tensor): Classification features of rois - x_reg (Tensor): Regression features from the upstream network. - - Returns: - tuple: A tuple of classification scores and bbox prediction. - - - cls_score (Tensor): Classification score predictions of rois. - each roi predicts num_classes + 1 channels. - - bbox_pred (Tensor): BBox deltas predictions of rois. each roi - predicts 4 * num_classes channels. - """ - # conv head - x_conv = self.res_block(x_reg) - - for conv in self.conv_branch: - x_conv = conv(x_conv) - - if self.with_avg_pool: - x_conv = self.avg_pool(x_conv) - - x_conv = x_conv.view(x_conv.size(0), -1) - bbox_pred = self.fc_reg(x_conv) - - # fc head - x_fc = x_cls.view(x_cls.size(0), -1) - for fc in self.fc_branch: - x_fc = self.relu(fc(x_fc)) - - cls_score = self.fc_cls(x_fc) - - return cls_score, bbox_pred diff --git a/spaces/KyanChen/RSPrompter/mmpl/structures/utils.py b/spaces/KyanChen/RSPrompter/mmpl/structures/utils.py deleted file mode 100644 index 8c8f0f3da643ba3355890c939a1483d19bdd3738..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpl/structures/utils.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List - -import torch -import torch.nn.functional as F -from mmengine.structures import LabelData - -if hasattr(torch, 'tensor_split'): - tensor_split = torch.tensor_split -else: - # A simple implementation of `tensor_split`. - def tensor_split(input: torch.Tensor, indices: list): - outs = [] - for start, end in zip([0] + indices, indices + [input.size(0)]): - outs.append(input[start:end]) - return outs - - -def cat_batch_labels(elements: List[LabelData], device=None): - """Concat the ``label`` of a batch of :obj:`LabelData` to a tensor. - - Args: - elements (List[LabelData]): A batch of :obj`LabelData`. - device (torch.device, optional): The output device of the batch label. - Defaults to None. - - Returns: - Tuple[torch.Tensor, List[int]]: The first item is the concated label - tensor, and the second item is the split indices of every sample. - """ - item = elements[0] - if 'label' not in item._data_fields: - return None, None - - labels = [] - splits = [0] - for element in elements: - labels.append(element.label) - splits.append(splits[-1] + element.label.size(0)) - batch_label = torch.cat(labels) - if device is not None: - batch_label = batch_label.to(device=device) - return batch_label, splits[1:-1] - - -def batch_label_to_onehot(batch_label, split_indices, num_classes): - """Convert a concated label tensor to onehot format. - - Args: - batch_label (torch.Tensor): A concated label tensor from multiple - samples. - split_indices (List[int]): The split indices of every sample. - num_classes (int): The number of classes. - - Returns: - torch.Tensor: The onehot format label tensor. - - Examples: - >>> import torch - >>> from mmcls.structures import batch_label_to_onehot - >>> # Assume a concated label from 3 samples. 
- >>> # label 1: [0, 1], label 2: [0, 2, 4], label 3: [3, 1] - >>> batch_label = torch.tensor([0, 1, 0, 2, 4, 3, 1]) - >>> split_indices = [2, 5] - >>> batch_label_to_onehot(batch_label, split_indices, num_classes=5) - tensor([[1, 1, 0, 0, 0], - [1, 0, 1, 0, 1], - [0, 1, 0, 1, 0]]) - """ - sparse_onehot_list = F.one_hot(batch_label, num_classes) - onehot_list = [ - sparse_onehot.sum(0) - for sparse_onehot in tensor_split(sparse_onehot_list, split_indices) - ] - return torch.stack(onehot_list) - - -def stack_batch_scores(elements, device=None): - """Stack the ``score`` of a batch of :obj:`LabelData` to a tensor. - - Args: - elements (List[LabelData]): A batch of :obj`LabelData`. - device (torch.device, optional): The output device of the batch label. - Defaults to None. - - Returns: - torch.Tensor: The stacked score tensor. - """ - item = elements[0] - if 'score' not in item._data_fields: - return None - - batch_score = torch.stack([element.score for element in elements]) - if device is not None: - batch_score = batch_score.to(device) - return batch_score diff --git a/spaces/KyanChen/RSPrompter/mmpretrain/apis/__init__.py b/spaces/KyanChen/RSPrompter/mmpretrain/apis/__init__.py deleted file mode 100644 index 6fbf443772a983c41f7273124f843bdfbb7f0f46..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmpretrain/apis/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base import BaseInferencer -from .feature_extractor import FeatureExtractor -from .image_caption import ImageCaptionInferencer -from .image_classification import ImageClassificationInferencer -from .image_retrieval import ImageRetrievalInferencer -from .model import (ModelHub, get_model, inference_model, init_model, - list_models) -from .multimodal_retrieval import (ImageToTextRetrievalInferencer, - TextToImageRetrievalInferencer) -from .nlvr import NLVRInferencer -from .visual_grounding import VisualGroundingInferencer -from .visual_question_answering import VisualQuestionAnsweringInferencer - -__all__ = [ - 'init_model', 'inference_model', 'list_models', 'get_model', 'ModelHub', - 'ImageClassificationInferencer', 'ImageRetrievalInferencer', - 'FeatureExtractor', 'ImageCaptionInferencer', - 'TextToImageRetrievalInferencer', 'VisualGroundingInferencer', - 'VisualQuestionAnsweringInferencer', 'ImageToTextRetrievalInferencer', - 'BaseInferencer', 'NLVRInferencer' -] diff --git a/spaces/LanguageBind/LanguageBind/data/process_thermal.py b/spaces/LanguageBind/LanguageBind/data/process_thermal.py deleted file mode 100644 index 8e26870dda6fd7bf6c8576326f13d161073b63a8..0000000000000000000000000000000000000000 --- a/spaces/LanguageBind/LanguageBind/data/process_thermal.py +++ /dev/null @@ -1,26 +0,0 @@ -import PIL -import cv2 -import numpy as np -import torch -from PIL import Image -from torch import nn -from torchvision import transforms -from open_clip.constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD - - - -def get_thermal_transform(args): - transform = transforms.Compose( - [ - transforms.ToTensor(), - transforms.Resize(224, interpolation=transforms.InterpolationMode.BICUBIC), - transforms.CenterCrop(224), - transforms.Normalize(OPENAI_DATASET_MEAN, OPENAI_DATASET_STD) # assume image - ] - ) - return transform - -def load_and_transform_thermal(thermal_path, transform): - thermal = Image.open(thermal_path) - thermal_outputs = transform(thermal) - return {'pixel_values': thermal_outputs} diff --git a/spaces/Latryna/roop/roop/capturer.py 
b/spaces/Latryna/roop/roop/capturer.py deleted file mode 100644 index fd49d468dd4cd45832ab9612205968207a6f45cf..0000000000000000000000000000000000000000 --- a/spaces/Latryna/roop/roop/capturer.py +++ /dev/null @@ -1,20 +0,0 @@ -from typing import Any -import cv2 - - -def get_video_frame(video_path: str, frame_number: int = 0) -> Any: - capture = cv2.VideoCapture(video_path) - frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT) - capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) - has_frame, frame = capture.read() - capture.release() - if has_frame: - return frame - return None - - -def get_video_frame_total(video_path: str) -> int: - capture = cv2.VideoCapture(video_path) - video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) - capture.release() - return video_frame_total diff --git a/spaces/Lianjd/stock_dashboard/backtrader/studies/__init__.py b/spaces/Lianjd/stock_dashboard/backtrader/studies/__init__.py deleted file mode 100644 index 7ff58ed8265502ad3b6a23ab6b67b25cec0ee938..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/studies/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - - -from backtrader import Indicator diff --git a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h b/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h deleted file mode 100644 index b2b88e8c46f19b6db0933163e57ccdb51180f517..0000000000000000000000000000000000000000 --- a/spaces/Make-A-Protagonist/Make-A-Protagonist-inference/Make-A-Protagonist/experts/GroundedSAM/GroundingDINO/groundingdino/models/GroundingDINO/csrc/MsDeformAttn/ms_deform_attn_cpu.h +++ /dev/null @@ -1,35 +0,0 @@ -/*! -************************************************************************************************** -* Deformable DETR -* Copyright (c) 2020 SenseTime. All Rights Reserved. 
-* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
-**************************************************************************************************
-* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
-**************************************************************************************************
-*/
-
-#pragma once
-#include <torch/extension.h>
-
-namespace groundingdino {
-
-at::Tensor
-ms_deform_attn_cpu_forward(
-    const at::Tensor &value,
-    const at::Tensor &spatial_shapes,
-    const at::Tensor &level_start_index,
-    const at::Tensor &sampling_loc,
-    const at::Tensor &attn_weight,
-    const int im2col_step);
-
-std::vector<at::Tensor>
-ms_deform_attn_cpu_backward(
-    const at::Tensor &value,
-    const at::Tensor &spatial_shapes,
-    const at::Tensor &level_start_index,
-    const at::Tensor &sampling_loc,
-    const at::Tensor &attn_weight,
-    const at::Tensor &grad_output,
-    const int im2col_step);
-
-} // namespace groundingdino
diff --git a/spaces/Masutxrxd/Masutxrxd/README.md b/spaces/Masutxrxd/Masutxrxd/README.md
deleted file mode 100644
index 3c639541f2d0945408857c6473db3602eb45422f..0000000000000000000000000000000000000000
--- a/spaces/Masutxrxd/Masutxrxd/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Masutxrxd
-emoji: ๐Ÿจ
-colorFrom: yellow
-colorTo: indigo
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Matthijs/image2reverb/image2reverb/mel.py b/spaces/Matthijs/image2reverb/image2reverb/mel.py
deleted file mode 100644
index fe82e51accf5bd2a315cfc004395f80c9ab2e8ed..0000000000000000000000000000000000000000
--- a/spaces/Matthijs/image2reverb/image2reverb/mel.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import numpy
-import torch
-import librosa
-
-
-class LogMel(torch.nn.Module):
-    def __init__(self):
-        super().__init__()
-        self._eps = 1e-8
-
-    def transform(self, audio):
-        m = librosa.feature.melspectrogram(audio/numpy.abs(audio).max())
-        m = numpy.log(m + self._eps)
-        return torch.Tensor(((m - m.mean()) / m.std()) * 0.8).unsqueeze(0)
-
-    def inverse(self, spec):
-        s = spec.cpu().detach().numpy()
-        s = numpy.exp((s * 5) - 15.96) - self._eps # Empirical mean and standard deviation over test set
-        y = librosa.feature.inverse.mel_to_audio(s) # Reconstruct audio
-        return y/numpy.abs(y).max()
diff --git a/spaces/Mecca/whisper-webui/app.py b/spaces/Mecca/whisper-webui/app.py
deleted file mode 100644
index b87cf044822410d851f0567497ca4adfbd324144..0000000000000000000000000000000000000000
--- a/spaces/Mecca/whisper-webui/app.py
+++ /dev/null
@@ -1,544 +0,0 @@
-from datetime import datetime
-import math
-from typing import Iterator, Union
-import argparse
-
-from io import StringIO
-import os
-import pathlib
-import tempfile
-import zipfile
-import numpy as np
-
-import torch
-
-from src.config import ApplicationConfig, VadInitialPromptMode
-from src.hooks.progressListener import ProgressListener
-from src.hooks.subTaskProgressListener import SubTaskProgressListener
-from src.hooks.whisperProgressHook import create_progress_listener_handle
-from src.languages import get_language_names
-from src.modelCache import ModelCache
-from src.source import get_audio_source_collection
-from src.vadParallel import ParallelContext, ParallelTranscription
-
-# External programs
-import ffmpeg
-
-# UI
-import gradio as gr
-
-from src.download import ExceededMaximumDuration, download_url
-from src.utils import slugify, write_srt, write_vtt
-from src.vad import
AbstractTranscription, NonSpeechStrategy, PeriodicTranscriptionConfig, TranscriptionConfig, VadPeriodicTranscription, VadSileroTranscription -from src.whisper.abstractWhisperContainer import AbstractWhisperContainer -from src.whisper.whisperFactory import create_whisper_container - -# Configure more application defaults in config.json5 - -# Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourself -MAX_FILE_PREFIX_LENGTH = 17 - -# Limit auto_parallel to a certain number of CPUs (specify vad_cpu_cores to get a higher number) -MAX_AUTO_CPU_CORES = 8 - -WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"] - -class VadOptions: - def __init__(self, vad: str = None, vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1, - vadInitialPromptMode: Union[VadInitialPromptMode, str] = VadInitialPromptMode.PREPREND_FIRST_SEGMENT): - self.vad = vad - self.vadMergeWindow = vadMergeWindow - self.vadMaxMergeSize = vadMaxMergeSize - self.vadPadding = vadPadding - self.vadPromptWindow = vadPromptWindow - self.vadInitialPromptMode = vadInitialPromptMode if isinstance(vadInitialPromptMode, VadInitialPromptMode) \ - else VadInitialPromptMode.from_string(vadInitialPromptMode) - -class WhisperTranscriber: - def __init__(self, input_audio_max_duration: float = None, vad_process_timeout: float = None, - vad_cpu_cores: int = 1, delete_uploaded_files: bool = False, output_dir: str = None, - app_config: ApplicationConfig = None): - self.model_cache = ModelCache() - self.parallel_device_list = None - self.gpu_parallel_context = None - self.cpu_parallel_context = None - self.vad_process_timeout = vad_process_timeout - self.vad_cpu_cores = vad_cpu_cores - - self.vad_model = None - self.inputAudioMaxDuration = input_audio_max_duration - self.deleteUploadedFiles = delete_uploaded_files - self.output_dir = output_dir - - self.app_config = app_config - - def set_parallel_devices(self, vad_parallel_devices: str): - self.parallel_device_list = [ device.strip() for device in vad_parallel_devices.split(",") ] if vad_parallel_devices else None - - def set_auto_parallel(self, auto_parallel: bool): - if auto_parallel: - if torch.cuda.is_available(): - self.parallel_device_list = [ str(gpu_id) for gpu_id in range(torch.cuda.device_count())] - - self.vad_cpu_cores = min(os.cpu_count(), MAX_AUTO_CPU_CORES) - print("[Auto parallel] Using GPU devices " + str(self.parallel_device_list) + " and " + str(self.vad_cpu_cores) + " CPU cores for VAD/transcription.") - - # Entry function for the simple tab - def transcribe_webui_simple(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, - progress=gr.Progress()): - - vadOptions = VadOptions(vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, self.app_config.vad_initial_prompt_mode) - - return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vadOptions, progress=progress) - - # Entry function for the full tab - def transcribe_webui_full(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, - vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, vadInitialPromptMode, - initial_prompt: str, temperature: float, best_of: int, beam_size: int, patience: float, length_penalty: float, suppress_tokens: str, - condition_on_previous_text: bool, fp16: bool, temperature_increment_on_fallback: 
float, - compression_ratio_threshold: float, logprob_threshold: float, no_speech_threshold: float, - progress=gr.Progress()): - - # Handle temperature_increment_on_fallback - if temperature_increment_on_fallback is not None: - temperature = tuple(np.arange(temperature, 1.0 + 1e-6, temperature_increment_on_fallback)) - else: - temperature = [temperature] - - vadOptions = VadOptions(vad, vadMergeWindow, vadMaxMergeSize, vadPadding, vadPromptWindow, vadInitialPromptMode) - - return self.transcribe_webui(modelName, languageName, urlData, multipleFiles, microphoneData, task, vadOptions, - initial_prompt=initial_prompt, temperature=temperature, best_of=best_of, beam_size=beam_size, patience=patience, length_penalty=length_penalty, suppress_tokens=suppress_tokens, - condition_on_previous_text=condition_on_previous_text, fp16=fp16, - compression_ratio_threshold=compression_ratio_threshold, logprob_threshold=logprob_threshold, no_speech_threshold=no_speech_threshold, - progress=progress) - - def transcribe_webui(self, modelName, languageName, urlData, multipleFiles, microphoneData, task, - vadOptions: VadOptions, progress: gr.Progress = None, **decodeOptions: dict): - try: - sources = self.__get_source(urlData, multipleFiles, microphoneData) - - try: - selectedLanguage = languageName.lower() if len(languageName) > 0 else None - selectedModel = modelName if modelName is not None else "base" - - model = create_whisper_container(whisper_implementation=self.app_config.whisper_implementation, - model_name=selectedModel, compute_type=self.app_config.compute_type, - cache=self.model_cache, models=self.app_config.models) - - # Result - download = [] - zip_file_lookup = {} - text = "" - vtt = "" - - # Write result - downloadDirectory = tempfile.mkdtemp() - source_index = 0 - - outputDirectory = self.output_dir if self.output_dir is not None else downloadDirectory - - # Progress - total_duration = sum([source.get_audio_duration() for source in sources]) - current_progress = 0 - - # A listener that will report progress to Gradio - root_progress_listener = self._create_progress_listener(progress) - - # Execute whisper - for source in sources: - source_prefix = "" - source_audio_duration = source.get_audio_duration() - - if (len(sources) > 1): - # Prefix (minimum 2 digits) - source_index += 1 - source_prefix = str(source_index).zfill(2) + "_" - print("Transcribing ", source.source_path) - - scaled_progress_listener = SubTaskProgressListener(root_progress_listener, - base_task_total=total_duration, - sub_task_start=current_progress, - sub_task_total=source_audio_duration) - - # Transcribe - result = self.transcribe_file(model, source.source_path, selectedLanguage, task, vadOptions, scaled_progress_listener, **decodeOptions) - filePrefix = slugify(source_prefix + source.get_short_name(), allow_unicode=True) - - # Update progress - current_progress += source_audio_duration - - source_download, source_text, source_vtt = self.write_result(result, filePrefix, outputDirectory) - - if len(sources) > 1: - # Add new line separators - if (len(source_text) > 0): - source_text += os.linesep + os.linesep - if (len(source_vtt) > 0): - source_vtt += os.linesep + os.linesep - - # Append file name to source text too - source_text = source.get_full_name() + ":" + os.linesep + source_text - source_vtt = source.get_full_name() + ":" + os.linesep + source_vtt - - # Add to result - download.extend(source_download) - text += source_text - vtt += source_vtt - - if (len(sources) > 1): - # Zip files support at least 260 characters, but 
we'll play it safe and use 200 - zipFilePrefix = slugify(source_prefix + source.get_short_name(max_length=200), allow_unicode=True) - - # File names in ZIP file can be longer - for source_download_file in source_download: - # Get file postfix (after last -) - filePostfix = os.path.basename(source_download_file).split("-")[-1] - zip_file_name = zipFilePrefix + "-" + filePostfix - zip_file_lookup[source_download_file] = zip_file_name - - # Create zip file from all sources - if len(sources) > 1: - downloadAllPath = os.path.join(downloadDirectory, "All_Output-" + datetime.now().strftime("%Y%m%d-%H%M%S") + ".zip") - - with zipfile.ZipFile(downloadAllPath, 'w', zipfile.ZIP_DEFLATED) as zip: - for download_file in download: - # Get file name from lookup - zip_file_name = zip_file_lookup.get(download_file, os.path.basename(download_file)) - zip.write(download_file, arcname=zip_file_name) - - download.insert(0, downloadAllPath) - - return download, text, vtt - - finally: - # Cleanup source - if self.deleteUploadedFiles: - for source in sources: - print("Deleting source file " + source.source_path) - - try: - os.remove(source.source_path) - except Exception as e: - # Ignore error - it's just a cleanup - print("Error deleting source file " + source.source_path + ": " + str(e)) - - except ExceededMaximumDuration as e: - return [], ("[ERROR]: Maximum remote video length is " + str(e.maxDuration) + "s, file was " + str(e.videoDuration) + "s"), "[ERROR]" - - def transcribe_file(self, model: AbstractWhisperContainer, audio_path: str, language: str, task: str = None, - vadOptions: VadOptions = VadOptions(), - progressListener: ProgressListener = None, **decodeOptions: dict): - - initial_prompt = decodeOptions.pop('initial_prompt', None) - - if progressListener is None: - # Default progress listener - progressListener = ProgressListener() - - if ('task' in decodeOptions): - task = decodeOptions.pop('task') - - # Callable for processing an audio file - whisperCallable = model.create_callback(language, task, initial_prompt, initial_prompt_mode=vadOptions.vadInitialPromptMode, **decodeOptions) - - # The results - if (vadOptions.vad == 'silero-vad'): - # Silero VAD where non-speech gaps are transcribed - process_gaps = self._create_silero_config(NonSpeechStrategy.CREATE_SEGMENT, vadOptions) - result = self.process_vad(audio_path, whisperCallable, self.vad_model, process_gaps, progressListener=progressListener) - elif (vadOptions.vad == 'silero-vad-skip-gaps'): - # Silero VAD where non-speech gaps are simply ignored - skip_gaps = self._create_silero_config(NonSpeechStrategy.SKIP, vadOptions) - result = self.process_vad(audio_path, whisperCallable, self.vad_model, skip_gaps, progressListener=progressListener) - elif (vadOptions.vad == 'silero-vad-expand-into-gaps'): - # Use Silero VAD where speech-segments are expanded into non-speech gaps - expand_gaps = self._create_silero_config(NonSpeechStrategy.EXPAND_SEGMENT, vadOptions) - result = self.process_vad(audio_path, whisperCallable, self.vad_model, expand_gaps, progressListener=progressListener) - elif (vadOptions.vad == 'periodic-vad'): - # Very simple VAD - mark every 5 minutes as speech. This makes it less likely that Whisper enters an infinite loop, but - # it may create a break in the middle of a sentence, causing some artifacts. 
- periodic_vad = VadPeriodicTranscription() - period_config = PeriodicTranscriptionConfig(periodic_duration=vadOptions.vadMaxMergeSize, max_prompt_window=vadOptions.vadPromptWindow) - result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config, progressListener=progressListener) - - else: - if (self._has_parallel_devices()): - # Use a simple period transcription instead, as we need to use the parallel context - periodic_vad = VadPeriodicTranscription() - period_config = PeriodicTranscriptionConfig(periodic_duration=math.inf, max_prompt_window=1) - - result = self.process_vad(audio_path, whisperCallable, periodic_vad, period_config, progressListener=progressListener) - else: - # Default VAD - result = whisperCallable.invoke(audio_path, 0, None, None, progress_listener=progressListener) - - return result - - def _create_progress_listener(self, progress: gr.Progress): - if (progress is None): - # Dummy progress listener - return ProgressListener() - - class ForwardingProgressListener(ProgressListener): - def __init__(self, progress: gr.Progress): - self.progress = progress - - def on_progress(self, current: Union[int, float], total: Union[int, float]): - # From 0 to 1 - self.progress(current / total) - - def on_finished(self): - self.progress(1) - - return ForwardingProgressListener(progress) - - def process_vad(self, audio_path, whisperCallable, vadModel: AbstractTranscription, vadConfig: TranscriptionConfig, - progressListener: ProgressListener = None): - if (not self._has_parallel_devices()): - # No parallel devices, so just run the VAD and Whisper in sequence - return vadModel.transcribe(audio_path, whisperCallable, vadConfig, progressListener=progressListener) - - gpu_devices = self.parallel_device_list - - if (gpu_devices is None or len(gpu_devices) == 0): - # No GPU devices specified, pass the current environment variable to the first GPU process. This may be NULL. 
- gpu_devices = [os.environ.get("CUDA_VISIBLE_DEVICES", None)] - - # Create parallel context if needed - if (self.gpu_parallel_context is None): - # Create a context wih processes and automatically clear the pool after 1 hour of inactivity - self.gpu_parallel_context = ParallelContext(num_processes=len(gpu_devices), auto_cleanup_timeout_seconds=self.vad_process_timeout) - # We also need a CPU context for the VAD - if (self.cpu_parallel_context is None): - self.cpu_parallel_context = ParallelContext(num_processes=self.vad_cpu_cores, auto_cleanup_timeout_seconds=self.vad_process_timeout) - - parallel_vad = ParallelTranscription() - return parallel_vad.transcribe_parallel(transcription=vadModel, audio=audio_path, whisperCallable=whisperCallable, - config=vadConfig, cpu_device_count=self.vad_cpu_cores, gpu_devices=gpu_devices, - cpu_parallel_context=self.cpu_parallel_context, gpu_parallel_context=self.gpu_parallel_context, - progress_listener=progressListener) - - def _has_parallel_devices(self): - return (self.parallel_device_list is not None and len(self.parallel_device_list) > 0) or self.vad_cpu_cores > 1 - - def _concat_prompt(self, prompt1, prompt2): - if (prompt1 is None): - return prompt2 - elif (prompt2 is None): - return prompt1 - else: - return prompt1 + " " + prompt2 - - def _create_silero_config(self, non_speech_strategy: NonSpeechStrategy, vadOptions: VadOptions): - # Use Silero VAD - if (self.vad_model is None): - self.vad_model = VadSileroTranscription() - - config = TranscriptionConfig(non_speech_strategy = non_speech_strategy, - max_silent_period=vadOptions.vadMergeWindow, max_merge_size=vadOptions.vadMaxMergeSize, - segment_padding_left=vadOptions.vadPadding, segment_padding_right=vadOptions.vadPadding, - max_prompt_window=vadOptions.vadPromptWindow) - - return config - - def write_result(self, result: dict, source_name: str, output_dir: str): - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - text = result["text"] - language = result["language"] - languageMaxLineWidth = self.__get_max_line_width(language) - - print("Max line width " + str(languageMaxLineWidth)) - vtt = self.__get_subs(result["segments"], "vtt", languageMaxLineWidth) - srt = self.__get_subs(result["segments"], "srt", languageMaxLineWidth) - - output_files = [] - output_files.append(self.__create_file(srt, output_dir, source_name + "-subs.srt")); - output_files.append(self.__create_file(vtt, output_dir, source_name + "-subs.vtt")); - output_files.append(self.__create_file(text, output_dir, source_name + "-transcript.txt")); - - return output_files, text, vtt - - def clear_cache(self): - self.model_cache.clear() - self.vad_model = None - - def __get_source(self, urlData, multipleFiles, microphoneData): - return get_audio_source_collection(urlData, multipleFiles, microphoneData, self.inputAudioMaxDuration) - - def __get_max_line_width(self, language: str) -> int: - if (language and language.lower() in ["japanese", "ja", "chinese", "zh"]): - # Chinese characters and kana are wider, so limit line length to 40 characters - return 40 - else: - # TODO: Add more languages - # 80 latin characters should fit on a 1080p/720p screen - return 80 - - def __get_subs(self, segments: Iterator[dict], format: str, maxLineWidth: int) -> str: - segmentStream = StringIO() - - if format == 'vtt': - write_vtt(segments, file=segmentStream, maxLineWidth=maxLineWidth) - elif format == 'srt': - write_srt(segments, file=segmentStream, maxLineWidth=maxLineWidth) - else: - raise Exception("Unknown format " + format) - - 
segmentStream.seek(0) - return segmentStream.read() - - def __create_file(self, text: str, directory: str, fileName: str) -> str: - # Write the text to a file - with open(os.path.join(directory, fileName), 'w+', encoding="utf-8") as file: - file.write(text) - - return file.name - - def close(self): - print("Closing parallel contexts") - self.clear_cache() - - if (self.gpu_parallel_context is not None): - self.gpu_parallel_context.close() - if (self.cpu_parallel_context is not None): - self.cpu_parallel_context.close() - - -def create_ui(app_config: ApplicationConfig): - ui = WhisperTranscriber(app_config.input_audio_max_duration, app_config.vad_process_timeout, app_config.vad_cpu_cores, - app_config.delete_uploaded_files, app_config.output_dir, app_config) - - # Specify a list of devices to use for parallel processing - ui.set_parallel_devices(app_config.vad_parallel_devices) - ui.set_auto_parallel(app_config.auto_parallel) - - is_whisper = False - - if app_config.whisper_implementation == "whisper": - implementation_name = "Whisper" - is_whisper = True - elif app_config.whisper_implementation in ["faster-whisper", "faster_whisper"]: - implementation_name = "Faster Whisper" - else: - # Try to convert from camel-case to title-case - implementation_name = app_config.whisper_implementation.title().replace("_", " ").replace("-", " ") - - ui_description = implementation_name + " is a general-purpose speech recognition model. It is trained on a large dataset of diverse " - ui_description += " audio and is also a multi-task model that can perform multilingual speech recognition " - ui_description += " as well as speech translation and language identification. " - - ui_description += "\n\n\n\nFor longer audio files (>10 minutes) not in English, it is recommended that you select Silero VAD (Voice Activity Detector) in the VAD option." - - # Recommend faster-whisper - if is_whisper: - ui_description += "\n\n\n\nFor faster inference on GPU, try [faster-whisper](https://huggingface.co/spaces/aadnk/faster-whisper-webui)." - - if app_config.input_audio_max_duration > 0: - ui_description += "\n\n" + "Max audio file length: " + str(app_config.input_audio_max_duration) + " s" - - ui_article = "Read the [documentation here](https://gitlab.com/aadnk/whisper-webui/-/blob/main/docs/options.md)." 
- - whisper_models = app_config.get_model_names() - - simple_inputs = lambda : [ - gr.Dropdown(choices=whisper_models, value=app_config.default_model_name, label="Model"), - gr.Dropdown(choices=sorted(get_language_names()), label="Language", value=app_config.language), - gr.Text(label="URL (YouTube, etc.)"), - gr.File(label="Upload Files", file_count="multiple"), - gr.Audio(source="microphone", type="filepath", label="Microphone Input"), - gr.Dropdown(choices=["transcribe", "translate"], label="Task", value=app_config.task), - gr.Dropdown(choices=["none", "silero-vad", "silero-vad-skip-gaps", "silero-vad-expand-into-gaps", "periodic-vad"], value=app_config.default_vad, label="VAD"), - gr.Number(label="VAD - Merge Window (s)", precision=0, value=app_config.vad_merge_window), - gr.Number(label="VAD - Max Merge Size (s)", precision=0, value=app_config.vad_max_merge_size), - gr.Number(label="VAD - Padding (s)", precision=None, value=app_config.vad_padding), - gr.Number(label="VAD - Prompt Window (s)", precision=None, value=app_config.vad_prompt_window), - ] - - simple_transcribe = gr.Interface(fn=ui.transcribe_webui_simple, description=ui_description, article=ui_article, inputs=simple_inputs(), outputs=[ - gr.File(label="Download"), - gr.Text(label="Transcription"), - gr.Text(label="Segments") - ]) - - full_description = ui_description + "\n\n\n\n" + "Be careful when changing some of the options in the full interface - this can cause the model to crash." - - full_transcribe = gr.Interface(fn=ui.transcribe_webui_full, description=full_description, article=ui_article, inputs=[ - *simple_inputs(), - gr.Dropdown(choices=["prepend_first_segment", "prepend_all_segments"], value=app_config.vad_initial_prompt_mode, label="VAD - Initial Prompt Mode"), - gr.TextArea(label="Initial Prompt"), - gr.Number(label="Temperature", value=app_config.temperature), - gr.Number(label="Best Of - Non-zero temperature", value=app_config.best_of, precision=0), - gr.Number(label="Beam Size - Zero temperature", value=app_config.beam_size, precision=0), - gr.Number(label="Patience - Zero temperature", value=app_config.patience), - gr.Number(label="Length Penalty - Any temperature", value=app_config.length_penalty), - gr.Text(label="Suppress Tokens - Comma-separated list of token IDs", value=app_config.suppress_tokens), - gr.Checkbox(label="Condition on previous text", value=app_config.condition_on_previous_text), - gr.Checkbox(label="FP16", value=app_config.fp16), - gr.Number(label="Temperature increment on fallback", value=app_config.temperature_increment_on_fallback), - gr.Number(label="Compression ratio threshold", value=app_config.compression_ratio_threshold), - gr.Number(label="Logprob threshold", value=app_config.logprob_threshold), - gr.Number(label="No speech threshold", value=app_config.no_speech_threshold) - ], outputs=[ - gr.File(label="Download"), - gr.Text(label="Transcription"), - gr.Text(label="Segments") - ]) - - demo = gr.TabbedInterface([simple_transcribe, full_transcribe], tab_names=["Simple", "Full"]) - - # Queue up the demo - if app_config.queue_concurrency_count is not None and app_config.queue_concurrency_count > 0: - demo.queue(concurrency_count=app_config.queue_concurrency_count) - - demo.launch(share=app_config.share, server_name=app_config.server_name, server_port=app_config.server_port) - - # Clean up - ui.close() - -if __name__ == '__main__': - default_app_config = ApplicationConfig.create_default() - whisper_models = default_app_config.get_model_names() - - # Environment variable overrides - 
default_whisper_implementation = os.environ.get("WHISPER_IMPLEMENTATION", default_app_config.whisper_implementation) - - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument("--input_audio_max_duration", type=int, default=default_app_config.input_audio_max_duration, \ - help="Maximum audio file length in seconds, or -1 for no limit.") # 600 - parser.add_argument("--share", type=bool, default=default_app_config.share, \ - help="True to share the app on HuggingFace.") # False - parser.add_argument("--server_name", type=str, default=default_app_config.server_name, \ - help="The host or IP to bind to. If None, bind to localhost.") # None - parser.add_argument("--server_port", type=int, default=default_app_config.server_port, \ - help="The port to bind to.") # 7860 - parser.add_argument("--queue_concurrency_count", type=int, default=default_app_config.queue_concurrency_count, \ - help="The number of concurrent requests to process.") # 1 - parser.add_argument("--default_model_name", type=str, choices=whisper_models, default=default_app_config.default_model_name, \ - help="The default model name.") # medium - parser.add_argument("--default_vad", type=str, default=default_app_config.default_vad, \ - help="The default VAD.") # silero-vad - parser.add_argument("--vad_initial_prompt_mode", type=str, default=default_app_config.vad_initial_prompt_mode, choices=["prepend_all_segments", "prepend_first_segment"], \ - help="Whether or not to prepend the initial prompt to each VAD segment (prepend_all_segments), or just the first segment (prepend_first_segment)") # prepend_first_segment - parser.add_argument("--vad_parallel_devices", type=str, default=default_app_config.vad_parallel_devices, \ - help="A commma delimited list of CUDA devices to use for parallel processing. If None, disable parallel processing.") # "" - parser.add_argument("--vad_cpu_cores", type=int, default=default_app_config.vad_cpu_cores, \ - help="The number of CPU cores to use for VAD pre-processing.") # 1 - parser.add_argument("--vad_process_timeout", type=float, default=default_app_config.vad_process_timeout, \ - help="The number of seconds before inactivate processes are terminated. Use 0 to close processes immediately, or None for no timeout.") # 1800 - parser.add_argument("--auto_parallel", type=bool, default=default_app_config.auto_parallel, \ - help="True to use all available GPUs and CPU cores for processing. 
Use vad_cpu_cores/vad_parallel_devices to specify the number of CPU cores/GPUs to use.") # False
-    parser.add_argument("--output_dir", "-o", type=str, default=default_app_config.output_dir, \
-                        help="directory to save the outputs")
-    parser.add_argument("--whisper_implementation", type=str, default=default_whisper_implementation, choices=["whisper", "faster-whisper"],\
-                        help="the Whisper implementation to use")
-    parser.add_argument("--compute_type", type=str, default=default_app_config.compute_type, choices=["default", "auto", "int8", "int8_float16", "int16", "float16", "float32"], \
-                        help="the compute type to use for inference")
-
-    args = parser.parse_args().__dict__
-
-    updated_config = default_app_config.update(**args)
-
-    create_ui(app_config=updated_config)
\ No newline at end of file
diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/commands/image_gen.py b/spaces/MetaWabbit/Auto-GPT/autogpt/commands/image_gen.py
deleted file mode 100644
index 0809fcdd3e38b52a2ce09ca1444f2574813d40f9..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/autogpt/commands/image_gen.py
+++ /dev/null
@@ -1,163 +0,0 @@
-""" Image Generation Module for AutoGPT."""
-import io
-import os.path
-import uuid
-from base64 import b64decode
-
-import openai
-import requests
-from PIL import Image
-
-from autogpt.config import Config
-from autogpt.workspace import path_in_workspace
-
-CFG = Config()
-
-
-def generate_image(prompt: str, size: int = 256) -> str:
-    """Generate an image from a prompt.
-
-    Args:
-        prompt (str): The prompt to use
-        size (int, optional): The size of the image. Defaults to 256. (Not supported by HuggingFace)
-
-    Returns:
-        str: The filename of the image
-    """
-    filename = f"{str(uuid.uuid4())}.jpg"
-
-    # DALL-E
-    if CFG.image_provider == "dalle":
-        return generate_image_with_dalle(prompt, filename, size)
-    # HuggingFace
-    elif CFG.image_provider == "huggingface":
-        return generate_image_with_hf(prompt, filename)
-    # SD WebUI
-    elif CFG.image_provider == "sdwebui":
-        return generate_image_with_sd_webui(prompt, filename, size)
-    return "No Image Provider Set"
-
-
-def generate_image_with_hf(prompt: str, filename: str) -> str:
-    """Generate an image with HuggingFace's API.
-
-    Args:
-        prompt (str): The prompt to use
-        filename (str): The filename to save the image to
-
-    Returns:
-        str: The filename of the image
-    """
-    API_URL = (
-        f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
-    )
-    if CFG.huggingface_api_token is None:
-        raise ValueError(
-            "You need to set your Hugging Face API token in the config file."
-        )
-    headers = {
-        "Authorization": f"Bearer {CFG.huggingface_api_token}",
-        "X-Use-Cache": "false",
-    }
-
-    response = requests.post(
-        API_URL,
-        headers=headers,
-        json={
-            "inputs": prompt,
-        },
-    )
-
-    image = Image.open(io.BytesIO(response.content))
-    print(f"Image Generated for prompt:{prompt}")
-
-    image.save(path_in_workspace(filename))
-
-    return f"Saved to disk:{filename}"
-
-
-def generate_image_with_dalle(prompt: str, filename: str, size: int = 256) -> str:
-    """Generate an image with DALL-E.
-
-    Args:
-        prompt (str): The prompt to use
-        filename (str): The filename to save the image to
-        size (int, optional): The size of the image. Defaults to 256.
-
-    Returns:
-        str: The filename of the image
-    """
-    openai.api_key = CFG.openai_api_key
-
-    # Check for supported image sizes
-    if size not in [256, 512, 1024]:
-        closest = min([256, 512, 1024], key=lambda x: abs(x - size))
-        print(
-            f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
-        )
-        size = closest
-
-    response = openai.Image.create(
-        prompt=prompt,
-        n=1,
-        size=f"{size}x{size}",
-        response_format="b64_json",
-    )
-
-    print(f"Image Generated for prompt:{prompt}")
-
-    image_data = b64decode(response["data"][0]["b64_json"])
-
-    with open(path_in_workspace(filename), mode="wb") as png:
-        png.write(image_data)
-
-    return f"Saved to disk:{filename}"
-
-
-def generate_image_with_sd_webui(
-    prompt: str,
-    filename: str,
-    size: int = 512,
-    negative_prompt: str = "",
-    extra: dict = {},
-) -> str:
-    """Generate an image with Stable Diffusion webui.
-    Args:
-        prompt (str): The prompt to use
-        filename (str): The filename to save the image to
-        size (int, optional): The size of the image. Defaults to 512.
-        negative_prompt (str, optional): The negative prompt to use. Defaults to "".
-        extra (dict, optional): Extra parameters to pass to the API. Defaults to {}.
-    Returns:
-        str: The filename of the image
-    """
-    # Create a session and set the basic auth if needed
-    s = requests.Session()
-    if CFG.sd_webui_auth:
-        username, password = CFG.sd_webui_auth.split(":")
-        s.auth = (username, password or "")
-
-    # Generate the images through the session so the configured auth is sent
-    response = s.post(
-        f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
-        json={
-            "prompt": prompt,
-            "negative_prompt": negative_prompt,
-            "sampler_index": "DDIM",
-            "steps": 20,
-            "cfg_scale": 7.0,
-            "width": size,
-            "height": size,
-            "n_iter": 1,
-            **extra,
-        },
-    )
-
-    print(f"Image Generated for prompt:{prompt}")
-
-    # Save the image to disk
-    response = response.json()
-    b64 = b64decode(response["images"][0].split(",", 1)[0])
-    image = Image.open(io.BytesIO(b64))
-    image.save(path_in_workspace(filename))
-
-    return f"Saved to disk:{filename}"
diff --git a/spaces/MohamedAlgebali/VideoQuERI/gpt3.py b/spaces/MohamedAlgebali/VideoQuERI/gpt3.py
deleted file mode 100644
index 76e468fdebf1c59953164bbad825765e9638ecc6..0000000000000000000000000000000000000000
--- a/spaces/MohamedAlgebali/VideoQuERI/gpt3.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from aiohttp import ClientSession, ClientError
-from json import loads, JSONDecodeError
-
-
-class Completion:
-    """
-    This class provides methods for generating completions based on prompts.
-    """
-
-    async def create(self, prompt):
-        """
-        Create a new completion based on the given prompt.
-
-        Args:
-            prompt (str): The prompt to generate a completion for.
-
-        Returns:
-            str: The generated completion.
-
-        Raises:
-            Exception: If unable to fetch the response.
- """ - try: - async with ClientSession() as session: - async with session.post( - "https://ava-alpha-api.codelink.io/api/chat", - headers={"Content-Type": "application/json"}, - json={ - "model": "gpt-4", - "temperature": 0.6, - "stream": True, - "messages": [ - { - "role": "system", - "content": "You are Ava, an AI assistant.", - }, - {"role": "user", "content": prompt}, - ], - }, - timeout=45, - ) as resp_obj: - resp = "" - async for line in resp_obj.content: - line_text = line.decode("utf-8").strip() - if line_text.startswith("data:"): - data = line_text.split("data:")[1] - try: - data_json = loads(data) - if "choices" in data_json: - choices = data_json["choices"] - for choice in choices: - if ( - "finish_reason" in choice - and choice["finish_reason"] == "stop" - ): - break - if ( - "delta" in choice - and "content" in choice["delta"] - ): - resp += choice["delta"]["content"] - except JSONDecodeError: - pass - return resp - except: - raise Exception("Unable to fetch the response.") diff --git a/spaces/Nyari/Super-Resolution-Anime-Diffusion/Waifu2x/model_check_points/ReadME.md b/spaces/Nyari/Super-Resolution-Anime-Diffusion/Waifu2x/model_check_points/ReadME.md deleted file mode 100644 index e432a1c7bd5de45f086ba1fd6b06ca712a1d806b..0000000000000000000000000000000000000000 --- a/spaces/Nyari/Super-Resolution-Anime-Diffusion/Waifu2x/model_check_points/ReadME.md +++ /dev/null @@ -1,34 +0,0 @@ -# Resume & Use Model Check Points - -This folder contains check points for models and their weights. They are generated from [PyTorch's pickle](https://pytorch.org/docs/master/notes/serialization.html). - -Model specifications are in each folder's ReadME. - -Pickle names with "model" contain the entire models, and they can be used as an freeze module by calling the "forward_checkpoint" function to generate images. - -Example: -```python -import torch -# No need to reconstruct the model -model = torch.load("./DCSCN/DCSCN_model_387epos_L12_noise_1.pt") -x = torch.randn((1,3,10,10)), torch.randn((1,3,20,20)) -out = model.forward_checkpoint(a) -``` - -Pickle names with "weights" are model weights, and they are named dictionaries. - -Example: -```python -model = DCSCN(*) # the setting must be the same to load check points weights. -model.load_state_dict(torch.load("./DCSCN/DCSCN_weights_387epos_L12_noise_1.pt")) -# then you can resume the model training -``` - -Model check poins in Upconv_7 and vgg_7 are from [waifu2x's repo](https://github.com/nagadomi/waifu2x/tree/master/models). To load weights into a model, please use ```load_pre_train_weights``` function. - -Example: -```python -model = UpConv_7() -model.load_pre_train_weights(json_file=...) -# then the model is ready to use -``` diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/modules/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/modules/__init__.py deleted file mode 100644 index 11603217a188f420ea849ae0fde19979736ba208..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/fairseq/model_parallel/modules/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -from .multihead_attention import ModelParallelMultiheadAttention -from .transformer_layer import ( - ModelParallelTransformerEncoderLayer, - ModelParallelTransformerDecoderLayer, -) - -__all__ = [ - "ModelParallelMultiheadAttention", - "ModelParallelTransformerEncoderLayer", - "ModelParallelTransformerDecoderLayer", -] diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/pay_less_attention_paper/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/pay_less_attention_paper/README.md deleted file mode 100644 index 5adab11f4dc3461f9e7126ac391b04e703616e6b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/pay_less_attention_paper/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019) - -This page contains pointers to pre-trained models as well as instructions on how to train new models for [our paper](https://arxiv.org/abs/1901.10430). - -## Citation: -```bibtex -@inproceedings{wu2018pay, - title = {Pay Less Attention with Lightweight and Dynamic Convolutions}, - author = {Felix Wu and Angela Fan and Alexei Baevski and Yann Dauphin and Michael Auli}, - booktitle = {International Conference on Learning Representations}, - year = {2019}, - url = {https://arxiv.org/abs/1901.10430}, -} -``` - -## Translation - -### Pre-trained models -For some datasets we release models without GLUs which are faster at inference. - -Model | Description | Dataset | Download ----|---|---|--- -`lightconv.no_glu.iwslt14.de-en` | LightConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz)
    IWSLT14 test:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2) -`dynamicconv.no_glu.iwslt14.de-en` | DynamicConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz)
    IWSLT14 test:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2) -`lightconv.no_glu.wmt16.en-de` | LightConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz)
    newstest2014 (shared vocab):
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`dynamicconv.no_glu.wmt16.en-de` | DynamicConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz)
    newstest2014 (shared vocab):
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`lightconv.glu.wmt16.en-de` | LightConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz)
    newstest2014 (shared vocab):
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`dynamicconv.glu.wmt16.en-de` | DynamicConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz)
    newstest2014 (shared vocab):
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`lightconv.glu.wmt14.en-fr` | LightConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz)
    newstest2014:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2) -`dynamicconv.glu.wmt14.en-fr` | DynamicConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz)
    newstest2014:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2) -`lightconv.glu.wmt17.zh-en` | LightConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz)
    newstest2017:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2) -`dynamicconv.glu.wmt17.zh-en` | DynamicConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model:
    [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz)
    newstest2017:
    [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2) - -### Memory-Efficient CUDA Kernels - -Since the PyTorch implementations of Light/Dynamic conv are quite memory intensive, we have developed CUDA kernels that implement the light and dynamic convolution operator in a memory-efficient and performant manner. For large sequence lengths, these kernels save about 50% memory compared to the PyTorch equivalent. - -To install the kernels, use the commands below. Once installed, they will automatically be used in place of the PyTorch implementations whenever a light or dynamic convolution is used. - -```sh -# to install lightconv -cd fairseq/modules/lightconv_layer -python cuda_function_gen.py -python setup.py install - -# to install dynamicconv -cd fairseq/modules/dynamicconv_layer -python cuda_function_gen.py -python setup.py install -``` - -### Example usage (torch.hub) - -We require a few additional Python dependencies for preprocessing: -```bash -pip install sacremoses subword_nmt -``` - -Interactive translation via PyTorch Hub: -```python -import torch - -# List available models -torch.hub.list('pytorch/fairseq') # [..., 'lightconv.glu.wmt17.zh-en', ... ] - -# Load a transformer trained on WMT'16 En-De -zh2en = torch.hub.load('pytorch/fairseq', 'lightconv.glu.wmt17.zh-en', tokenizer='moses', bpe='subword_nmt') - -# The underlying model is available under the *models* attribute -assert isinstance(zh2en.models[0], fairseq.models.lightconv.LightConvModel) - -# Translate a sentence -zh2en.translate('ไฝ ๅฅฝ ไธ–็•Œ') -# 'Hello World' -``` - -Loading custom models: -```python -from fairseq.models.lightconv import LightConvModel -en2fr = LightConvModel.from_pretrained( - '/path/to/checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='data-bin/wmt14_en_fr', - bpe='subword_nmt', - bpe_codes='data-bin/wmt14_en_fr/en.code' -) -en2fr.translate('Hello world!') -# 'Bonjour le monde' -``` - -### Preprocessing the training datasets - -Please follow the instructions in [`examples/translation/README.md`](../translation/README.md) to preprocess the data. - -### Training and evaluation options: -To use the model without GLU, please set `--encoder-glu 0 --decoder-glu 0`. -For LightConv, please use `--encoder-conv-type lightweight --decoder-conv-type lightweight`, otherwise the default is DynamicConv. -For best BLEU results, lenpen may need to be manually tuned. - -To use the CUDA kernels, first install the PyTorch modules using the commands -above. Once the CUDA modules are installed, they will automatically be used -instead of the PyTorch modules. 
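As a rough illustration of what these operators compute, here is a minimal PyTorch sketch of the lightweight-convolution idea: a depthwise convolution whose kernels are softmax-normalized and shared across all channels of a head. This is a self-contained sketch written for this page, not the fairseq `LightweightConv` module; the shapes and head count are assumptions of the sketch, and the paper's GLU input projections and weight dropout are omitted.

```python
import torch
import torch.nn.functional as F

def lightweight_conv(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    """x: (batch, channels, time); weight: (num_heads, kernel_size)."""
    B, C, T = x.shape
    H, K = weight.shape
    assert C % H == 0, "channels must divide evenly into heads"
    w = F.softmax(weight, dim=-1)           # each kernel is normalized over its taps
    w = w.repeat_interleave(C // H, dim=0)  # all channels in a head share one kernel
    w = w.unsqueeze(1)                      # (C, 1, K): one depthwise kernel per channel
    return F.conv1d(x, w, padding=K // 2, groups=C)

x = torch.randn(2, 64, 100)                 # (batch, channels, time)
y = lightweight_conv(x, torch.randn(8, 7))  # 8 heads, kernel size 7 -> (2, 64, 100)
```

DynamicConv differs only in that the kernel at each timestep is predicted from the current input by a linear projection rather than being a fixed parameter.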
### IWSLT14 De-En
Training and evaluating DynamicConv (without GLU) on a GPU:
```sh
# Training
SAVE="save/dynamic_conv_iwslt"
mkdir -p $SAVE
CUDA_VISIBLE_DEVICES=0 $(which fairseq-train) data-bin/iwslt14.tokenized.de-en \
    --clip-norm 0 --optimizer adam --lr 0.0005 \
    --source-lang de --target-lang en --max-tokens 4000 --no-progress-bar \
    --log-interval 100 --stop-min-lr '1e-09' --weight-decay 0.0001 \
    --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
    --lr-scheduler inverse_sqrt \
    --ddp-backend=legacy_ddp \
    --max-update 50000 --warmup-updates 4000 --warmup-init-lr '1e-07' \
    --adam-betas '(0.9, 0.98)' --keep-last-epochs 10 \
    -a lightconv_iwslt_de_en --save-dir $SAVE \
    --dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \
    --encoder-glu 0 --decoder-glu 0
python scripts/average_checkpoints.py --inputs $SAVE \
    --num-epoch-checkpoints 10 --output "${SAVE}/checkpoint_last10_avg.pt"

# Evaluation
CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/iwslt14.tokenized.de-en \
    --path "${SAVE}/checkpoint_last10_avg.pt" \
    --batch-size 128 --beam 4 --remove-bpe --lenpen 1 --gen-subset test --quiet
```

### WMT16 En-De
Training and evaluating DynamicConv (with GLU) on WMT16 En-De using a cosine scheduler on one machine with 8 V100 GPUs:
```sh
# Training
SAVE="save/dynamic_conv_wmt16en2de"
mkdir -p $SAVE
python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \
    data-bin/wmt16_en_de_bpe32k --fp16 --log-interval 100 --no-progress-bar \
    --max-update 30000 --share-all-embeddings --optimizer adam \
    --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \
    --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
    --stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \
    --ddp-backend=legacy_ddp --max-tokens 3584 \
    --lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \
    --lr-shrink 1 --lr 0.001 --min-lr 1e-7 \
    --t-mult 1 --lr-period-updates 20000 \
    --arch lightconv_wmt_en_de_big --save-dir $SAVE \
    --dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \
    --encoder-glu 1 --decoder-glu 1

# Evaluation
CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/wmt16.en-de.joined-dict.newstest2014 \
    --path "${SAVE}/checkpoint_best.pt" \
    --batch-size 128 --beam 5 --remove-bpe --lenpen 0.5 --gen-subset test > wmt16_gen.txt
bash scripts/compound_split_bleu.sh wmt16_gen.txt
```

### WMT14 En-Fr
Training DynamicConv (with GLU) on WMT14 En-Fr using a cosine scheduler on one machine with 8 V100 GPUs:
```sh
# Training
SAVE="save/dynamic_conv_wmt14en2fr"
mkdir -p $SAVE
python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \
    data-bin/wmt14_en_fr --fp16 --log-interval 100 --no-progress-bar \
    --max-update 30000 --share-all-embeddings --optimizer adam \
    --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \
    --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \
    --stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \
    --ddp-backend=legacy_ddp --max-tokens 3584 \
    --lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \
    --lr-shrink 1 --lr 0.001 --min-lr 1e-7 \
    --t-mult 1 --lr-period-updates 70000 \
    --arch lightconv_wmt_en_fr_big --save-dir $SAVE \
    --dropout 0.1 --attention-dropout 0.1 --weight-dropout 0.1 \
    --encoder-glu 1 --decoder-glu 1

# Evaluation
CUDA_VISIBLE_DEVICES=0 fairseq-generate \
    data-bin/wmt14.en-fr.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" \
    --batch-size 128 --beam 5 --remove-bpe --lenpen 0.9 --gen-subset test
```
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/pointer_generator/preprocess.py b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/pointer_generator/preprocess.py deleted file mode 100644 index f72ca7d3d97e12ab7b405dcff314bdb6c0a78755..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/pointer_generator/preprocess.py +++ /dev/null @@ -1,102 +0,0 @@
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
from itertools import zip_longest


def replace_oovs(source_in, target_in, vocabulary, source_out, target_out):
    """Replaces out-of-vocabulary words in source and target text with <unk-N>,
    where N is the position of the word in the source sequence.
    """

    def format_unk(pos):
        return "<unk-{}>".format(pos)

    if target_in is None:
        target_in = []

    for seq_num, (source_seq, target_seq) in enumerate(
        zip_longest(source_in, target_in)
    ):
        source_seq_out = []
        target_seq_out = []

        word_to_pos = dict()
        for position, token in enumerate(source_seq.strip().split()):
            if token in vocabulary:
                token_out = token
            else:
                if token in word_to_pos:
                    oov_pos = word_to_pos[token]
                else:
                    word_to_pos[token] = position
                    oov_pos = position
                token_out = format_unk(oov_pos)
            source_seq_out.append(token_out)
        source_out.write(" ".join(source_seq_out) + "\n")

        if target_seq is not None:
            for token in target_seq.strip().split():
                if token in word_to_pos:
                    token_out = format_unk(word_to_pos[token])
                else:
                    token_out = token
                target_seq_out.append(token_out)
            if target_out is not None:
                target_out.write(" ".join(target_seq_out) + "\n")


def main():
    parser = argparse.ArgumentParser(
        description="Replaces out-of-vocabulary words in both source and target "
        "sequences with <unk-N> tokens that indicate the position of the word "
        "in the source sequence."
    )
    parser.add_argument(
        "--source", type=str, help="text file with source sequences", required=True
    )
    parser.add_argument(
        "--target", type=str, help="text file with target sequences", default=None
    )
    parser.add_argument("--vocab", type=str, help="vocabulary file", required=True)
    parser.add_argument(
        "--source-out",
        type=str,
        help="where to write source sequences with <unk-N> entries",
        required=True,
    )
    parser.add_argument(
        "--target-out",
        type=str,
        help="where to write target sequences with <unk-N> entries",
        default=None,
    )
    args = parser.parse_args()

    with open(args.vocab, encoding="utf-8") as vocab:
        vocabulary = vocab.read().splitlines()

    target_in = (
        open(args.target, "r", encoding="utf-8") if args.target is not None else None
    )
    target_out = (
        open(args.target_out, "w", encoding="utf-8")
        if args.target_out is not None
        else None
    )
    with open(args.source, "r", encoding="utf-8") as source_in, open(
        args.source_out, "w", encoding="utf-8"
    ) as source_out:
        replace_oovs(source_in, target_in, vocabulary, source_out, target_out)
    if target_in is not None:
        target_in.close()
    if target_out is not None:
        target_out.close()


if __name__ == "__main__":
    main()
diff --git a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/README.md b/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/README.md deleted file mode 100644 index 4a3ae54b857c43621c9fb67ee4b214584beec835..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/fairseq/examples/speech_synthesis/README.md +++ /dev/null @@ -1,16 +0,0 @@
Speech Synthesis (S^2)
===

Speech synthesis with fairseq.

- Autoregressive and non-autoregressive models
- Multi-speaker synthesis
- Audio preprocessing
- Automatic metrics
- Similar data configuration as [S2T](../speech_to_text/README.md)


## Examples
- [Single-speaker synthesis on LJSpeech](docs/ljspeech_example.md)
- [Multi-speaker synthesis on VCTK](docs/vctk_example.md)
- [Multi-speaker synthesis on Common Voice](docs/common_voice_example.md)
diff --git a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/byte_level_bpe/get_data.sh b/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/byte_level_bpe/get_data.sh deleted file mode 100644 index c3d55d4925a6e6e23d12d293f093c1ae14acf76e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Visual_Grounding/fairseq/examples/byte_level_bpe/get_data.sh +++ /dev/null @@ -1,47 +0,0 @@
#!/bin/bash

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

PY_BIN_ROOT=

# PyPI dependency
${PY_BIN_ROOT}pip install sentencepiece sacremoses

# Get data
if [ ! -d "data" ]; then
    mkdir data
fi

if [ ! \
-f "data/fr-en.tgz" ]; then - wget https://wit3.fbk.eu/archive/2017-01-trnted/texts/fr/en/fr-en.tgz -P data - tar xvf data/fr-en.tgz -C data -fi -${PY_BIN_ROOT}python get_bitext.py --bpe-vocab 16384 --byte-vocab --char-vocab -for VOCAB_SIZE in 2048 4096; do - ${PY_BIN_ROOT}python get_bitext.py --bpe-vocab ${VOCAB_SIZE} --bbpe-vocab ${VOCAB_SIZE} -done -rm -r data/fr-en data/fr-en.tgz - -# Generate binary dataset -${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_bpe16384 --joined-dictionary \ - --workers "$(nproc)" --trainpref data/train.moses.bpe16384 --validpref data/valid.moses.bpe16384 \ - --testpref data/test.moses.bpe16384 - -${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_bytes --joined-dictionary \ - --workers "$(nproc)" --trainpref data/train.moses.bytes --validpref data/valid.moses.bytes \ - --testpref data/test.moses.bytes - -${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir data/bin_chars --joined-dictionary \ - --workers "$(nproc)" --trainpref data/train.moses.chars --validpref data/valid.moses.chars \ - --testpref data/test.moses.chars - -for VOCAB_SIZE in 2048 4096; do - for TYPE in bbpe bpe; do - ${PY_BIN_ROOT}/fairseq-preprocess --source-lang fr --target-lang en --destdir "data/bin_${TYPE}${VOCAB_SIZE}" \ - --joined-dictionary --workers "$(nproc)" --trainpref "data/train.moses.${TYPE}${VOCAB_SIZE}" \ - --validpref "data/valid.moses.${TYPE}${VOCAB_SIZE}" --testpref "data/test.moses.${TYPE}${VOCAB_SIZE}" - done -done diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/multilingual/data_scripts/dedup_all.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/multilingual/data_scripts/dedup_all.py deleted file mode 100644 index ef39c05ee606aaeda1d9e94970932d2241a8b281..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/multilingual/data_scripts/dedup_all.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - - - -import os -import glob -import argparse -from utils.dedup import deup - -import sys -WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) - -if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): - print('please specify your working directory root in OS environment variable WORKDIR_ROOT. 
Exitting..."') - sys.exit(-1) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("--from-folder", type=str, required=True, - help="the data folder to be dedup") - parser.add_argument("--to-folder", type=str, required=True, - help="the data folder to save deduped data") - parser.add_argument('--directions', type=str, default=None, required=False) - - args = parser.parse_args() - - if args.directions is None: - raw_files = glob.glob(f'{args.from_folder}/train*') - - directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] - else: - directions = args.directions.split(',') - directions = sorted(set(directions)) - - for direction in directions: - src, tgt = direction.split('-') - src_file = f'{args.from_folder}/train.{src}-{tgt}.{src}' - tgt_file = f'{args.from_folder}/train.{src}-{tgt}.{tgt}' - src_file_out = f'{args.to_folder}/train.{src}-{tgt}.{src}' - tgt_file_out = f'{args.to_folder}/train.{src}-{tgt}.{tgt}' - assert src_file != src_file_out - assert tgt_file != tgt_file_out - print(f'deduping {src_file}, {tgt_file}') - deup(src_file, tgt_file, src_file_out, tgt_file_out) - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/hubert_criterion.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/hubert_criterion.py deleted file mode 100644 index 68cb24e6f142c46e108c53479fd4027a741f5f92..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/criterions/hubert_criterion.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import math -import re -from dataclasses import dataclass, field -from typing import List, Optional - -import torch -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass - - -@dataclass -class HubertCriterionConfig(FairseqDataclass): - pred_masked_weight: float = field( - default=1.0, - metadata={"help": "weight for predictive loss for masked frames"}, - ) - pred_nomask_weight: float = field( - default=0.0, - metadata={"help": "weight for predictive loss for unmasked frames"}, - ) - loss_weights: Optional[List[float]] = field( - default=None, - metadata={"help": "weights for additional loss terms (not first one)"}, - ) - log_keys: List[str] = field( - default_factory=lambda: [], - metadata={"help": "output keys to log"}, - ) - - -@register_criterion("hubert", dataclass=HubertCriterionConfig) -class HubertCriterion(FairseqCriterion): - def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None): - super().__init__(task) - self.pred_masked_weight = pred_masked_weight - self.pred_nomask_weight = pred_nomask_weight - self.loss_weights = loss_weights - self.log_keys = [] if log_keys is None else log_keys - - def forward(self, model, sample, reduce=True, log_pred=False): - """Compute the loss for the given sample. - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - net_output = model(target_list=sample["target_list"], **sample["net_input"]) - loss = 0. 
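        # What follows assembles the total loss from two cross-entropy terms:
        # predictions on masked frames (scaled by pred_masked_weight) and on
        # unmasked frames (scaled by pred_nomask_weight), plus any extra
        # model-reported losses (scaled by loss_weights).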
- sample_size = 0 - logging_output = {} - reduction = "sum" if reduce else "none" - - loss_m_list = [] - logp_m_list = model.get_logits(net_output, True) - targ_m_list = model.get_targets(net_output, True) - assert self.pred_masked_weight == 0 or len(logp_m_list) > 0 - for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)): - loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction) - loss_m_list.append(loss_m) - logging_output[f"loss_m_{i}"] = loss_m.detach().item() - if self.pred_masked_weight > 0: - loss += self.pred_masked_weight * sum(loss_m_list) - sample_size += targ_m_list[0].numel() - - loss_u_list = [] - logp_u_list = model.get_logits(net_output, False) - targ_u_list = model.get_targets(net_output, False) - assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0 - for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)): - loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction) - loss_u_list.append(loss_u) - logging_output[f"loss_u_{i}"] = loss_u.detach().item() - if self.pred_nomask_weight > 0: - loss += self.pred_nomask_weight * sum(loss_u_list) - sample_size += targ_u_list[0].numel() - - if self.loss_weights is not None: - assert hasattr(model, "get_extra_losses") - extra_losses, names = model.get_extra_losses(net_output) - if torch.is_tensor(extra_losses): - extra_losses = [extra_losses] - names = [names] - if len(self.loss_weights) == 1 and len(extra_losses) != 1: - self.loss_weights = [self.loss_weights[0]] * len(extra_losses) - assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}" - for p, n, coef in zip(extra_losses, names, self.loss_weights): - if coef != 0 and p is not None: - p = coef * p.float() * sample_size - loss += p - logging_output[f"loss_{n}"] = p.item() - - logging_output = { - "loss": loss.item() if reduce else loss, - "ntokens": sample_size, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - **logging_output, - } - - for lk in self.log_keys: - if lk in net_output: - logging_output[lk] = float((net_output[lk])) - - def compute_correct(logits): - if logits.numel() == 0: - return 0, 0 - else: - assert logits.dim() > 1, logits.shape - max = logits.argmax(-1) == 0 - min = logits.argmin(-1) == 0 - both = max & min - corr = max.long().sum().item() - both.long().sum().item() - count = max.numel() - return corr, count - - with torch.no_grad(): - for i, logp_m in enumerate(logp_m_list): - corr_m, count_m = compute_correct(logp_m) - logging_output[f"correct_m_{i}"] = corr_m - logging_output[f"count_m_{i}"] = count_m - - for i, logp_u in enumerate(logp_u_list): - corr_u, count_u = compute_correct(logp_u) - logging_output[f"correct_u_{i}"] = corr_u - logging_output[f"count_u_{i}"] = count_u - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training (copied from normal cross entropy).""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - - metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3) - if sample_size != ntokens: - metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3) - metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)) - else: - metrics.log_derived("ppl", lambda meters: 
utils.get_perplexity(meters["loss"].avg)) - - counts = {} - for lk in logging_outputs[0].keys(): - if lk.startswith("count_"): - val = sum(log[lk] for log in logging_outputs) - metrics.log_scalar(lk, val) - counts[lk] = val - - for lk in logging_outputs[0].keys(): - if lk.startswith("loss_"): - val = sum(log[lk] for log in logging_outputs) - metrics.log_scalar(lk, val / sample_size / math.log(2), round=3) - elif lk.startswith("correct_"): - val = sum(log[lk] for log in logging_outputs) - metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)]) - - @staticmethod - def aggregate_logging_outputs(logging_outputs): - """Aggregate logging outputs from data parallel training.""" - raise NotImplementedError() - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return False diff --git a/spaces/OFA-Sys/OFA-vqa/utils/__init__.py b/spaces/OFA-Sys/OFA-vqa/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/prompts/jsonPromptStrategy.py b/spaces/Olivier-Truong/faster-whisper-webui-v2/src/prompts/jsonPromptStrategy.py deleted file mode 100644 index 25aa938adc3c0d5776cd11e0d123195bb6e69aeb..0000000000000000000000000000000000000000 --- a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/prompts/jsonPromptStrategy.py +++ /dev/null @@ -1,49 +0,0 @@ -import json -from typing import Dict -from src.prompts.abstractPromptStrategy import AbstractPromptStrategy - - -class JsonPromptSegment(): - def __init__(self, segment_index: int, prompt: str, format_prompt: bool = False): - self.prompt = prompt - self.segment_index = segment_index - self.format_prompt = format_prompt - -class JsonPromptStrategy(AbstractPromptStrategy): - def __init__(self, initial_json_prompt: str): - """ - Parameters - ---------- - initial_json_prompt: str - The initial prompts for each segment in JSON form. 
- - Format: - [ - {"segment_index": 0, "prompt": "Hello, how are you?"}, - {"segment_index": 1, "prompt": "I'm doing well, how are you?"}, - {"segment_index": 2, "prompt": "{0} Fine, thank you.", "format_prompt": true} - ] - - """ - parsed_json = json.loads(initial_json_prompt) - self.segment_lookup: Dict[str, JsonPromptSegment] = dict() - - for prompt_entry in parsed_json: - segment_index = prompt_entry["segment_index"] - prompt = prompt_entry["prompt"] - format_prompt = prompt_entry.get("format_prompt", False) - self.segment_lookup[str(segment_index)] = JsonPromptSegment(segment_index, prompt, format_prompt) - - def get_segment_prompt(self, segment_index: int, whisper_prompt: str, detected_language: str) -> str: - # Lookup prompt - prompt = self.segment_lookup.get(str(segment_index), None) - - if (prompt is None): - # No prompt found, return whisper prompt - print(f"Could not find prompt for segment {segment_index}, returning whisper prompt") - return whisper_prompt - - if (prompt.format_prompt): - return prompt.prompt.format(whisper_prompt) - else: - return self._concat_prompt(prompt.prompt, whisper_prompt) diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/llmriddles/llms/__init__.py b/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/llmriddles/llms/__init__.py deleted file mode 100644 index 2ca330d4a771993d41028490d20bba207867cbf8..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/llmriddles/llms/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base import register_llm, get_llm_fn -from .chatgpt import ask_chatgpt -from .chatglm import ask_chatglm -from .mistral import ask_mistral_7b_instruct diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/collect_env.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/collect_env.py deleted file mode 100644 index 807b6c7e6245d0a21221b1b8d29b841ec8251761..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/utils/collect_env.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import importlib -import numpy as np -import os -import re -import subprocess -import sys -from collections import defaultdict -import PIL -import torch -import torchvision -from tabulate import tabulate - -__all__ = ["collect_env_info"] - - -def collect_torch_env(): - try: - import torch.__config__ - - return torch.__config__.show() - except ImportError: - # compatible with older versions of pytorch - from torch.utils.collect_env import get_pretty_env_info - - return get_pretty_env_info() - - -def get_env_module(): - var_name = "DETECTRON2_ENV_MODULE" - return var_name, os.environ.get(var_name, "") - - -def detect_compute_compatibility(CUDA_HOME, so_file): - try: - cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump") - if os.path.isfile(cuobjdump): - output = subprocess.check_output( - "'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True - ) - output = output.decode("utf-8").strip().split("\n") - arch = [] - for line in output: - line = re.findall(r"\.sm_([0-9]*)\.", line)[0] - arch.append(".".join(line)) - arch = sorted(set(arch)) - return ", ".join(arch) - else: - return so_file + "; cannot find cuobjdump" - except Exception: - # unhandled failure - return so_file - - -def collect_env_info(): - has_gpu = torch.cuda.is_available() # true for both CUDA & ROCM - torch_version = torch.__version__ - - # NOTE that CUDA_HOME/ROCM_HOME could be None even when CUDA runtime libs are functional - from torch.utils.cpp_extension import CUDA_HOME, ROCM_HOME - - has_rocm = False - if (getattr(torch.version, "hip", None) is not None) and (ROCM_HOME is not None): - has_rocm = True - has_cuda = has_gpu and (not has_rocm) - - data = [] - data.append(("sys.platform", sys.platform)) # check-template.yml depends on it - data.append(("Python", sys.version.replace("\n", ""))) - data.append(("numpy", np.__version__)) - - try: - import detectron2 # noqa - - data.append( - ("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__)) - ) - except ImportError: - data.append(("detectron2", "failed to import")) - except AttributeError: - data.append(("detectron2", "imported a wrong installation")) - - try: - import detectron2._C as _C - except ImportError as e: - data.append(("detectron2._C", f"not built correctly: {e}")) - - # print system compilers when extension fails to build - if sys.platform != "win32": # don't know what to do for windows - try: - # this is how torch/utils/cpp_extensions.py choose compiler - cxx = os.environ.get("CXX", "c++") - cxx = subprocess.check_output("'{}' --version".format(cxx), shell=True) - cxx = cxx.decode("utf-8").strip().split("\n")[0] - except subprocess.SubprocessError: - cxx = "Not found" - data.append(("Compiler ($CXX)", cxx)) - - if has_cuda and CUDA_HOME is not None: - try: - nvcc = os.path.join(CUDA_HOME, "bin", "nvcc") - nvcc = subprocess.check_output("'{}' -V".format(nvcc), shell=True) - nvcc = nvcc.decode("utf-8").strip().split("\n")[-1] - except subprocess.SubprocessError: - nvcc = "Not found" - data.append(("CUDA compiler", nvcc)) - if has_cuda and sys.platform != "win32": - try: - so_file = importlib.util.find_spec("detectron2._C").origin - except (ImportError, AttributeError): - pass - else: - data.append( - ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, so_file)) - ) - else: - # print compilers that are used to build extension - data.append(("Compiler", _C.get_compiler_version())) - data.append(("CUDA compiler", _C.get_cuda_version())) # cuda or hip - if has_cuda and getattr(_C, "has_cuda", lambda: True)(): - 
data.append( - ("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__)) - ) - - data.append(get_env_module()) - data.append(("PyTorch", torch_version + " @" + os.path.dirname(torch.__file__))) - data.append(("PyTorch debug build", torch.version.debug)) - - if not has_gpu: - has_gpu_text = "No: torch.cuda.is_available() == False" - else: - has_gpu_text = "Yes" - data.append(("GPU available", has_gpu_text)) - if has_gpu: - devices = defaultdict(list) - for k in range(torch.cuda.device_count()): - cap = ".".join((str(x) for x in torch.cuda.get_device_capability(k))) - name = torch.cuda.get_device_name(k) + f" (arch={cap})" - devices[name].append(str(k)) - for name, devids in devices.items(): - data.append(("GPU " + ",".join(devids), name)) - - if has_rocm: - msg = " - invalid!" if not (ROCM_HOME and os.path.isdir(ROCM_HOME)) else "" - data.append(("ROCM_HOME", str(ROCM_HOME) + msg)) - else: - try: - from torch.utils.collect_env import get_nvidia_driver_version, run as _run - - data.append(("Driver version", get_nvidia_driver_version(_run))) - except Exception: - pass - msg = " - invalid!" if not (CUDA_HOME and os.path.isdir(CUDA_HOME)) else "" - data.append(("CUDA_HOME", str(CUDA_HOME) + msg)) - - cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None) - if cuda_arch_list: - data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list)) - data.append(("Pillow", PIL.__version__)) - - try: - data.append( - ( - "torchvision", - str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__), - ) - ) - if has_cuda: - try: - torchvision_C = importlib.util.find_spec("torchvision._C").origin - msg = detect_compute_compatibility(CUDA_HOME, torchvision_C) - data.append(("torchvision arch flags", msg)) - except (ImportError, AttributeError): - data.append(("torchvision._C", "Not found")) - except AttributeError: - data.append(("torchvision", "unknown")) - - try: - import fvcore - - data.append(("fvcore", fvcore.__version__)) - except (ImportError, AttributeError): - pass - - try: - import iopath - - data.append(("iopath", iopath.__version__)) - except (ImportError, AttributeError): - pass - - try: - import cv2 - - data.append(("cv2", cv2.__version__)) - except (ImportError, AttributeError): - data.append(("cv2", "Not found")) - env_str = tabulate(data) + "\n" - env_str += collect_torch_env() - return env_str - - -def test_nccl_ops(): - num_gpu = torch.cuda.device_count() - if os.access("/tmp", os.W_OK): - import torch.multiprocessing as mp - - dist_url = "file:///tmp/nccl_tmp_file" - print("Testing NCCL connectivity ... this should not hang.") - mp.spawn(_test_nccl_worker, nprocs=num_gpu, args=(num_gpu, dist_url), daemon=False) - print("NCCL succeeded.") - - -def _test_nccl_worker(rank, num_gpu, dist_url): - import torch.distributed as dist - - dist.init_process_group(backend="NCCL", init_method=dist_url, rank=rank, world_size=num_gpu) - dist.barrier(device_ids=[rank]) - - -if __name__ == "__main__": - try: - from detectron2.utils.collect_env import collect_env_info as f - - print(f()) - except ImportError: - print(collect_env_info()) - - if torch.cuda.is_available(): - num_gpu = torch.cuda.device_count() - for k in range(num_gpu): - device = f"cuda:{k}" - try: - x = torch.tensor([1, 2.0], dtype=torch.float32) - x = x.to(device) - except Exception as e: - print( - f"Unable to copy tensor to device={device}: {e}. " - "Your CUDA environment is broken." 
- ) - if num_gpu > 1: - test_nccl_ops() diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/video.py b/spaces/OpenGVLab/InternGPT/iGPT/models/video.py deleted file mode 100644 index 4aeb371dc545b3ec17f861d79b07de2f612262c8..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/video.py +++ /dev/null @@ -1,339 +0,0 @@ -import os -os.environ['CURL_CA_BUNDLE'] = '' - -import torch -# from simplet5 import SimpleT5 -import torchvision.transforms as transforms -import openai -import ffmpeg -from .tag2text import tag2text_caption -from .utils import * - -from .load_internvideo import * - -from .grit_model import DenseCaptioning -from .lang import SimpleLanguageModel -from scipy.io.wavfile import write as write_wav -from bark import SAMPLE_RATE, generate_audio - - -class VideoCaption: - def __init__(self, device): - self.device = device - self.image_size = 384 - # self.threshold = 0.68 - self.video_path = None - self.result = None - self.tags = None - self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225]) - self.transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize((self.image_size, self.image_size)), transforms.ToTensor(),self.normalize]) - self.model = tag2text_caption(pretrained="model_zoo/tag2text_swin_14m.pth", image_size=self.image_size, vit='swin_b').eval().to(device) - self.load_video = LoadVideo() - print("[INFO] initialize Caption model success!") - - def framewise_details(self, inputs): - video_path = inputs.strip() - caption = self.inference(video_path) - frame_caption = "" - prev_caption = "" - start_time = 0 - end_time = 0 - for i, j in enumerate(caption): - current_caption = f"{j}." - current_dcs = f"{i+1}" - if len(current_dcs) > 0: - last_valid_dcs = current_dcs - if current_caption == prev_caption: - end_time = i+1 - else: - if prev_caption: - frame_caption += f"Second {start_time} - {end_time}: {prev_caption}{last_valid_dcs}\n" - start_time = i+1 - end_time = i+1 - prev_caption = current_caption - if prev_caption: - frame_caption += f"Second {start_time} - {end_time}: {prev_caption}{current_dcs}\n" - total_dur = end_time - frame_caption += f"| Total Duration: {total_dur} seconds.\n" - - print(frame_caption) - # self.result = frame_caption - self.video_path = video_path - # video_prompt = f"""The tags for this vieo are: {prediction}, {','.join(tag_1)}; - # The temporal description of the video is: {frame_caption} - # The dense caption of the video is: {dense_caption} - # The general description of the video is: {synth_caption[0]}""" - return frame_caption - - @prompts(name="Video Caption", - description="useful when you want to generate a description for video. " - "like: generate a description or caption for this video. 
" - "The input to this tool should be a string, " - "representing the video_path") - def inference(self, inputs): - video_path = inputs.strip() - data = self.load_video(video_path) - # progress(0.2, desc="Loading Videos") - tmp = [] - for _, img in enumerate(data): - tmp.append(self.transform(img).to(self.device).unsqueeze(0)) - - # Video Caption - image = torch.cat(tmp).to(self.device) - # self.threshold = 0.68 - - input_tag_list = None - with torch.no_grad(): - caption, tags = self.model.generate(image,tag_input = input_tag_list, max_length = 50, return_tag_predict = True) - # print(frame_caption, dense_caption, synth_caption) - # print(caption) - del data, image, tmp - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - self.result = caption - self.tags = tags - # return '. '.join(caption) - return caption - - -class Summarization: - def __init__(self, device): - self.device = device - self.model = SimpleT5() - self.model.load_model( - "t5", "./model_zoo/flan-t5-large-finetuned-openai-summarize_from_feedback", use_gpu=False) - self.model.model = self.model.model.to(self.device) - self.model.device = device - - print("[INFO] initialize Summarize model success!") - - @prompts(name="Video Summarization", - description="useful when you want to Summarize video content for input video. " - "like: summarize this video. " - "The input to this tool should be a string, " - "representing the video_path") - def inference(self, inputs): - caption = inputs.strip() - sum_res = self.model.predict(caption) - return sum_res - - -class ActionRecognition: - def __init__(self, device): - self.device = device - self.video_path = None - # self.result = None - self.model = load_intern_action(device) - self.transform = transform_action() - self.toPIL = T.ToPILImage() - self.load_video = LoadVideo() - print("[INFO] initialize InternVideo model success!") - - @prompts(name="Action Recognition", - description="useful when you want to recognize the action category in this video. " - "like: recognize the action or classify this video" - "The input to this tool should be a string, " - "representing the video_path") - def inference(self, inputs): - video_path = inputs.strip() - # if self.video_path == video_path: - # return self.result - # self.video_path = video_path - # data = loadvideo_decord_origin(video_path) - data = self.load_video(video_path) - - # InternVideo - action_index = np.linspace(0, len(data)-1, 8).astype(int) - tmp_pred = [] - for i,img in enumerate(data): - if i in action_index: - tmp_pred.append(self.toPIL(img)) - action_tensor = self.transform(tmp_pred) - TC, H, W = action_tensor.shape - action_tensor = action_tensor.reshape(1, TC//3, 3, H, W).permute(0, 2, 1, 3, 4).to(self.device) - with torch.no_grad(): - prediction = self.model(action_tensor) - prediction = F.softmax(prediction, dim=1).flatten() - prediction = kinetics_classnames[str(int(prediction.argmax()))] - # self.result = prediction - return prediction - - -class DenseCaption: - def __init__(self, device): - self.device = device - self.model = DenseCaptioning(device) - self.model.initialize_model() - # self.model = self.model.to(device) - self.load_video = LoadVideo() - print("[INFO] initialize DenseCaptioe model success!") - - @prompts(name="Video Dense Caption", - description="useful when you want to generate a dense caption for video. " - "like: generate a dense caption or description for this video. 
" - "The input to this tool should be a string, " - "representing the video_path") - def inference(self, inputs): - video_path = inputs.strip() - # data = loadvideo_decord_origin(video_path) - data = self.load_video(video_path) - dense_caption = [] - dense_index = np.arange(0, len(data)-1, 5) - original_images = data[dense_index,:,:,::-1] - with torch.no_grad(): - for original_image in original_images: - dense_caption.append(self.model.run_caption_tensor(original_image)) - dense_caption = ' '.join([f"Second {i+1} : {j}.\n" for i,j in zip(dense_index,dense_caption)]) - - return dense_caption - - -class GenerateTikTokVideo: - template_model = True - def __init__(self, ActionRecognition, VideoCaption, DenseCaption): - self.ActionRecognition = ActionRecognition - self.VideoCaption = VideoCaption - # self.Summarization = Summarization - self.DenseCaption = DenseCaption - self.SimpleLanguageModel = None - - @prompts(name="Generate TikTok Video", - description="useful when you want to generate a video with TikTok style based on prompt." - "like: cut this video to a TikTok video based on prompt." - "The input to this tool should be a comma separated string of two, " - "representing the video_path and prompt") - def inference(self, inputs): - video_path = inputs.split(',')[0].strip() - text = ', '.join(inputs.split(',')[1: ]) - if self.SimpleLanguageModel == None: - self.SimpleLanguageModel = SimpleLanguageModel() - action_classes = self.ActionRecognition.inference(video_path) - print(f'action_classes = {action_classes}') - dense_caption = self.DenseCaption.inference(video_path) - print(f'dense_caption = {dense_caption}') - caption = self.VideoCaption.inference(video_path) - caption = '. '.join(caption) - print(f'caption = {caption}') - tags = self.VideoCaption.tags - print(f'tags = {tags}') - framewise_caption = self.VideoCaption.framewise_details(video_path) - print(f'framewise_caption = {framewise_caption}') - video_prompt = f"""The tags for this video are: {action_classes}, {','.join(tags)}; - The temporal description of the video is: {framewise_caption} - The dense caption of the video is: {dense_caption}""" - timestamp = self.run_text_with_time(video_prompt, text) - print(f'timestamp = {timestamp}') - if not timestamp: - return 'Error! Please try it again.' 
- start_time, end_time = min(timestamp), max(timestamp) - print(f'start_time, end_time = = {start_time}, {end_time}') - video_during = end_time - start_time + 1 - - - # prompt=f"ๅฟ˜่ฎฐไน‹ๅ‰็š„ๅ›ž็ญ”ๆจกๆฟ๏ผŒ่ฏทไฝฟ็”จไธญๆ–‡ๅ›ž็ญ”่ฟ™ไธช้—ฎ้ข˜ใ€‚ๅฆ‚ๆžœๆƒ…่Š‚้‡Œ้‡ๅˆฐ็”ท็”Ÿๅฐฑๅซๅฐๅธ…๏ผŒๅฅณ็”Ÿๅฐฑๅซๅฐ็พŽ๏ผŒ่ฏทไปฅโ€™ๆณจๆ„็œ‹๏ผŒ่ฟ™ไธชไบบๅซโ€™ๅผ€ๅง‹ๅ†™ไธ€ๆฎต็š„่ง†้ข‘่ฅ้”€ๆ–‡ๆกˆใ€‚ๅฐฝ้‡ๆ นๆฎ็ฌฌ{start_time}็ง’ๅˆฐ็ฌฌ{end_time}็ง’ๅทฆๅณ็š„่ง†้ข‘ๅ†…ๅฎน็”Ÿๆˆๆ–‡ๆกˆ๏ผŒไธ่ฆ็”Ÿๆˆ้‡ๅคๅฅๅญใ€‚" - # prompt=f"ๅฟ˜่ฎฐไน‹ๅ‰็š„ๅ›ž็ญ”ๆจกๆฟ๏ผŒ่ฏทไฝฟ็”จไธญๆ–‡ๅ›ž็ญ”่ฟ™ไธช้—ฎ้ข˜ใ€‚ๅฆ‚ๆžœๆƒ…่Š‚้‡Œ้‡ๅˆฐ็”ท็”Ÿๅฐฑๅซๅฐๅธ…๏ผŒๅฅณ็”Ÿๅฐฑๅซๅฐ็พŽ๏ผŒ่ฏทไปฅโ€™ๆณจๆ„็œ‹๏ผŒ่ฟ™ไธชไบบๅซโ€™ไธบๅผ€ๅคด๏ผŒๆ นๆฎ็ฌฌ{start_time}็ง’ๅˆฐ็ฌฌ{end_time}็ง’ๅทฆๅณ็š„่ง†้ข‘ๅ†…ๅฎน็”Ÿๆˆไธ€ๆฎต่ง†้ข‘่ฅ้”€ๆ–‡ๆกˆใ€‚" - prompt=f"ๅฟ˜่ฎฐไน‹ๅ‰็š„ๅ›ž็ญ”ๆจกๆฟ๏ผŒ่ฏทไฝฟ็”จไธญๆ–‡ๅ›ž็ญ”่ฟ™ไธช้—ฎ้ข˜ใ€‚่ง†้ข‘้‡Œๅฆ‚ๆžœๅ‡บ็Žฐ็”ท็”Ÿๅฐฑๅซๅฐๅธ…๏ผŒๅ‡บ็Žฐๅฅณ็”Ÿๅฐฑๅซๅฐ็พŽ๏ผŒๅฆ‚ๆžœไธ็กฎๅฎšๆ€งๅˆซ๏ผŒๅฐฑๅซๅคง่ชๆ˜Žใ€‚่ฏทไปฅโ€™ๆณจๆ„็œ‹๏ผŒ่ฟ™ไธชไบบๅซโ€™ไธบๅผ€ๅคด็”Ÿๆˆไธ€ๆฎต่ง†้ข‘่ฅ้”€ๆ–‡ๆกˆใ€‚" - texts = self.run_text_with_tiktok(video_prompt, prompt).strip() - # if texts.endswith('') - texts += 'ใ€‚' - print(f"before polishing: {texts}") - print('*' * 40) - # texts = openai.ChatCompletion.create(model="gpt-3.5-turbo",messages=[{"role":"user","content":f"่ฏท็”จๆถฆ่‰ฒไธ‹้ข็š„ๅฅๅญ๏ผŒๅŽป้™ค้‡ๅค็š„็‰‡ๆฎต๏ผŒไฝ†ๅฐฝ้‡ไฟๆŒๅŽŸๆ–‡ๅ†…ๅฎนไธ”ไธ่ฎธๆ›ดๆ”นไบบ็‰ฉๅๅญ—๏ผŒๅนถไธ”ไปฅโ€œๆณจๆ„็œ‹๏ผŒ่ฟ™ไธชไบบๅซโ€ไฝœไธบๅผ€ๅคด๏ผš{texts}"}]).choices[0].message['content'] - texts = openai.ChatCompletion.create(model="gpt-3.5-turbo",messages=[{"role":"user","content":f"ไฝฟ็”จไธญๆ–‡ๅ›ž็ญ”่ฟ™ไธช้—ฎ้ข˜๏ผŒ่ฏท็”จๆถฆ่‰ฒไธ‹้ข็š„ๅฅๅญ๏ผŒๅŽป้™ค้‡ๅค็š„็‰‡ๆฎต๏ผŒๅนถไธ”ไปไปฅโ€™ๆณจๆ„็œ‹๏ผŒ่ฟ™ไธชไบบๅซโ€™ไธบๅผ€ๅคด๏ผš{texts}"}]).choices[0].message['content'] - print(f"after polishing: {texts}") - clipped_video_path = gen_new_name(video_path, 'tmp', 'mp4') - wav_file = clipped_video_path.replace('.mp4', '.wav') - audio_path = self.gen_audio(texts, wav_file) - audio_duration = int(float(ffmpeg.probe(audio_path)['streams'][0]['duration']))+1 - os.system(f"ffmpeg -y -v quiet -ss {start_time} -t {video_during} -i {video_path} -c:v libx264 -c:a copy -movflags +faststart {clipped_video_path}") - # output_path = self.image_filename.replace('.mp4','_tiktok.mp4') - new_video_path = gen_new_name(video_path, 'GenerateTickTokVideo', 'mp4') - if video_during < audio_duration: - # ้ฌผ็•œhou - # video_concat = os.path.join(os.path.dirname(clipped_video_path), 'concat.info') - # video_concat = gen_new_name(clipped_video_path, '', 'info') - video_concat = os.path.join(os.path.dirname(clipped_video_path), 'concat.info') - video_concat = gen_new_name(video_concat, '', 'info') - with open(video_concat,'w') as f: - for _ in range(audio_duration//video_during+1): - f.write(f"file \'{os.path.basename(clipped_video_path)}\'\n") - tmp_path = gen_new_name(video_path, 'tmp', 'mp4') - os.system(f"ffmpeg -y -f concat -i {video_concat} {tmp_path}") - print(f"ffmpeg -y -i {tmp_path} -i {wav_file} {new_video_path}") - os.system(f"ffmpeg -y -i {tmp_path} -i {wav_file} {new_video_path}") - else: - print(f"ffmpeg -y -i {clipped_video_path} -i {wav_file} {new_video_path}") - os.system(f"ffmpeg -y -i {clipped_video_path} -i {wav_file} {new_video_path}") - if not os.path.exists(new_video_path): - import pdb - pdb.set_trace() - # state = state + [(text, f"Here is the video in *{new_file_path}*")] +[("show me the video.", (new_file_path,))] - # print(f"\nProcessed run_video, Input video: {new_file_path}\nCurrent 
state: {state}\n" - # f"Current Memory: {self.agent.memory.buffer}") - return (new_video_path, ) - - def run_text_with_time(self, video_caption, text): - # self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500) - prompt = "Only in this conversation, \ - You must find the text-related start time \ - and end time based on video caption. Your answer \ - must end with the format {answer} [start time: end time]." - response = self.SimpleLanguageModel(f"Video content: {video_caption}. Text: {text.strip()}." + prompt) - # res['output'] = res['output'].replace("\\", "/") - # print(response) - import re - pattern = r"\d+" - # response = res['output']#rsplit(']')[-1] - try: - # matches = re.findall(pattern, res['output']) - matches = re.findall(pattern, response) - start_idx , end_idx = matches[-2:] - start_idx , end_idx = int(start_idx), int(end_idx) - except: - return None - import pdb - pdb.set_trace() - # state = state + [(text, response)] - print(f"\nProcessed run_text_with_time, Input text: {text}\n") - return (start_idx, end_idx) - - def run_text_with_tiktok(self, video_content, prompt): - # self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500) - inputs = f"Video description: {video_content}. {prompt}" - - response = self.SimpleLanguageModel(inputs) - response = response.replace("\\", "/") - # res = self.agent({"input":text}) - # res['output'] = res['output'].replace("\\", "/") - # response = res['output'] - # state = state + [(prompt, response)] - print(f"\nProcessed run_text_with_tiktok, Input text: {prompt}\n, Response: {response}") - return response - - def gen_audio(self, text, save_path): - audio_array = generate_audio(text) - write_wav(save_path, SAMPLE_RATE, audio_array) - return save_path - - -if __name__ == '__main__': - # model = VideoCaption('cuda:0') - # print(model.inference('./assets/f4236666.mp4')) - # model = ActionRecognition('cuda:0') - # print(model.inference('./assets/f4236666.mp4')) - video_path = './tmp_files/f4236666.mp4' - device = 'cuda:0' - # caption_model = VideoCaption('cuda:0') - # caption = caption_model.inference('./assets/f4236666.mp4') - # sum_model = Summarize('cuda:0') - # res = sum_model.inference(caption) - # ds = DenseCaption(device) - # res = ds.inference(video_path) - from lang import SimpleLanguageModel - model = GenerateTikTokVideo(ActionRecognition(device), - VideoCaption(device), - DenseCaption(device) - ) - out = model.inference(video_path+",ๅธฎๆˆ‘ๅ‰ช่พ‘ๅ‡บๆœ€็ฒพๅฝฉ็š„็‰‡ๆฎต") - print(out) \ No newline at end of file diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/trainers/base.py b/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/trainers/base.py deleted file mode 100644 index f1b1c66fc96e7edfba7b1ee193272f92b5db7438..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/saicinpainting/training/trainers/base.py +++ /dev/null @@ -1,291 +0,0 @@ -import copy -import logging -from typing import Dict, Tuple - -import pandas as pd -import pytorch_lightning as ptl -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.utils.data import DistributedSampler - -from saicinpainting.evaluation import make_evaluator -from saicinpainting.training.data.datasets import make_default_train_dataloader, make_default_val_dataloader -from saicinpainting.training.losses.adversarial import make_discrim_loss -from saicinpainting.training.losses.perceptual import 
PerceptualLoss, ResNetPL -from saicinpainting.training.modules import make_generator, make_discriminator -from saicinpainting.training.visualizers import make_visualizer -from saicinpainting.utils import add_prefix_to_keys, average_dicts, set_requires_grad, flatten_dict, \ - get_has_ddp_rank - -LOGGER = logging.getLogger(__name__) - - -def make_optimizer(parameters, kind='adamw', **kwargs): - if kind == 'adam': - optimizer_class = torch.optim.Adam - elif kind == 'adamw': - optimizer_class = torch.optim.AdamW - else: - raise ValueError(f'Unknown optimizer kind {kind}') - return optimizer_class(parameters, **kwargs) - - -def update_running_average(result: nn.Module, new_iterate_model: nn.Module, decay=0.999): - with torch.no_grad(): - res_params = dict(result.named_parameters()) - new_params = dict(new_iterate_model.named_parameters()) - - for k in res_params.keys(): - res_params[k].data.mul_(decay).add_(new_params[k].data, alpha=1 - decay) - - -def make_multiscale_noise(base_tensor, scales=6, scale_mode='bilinear'): - batch_size, _, height, width = base_tensor.shape - cur_height, cur_width = height, width - result = [] - align_corners = False if scale_mode in ('bilinear', 'bicubic') else None - for _ in range(scales): - cur_sample = torch.randn(batch_size, 1, cur_height, cur_width, device=base_tensor.device) - cur_sample_scaled = F.interpolate(cur_sample, size=(height, width), mode=scale_mode, align_corners=align_corners) - result.append(cur_sample_scaled) - cur_height //= 2 - cur_width //= 2 - return torch.cat(result, dim=1) - - -class BaseInpaintingTrainingModule(ptl.LightningModule): - def __init__(self, config, use_ddp, *args, predict_only=False, visualize_each_iters=100, - average_generator=False, generator_avg_beta=0.999, average_generator_start_step=30000, - average_generator_period=10, store_discr_outputs_for_vis=False, - **kwargs): - super().__init__(*args, **kwargs) - LOGGER.info('BaseInpaintingTrainingModule init called') - - self.config = config - - self.generator = make_generator(config, **self.config.generator) - self.use_ddp = use_ddp - - if not get_has_ddp_rank(): - LOGGER.info(f'Generator\n{self.generator}') - - if not predict_only: - self.save_hyperparameters(self.config) - self.discriminator = make_discriminator(**self.config.discriminator) - self.adversarial_loss = make_discrim_loss(**self.config.losses.adversarial) - self.visualizer = make_visualizer(**self.config.visualizer) - self.val_evaluator = make_evaluator(**self.config.evaluator) - self.test_evaluator = make_evaluator(**self.config.evaluator) - - if not get_has_ddp_rank(): - LOGGER.info(f'Discriminator\n{self.discriminator}') - - extra_val = self.config.data.get('extra_val', ()) - if extra_val: - self.extra_val_titles = list(extra_val) - self.extra_evaluators = nn.ModuleDict({k: make_evaluator(**self.config.evaluator) - for k in extra_val}) - else: - self.extra_evaluators = {} - - self.average_generator = average_generator - self.generator_avg_beta = generator_avg_beta - self.average_generator_start_step = average_generator_start_step - self.average_generator_period = average_generator_period - self.generator_average = None - self.last_generator_averaging_step = -1 - self.store_discr_outputs_for_vis = store_discr_outputs_for_vis - - if self.config.losses.get("l1", {"weight_known": 0})['weight_known'] > 0: - self.loss_l1 = nn.L1Loss(reduction='none') - - if self.config.losses.get("mse", {"weight": 0})['weight'] > 0: - self.loss_mse = nn.MSELoss(reduction='none') - - if self.config.losses.perceptual.weight > 0: - 
self.loss_pl = PerceptualLoss() - - if self.config.losses.get("resnet_pl", {"weight": 0})['weight'] > 0: - self.loss_resnet_pl = ResNetPL(**self.config.losses.resnet_pl) - else: - self.loss_resnet_pl = None - - self.visualize_each_iters = visualize_each_iters - LOGGER.info('BaseInpaintingTrainingModule init done') - - def configure_optimizers(self): - discriminator_params = list(self.discriminator.parameters()) - return [ - dict(optimizer=make_optimizer(self.generator.parameters(), **self.config.optimizers.generator)), - dict(optimizer=make_optimizer(discriminator_params, **self.config.optimizers.discriminator)), - ] - - def train_dataloader(self): - kwargs = dict(self.config.data.train) - if self.use_ddp: - kwargs['ddp_kwargs'] = dict(num_replicas=self.trainer.num_nodes * self.trainer.num_processes, - rank=self.trainer.global_rank, - shuffle=True) - dataloader = make_default_train_dataloader(**self.config.data.train) - return dataloader - - def val_dataloader(self): - res = [make_default_val_dataloader(**self.config.data.val)] - - if self.config.data.visual_test is not None: - res = res + [make_default_val_dataloader(**self.config.data.visual_test)] - else: - res = res + res - - extra_val = self.config.data.get('extra_val', ()) - if extra_val: - res += [make_default_val_dataloader(**extra_val[k]) for k in self.extra_val_titles] - - return res - - def training_step(self, batch, batch_idx, optimizer_idx=None): - self._is_training_step = True - return self._do_step(batch, batch_idx, mode='train', optimizer_idx=optimizer_idx) - - def validation_step(self, batch, batch_idx, dataloader_idx): - extra_val_key = None - if dataloader_idx == 0: - mode = 'val' - elif dataloader_idx == 1: - mode = 'test' - else: - mode = 'extra_val' - extra_val_key = self.extra_val_titles[dataloader_idx - 2] - self._is_training_step = False - return self._do_step(batch, batch_idx, mode=mode, extra_val_key=extra_val_key) - - def training_step_end(self, batch_parts_outputs): - if self.training and self.average_generator \ - and self.global_step >= self.average_generator_start_step \ - and self.global_step >= self.last_generator_averaging_step + self.average_generator_period: - if self.generator_average is None: - self.generator_average = copy.deepcopy(self.generator) - else: - update_running_average(self.generator_average, self.generator, decay=self.generator_avg_beta) - self.last_generator_averaging_step = self.global_step - - full_loss = (batch_parts_outputs['loss'].mean() - if torch.is_tensor(batch_parts_outputs['loss']) # loss is not tensor when no discriminator used - else torch.tensor(batch_parts_outputs['loss']).float().requires_grad_(True)) - log_info = {k: v.mean() for k, v in batch_parts_outputs['log_info'].items()} - self.log_dict(log_info, on_step=True, on_epoch=False) - return full_loss - - def validation_epoch_end(self, outputs): - outputs = [step_out for out_group in outputs for step_out in out_group] - averaged_logs = average_dicts(step_out['log_info'] for step_out in outputs) - self.log_dict({k: v.mean() for k, v in averaged_logs.items()}) - - pd.set_option('display.max_columns', 500) - pd.set_option('display.width', 1000) - - # standard validation - val_evaluator_states = [s['val_evaluator_state'] for s in outputs if 'val_evaluator_state' in s] - val_evaluator_res = self.val_evaluator.evaluation_end(states=val_evaluator_states) - val_evaluator_res_df = pd.DataFrame(val_evaluator_res).stack(1).unstack(0) - val_evaluator_res_df.dropna(axis=1, how='all', inplace=True) - LOGGER.info(f'Validation metrics 
after epoch #{self.current_epoch}, ' - f'total {self.global_step} iterations:\n{val_evaluator_res_df}') - - for k, v in flatten_dict(val_evaluator_res).items(): - self.log(f'val_{k}', v) - - # standard visual test - test_evaluator_states = [s['test_evaluator_state'] for s in outputs - if 'test_evaluator_state' in s] - test_evaluator_res = self.test_evaluator.evaluation_end(states=test_evaluator_states) - test_evaluator_res_df = pd.DataFrame(test_evaluator_res).stack(1).unstack(0) - test_evaluator_res_df.dropna(axis=1, how='all', inplace=True) - LOGGER.info(f'Test metrics after epoch #{self.current_epoch}, ' - f'total {self.global_step} iterations:\n{test_evaluator_res_df}') - - for k, v in flatten_dict(test_evaluator_res).items(): - self.log(f'test_{k}', v) - - # extra validations - if self.extra_evaluators: - for cur_eval_title, cur_evaluator in self.extra_evaluators.items(): - cur_state_key = f'extra_val_{cur_eval_title}_evaluator_state' - cur_states = [s[cur_state_key] for s in outputs if cur_state_key in s] - cur_evaluator_res = cur_evaluator.evaluation_end(states=cur_states) - cur_evaluator_res_df = pd.DataFrame(cur_evaluator_res).stack(1).unstack(0) - cur_evaluator_res_df.dropna(axis=1, how='all', inplace=True) - LOGGER.info(f'Extra val {cur_eval_title} metrics after epoch #{self.current_epoch}, ' - f'total {self.global_step} iterations:\n{cur_evaluator_res_df}') - for k, v in flatten_dict(cur_evaluator_res).items(): - self.log(f'extra_val_{cur_eval_title}_{k}', v) - - def _do_step(self, batch, batch_idx, mode='train', optimizer_idx=None, extra_val_key=None): - if optimizer_idx == 0: # step for generator - set_requires_grad(self.generator, True) - set_requires_grad(self.discriminator, False) - elif optimizer_idx == 1: # step for discriminator - set_requires_grad(self.generator, False) - set_requires_grad(self.discriminator, True) - - batch = self(batch) - - total_loss = 0 - metrics = {} - - if optimizer_idx is None or optimizer_idx == 0: # step for generator - total_loss, metrics = self.generator_loss(batch) - - elif optimizer_idx is None or optimizer_idx == 1: # step for discriminator - if self.config.losses.adversarial.weight > 0: - total_loss, metrics = self.discriminator_loss(batch) - - if self.get_ddp_rank() in (None, 0) and (batch_idx % self.visualize_each_iters == 0 or mode == 'test'): - if self.config.losses.adversarial.weight > 0: - if self.store_discr_outputs_for_vis: - with torch.no_grad(): - self.store_discr_outputs(batch) - vis_suffix = f'_{mode}' - if mode == 'extra_val': - vis_suffix += f'_{extra_val_key}' - self.visualizer(self.current_epoch, batch_idx, batch, suffix=vis_suffix) - - metrics_prefix = f'{mode}_' - if mode == 'extra_val': - metrics_prefix += f'{extra_val_key}_' - result = dict(loss=total_loss, log_info=add_prefix_to_keys(metrics, metrics_prefix)) - if mode == 'val': - result['val_evaluator_state'] = self.val_evaluator.process_batch(batch) - elif mode == 'test': - result['test_evaluator_state'] = self.test_evaluator.process_batch(batch) - elif mode == 'extra_val': - result[f'extra_val_{extra_val_key}_evaluator_state'] = self.extra_evaluators[extra_val_key].process_batch(batch) - - return result - - def get_current_generator(self, no_average=False): - if not no_average and not self.training and self.average_generator and self.generator_average is not None: - return self.generator_average - return self.generator - - def forward(self, batch: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: - """Pass data through generator and obtain at leas 
'predicted_image' and 'inpainted' keys""" - raise NotImplementedError() - - def generator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: - raise NotImplementedError() - - def discriminator_loss(self, batch) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: - raise NotImplementedError() - - def store_discr_outputs(self, batch): - out_size = batch['image'].shape[2:] - discr_real_out, _ = self.discriminator(batch['image']) - discr_fake_out, _ = self.discriminator(batch['predicted_image']) - batch['discr_output_real'] = F.interpolate(discr_real_out, size=out_size, mode='nearest') - batch['discr_output_fake'] = F.interpolate(discr_fake_out, size=out_size, mode='nearest') - batch['discr_output_diff'] = batch['discr_output_real'] - batch['discr_output_fake'] - - def get_ddp_rank(self): - return self.trainer.global_rank if (self.trainer.num_nodes * self.trainer.num_processes) > 1 else None diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py deleted file mode 100644 index d7a43bee01422ad4795dd27874e0cd4bb6cbfecf..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py +++ /dev/null @@ -1,44 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='ASPPHead', - in_channels=2048, - in_index=3, - channels=512, - dilations=(1, 12, 24, 36), - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/PKUWilliamYang/StyleGANEX/scripts/inference.py b/spaces/PKUWilliamYang/StyleGANEX/scripts/inference.py deleted file mode 100644 index 9250d4b5b05d8a31527603d42823fd8b10234ce9..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/scripts/inference.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -from argparse import Namespace - -from tqdm import tqdm -import time -import numpy as np -import torch -from PIL import Image -from torch.utils.data import DataLoader -import sys - -sys.path.append(".") -sys.path.append("..") - -from configs import data_configs -from datasets.inference_dataset import InferenceDataset -from utils.common import tensor2im, log_input_image -from options.test_options import TestOptions -from models.psp import pSp - - -def run(): - test_opts = TestOptions().parse() - - if test_opts.resize_factors is not None: - assert len( - test_opts.resize_factors.split(',')) == 1, "When running inference, provide a single downsampling factor!" 
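        # Super-resolution runs keep their outputs in a per-factor
        # subdirectory; all other runs share a single results folder.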
- out_path_results = os.path.join(test_opts.exp_dir, 'inference_results', - 'downsampling_{}'.format(test_opts.resize_factors)) - out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled', - 'downsampling_{}'.format(test_opts.resize_factors)) - else: - out_path_results = os.path.join(test_opts.exp_dir, 'inference_results') - out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled') - - os.makedirs(out_path_results, exist_ok=True) - os.makedirs(out_path_coupled, exist_ok=True) - - # update test options with options used during training - ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu') - opts = ckpt['opts'] - opts.update(vars(test_opts)) - if 'learn_in_w' not in opts: - opts['learn_in_w'] = False - if 'output_size' not in opts: - opts['output_size'] = 1024 - opts = Namespace(**opts) - - net = pSp(opts) - net.eval() - net.cuda() - - print('Loading dataset for {}'.format(opts.dataset_type)) - dataset_args = data_configs.DATASETS[opts.dataset_type] - transforms_dict = dataset_args['transforms'](opts).get_transforms() - dataset = InferenceDataset(root=opts.data_path, - transform=transforms_dict['transform_inference'], - opts=opts) - dataloader = DataLoader(dataset, - batch_size=opts.test_batch_size, - shuffle=False, - num_workers=int(opts.test_workers), - drop_last=True) - - if opts.n_images is None: - opts.n_images = len(dataset) - - global_i = 0 - global_time = [] - for input_batch in tqdm(dataloader): - if global_i >= opts.n_images: - break - with torch.no_grad(): - input_cuda = input_batch.cuda().float() - tic = time.time() - result_batch = run_on_batch(input_cuda, net, opts) - toc = time.time() - global_time.append(toc - tic) - - for i in range(opts.test_batch_size): - result = tensor2im(result_batch[i]) - im_path = dataset.paths[global_i] - - if opts.couple_outputs or global_i % 100 == 0: - input_im = log_input_image(input_batch[i], opts) - resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size) - if opts.resize_factors is not None: - # for super resolution, save the original, down-sampled, and output - source = Image.open(im_path) - res = np.concatenate([np.array(source.resize(resize_amount)), - np.array(input_im.resize(resize_amount, resample=Image.NEAREST)), - np.array(result.resize(resize_amount))], axis=1) - else: - # otherwise, save the original and output - res = np.concatenate([np.array(input_im.resize(resize_amount)), - np.array(result.resize(resize_amount))], axis=1) - Image.fromarray(res).save(os.path.join(out_path_coupled, os.path.basename(im_path))) - - im_save_path = os.path.join(out_path_results, os.path.basename(im_path)) - Image.fromarray(np.array(result)).save(im_save_path) - - global_i += 1 - - stats_path = os.path.join(opts.exp_dir, 'stats.txt') - result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time)) - print(result_str) - - with open(stats_path, 'w') as f: - f.write(result_str) - - -def run_on_batch(inputs, net, opts): - if opts.latent_mask is None: - result_batch = net(inputs, randomize_noise=False, resize=opts.resize_outputs) - else: - latent_mask = [int(l) for l in opts.latent_mask.split(",")] - result_batch = [] - for image_idx, input_image in enumerate(inputs): - # get latent vector to inject into our input image - vec_to_inject = np.random.randn(1, 512).astype('float32') - _, latent_to_inject = net(torch.from_numpy(vec_to_inject).to("cuda"), - input_code=True, - return_latents=True) - # get output image with injected style vector - res = 
net(input_image.unsqueeze(0).to("cuda").float(),
-                      latent_mask=latent_mask,
-                      inject_latent=latent_to_inject,
-                      alpha=opts.mix_alpha,
-                      resize=opts.resize_outputs)
-            result_batch.append(res)
-        result_batch = torch.cat(result_batch, dim=0)
-    return result_batch
-
-
-if __name__ == '__main__':
-    run()
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/norm.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/norm.py
deleted file mode 100644
index 408f4b42731b19a3beeef68b6a5e610d0bbc18b3..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/norm.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import inspect
-
-import torch.nn as nn
-
-from annotator.uniformer.mmcv.utils import is_tuple_of
-from annotator.uniformer.mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
-from .registry import NORM_LAYERS
-
-NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
-NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
-NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
-NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
-NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
-NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
-NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
-NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
-NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
-NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
-NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
-
-
-def infer_abbr(class_type):
-    """Infer abbreviation from the class name.
-
-    When we build a norm layer with `build_norm_layer()`, we want to preserve
-    the norm type in variable names, e.g., self.bn1, self.gn. This method will
-    infer the abbreviation to map class types to abbreviations.
-
-    Rule 1: If the class has the property "_abbr_", return the property.
-    Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
-    InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
-    "in" respectively.
-    Rule 3: If the class name contains "batch", "group", "layer" or "instance",
-    the abbreviation of this layer will be "bn", "gn", "ln" and "in"
-    respectively.
-    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".
-
-    Args:
-        class_type (type): The norm layer type.
-
-    Returns:
-        str: The inferred abbreviation.
-    """
-    if not inspect.isclass(class_type):
-        raise TypeError(
-            f'class_type must be a type, but got {type(class_type)}')
-    if hasattr(class_type, '_abbr_'):
-        return class_type._abbr_
-    if issubclass(class_type, _InstanceNorm):  # IN is a subclass of BN
-        return 'in'
-    elif issubclass(class_type, _BatchNorm):
-        return 'bn'
-    elif issubclass(class_type, nn.GroupNorm):
-        return 'gn'
-    elif issubclass(class_type, nn.LayerNorm):
-        return 'ln'
-    else:
-        class_name = class_type.__name__.lower()
-        if 'batch' in class_name:
-            return 'bn'
-        elif 'group' in class_name:
-            return 'gn'
-        elif 'layer' in class_name:
-            return 'ln'
-        elif 'instance' in class_name:
-            return 'in'
-        else:
-            return 'norm_layer'
-
-
-def build_norm_layer(cfg, num_features, postfix=''):
-    """Build normalization layer.
-
-    Args:
-        cfg (dict): The norm layer config, which should contain:
-
-            - type (str): Layer type.
-            - layer args: Args needed to instantiate a norm layer.
-            - requires_grad (bool, optional): Whether the layer's parameters require gradient updates; set ``False`` to freeze them.
- num_features (int): Number of input channels. - postfix (int | str): The postfix to be appended into norm abbreviation - to create named layer. - - Returns: - (str, nn.Module): The first element is the layer name consisting of - abbreviation and postfix, e.g., bn1, gn. The second element is the - created norm layer. - """ - if not isinstance(cfg, dict): - raise TypeError('cfg must be a dict') - if 'type' not in cfg: - raise KeyError('the cfg dict must contain the key "type"') - cfg_ = cfg.copy() - - layer_type = cfg_.pop('type') - if layer_type not in NORM_LAYERS: - raise KeyError(f'Unrecognized norm type {layer_type}') - - norm_layer = NORM_LAYERS.get(layer_type) - abbr = infer_abbr(norm_layer) - - assert isinstance(postfix, (int, str)) - name = abbr + str(postfix) - - requires_grad = cfg_.pop('requires_grad', True) - cfg_.setdefault('eps', 1e-5) - if layer_type != 'GN': - layer = norm_layer(num_features, **cfg_) - if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): - layer._specify_ddp_gpu_num(1) - else: - assert 'num_groups' in cfg_ - layer = norm_layer(num_channels=num_features, **cfg_) - - for param in layer.parameters(): - param.requires_grad = requires_grad - - return name, layer - - -def is_norm(layer, exclude=None): - """Check if a layer is a normalization layer. - - Args: - layer (nn.Module): The layer to be checked. - exclude (type | tuple[type]): Types to be excluded. - - Returns: - bool: Whether the layer is a norm layer. - """ - if exclude is not None: - if not isinstance(exclude, tuple): - exclude = (exclude, ) - if not is_tuple_of(exclude, type): - raise TypeError( - f'"exclude" must be either None or type or a tuple of types, ' - f'but got {type(exclude)}: {exclude}') - - if exclude and isinstance(layer, exclude): - return False - - all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm) - return isinstance(layer, all_norm_bases) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/progress.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/progress.py deleted file mode 100644 index 92cfa8023021551b92869ac3cc27e78935602864..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/progress.py +++ /dev/null @@ -1,1703 +0,0 @@ -import io -import sys -import typing -import warnings -from abc import ABC, abstractmethod -from collections import deque -from collections.abc import Sized -from dataclasses import dataclass, field -from datetime import timedelta -from io import RawIOBase, UnsupportedOperation -from math import ceil -from mmap import mmap -from os import PathLike, stat -from threading import Event, RLock, Thread -from types import TracebackType -from typing import ( - Any, - BinaryIO, - Callable, - ContextManager, - Deque, - Dict, - Generic, - Iterable, - List, - NamedTuple, - NewType, - Optional, - Sequence, - TextIO, - Tuple, - Type, - TypeVar, - Union, -) - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from pip._vendor.typing_extensions import Literal # pragma: no cover - -from . 
import filesize, get_console -from .console import Console, Group, JustifyMethod, RenderableType -from .highlighter import Highlighter -from .jupyter import JupyterMixin -from .live import Live -from .progress_bar import ProgressBar -from .spinner import Spinner -from .style import StyleType -from .table import Column, Table -from .text import Text, TextType - -TaskID = NewType("TaskID", int) - -ProgressType = TypeVar("ProgressType") - -GetTimeCallable = Callable[[], float] - - -_I = typing.TypeVar("_I", TextIO, BinaryIO) - - -class _TrackThread(Thread): - """A thread to periodically update progress.""" - - def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float): - self.progress = progress - self.task_id = task_id - self.update_period = update_period - self.done = Event() - - self.completed = 0 - super().__init__() - - def run(self) -> None: - task_id = self.task_id - advance = self.progress.advance - update_period = self.update_period - last_completed = 0 - wait = self.done.wait - while not wait(update_period): - completed = self.completed - if last_completed != completed: - advance(task_id, completed - last_completed) - last_completed = completed - - self.progress.update(self.task_id, completed=self.completed, refresh=True) - - def __enter__(self) -> "_TrackThread": - self.start() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.done.set() - self.join() - - -def track( - sequence: Union[Sequence[ProgressType], Iterable[ProgressType]], - description: str = "Working...", - total: Optional[float] = None, - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - update_period: float = 0.1, - disable: bool = False, - show_speed: bool = True, -) -> Iterable[ProgressType]: - """Track progress by iterating over a sequence. - - Args: - sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over. - description (str, optional): Description of task show next to progress bar. Defaults to "Working". - total: (float, optional): Total number of steps. Default is len(sequence). - auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. - transient: (bool, optional): Clear the progress on exit. Defaults to False. - console (Console, optional): Console to write to. Default creates internal Console instance. - refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1. - disable (bool, optional): Disable display of progress. - show_speed (bool, optional): Show speed if total isn't known. Defaults to True. - Returns: - Iterable[ProgressType]: An iterable of the values in the sequence. 
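(Editorial aside: a minimal usage sketch for this module-level track(); time.sleep stands in for real work, and the plain "rich" import path assumes the standalone package rather than pip's vendored copy.)

    import time
    from rich.progress import track

    for _ in track(range(100), description="Crunching..."):
        time.sleep(0.05)  # placeholder workload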
- - """ - - columns: List["ProgressColumn"] = ( - [TextColumn("[progress.description]{task.description}")] if description else [] - ) - columns.extend( - ( - BarColumn( - style=style, - complete_style=complete_style, - finished_style=finished_style, - pulse_style=pulse_style, - ), - TaskProgressColumn(show_speed=show_speed), - TimeRemainingColumn(), - ) - ) - progress = Progress( - *columns, - auto_refresh=auto_refresh, - console=console, - transient=transient, - get_time=get_time, - refresh_per_second=refresh_per_second or 10, - disable=disable, - ) - - with progress: - yield from progress.track( - sequence, total=total, description=description, update_period=update_period - ) - - -class _Reader(RawIOBase, BinaryIO): - """A reader that tracks progress while it's being read from.""" - - def __init__( - self, - handle: BinaryIO, - progress: "Progress", - task: TaskID, - close_handle: bool = True, - ) -> None: - self.handle = handle - self.progress = progress - self.task = task - self.close_handle = close_handle - self._closed = False - - def __enter__(self) -> "_Reader": - self.handle.__enter__() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.close() - - def __iter__(self) -> BinaryIO: - return self - - def __next__(self) -> bytes: - line = next(self.handle) - self.progress.advance(self.task, advance=len(line)) - return line - - @property - def closed(self) -> bool: - return self._closed - - def fileno(self) -> int: - return self.handle.fileno() - - def isatty(self) -> bool: - return self.handle.isatty() - - @property - def name(self) -> str: - return self.handle.name - - def readable(self) -> bool: - return self.handle.readable() - - def seekable(self) -> bool: - return self.handle.seekable() - - def writable(self) -> bool: - return False - - def read(self, size: int = -1) -> bytes: - block = self.handle.read(size) - self.progress.advance(self.task, advance=len(block)) - return block - - def readinto(self, b: Union[bytearray, memoryview, mmap]): # type: ignore[no-untyped-def, override] - n = self.handle.readinto(b) # type: ignore[attr-defined] - self.progress.advance(self.task, advance=n) - return n - - def readline(self, size: int = -1) -> bytes: # type: ignore[override] - line = self.handle.readline(size) - self.progress.advance(self.task, advance=len(line)) - return line - - def readlines(self, hint: int = -1) -> List[bytes]: - lines = self.handle.readlines(hint) - self.progress.advance(self.task, advance=sum(map(len, lines))) - return lines - - def close(self) -> None: - if self.close_handle: - self.handle.close() - self._closed = True - - def seek(self, offset: int, whence: int = 0) -> int: - pos = self.handle.seek(offset, whence) - self.progress.update(self.task, completed=pos) - return pos - - def tell(self) -> int: - return self.handle.tell() - - def write(self, s: Any) -> int: - raise UnsupportedOperation("write") - - -class _ReadContext(ContextManager[_I], Generic[_I]): - """A utility class to handle a context for both a reader and a progress.""" - - def __init__(self, progress: "Progress", reader: _I) -> None: - self.progress = progress - self.reader: _I = reader - - def __enter__(self) -> _I: - self.progress.start() - return self.reader.__enter__() - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.progress.stop() - self.reader.__exit__(exc_type, 
exc_val, exc_tb) - - -def wrap_file( - file: BinaryIO, - total: int, - *, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> ContextManager[BinaryIO]: - """Read bytes from a file while tracking progress. - - Args: - file (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode. - total (int): Total number of bytes to read. - description (str, optional): Description of task show next to progress bar. Defaults to "Reading". - auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. - transient: (bool, optional): Clear the progress on exit. Defaults to False. - console (Console, optional): Console to write to. Default creates internal Console instance. - refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - disable (bool, optional): Disable display of progress. - Returns: - ContextManager[BinaryIO]: A context manager yielding a progress reader. - - """ - - columns: List["ProgressColumn"] = ( - [TextColumn("[progress.description]{task.description}")] if description else [] - ) - columns.extend( - ( - BarColumn( - style=style, - complete_style=complete_style, - finished_style=finished_style, - pulse_style=pulse_style, - ), - DownloadColumn(), - TimeRemainingColumn(), - ) - ) - progress = Progress( - *columns, - auto_refresh=auto_refresh, - console=console, - transient=transient, - get_time=get_time, - refresh_per_second=refresh_per_second or 10, - disable=disable, - ) - - reader = progress.wrap_file(file, total=total, description=description) - return _ReadContext(progress, reader) - - -@typing.overload -def open( - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["rt"], Literal["r"]], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> ContextManager[TextIO]: - pass - - -@typing.overload -def open( - file: Union[str, "PathLike[str]", bytes], - mode: Literal["rb"], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - 
refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> ContextManager[BinaryIO]: - pass - - -def open( - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r", - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - description: str = "Reading...", - auto_refresh: bool = True, - console: Optional[Console] = None, - transient: bool = False, - get_time: Optional[Callable[[], float]] = None, - refresh_per_second: float = 10, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - disable: bool = False, -) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]: - """Read bytes from a file while tracking progress. - - Args: - path (Union[str, PathLike[str], BinaryIO]): The path to the file to read, or a file-like object in binary mode. - mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt". - buffering (int): The buffering strategy to use, see :func:`io.open`. - encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`. - errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`. - newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open` - total: (int, optional): Total number of bytes to read. Must be provided if reading from a file handle. Default for a path is os.stat(file).st_size. - description (str, optional): Description of task show next to progress bar. Defaults to "Reading". - auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True. - transient: (bool, optional): Clear the progress on exit. Defaults to False. - console (Console, optional): Console to write to. Default creates internal Console instance. - refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - disable (bool, optional): Disable display of progress. - encoding (str, optional): The encoding to use when reading in text mode. - - Returns: - ContextManager[BinaryIO]: A context manager yielding a progress reader. 
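(Editorial aside: a usage sketch for this progress-aware open(); the file path is hypothetical, and total falls back to os.stat when omitted, as documented below.)

    from rich.progress import open as progress_open

    with progress_open("archive.bin", "rb", description="Hashing...") as f:
        while chunk := f.read(1 << 16):
            pass  # hand each chunk to a hasher here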
- - """ - - columns: List["ProgressColumn"] = ( - [TextColumn("[progress.description]{task.description}")] if description else [] - ) - columns.extend( - ( - BarColumn( - style=style, - complete_style=complete_style, - finished_style=finished_style, - pulse_style=pulse_style, - ), - DownloadColumn(), - TimeRemainingColumn(), - ) - ) - progress = Progress( - *columns, - auto_refresh=auto_refresh, - console=console, - transient=transient, - get_time=get_time, - refresh_per_second=refresh_per_second or 10, - disable=disable, - ) - - reader = progress.open( - file, - mode=mode, - buffering=buffering, - encoding=encoding, - errors=errors, - newline=newline, - total=total, - description=description, - ) - return _ReadContext(progress, reader) # type: ignore[return-value, type-var] - - -class ProgressColumn(ABC): - """Base class for a widget to use in progress display.""" - - max_refresh: Optional[float] = None - - def __init__(self, table_column: Optional[Column] = None) -> None: - self._table_column = table_column - self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {} - self._update_time: Optional[float] = None - - def get_table_column(self) -> Column: - """Get a table column, used to build tasks table.""" - return self._table_column or Column() - - def __call__(self, task: "Task") -> RenderableType: - """Called by the Progress object to return a renderable for the given task. - - Args: - task (Task): An object containing information regarding the task. - - Returns: - RenderableType: Anything renderable (including str). - """ - current_time = task.get_time() - if self.max_refresh is not None and not task.completed: - try: - timestamp, renderable = self._renderable_cache[task.id] - except KeyError: - pass - else: - if timestamp + self.max_refresh > current_time: - return renderable - - renderable = self.render(task) - self._renderable_cache[task.id] = (current_time, renderable) - return renderable - - @abstractmethod - def render(self, task: "Task") -> RenderableType: - """Should return a renderable object.""" - - -class RenderableColumn(ProgressColumn): - """A column to insert an arbitrary column. - - Args: - renderable (RenderableType, optional): Any renderable. Defaults to empty string. - """ - - def __init__( - self, renderable: RenderableType = "", *, table_column: Optional[Column] = None - ): - self.renderable = renderable - super().__init__(table_column=table_column) - - def render(self, task: "Task") -> RenderableType: - return self.renderable - - -class SpinnerColumn(ProgressColumn): - """A column with a 'spinner' animation. - - Args: - spinner_name (str, optional): Name of spinner animation. Defaults to "dots". - style (StyleType, optional): Style of spinner. Defaults to "progress.spinner". - speed (float, optional): Speed factor of spinner. Defaults to 1.0. - finished_text (TextType, optional): Text used when task is finished. Defaults to " ". - """ - - def __init__( - self, - spinner_name: str = "dots", - style: Optional[StyleType] = "progress.spinner", - speed: float = 1.0, - finished_text: TextType = " ", - table_column: Optional[Column] = None, - ): - self.spinner = Spinner(spinner_name, style=style, speed=speed) - self.finished_text = ( - Text.from_markup(finished_text) - if isinstance(finished_text, str) - else finished_text - ) - super().__init__(table_column=table_column) - - def set_spinner( - self, - spinner_name: str, - spinner_style: Optional[StyleType] = "progress.spinner", - speed: float = 1.0, - ) -> None: - """Set a new spinner. 
- - Args: - spinner_name (str): Spinner name, see python -m rich.spinner. - spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner". - speed (float, optional): Speed factor of spinner. Defaults to 1.0. - """ - self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed) - - def render(self, task: "Task") -> RenderableType: - text = ( - self.finished_text - if task.finished - else self.spinner.render(task.get_time()) - ) - return text - - -class TextColumn(ProgressColumn): - """A column containing text.""" - - def __init__( - self, - text_format: str, - style: StyleType = "none", - justify: JustifyMethod = "left", - markup: bool = True, - highlighter: Optional[Highlighter] = None, - table_column: Optional[Column] = None, - ) -> None: - self.text_format = text_format - self.justify: JustifyMethod = justify - self.style = style - self.markup = markup - self.highlighter = highlighter - super().__init__(table_column=table_column or Column(no_wrap=True)) - - def render(self, task: "Task") -> Text: - _text = self.text_format.format(task=task) - if self.markup: - text = Text.from_markup(_text, style=self.style, justify=self.justify) - else: - text = Text(_text, style=self.style, justify=self.justify) - if self.highlighter: - self.highlighter.highlight(text) - return text - - -class BarColumn(ProgressColumn): - """Renders a visual progress bar. - - Args: - bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40. - style (StyleType, optional): Style for the bar background. Defaults to "bar.back". - complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete". - finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.done". - pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse". - """ - - def __init__( - self, - bar_width: Optional[int] = 40, - style: StyleType = "bar.back", - complete_style: StyleType = "bar.complete", - finished_style: StyleType = "bar.finished", - pulse_style: StyleType = "bar.pulse", - table_column: Optional[Column] = None, - ) -> None: - self.bar_width = bar_width - self.style = style - self.complete_style = complete_style - self.finished_style = finished_style - self.pulse_style = pulse_style - super().__init__(table_column=table_column) - - def render(self, task: "Task") -> ProgressBar: - """Gets a progress bar widget for a task.""" - return ProgressBar( - total=max(0, task.total) if task.total is not None else None, - completed=max(0, task.completed), - width=None if self.bar_width is None else max(1, self.bar_width), - pulse=not task.started, - animation_time=task.get_time(), - style=self.style, - complete_style=self.complete_style, - finished_style=self.finished_style, - pulse_style=self.pulse_style, - ) - - -class TimeElapsedColumn(ProgressColumn): - """Renders time elapsed.""" - - def render(self, task: "Task") -> Text: - """Show time remaining.""" - elapsed = task.finished_time if task.finished else task.elapsed - if elapsed is None: - return Text("-:--:--", style="progress.elapsed") - delta = timedelta(seconds=int(elapsed)) - return Text(str(delta), style="progress.elapsed") - - -class TaskProgressColumn(TextColumn): - """Show task progress as a percentage. - - Args: - text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%". - text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "". 
-        style (StyleType, optional): Style of output. Defaults to "none".
-        justify (JustifyMethod, optional): Text justification. Defaults to "left".
-        markup (bool, optional): Enable markup. Defaults to True.
-        highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None.
-        table_column (Optional[Column], optional): Table Column to use. Defaults to None.
-        show_speed (bool, optional): Show speed if total is unknown. Defaults to False.
-    """
-
-    def __init__(
-        self,
-        text_format: str = "[progress.percentage]{task.percentage:>3.0f}%",
-        text_format_no_percentage: str = "",
-        style: StyleType = "none",
-        justify: JustifyMethod = "left",
-        markup: bool = True,
-        highlighter: Optional[Highlighter] = None,
-        table_column: Optional[Column] = None,
-        show_speed: bool = False,
-    ) -> None:
-
-        self.text_format_no_percentage = text_format_no_percentage
-        self.show_speed = show_speed
-        super().__init__(
-            text_format=text_format,
-            style=style,
-            justify=justify,
-            markup=markup,
-            highlighter=highlighter,
-            table_column=table_column,
-        )
-
-    @classmethod
-    def render_speed(cls, speed: Optional[float]) -> Text:
-        """Render the speed in iterations per second.
-
-        Args:
-            speed (Optional[float]): Estimated speed in steps per second, or None.
-
-        Returns:
-            Text: Text object containing the task speed.
-        """
-        if speed is None:
-            return Text("", style="progress.percentage")
-        unit, suffix = filesize.pick_unit_and_suffix(
-            int(speed),
-            ["", "×10³", "×10⁶", "×10⁹", "×10¹²"],
-            1000,
-        )
-        data_speed = speed / unit
-        return Text(f"{data_speed:.1f}{suffix} it/s", style="progress.percentage")
-
-    def render(self, task: "Task") -> Text:
-        if task.total is None and self.show_speed:
-            return self.render_speed(task.finished_speed or task.speed)
-        text_format = (
-            self.text_format_no_percentage if task.total is None else self.text_format
-        )
-        _text = text_format.format(task=task)
-        if self.markup:
-            text = Text.from_markup(_text, style=self.style, justify=self.justify)
-        else:
-            text = Text(_text, style=self.style, justify=self.justify)
-        if self.highlighter:
-            self.highlighter.highlight(text)
-        return text
-
-
-class TimeRemainingColumn(ProgressColumn):
-    """Renders estimated time remaining.
-
-    Args:
-        compact (bool, optional): Render MM:SS when time remaining is less than an hour. Defaults to False.
-        elapsed_when_finished (bool, optional): Render time elapsed when the task is finished. Defaults to False.
- """ - - # Only refresh twice a second to prevent jitter - max_refresh = 0.5 - - def __init__( - self, - compact: bool = False, - elapsed_when_finished: bool = False, - table_column: Optional[Column] = None, - ): - self.compact = compact - self.elapsed_when_finished = elapsed_when_finished - super().__init__(table_column=table_column) - - def render(self, task: "Task") -> Text: - """Show time remaining.""" - if self.elapsed_when_finished and task.finished: - task_time = task.finished_time - style = "progress.elapsed" - else: - task_time = task.time_remaining - style = "progress.remaining" - - if task.total is None: - return Text("", style=style) - - if task_time is None: - return Text("--:--" if self.compact else "-:--:--", style=style) - - # Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py - minutes, seconds = divmod(int(task_time), 60) - hours, minutes = divmod(minutes, 60) - - if self.compact and not hours: - formatted = f"{minutes:02d}:{seconds:02d}" - else: - formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}" - - return Text(formatted, style=style) - - -class FileSizeColumn(ProgressColumn): - """Renders completed filesize.""" - - def render(self, task: "Task") -> Text: - """Show data completed.""" - data_size = filesize.decimal(int(task.completed)) - return Text(data_size, style="progress.filesize") - - -class TotalFileSizeColumn(ProgressColumn): - """Renders total filesize.""" - - def render(self, task: "Task") -> Text: - """Show data completed.""" - data_size = filesize.decimal(int(task.total)) if task.total is not None else "" - return Text(data_size, style="progress.filesize.total") - - -class MofNCompleteColumn(ProgressColumn): - """Renders completed count/total, e.g. ' 10/1000'. - - Best for bounded tasks with int quantities. - - Space pads the completed count so that progress length does not change as task progresses - past powers of 10. - - Args: - separator (str, optional): Text to separate completed and total values. Defaults to "/". - """ - - def __init__(self, separator: str = "/", table_column: Optional[Column] = None): - self.separator = separator - super().__init__(table_column=table_column) - - def render(self, task: "Task") -> Text: - """Show completed/total.""" - completed = int(task.completed) - total = int(task.total) if task.total is not None else "?" - total_width = len(str(total)) - return Text( - f"{completed:{total_width}d}{self.separator}{total}", - style="progress.download", - ) - - -class DownloadColumn(ProgressColumn): - """Renders file size downloaded and total, e.g. '0.5/2.3 GB'. - - Args: - binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False. 
- """ - - def __init__( - self, binary_units: bool = False, table_column: Optional[Column] = None - ) -> None: - self.binary_units = binary_units - super().__init__(table_column=table_column) - - def render(self, task: "Task") -> Text: - """Calculate common unit for completed and total.""" - completed = int(task.completed) - - unit_and_suffix_calculation_base = ( - int(task.total) if task.total is not None else completed - ) - if self.binary_units: - unit, suffix = filesize.pick_unit_and_suffix( - unit_and_suffix_calculation_base, - ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"], - 1024, - ) - else: - unit, suffix = filesize.pick_unit_and_suffix( - unit_and_suffix_calculation_base, - ["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"], - 1000, - ) - precision = 0 if unit == 1 else 1 - - completed_ratio = completed / unit - completed_str = f"{completed_ratio:,.{precision}f}" - - if task.total is not None: - total = int(task.total) - total_ratio = total / unit - total_str = f"{total_ratio:,.{precision}f}" - else: - total_str = "?" - - download_status = f"{completed_str}/{total_str} {suffix}" - download_text = Text(download_status, style="progress.download") - return download_text - - -class TransferSpeedColumn(ProgressColumn): - """Renders human readable transfer speed.""" - - def render(self, task: "Task") -> Text: - """Show data transfer speed.""" - speed = task.finished_speed or task.speed - if speed is None: - return Text("?", style="progress.data.speed") - data_speed = filesize.decimal(int(speed)) - return Text(f"{data_speed}/s", style="progress.data.speed") - - -class ProgressSample(NamedTuple): - """Sample of progress for a given time.""" - - timestamp: float - """Timestamp of sample.""" - completed: float - """Number of steps completed.""" - - -@dataclass -class Task: - """Information regarding a progress task. - - This object should be considered read-only outside of the :class:`~Progress` class. 
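(Editorial aside: Task objects are created via Progress.add_task rather than constructed directly; a minimal sketch of reading one back.)

    with Progress() as progress:
        task_id = progress.add_task("copying", total=512)
        progress.advance(task_id, 64)
        task = progress.tasks[0]  # the Task instance behind task_id
        print(task.completed, task.percentage, task.speed)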
-
-    """
-
-    id: TaskID
-    """Task ID associated with this task (used in Progress methods)."""
-
-    description: str
-    """str: Description of the task."""
-
-    total: Optional[float]
-    """Optional[float]: Total number of steps in this task."""
-
-    completed: float
-    """float: Number of steps completed."""
-
-    _get_time: GetTimeCallable
-    """Callable to get the current time."""
-
-    finished_time: Optional[float] = None
-    """float: Time task was finished."""
-
-    visible: bool = True
-    """bool: Indicates if this task is visible in the progress display."""
-
-    fields: Dict[str, Any] = field(default_factory=dict)
-    """dict: Arbitrary fields passed in via Progress.update."""
-
-    start_time: Optional[float] = field(default=None, init=False, repr=False)
-    """Optional[float]: Time this task was started, or None if not started."""
-
-    stop_time: Optional[float] = field(default=None, init=False, repr=False)
-    """Optional[float]: Time this task was stopped, or None if not stopped."""
-
-    finished_speed: Optional[float] = None
-    """Optional[float]: The last speed for a finished task."""
-
-    _progress: Deque[ProgressSample] = field(
-        default_factory=lambda: deque(maxlen=1000), init=False, repr=False
-    )
-
-    _lock: RLock = field(repr=False, default_factory=RLock)
-    """Thread lock."""
-
-    def get_time(self) -> float:
-        """float: Get the current time, in seconds."""
-        return self._get_time()
-
-    @property
-    def started(self) -> bool:
-        """bool: Check if the task has started."""
-        return self.start_time is not None
-
-    @property
-    def remaining(self) -> Optional[float]:
-        """Optional[float]: Get the number of steps remaining, if a non-None total was set."""
-        if self.total is None:
-            return None
-        return self.total - self.completed
-
-    @property
-    def elapsed(self) -> Optional[float]:
-        """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
-        if self.start_time is None:
-            return None
-        if self.stop_time is not None:
-            return self.stop_time - self.start_time
-        return self.get_time() - self.start_time
-
-    @property
-    def finished(self) -> bool:
-        """Check if the task has finished."""
-        return self.finished_time is not None
-
-    @property
-    def percentage(self) -> float:
-        """float: Get progress of task as a percentage. If a None total was set, returns 0."""
-        if not self.total:
-            return 0.0
-        completed = (self.completed / self.total) * 100.0
-        completed = min(100.0, max(0.0, completed))
-        return completed
-
-    @property
-    def speed(self) -> Optional[float]:
-        """Optional[float]: Get the estimated speed in steps per second."""
-        if self.start_time is None:
-            return None
-        with self._lock:
-            progress = self._progress
-            if not progress:
-                return None
-            total_time = progress[-1].timestamp - progress[0].timestamp
-            if total_time == 0:
-                return None
-            iter_progress = iter(progress)
-            next(iter_progress)
-            total_completed = sum(sample.completed for sample in iter_progress)
-            speed = total_completed / total_time
-            return speed
-
-    @property
-    def time_remaining(self) -> Optional[float]:
-        """Optional[float]: Get estimated time to completion, or ``None`` if no data."""
-        if self.finished:
-            return 0.0
-        speed = self.speed
-        if not speed:
-            return None
-        remaining = self.remaining
-        if remaining is None:
-            return None
-        estimate = ceil(remaining / speed)
-        return estimate
-
-    def _reset(self) -> None:
-        """Reset progress."""
-        self._progress.clear()
-        self.finished_time = None
-        self.finished_speed = None
-
-
-class Progress(JupyterMixin):
-    """Renders one or more auto-updating progress bars.
-
-    Args:
-        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
-        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`.
-        refresh_per_second (Optional[float], optional): Number of times per second to refresh the progress information or None to use default (10). Defaults to None.
-        speed_estimate_period: (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
-        transient: (bool, optional): Clear the progress on exit. Defaults to False.
-        redirect_stdout: (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
-        redirect_stderr: (bool, optional): Enable redirection of stderr. Defaults to True.
-        get_time: (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
-        disable (bool, optional): Disable progress display. Defaults to False.
-        expand (bool, optional): Expand tasks table to fit width. Defaults to False.
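(Editorial aside: a sketch of a customized Progress built from the column classes defined above; the file names are hypothetical.)

    progress = Progress(
        SpinnerColumn(),
        BarColumn(bar_width=None),   # stretch the bar to the full line
        MofNCompleteColumn(),
        TimeElapsedColumn(),
        transient=True,              # clear the bars on exit
    )
    with progress:
        for path in ("a.txt", "b.txt"):
            task = progress.add_task(f"reading {path}", total=100)
            progress.update(task, advance=100)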
-    """
-
-    def __init__(
-        self,
-        *columns: Union[str, ProgressColumn],
-        console: Optional[Console] = None,
-        auto_refresh: bool = True,
-        refresh_per_second: float = 10,
-        speed_estimate_period: float = 30.0,
-        transient: bool = False,
-        redirect_stdout: bool = True,
-        redirect_stderr: bool = True,
-        get_time: Optional[GetTimeCallable] = None,
-        disable: bool = False,
-        expand: bool = False,
-    ) -> None:
-        assert refresh_per_second > 0, "refresh_per_second must be > 0"
-        self._lock = RLock()
-        self.columns = columns or self.get_default_columns()
-        self.speed_estimate_period = speed_estimate_period
-
-        self.disable = disable
-        self.expand = expand
-        self._tasks: Dict[TaskID, Task] = {}
-        self._task_index: TaskID = TaskID(0)
-        self.live = Live(
-            console=console or get_console(),
-            auto_refresh=auto_refresh,
-            refresh_per_second=refresh_per_second,
-            transient=transient,
-            redirect_stdout=redirect_stdout,
-            redirect_stderr=redirect_stderr,
-            get_renderable=self.get_renderable,
-        )
-        self.get_time = get_time or self.console.get_time
-        self.print = self.console.print
-        self.log = self.console.log
-
-    @classmethod
-    def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
-        """Get the default columns used for a new Progress instance:
-           - a text column for the description (TextColumn)
-           - the bar itself (BarColumn)
-           - a text column showing completion percentage (TextColumn)
-           - an estimated-time-remaining column (TimeRemainingColumn)
-        If the Progress instance is created without passing a columns argument,
-        the default columns defined here will be used.
-
-        You can also create a Progress instance using custom columns before
-        and/or after the defaults, as in this example:
-
-            progress = Progress(
-                SpinnerColumn(),
-                *Progress.get_default_columns(),
-                "Elapsed:",
-                TimeElapsedColumn(),
-            )
-
-        This code shows the creation of a Progress display, containing
-        a spinner to the left, the default columns, and a labeled elapsed
-        time column.
-        """
-        return (
-            TextColumn("[progress.description]{task.description}"),
-            BarColumn(),
-            TaskProgressColumn(),
-            TimeRemainingColumn(),
-        )
-
-    @property
-    def console(self) -> Console:
-        return self.live.console
-
-    @property
-    def tasks(self) -> List[Task]:
-        """Get a list of Task instances."""
-        with self._lock:
-            return list(self._tasks.values())
-
-    @property
-    def task_ids(self) -> List[TaskID]:
-        """A list of task IDs."""
-        with self._lock:
-            return list(self._tasks.keys())
-
-    @property
-    def finished(self) -> bool:
-        """Check if all tasks have been completed."""
-        with self._lock:
-            if not self._tasks:
-                return True
-            return all(task.finished for task in self._tasks.values())
-
-    def start(self) -> None:
-        """Start the progress display."""
-        if not self.disable:
-            self.live.start(refresh=True)
-
-    def stop(self) -> None:
-        """Stop the progress display."""
-        self.live.stop()
-        if not self.console.is_interactive:
-            self.console.print()
-
-    def __enter__(self) -> "Progress":
-        self.start()
-        return self
-
-    def __exit__(
-        self,
-        exc_type: Optional[Type[BaseException]],
-        exc_val: Optional[BaseException],
-        exc_tb: Optional[TracebackType],
-    ) -> None:
-        self.stop()
-
-    def track(
-        self,
-        sequence: Union[Iterable[ProgressType], Sequence[ProgressType]],
-        total: Optional[float] = None,
-        task_id: Optional[TaskID] = None,
-        description: str = "Working...",
-        update_period: float = 0.1,
-    ) -> Iterable[ProgressType]:
-        """Track progress by iterating over a sequence.
- - Args: - sequence (Sequence[ProgressType]): A sequence of values you want to iterate over and track progress. - total: (float, optional): Total number of steps. Default is len(sequence). - task_id: (TaskID): Task to track. Default is new task. - description: (str, optional): Description of task, if new task is created. - update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1. - - Returns: - Iterable[ProgressType]: An iterable of values taken from the provided sequence. - """ - - task_total: Optional[float] = None - if total is None: - if isinstance(sequence, Sized): - task_total = float(len(sequence)) - else: - task_total = total - - if task_id is None: - task_id = self.add_task(description, total=task_total) - else: - self.update(task_id, total=task_total) - - if self.live.auto_refresh: - with _TrackThread(self, task_id, update_period) as track_thread: - for value in sequence: - yield value - track_thread.completed += 1 - else: - advance = self.advance - refresh = self.refresh - for value in sequence: - yield value - advance(task_id, 1) - refresh() - - def wrap_file( - self, - file: BinaryIO, - total: Optional[int] = None, - *, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> BinaryIO: - """Track progress file reading from a binary file. - - Args: - file (BinaryIO): A file-like object opened in binary mode. - total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given. - task_id (TaskID): Task to track. Default is new task. - description (str, optional): Description of task, if new task is created. - - Returns: - BinaryIO: A readable file-like object in binary mode. - - Raises: - ValueError: When no total value can be extracted from the arguments or the task. 
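(Editorial aside: a sketch of wrap_file on an already-open handle; since the reader is created with close_handle=False, the caller's context manager keeps ownership of the file. The path is hypothetical.)

    import io
    import os

    path = "events.log"
    with Progress() as progress, io.open(path, "rb") as handle:
        reader = progress.wrap_file(handle, total=os.path.getsize(path),
                                    description="Parsing...")
        for line in reader:
            pass  # consume the line; the bar advances by len(line)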
- """ - # attempt to recover the total from the task - total_bytes: Optional[float] = None - if total is not None: - total_bytes = total - elif task_id is not None: - with self._lock: - total_bytes = self._tasks[task_id].total - if total_bytes is None: - raise ValueError( - f"unable to get the total number of bytes, please specify 'total'" - ) - - # update total of task or create new task - if task_id is None: - task_id = self.add_task(description, total=total_bytes) - else: - self.update(task_id, total=total_bytes) - - return _Reader(file, self, task_id, close_handle=False) - - @typing.overload - def open( - self, - file: Union[str, "PathLike[str]", bytes], - mode: Literal["rb"], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> BinaryIO: - pass - - @typing.overload - def open( - self, - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["r"], Literal["rt"]], - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> TextIO: - pass - - def open( - self, - file: Union[str, "PathLike[str]", bytes], - mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r", - buffering: int = -1, - encoding: Optional[str] = None, - errors: Optional[str] = None, - newline: Optional[str] = None, - *, - total: Optional[int] = None, - task_id: Optional[TaskID] = None, - description: str = "Reading...", - ) -> Union[BinaryIO, TextIO]: - """Track progress while reading from a binary file. - - Args: - path (Union[str, PathLike[str]]): The path to the file to read. - mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt". - buffering (int): The buffering strategy to use, see :func:`io.open`. - encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`. - errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`. - newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`. - total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used. - task_id (TaskID): Task to track. Default is new task. - description (str, optional): Description of task, if new task is created. - - Returns: - BinaryIO: A readable file-like object in binary mode. - - Raises: - ValueError: When an invalid mode is given. 
- """ - # normalize the mode (always rb, rt) - _mode = "".join(sorted(mode, reverse=False)) - if _mode not in ("br", "rt", "r"): - raise ValueError("invalid mode {!r}".format(mode)) - - # patch buffering to provide the same behaviour as the builtin `open` - line_buffering = buffering == 1 - if _mode == "br" and buffering == 1: - warnings.warn( - "line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used", - RuntimeWarning, - ) - buffering = -1 - elif _mode == "rt" or _mode == "r": - if buffering == 0: - raise ValueError("can't have unbuffered text I/O") - elif buffering == 1: - buffering = -1 - - # attempt to get the total with `os.stat` - if total is None: - total = stat(file).st_size - - # update total of task or create new task - if task_id is None: - task_id = self.add_task(description, total=total) - else: - self.update(task_id, total=total) - - # open the file in binary mode, - handle = io.open(file, "rb", buffering=buffering) - reader = _Reader(handle, self, task_id, close_handle=True) - - # wrap the reader in a `TextIOWrapper` if text mode - if mode == "r" or mode == "rt": - return io.TextIOWrapper( - reader, - encoding=encoding, - errors=errors, - newline=newline, - line_buffering=line_buffering, - ) - - return reader - - def start_task(self, task_id: TaskID) -> None: - """Start a task. - - Starts a task (used when calculating elapsed time). You may need to call this manually, - if you called ``add_task`` with ``start=False``. - - Args: - task_id (TaskID): ID of task. - """ - with self._lock: - task = self._tasks[task_id] - if task.start_time is None: - task.start_time = self.get_time() - - def stop_task(self, task_id: TaskID) -> None: - """Stop a task. - - This will freeze the elapsed time on the task. - - Args: - task_id (TaskID): ID of task. - """ - with self._lock: - task = self._tasks[task_id] - current_time = self.get_time() - if task.start_time is None: - task.start_time = current_time - task.stop_time = current_time - - def update( - self, - task_id: TaskID, - *, - total: Optional[float] = None, - completed: Optional[float] = None, - advance: Optional[float] = None, - description: Optional[str] = None, - visible: Optional[bool] = None, - refresh: bool = False, - **fields: Any, - ) -> None: - """Update information associated with a task. - - Args: - task_id (TaskID): Task id (returned by add_task). - total (float, optional): Updates task.total if not None. - completed (float, optional): Updates task.completed if not None. - advance (float, optional): Add a value to task.completed if not None. - description (str, optional): Change task description if not None. - visible (bool, optional): Set visible flag if not None. - refresh (bool): Force a refresh of progress information. Default is False. - **fields (Any): Additional data fields required for rendering. 
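(Editorial aside: a few representative update() calls; note from the body below that changing total also resets the task's speed samples.)

    progress.update(task_id, advance=10)                      # bump completed by 10
    progress.update(task_id, description="Recompressing...")  # relabel in place
    progress.update(task_id, total=2048)                      # retarget; resets speed samples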
- """ - with self._lock: - task = self._tasks[task_id] - completed_start = task.completed - - if total is not None and total != task.total: - task.total = total - task._reset() - if advance is not None: - task.completed += advance - if completed is not None: - task.completed = completed - if description is not None: - task.description = description - if visible is not None: - task.visible = visible - task.fields.update(fields) - update_completed = task.completed - completed_start - - current_time = self.get_time() - old_sample_time = current_time - self.speed_estimate_period - _progress = task._progress - - popleft = _progress.popleft - while _progress and _progress[0].timestamp < old_sample_time: - popleft() - if update_completed > 0: - _progress.append(ProgressSample(current_time, update_completed)) - if ( - task.total is not None - and task.completed >= task.total - and task.finished_time is None - ): - task.finished_time = task.elapsed - - if refresh: - self.refresh() - - def reset( - self, - task_id: TaskID, - *, - start: bool = True, - total: Optional[float] = None, - completed: int = 0, - visible: Optional[bool] = None, - description: Optional[str] = None, - **fields: Any, - ) -> None: - """Reset a task so completed is 0 and the clock is reset. - - Args: - task_id (TaskID): ID of task. - start (bool, optional): Start the task after reset. Defaults to True. - total (float, optional): New total steps in task, or None to use current total. Defaults to None. - completed (int, optional): Number of steps completed. Defaults to 0. - visible (bool, optional): Enable display of the task. Defaults to True. - description (str, optional): Change task description if not None. Defaults to None. - **fields (str): Additional data fields required for rendering. - """ - current_time = self.get_time() - with self._lock: - task = self._tasks[task_id] - task._reset() - task.start_time = current_time if start else None - if total is not None: - task.total = total - task.completed = completed - if visible is not None: - task.visible = visible - if fields: - task.fields = fields - if description is not None: - task.description = description - task.finished_time = None - self.refresh() - - def advance(self, task_id: TaskID, advance: float = 1) -> None: - """Advance task by a number of steps. - - Args: - task_id (TaskID): ID of task. - advance (float): Number of steps to advance. Default is 1. 
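(Editorial aside: advance() and update() keep a sliding window of ProgressSample entries, which Task.speed then turns into a rate; a self-contained sketch of that estimate, matching the pruning and summation done below.)

    def estimate_speed(samples, window, now):
        """samples: ProgressSample(timestamp, completed) deltas, oldest first."""
        recent = [s for s in samples if s.timestamp >= now - window]
        if len(recent) < 2:
            return None
        elapsed = recent[-1].timestamp - recent[0].timestamp
        done = sum(s.completed for s in recent[1:])
        return done / elapsed if elapsed else None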
- """ - current_time = self.get_time() - with self._lock: - task = self._tasks[task_id] - completed_start = task.completed - task.completed += advance - update_completed = task.completed - completed_start - old_sample_time = current_time - self.speed_estimate_period - _progress = task._progress - - popleft = _progress.popleft - while _progress and _progress[0].timestamp < old_sample_time: - popleft() - while len(_progress) > 1000: - popleft() - _progress.append(ProgressSample(current_time, update_completed)) - if ( - task.total is not None - and task.completed >= task.total - and task.finished_time is None - ): - task.finished_time = task.elapsed - task.finished_speed = task.speed - - def refresh(self) -> None: - """Refresh (render) the progress information.""" - if not self.disable and self.live.is_started: - self.live.refresh() - - def get_renderable(self) -> RenderableType: - """Get a renderable for the progress display.""" - renderable = Group(*self.get_renderables()) - return renderable - - def get_renderables(self) -> Iterable[RenderableType]: - """Get a number of renderables for the progress display.""" - table = self.make_tasks_table(self.tasks) - yield table - - def make_tasks_table(self, tasks: Iterable[Task]) -> Table: - """Get a table to render the Progress display. - - Args: - tasks (Iterable[Task]): An iterable of Task instances, one per row of the table. - - Returns: - Table: A table instance. - """ - table_columns = ( - ( - Column(no_wrap=True) - if isinstance(_column, str) - else _column.get_table_column().copy() - ) - for _column in self.columns - ) - table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand) - - for task in tasks: - if task.visible: - table.add_row( - *( - ( - column.format(task=task) - if isinstance(column, str) - else column(task) - ) - for column in self.columns - ) - ) - return table - - def __rich__(self) -> RenderableType: - """Makes the Progress class itself renderable.""" - with self._lock: - return self.get_renderable() - - def add_task( - self, - description: str, - start: bool = True, - total: Optional[float] = 100.0, - completed: int = 0, - visible: bool = True, - **fields: Any, - ) -> TaskID: - """Add a new 'task' to the Progress display. - - Args: - description (str): A description of the task. - start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False, - you will need to call `start` manually. Defaults to True. - total (float, optional): Number of total steps in the progress if known. - Set to None to render a pulsing animation. Defaults to 100. - completed (int, optional): Number of steps completed so far. Defaults to 0. - visible (bool, optional): Enable display of the task. Defaults to True. - **fields (str): Additional data fields required for rendering. - - Returns: - TaskID: An ID you can use when calling `update`. - """ - with self._lock: - task = Task( - self._task_index, - description, - total, - completed, - visible=visible, - fields=fields, - _get_time=self.get_time, - _lock=self._lock, - ) - self._tasks[self._task_index] = task - if start: - self.start_task(self._task_index) - new_task_index = self._task_index - self._task_index = TaskID(int(self._task_index) + 1) - self.refresh() - return new_task_index - - def remove_task(self, task_id: TaskID) -> None: - """Delete a task if it exists. - - Args: - task_id (TaskID): A task ID. 
- - """ - with self._lock: - del self._tasks[task_id] - - -if __name__ == "__main__": # pragma: no coverage - - import random - import time - - from .panel import Panel - from .rule import Rule - from .syntax import Syntax - from .table import Table - - syntax = Syntax( - '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: - """Iterate and generate a tuple with a flag for last value.""" - iter_values = iter(values) - try: - previous_value = next(iter_values) - except StopIteration: - return - for value in iter_values: - yield False, previous_value - previous_value = value - yield True, previous_value''', - "python", - line_numbers=True, - ) - - table = Table("foo", "bar", "baz") - table.add_row("1", "2", "3") - - progress_renderables = [ - "Text may be printed while the progress bars are rendering.", - Panel("In fact, [i]any[/i] renderable will work"), - "Such as [magenta]tables[/]...", - table, - "Pretty printed structures...", - {"type": "example", "text": "Pretty printed"}, - "Syntax...", - syntax, - Rule("Give it a try!"), - ] - - from itertools import cycle - - examples = cycle(progress_renderables) - - console = Console(record=True) - - with Progress( - SpinnerColumn(), - *Progress.get_default_columns(), - TimeElapsedColumn(), - console=console, - transient=False, - ) as progress: - - task1 = progress.add_task("[red]Downloading", total=1000) - task2 = progress.add_task("[green]Processing", total=1000) - task3 = progress.add_task("[yellow]Thinking", total=None) - - while not progress.finished: - progress.update(task1, advance=0.5) - progress.update(task2, advance=0.3) - time.sleep(0.01) - if random.randint(0, 100) < 1: - progress.log(next(examples)) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/six.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/six.py deleted file mode 100644 index 4e15675d8b5caa33255fe37271700f587bd26671..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/six.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright (c) 2010-2020 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -"""Utilities for writing code that runs on Python 2 and 3""" - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson " -__version__ = "1.16.0" - - -# Useful for very coarse version differentiation. 
-PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - -if PY34: - from importlib.util import spec_from_loader -else: - spec_from_loader = None - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. - delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." 
+ fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def find_spec(self, fullname, path, target=None): - if fullname in self.known_modules: - return spec_from_loader(fullname, self) - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - - def create_module(self, spec): - return self.load_module(spec.name) - - def exec_module(self, module): - pass - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("getoutput", "commands", "subprocess"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", 
"http.client"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." 
+ attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("splitvalue", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", 
"urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), - MovedAttribute("parse_http_list", "urllib2", "urllib.request"), - MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class 
Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = _importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over 
the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack - del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - del io - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" - _assertNotRegex = "assertNotRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - _assertNotRegex = "assertNotRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -def assertNotRegex(self, *args, **kwargs): - return getattr(self, _assertNotRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - try: - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - finally: - value = None - tb = None - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - try: - raise tp, value, tb - finally: - tb = None -""") - - -if sys.version_info[:2] > (3,): - exec_("""def raise_from(value, from_value): - try: - raise value from from_value - finally: - value = None -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - # This does exactly the same what the :func:`py3:functools.update_wrapper` - # function does on Python versions after 3.2. It sets the ``__wrapped__`` - # attribute on ``wrapper`` object and it doesn't raise an error if any of - # the attributes mentioned in ``assigned`` and ``updated`` are missing on - # ``wrapped`` object. - def _update_wrapper(wrapper, wrapped, - assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - for attr in assigned: - try: - value = getattr(wrapped, attr) - except AttributeError: - continue - else: - setattr(wrapper, attr, value) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, {})) - wrapper.__wrapped__ = wrapped - return wrapper - _update_wrapper.__doc__ = functools.update_wrapper.__doc__ - - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - return functools.partial(_update_wrapper, wrapped=wrapped, - assigned=assigned, updated=updated) - wraps.__doc__ = functools.wraps.__doc__ - -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(type): - - def __new__(cls, name, this_bases, d): - if sys.version_info[:2] >= (3, 7): - # This version introduced PEP 560 that requires a bit - # of extra care (we mimic what is done by __build_class__). 
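# [Editor's note, not part of the original file: types.resolve_bases(),
# added by PEP 560 in Python 3.7, expands any entry of `bases` that defines
# __mro_entries__; when the resolved tuple differs from the original,
# __build_class__ records the original tuple as __orig_bases__, which is
# exactly what the branch below reproduces.]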
- resolved_bases = types.resolve_bases(bases) - if resolved_bases is not bases: - d['__orig_bases__'] = bases - else: - resolved_bases = bases - return meta(name, resolved_bases, d) - - @classmethod - def __prepare__(cls, name, this_bases): - return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - if hasattr(cls, '__qualname__'): - orig_vars['__qualname__'] = cls.__qualname__ - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def ensure_binary(s, encoding='utf-8', errors='strict'): - """Coerce **s** to six.binary_type. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> encoded to `bytes` - - `bytes` -> `bytes` - """ - if isinstance(s, binary_type): - return s - if isinstance(s, text_type): - return s.encode(encoding, errors) - raise TypeError("not expecting type '%s'" % type(s)) - - -def ensure_str(s, encoding='utf-8', errors='strict'): - """Coerce *s* to `str`. - - For Python 2: - - `unicode` -> encoded to `str` - - `str` -> `str` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - # Optimization: Fast return for the common case. - if type(s) is str: - return s - if PY2 and isinstance(s, text_type): - return s.encode(encoding, errors) - elif PY3 and isinstance(s, binary_type): - return s.decode(encoding, errors) - elif not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) - return s - - -def ensure_text(s, encoding='utf-8', errors='strict'): - """Coerce *s* to six.text_type. - - For Python 2: - - `unicode` -> `unicode` - - `str` -> `unicode` - - For Python 3: - - `str` -> `str` - - `bytes` -> decoded to `str` - """ - if isinstance(s, binary_type): - return s.decode(encoding, errors) - elif isinstance(s, text_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) - - -def python_2_unicode_compatible(klass): - """ - A class decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. -__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) 
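(Editor's aside: `with_metaclass` and `add_metaclass` above are six's portable alternatives to Python 2's `__metaclass__` attribute and Python 3's `metaclass=` keyword, which are mutually incompatible. A minimal sketch of both helpers — `Meta`, `WithMeta`, and `Decorated` are illustrative names:)

```
import six

class Meta(type):
    """Tag every class this metaclass creates."""
    def __new__(mcls, name, bases, namespace):
        cls = super(Meta, mcls).__new__(mcls, name, bases, namespace)
        cls.tagged = True
        return cls

class WithMeta(six.with_metaclass(Meta, object)):  # temporary-metaclass trick
    pass

@six.add_metaclass(Meta)  # decorator variant: rebuilds the class under Meta
class Decorated(object):
    pass

assert type(WithMeta) is Meta and WithMeta.tagged
assert type(Decorated) is Meta and Decorated.tagged
```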
-if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/spaces/Rayzggz/illi-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md b/spaces/Rayzggz/illi-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md deleted file mode 100644 index ebc4b2e6fb6b95ddc5f678b4a7f829466799f2da..0000000000000000000000000000000000000000 --- a/spaces/Rayzggz/illi-Bert-VITS2/bert/chinese-roberta-wwm-ext-large/README.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -language: -- zh -tags: -- bert -license: "apache-2.0" ---- - -# Please use 'Bert' related functions to load this model! - -## Chinese BERT with Whole Word Masking -For further accelerating Chinese natural language processing, we provide **Chinese pre-trained BERT with Whole Word Masking**. - -**[Pre-Training with Whole Word Masking for Chinese BERT](https://arxiv.org/abs/1906.08101)** -Yiming Cui, Wanxiang Che, Ting Liu, Bing Qin, Ziqing Yang, Shijin Wang, Guoping Hu - -This repository is developed based on: https://github.com/google-research/bert - -You may also be interested in: -- Chinese BERT series: https://github.com/ymcui/Chinese-BERT-wwm -- Chinese MacBERT: https://github.com/ymcui/MacBERT -- Chinese ELECTRA: https://github.com/ymcui/Chinese-ELECTRA -- Chinese XLNet: https://github.com/ymcui/Chinese-XLNet -- Knowledge Distillation Toolkit - TextBrewer: https://github.com/airaria/TextBrewer - -More resources by HFL: https://github.com/ymcui/HFL-Anthology - -## Citation -If you find the technical report or resources useful, please cite the following technical report in your paper. -- Primary: https://arxiv.org/abs/2004.13922 -``` -@inproceedings{cui-etal-2020-revisiting, - title = "Revisiting Pre-Trained Models for {C}hinese Natural Language Processing", - author = "Cui, Yiming and - Che, Wanxiang and - Liu, Ting and - Qin, Bing and - Wang, Shijin and - Hu, Guoping", - booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", - month = nov, - year = "2020", - address = "Online", - publisher = "Association for Computational Linguistics", - url = "https://www.aclweb.org/anthology/2020.findings-emnlp.58", - pages = "657--668", -} -``` -- Secondary: https://arxiv.org/abs/1906.08101 -``` -@article{chinese-bert-wwm, - title={Pre-Training with Whole Word Masking for Chinese BERT}, - author={Cui, Yiming and Che, Wanxiang and Liu, Ting and Qin, Bing and Yang, Ziqing and Wang, Shijin and Hu, Guoping}, - journal={arXiv preprint arXiv:1906.08101}, - year={2019} - } -``` diff --git a/spaces/Rbrq/DeticChatGPT/detic/modeling/roi_heads/zero_shot_classifier.py b/spaces/Rbrq/DeticChatGPT/detic/modeling/roi_heads/zero_shot_classifier.py deleted file mode 100644 index edf217c6dbe74fa68e4d7653488bdd5e2e0c2f0e..0000000000000000000000000000000000000000 --- a/spaces/Rbrq/DeticChatGPT/detic/modeling/roi_heads/zero_shot_classifier.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates.
-import numpy as np -import torch -from torch import nn -from torch.nn import functional as F -from detectron2.config import configurable -from detectron2.layers import Linear, ShapeSpec - -class ZeroShotClassifier(nn.Module): - @configurable - def __init__( - self, - input_shape: ShapeSpec, - *, - num_classes: int, - zs_weight_path: str, - zs_weight_dim: int = 512, - use_bias: float = 0.0, - norm_weight: bool = True, - norm_temperature: float = 50.0, - ): - super().__init__() - if isinstance(input_shape, int): # some backward compatibility - input_shape = ShapeSpec(channels=input_shape) - input_size = input_shape.channels * (input_shape.width or 1) * (input_shape.height or 1) - self.norm_weight = norm_weight - self.norm_temperature = norm_temperature - - self.use_bias = use_bias < 0 - if self.use_bias: - self.cls_bias = nn.Parameter(torch.ones(1) * use_bias) - - self.linear = nn.Linear(input_size, zs_weight_dim) - - if zs_weight_path == 'rand': - zs_weight = torch.randn((zs_weight_dim, num_classes)) - nn.init.normal_(zs_weight, std=0.01) - else: - zs_weight = torch.tensor( - np.load(zs_weight_path), - dtype=torch.float32).permute(1, 0).contiguous() # D x C - zs_weight = torch.cat( - [zs_weight, zs_weight.new_zeros((zs_weight_dim, 1))], - dim=1) # D x (C + 1) - - if self.norm_weight: - zs_weight = F.normalize(zs_weight, p=2, dim=0) - - if zs_weight_path == 'rand': - self.zs_weight = nn.Parameter(zs_weight) - else: - self.register_buffer('zs_weight', zs_weight) - - assert self.zs_weight.shape[1] == num_classes + 1, self.zs_weight.shape - - - @classmethod - def from_config(cls, cfg, input_shape): - return { - 'input_shape': input_shape, - 'num_classes': cfg.MODEL.ROI_HEADS.NUM_CLASSES, - 'zs_weight_path': cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH, - 'zs_weight_dim': cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_DIM, - 'use_bias': cfg.MODEL.ROI_BOX_HEAD.USE_BIAS, - 'norm_weight': cfg.MODEL.ROI_BOX_HEAD.NORM_WEIGHT, - 'norm_temperature': cfg.MODEL.ROI_BOX_HEAD.NORM_TEMP, - } - - def forward(self, x, classifier=None): - ''' - Inputs: - x: B x D' - classifier_info: (C', C' x D) - ''' - x = self.linear(x) - if classifier is not None: - zs_weight = classifier.permute(1, 0).contiguous() # D x C' - zs_weight = F.normalize(zs_weight, p=2, dim=0) \ - if self.norm_weight else zs_weight - else: - zs_weight = self.zs_weight - if self.norm_weight: - x = self.norm_temperature * F.normalize(x, p=2, dim=1) - x = torch.mm(x, zs_weight) - if self.use_bias: - x = x + self.cls_bias - return x \ No newline at end of file diff --git a/spaces/Realcat/image-matching-webui/hloc/extractors/cosplace.py b/spaces/Realcat/image-matching-webui/hloc/extractors/cosplace.py deleted file mode 100644 index 8d13a84d57d80bee090709623cce74453784844b..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/hloc/extractors/cosplace.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Code for loading models trained with CosPlace as a global features extractor -for geolocalization through image retrieval. -Multiple models are available with different backbones. Below is a summary of -models available (backbone : list of available output descriptors -dimensionality). For example you can use a model based on a ResNet50 with -descriptors dimensionality 1024. 
- ResNet18: [32, 64, 128, 256, 512] - ResNet50: [32, 64, 128, 256, 512, 1024, 2048] - ResNet101: [32, 64, 128, 256, 512, 1024, 2048] - ResNet152: [32, 64, 128, 256, 512, 1024, 2048] - VGG16: [ 64, 128, 256, 512] - -CosPlace paper: https://arxiv.org/abs/2204.02287 -""" - -import torch -import torchvision.transforms as tvf - -from ..utils.base_model import BaseModel - - -class CosPlace(BaseModel): - default_conf = {"backbone": "ResNet50", "fc_output_dim": 2048} - required_inputs = ["image"] - - def _init(self, conf): - self.net = torch.hub.load( - "gmberton/CosPlace", - "get_trained_model", - backbone=conf["backbone"], - fc_output_dim=conf["fc_output_dim"], - ).eval() - - mean = [0.485, 0.456, 0.406] - std = [0.229, 0.224, 0.225] - self.norm_rgb = tvf.Normalize(mean=mean, std=std) - - def _forward(self, data): - image = self.norm_rgb(data["image"]) - desc = self.net(image) - return { - "global_descriptor": desc, - } diff --git a/spaces/Redgon/bingo/src/components/user-menu.tsx b/spaces/Redgon/bingo/src/components/user-menu.tsx deleted file mode 100644 index 9bd1edc9cf9f39b63629b021f0c1186b1a7c1341..0000000000000000000000000000000000000000 --- a/spaces/Redgon/bingo/src/components/user-menu.tsx +++ /dev/null @@ -1,113 +0,0 @@ -'use client' - -import { useEffect, useState } from 'react' -import Image from 'next/image' -import { toast } from 'react-hot-toast' -import { Button } from '@/components/ui/button' -import pkg from '../../package.json' -import { - DropdownMenu, - DropdownMenuContent, - DropdownMenuItem, - DropdownMenuSeparator, - DropdownMenuTrigger -} from '@/components/ui/dropdown-menu' -import { IconCopy, IconExternalLink, IconGitHub } from '@/components/ui/icons' -import SettingIcon from '@/assets/images/settings.svg' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - -export function UserMenu() { - const [host, setHost] = useState('') - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - useEffect(() => { - setHost(location.host) - }, []) - - useEffect(() => { - if (isCopied) { - toast.success('ๅคๅˆถๆˆๅŠŸ') - } - }, [isCopied]) - return ( -
    - - - - - - - location.href='#dialog="settings"' - } - className="cursor-pointer" - > - ่ฎพ็ฝฎ็”จๆˆท - - - - location.href='#dialog="voice"' - } - className="cursor-pointer" - > - ่ฏญ้Ÿณ่ฎพ็ฝฎ - - - - - ๅผ€ๆบๅœฐๅ€ - - - - - - - - ๆ‰˜็ฎกๅœฐๅ€ - ๐Ÿค— - - - - - - - ๅคๅˆถ็ซ™็‚น - - - - - -
    ็‰ˆๆœฌไฟกๆฏ {pkg.version}
    ็ซ™็‚นๅŸŸๅ
    copyToClipboard(host)} className="flex gap-1 text-xs text-zinc-500 cursor-pointer"> - {host} -
    - ) -} diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/grid_rcnn.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/grid_rcnn.py deleted file mode 100644 index b6145a1464cd940bd4f98eaa15f6f9ecf6a10a20..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/detectors/grid_rcnn.py +++ /dev/null @@ -1,29 +0,0 @@ -from ..builder import DETECTORS -from .two_stage import TwoStageDetector - - -@DETECTORS.register_module() -class GridRCNN(TwoStageDetector): - """Grid R-CNN. - - This detector is the implementation of: - - Grid R-CNN (https://arxiv.org/abs/1811.12030) - - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688) - """ - - def __init__(self, - backbone, - rpn_head, - roi_head, - train_cfg, - test_cfg, - neck=None, - pretrained=None): - super(GridRCNN, self).__init__( - backbone=backbone, - neck=neck, - rpn_head=rpn_head, - roi_head=roi_head, - train_cfg=train_cfg, - test_cfg=test_cfg, - pretrained=pretrained) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/saconv.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/saconv.py deleted file mode 100644 index b4ee3978e097fca422805db4e31ae481006d7971..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer_base/mmcv/ops/saconv.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init -from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d -from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version - - -@CONV_LAYERS.register_module(name='SAC') -class SAConv2d(ConvAWS2d): - """SAC (Switchable Atrous Convolution) - - This is an implementation of SAC in DetectoRS - (https://arxiv.org/pdf/2006.02334.pdf). - - Args: - in_channels (int): Number of channels in the input image - out_channels (int): Number of channels produced by the convolution - kernel_size (int or tuple): Size of the convolving kernel - stride (int or tuple, optional): Stride of the convolution. Default: 1 - padding (int or tuple, optional): Zero-padding added to both sides of - the input. Default: 0 - padding_mode (string, optional): ``'zeros'``, ``'reflect'``, - ``'replicate'`` or ``'circular'``. Default: ``'zeros'`` - dilation (int or tuple, optional): Spacing between kernel elements. - Default: 1 - groups (int, optional): Number of blocked connections from input - channels to output channels. Default: 1 - bias (bool, optional): If ``True``, adds a learnable bias to the - output. Default: ``True`` - use_deform: If ``True``, replace convolution with deformable - convolution. Default: ``False``. 
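(Editor's addition, not in the original docstring: a shape-level sketch using only the constructor arguments documented above; the input sizes are illustrative.)

```
import torch

conv = SAConv2d(256, 256, kernel_size=3, padding=1)
x = torch.randn(2, 256, 64, 64)
y = conv(x)  # switch-weighted mix of the dilation-1 and dilation-3 branches
assert y.shape == (2, 256, 64, 64)  # stride 1 keeps the spatial size
```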
- """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - use_deform=False): - super().__init__( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - self.use_deform = use_deform - self.switch = nn.Conv2d( - self.in_channels, 1, kernel_size=1, stride=stride, bias=True) - self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size())) - self.pre_context = nn.Conv2d( - self.in_channels, self.in_channels, kernel_size=1, bias=True) - self.post_context = nn.Conv2d( - self.out_channels, self.out_channels, kernel_size=1, bias=True) - if self.use_deform: - self.offset_s = nn.Conv2d( - self.in_channels, - 18, - kernel_size=3, - padding=1, - stride=stride, - bias=True) - self.offset_l = nn.Conv2d( - self.in_channels, - 18, - kernel_size=3, - padding=1, - stride=stride, - bias=True) - self.init_weights() - - def init_weights(self): - constant_init(self.switch, 0, bias=1) - self.weight_diff.data.zero_() - constant_init(self.pre_context, 0) - constant_init(self.post_context, 0) - if self.use_deform: - constant_init(self.offset_s, 0) - constant_init(self.offset_l, 0) - - def forward(self, x): - # pre-context - avg_x = F.adaptive_avg_pool2d(x, output_size=1) - avg_x = self.pre_context(avg_x) - avg_x = avg_x.expand_as(x) - x = x + avg_x - # switch - avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect') - avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0) - switch = self.switch(avg_x) - # sac - weight = self._get_weight(self.weight) - zero_bias = torch.zeros( - self.out_channels, device=weight.device, dtype=weight.dtype) - - if self.use_deform: - offset = self.offset_s(avg_x) - out_s = deform_conv2d(x, offset, weight, self.stride, self.padding, - self.dilation, self.groups, 1) - else: - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.5.0')): - out_s = super().conv2d_forward(x, weight) - elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - # bias is a required argument of _conv_forward in torch 1.8.0 - out_s = super()._conv_forward(x, weight, zero_bias) - else: - out_s = super()._conv_forward(x, weight) - ori_p = self.padding - ori_d = self.dilation - self.padding = tuple(3 * p for p in self.padding) - self.dilation = tuple(3 * d for d in self.dilation) - weight = weight + self.weight_diff - if self.use_deform: - offset = self.offset_l(avg_x) - out_l = deform_conv2d(x, offset, weight, self.stride, self.padding, - self.dilation, self.groups, 1) - else: - if (TORCH_VERSION == 'parrots' - or digit_version(TORCH_VERSION) < digit_version('1.5.0')): - out_l = super().conv2d_forward(x, weight) - elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'): - # bias is a required argument of _conv_forward in torch 1.8.0 - out_l = super()._conv_forward(x, weight, zero_bias) - else: - out_l = super()._conv_forward(x, weight) - - out = switch * out_s + (1 - switch) * out_l - self.padding = ori_p - self.dilation = ori_d - # post-context - avg_x = F.adaptive_avg_pool2d(out, output_size=1) - avg_x = self.post_context(avg_x) - avg_x = avg_x.expand_as(out) - out = out + avg_x - return out diff --git a/spaces/Rongjiehuang/GenerSpeech/utils/text_norm.py b/spaces/Rongjiehuang/GenerSpeech/utils/text_norm.py deleted file mode 100644 index d0973cebc91e0525aeb6657e70012a1d37b5e6ff..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/GenerSpeech/utils/text_norm.py +++ /dev/null @@ -1,790 +0,0 @@ -# 
coding=utf-8 -# Authors: -# 2019.5 Zhiyang Zhou (https://github.com/Joee1995/chn_text_norm.git) -# 2019.9 Jiayu DU -# -# requirements: -# - python 3.X -# notes: python 2.X WILL fail or produce misleading results - -import sys, os, argparse, codecs, string, re - -# ================================================================================ # -# basic constant -# ================================================================================ # -CHINESE_DIGIS = u'้›ถไธ€ไบŒไธ‰ๅ››ไบ”ๅ…ญไธƒๅ…ซไน' -BIG_CHINESE_DIGIS_SIMPLIFIED = u'้›ถๅฃน่ดฐๅ่‚†ไผ้™†ๆŸ’ๆŒ็Ž–' -BIG_CHINESE_DIGIS_TRADITIONAL = u'้›ถๅฃน่ฒณๅƒ่‚†ไผ้™ธๆŸ’ๆŒ็Ž–' -SMALLER_BIG_CHINESE_UNITS_SIMPLIFIED = u'ๅ็™พๅƒไธ‡' -SMALLER_BIG_CHINESE_UNITS_TRADITIONAL = u'ๆ‹พไฝฐไปŸ่ฌ' -LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'ไบฟๅ…†ไบฌๅž“็งญ็ฉฐๆฒŸๆถงๆญฃ่ฝฝ' -LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'ๅ„„ๅ…†ไบฌๅž“็งญ็ฉฐๆบๆพ—ๆญฃ่ผ‰' -SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED = u'ๅ็™พๅƒไธ‡' -SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL = u'ๆ‹พไฝฐไปŸ่ฌ' - -ZERO_ALT = u'ใ€‡' -ONE_ALT = u'ๅนบ' -TWO_ALTS = [u'ไธค', u'ๅ…ฉ'] - -POSITIVE = [u'ๆญฃ', u'ๆญฃ'] -NEGATIVE = [u'่ดŸ', u'่ฒ '] -POINT = [u'็‚น', u'้ปž'] -# PLUS = [u'ๅŠ ', u'ๅŠ '] -# SIL = [u'ๆ ', u'ๆง“'] - -# ไธญๆ–‡ๆ•ฐๅญ—็ณป็ปŸ็ฑปๅž‹ -NUMBERING_TYPES = ['low', 'mid', 'high'] - -CURRENCY_NAMES = '(ไบบๆฐ‘ๅธ|็พŽๅ…ƒ|ๆ—ฅๅ…ƒ|่‹ฑ้•‘|ๆฌงๅ…ƒ|้ฉฌๅ…‹|ๆณ•้ƒŽ|ๅŠ ๆ‹ฟๅคงๅ…ƒ|ๆพณๅ…ƒ|ๆธฏๅธ|ๅ…ˆไปค|่Šฌๅ…ฐ้ฉฌๅ…‹|็ˆฑๅฐ”ๅ…ฐ้•‘|' \ - '้‡Œๆ‹‰|่ทๅ…ฐ็›พ|ๅŸƒๆ–ฏๅบ“ๅคš|ๆฏ”ๅกžๅก”|ๅฐๅฐผ็›พ|ๆž—ๅ‰็‰น|ๆ–ฐ่ฅฟๅ…ฐๅ…ƒ|ๆฏ”็ดข|ๅขๅธƒ|ๆ–ฐๅŠ ๅกๅ…ƒ|้Ÿฉๅ…ƒ|ๆณฐ้“ข)' -CURRENCY_UNITS = '((ไบฟ|ๅƒไธ‡|็™พไธ‡|ไธ‡|ๅƒ|็™พ)|(ไบฟ|ๅƒไธ‡|็™พไธ‡|ไธ‡|ๅƒ|็™พ|)ๅ…ƒ|(ไบฟ|ๅƒไธ‡|็™พไธ‡|ไธ‡|ๅƒ|็™พ|)ๅ—|่ง’|ๆฏ›|ๅˆ†)' -COM_QUANTIFIERS = '(ๅŒน|ๅผ |ๅบง|ๅ›ž|ๅœบ|ๅฐพ|ๆก|ไธช|้ฆ–|้˜™|้˜ต|็ฝ‘|็‚ฎ|้กถ|ไธ˜|ๆฃต|ๅช|ๆ”ฏ|่ขญ|่พ†|ๆŒ‘|ๆ‹…|้ข—|ๅฃณ|็ช |ๆ›ฒ|ๅข™|็พค|่…”|' \ - '็ ฃ|ๅบง|ๅฎข|่ดฏ|ๆ‰Ž|ๆ†|ๅˆ€|ไปค|ๆ‰“|ๆ‰‹|็ฝ—|ๅก|ๅฑฑ|ๅฒญ|ๆฑŸ|ๆบช|้’Ÿ|้˜Ÿ|ๅ•|ๅŒ|ๅฏน|ๅ‡บ|ๅฃ|ๅคด|่„š|ๆฟ|่ทณ|ๆž|ไปถ|่ดด|' \ - '้’ˆ|็บฟ|็ฎก|ๅ|ไฝ|่บซ|ๅ ‚|่ฏพ|ๆœฌ|้กต|ๅฎถ|ๆˆท|ๅฑ‚|ไธ|ๆฏซ|ๅŽ˜|ๅˆ†|้’ฑ|ไธค|ๆ–ค|ๆ‹…|้“ข|็Ÿณ|้’ง|้”ฑ|ๅฟฝ|(ๅƒ|ๆฏซ|ๅพฎ)ๅ…‹|' \ - 'ๆฏซ|ๅŽ˜|ๅˆ†|ๅฏธ|ๅฐบ|ไธˆ|้‡Œ|ๅฏป|ๅธธ|้“บ|็จ‹|(ๅƒ|ๅˆ†|ๅŽ˜|ๆฏซ|ๅพฎ)็ฑณ|ๆ’ฎ|ๅ‹บ|ๅˆ|ๅ‡|ๆ–—|็Ÿณ|็›˜|็ข—|็ขŸ|ๅ |ๆกถ|็ฌผ|็›†|' \ - '็›’|ๆฏ|้’Ÿ|ๆ–›|้”…|็ฐ‹|็ฏฎ|็›˜|ๆกถ|็ฝ|็“ถ|ๅฃถ|ๅฎ|็›|็ฎฉ|็ฎฑ|็…ฒ|ๅ•–|่ข‹|้’ต|ๅนด|ๆœˆ|ๆ—ฅ|ๅญฃ|ๅˆป|ๆ—ถ|ๅ‘จ|ๅคฉ|็ง’|ๅˆ†|ๆ—ฌ|' \ - '็บช|ๅฒ|ไธ–|ๆ›ด|ๅคœ|ๆ˜ฅ|ๅค|็ง‹|ๅ†ฌ|ไปฃ|ไผ|่พˆ|ไธธ|ๆณก|็ฒ’|้ข—|ๅนข|ๅ †|ๆก|ๆ น|ๆ”ฏ|้“|้ข|็‰‡|ๅผ |้ข—|ๅ—)' - -# punctuation information are based on Zhon project (https://github.com/tsroten/zhon.git) -CHINESE_PUNC_STOP = '๏ผ๏ผŸ๏ฝกใ€‚' -CHINESE_PUNC_NON_STOP = '๏ผ‚๏ผƒ๏ผ„๏ผ…๏ผ†๏ผ‡๏ผˆ๏ผ‰๏ผŠ๏ผ‹๏ผŒ๏ผ๏ผ๏ผš๏ผ›๏ผœ๏ผ๏ผž๏ผ ๏ผป๏ผผ๏ผฝ๏ผพ๏ผฟ๏ฝ€๏ฝ›๏ฝœ๏ฝ๏ฝž๏ฝŸ๏ฝ ๏ฝข๏ฝฃ๏ฝคใ€ใ€ƒใ€Šใ€‹ใ€Œใ€ใ€Žใ€ใ€ใ€‘ใ€”ใ€•ใ€–ใ€—ใ€˜ใ€™ใ€šใ€›ใ€œใ€ใ€žใ€Ÿใ€ฐใ€พใ€ฟโ€“โ€”โ€˜โ€™โ€›โ€œโ€โ€žโ€Ÿโ€ฆโ€ง๏น' -CHINESE_PUNC_LIST = CHINESE_PUNC_STOP + CHINESE_PUNC_NON_STOP - - -# ================================================================================ # -# basic class -# ================================================================================ # -class ChineseChar(object): - """ - ไธญๆ–‡ๅญ—็ฌฆ - ๆฏไธชๅญ—็ฌฆๅฏนๅบ”็ฎ€ไฝ“ๅ’Œ็นไฝ“, - e.g. 
็ฎ€ไฝ“ = '่ดŸ', ็นไฝ“ = '่ฒ ' - ่ฝฌๆขๆ—ถๅฏ่ฝฌๆขไธบ็ฎ€ไฝ“ๆˆ–็นไฝ“ - """ - - def __init__(self, simplified, traditional): - self.simplified = simplified - self.traditional = traditional - # self.__repr__ = self.__str__ - - def __str__(self): - return self.simplified or self.traditional or None - - def __repr__(self): - return self.__str__() - - -class ChineseNumberUnit(ChineseChar): - """ - ไธญๆ–‡ๆ•ฐๅญ—/ๆ•ฐไฝๅญ—็ฌฆ - ๆฏไธชๅญ—็ฌฆ้™ค็น็ฎ€ไฝ“ๅค–่ฟ˜ๆœ‰ไธ€ไธช้ขๅค–็š„ๅคงๅ†™ๅญ—็ฌฆ - e.g. '้™†' ๅ’Œ '้™ธ' - """ - - def __init__(self, power, simplified, traditional, big_s, big_t): - super(ChineseNumberUnit, self).__init__(simplified, traditional) - self.power = power - self.big_s = big_s - self.big_t = big_t - - def __str__(self): - return '10^{}'.format(self.power) - - @classmethod - def create(cls, index, value, numbering_type=NUMBERING_TYPES[1], small_unit=False): - - if small_unit: - return ChineseNumberUnit(power=index + 1, - simplified=value[0], traditional=value[1], big_s=value[1], big_t=value[1]) - elif numbering_type == NUMBERING_TYPES[0]: - return ChineseNumberUnit(power=index + 8, - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) - elif numbering_type == NUMBERING_TYPES[1]: - return ChineseNumberUnit(power=(index + 2) * 4, - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) - elif numbering_type == NUMBERING_TYPES[2]: - return ChineseNumberUnit(power=pow(2, index + 3), - simplified=value[0], traditional=value[1], big_s=value[0], big_t=value[1]) - else: - raise ValueError( - 'Counting type should be in {0} ({1} provided).'.format(NUMBERING_TYPES, numbering_type)) - - -class ChineseNumberDigit(ChineseChar): - """ - ไธญๆ–‡ๆ•ฐๅญ—ๅญ—็ฌฆ - """ - - def __init__(self, value, simplified, traditional, big_s, big_t, alt_s=None, alt_t=None): - super(ChineseNumberDigit, self).__init__(simplified, traditional) - self.value = value - self.big_s = big_s - self.big_t = big_t - self.alt_s = alt_s - self.alt_t = alt_t - - def __str__(self): - return str(self.value) - - @classmethod - def create(cls, i, v): - return ChineseNumberDigit(i, v[0], v[1], v[2], v[3]) - - -class ChineseMath(ChineseChar): - """ - ไธญๆ–‡ๆ•ฐไฝๅญ—็ฌฆ - """ - - def __init__(self, simplified, traditional, symbol, expression=None): - super(ChineseMath, self).__init__(simplified, traditional) - self.symbol = symbol - self.expression = expression - self.big_s = simplified - self.big_t = traditional - - -CC, CNU, CND, CM = ChineseChar, ChineseNumberUnit, ChineseNumberDigit, ChineseMath - - -class NumberSystem(object): - """ - ไธญๆ–‡ๆ•ฐๅญ—็ณป็ปŸ - """ - pass - - -class MathSymbol(object): - """ - ็”จไบŽไธญๆ–‡ๆ•ฐๅญ—็ณป็ปŸ็š„ๆ•ฐๅญฆ็ฌฆๅท (็น/็ฎ€ไฝ“), e.g. 
- positive = ['ๆญฃ', 'ๆญฃ'] - negative = ['่ดŸ', '่ฒ '] - point = ['็‚น', '้ปž'] - """ - - def __init__(self, positive, negative, point): - self.positive = positive - self.negative = negative - self.point = point - - def __iter__(self): - for v in self.__dict__.values(): - yield v - - -# class OtherSymbol(object): -# """ -# ๅ…ถไป–็ฌฆๅท -# """ -# -# def __init__(self, sil): -# self.sil = sil -# -# def __iter__(self): -# for v in self.__dict__.values(): -# yield v - - -# ================================================================================ # -# basic utils -# ================================================================================ # -def create_system(numbering_type=NUMBERING_TYPES[1]): - """ - ๆ นๆฎๆ•ฐๅญ—็ณป็ปŸ็ฑปๅž‹่ฟ”ๅ›žๅˆ›ๅปบ็›ธๅบ”็š„ๆ•ฐๅญ—็ณป็ปŸ๏ผŒ้ป˜่ฎคไธบ mid - NUMBERING_TYPES = ['low', 'mid', 'high']: ไธญๆ–‡ๆ•ฐๅญ—็ณป็ปŸ็ฑปๅž‹ - low: 'ๅ…†' = 'ไบฟ' * 'ๅ' = $10^{9}$, 'ไบฌ' = 'ๅ…†' * 'ๅ', etc. - mid: 'ๅ…†' = 'ไบฟ' * 'ไธ‡' = $10^{12}$, 'ไบฌ' = 'ๅ…†' * 'ไธ‡', etc. - high: 'ๅ…†' = 'ไบฟ' * 'ไบฟ' = $10^{16}$, 'ไบฌ' = 'ๅ…†' * 'ๅ…†', etc. - ่ฟ”ๅ›žๅฏนๅบ”็š„ๆ•ฐๅญ—็ณป็ปŸ - """ - - # chinese number units of 'ไบฟ' and larger - all_larger_units = zip( - LARGER_CHINESE_NUMERING_UNITS_SIMPLIFIED, LARGER_CHINESE_NUMERING_UNITS_TRADITIONAL) - larger_units = [CNU.create(i, v, numbering_type, False) - for i, v in enumerate(all_larger_units)] - # chinese number units of 'ๅ, ็™พ, ๅƒ, ไธ‡' - all_smaller_units = zip( - SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED, SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL) - smaller_units = [CNU.create(i, v, small_unit=True) - for i, v in enumerate(all_smaller_units)] - # digis - chinese_digis = zip(CHINESE_DIGIS, CHINESE_DIGIS, - BIG_CHINESE_DIGIS_SIMPLIFIED, BIG_CHINESE_DIGIS_TRADITIONAL) - digits = [CND.create(i, v) for i, v in enumerate(chinese_digis)] - digits[0].alt_s, digits[0].alt_t = ZERO_ALT, ZERO_ALT - digits[1].alt_s, digits[1].alt_t = ONE_ALT, ONE_ALT - digits[2].alt_s, digits[2].alt_t = TWO_ALTS[0], TWO_ALTS[1] - - # symbols - positive_cn = CM(POSITIVE[0], POSITIVE[1], '+', lambda x: x) - negative_cn = CM(NEGATIVE[0], NEGATIVE[1], '-', lambda x: -x) - point_cn = CM(POINT[0], POINT[1], '.', lambda x, - y: float(str(x) + '.' 
+ str(y))) - # sil_cn = CM(SIL[0], SIL[1], '-', lambda x, y: float(str(x) + '-' + str(y))) - system = NumberSystem() - system.units = smaller_units + larger_units - system.digits = digits - system.math = MathSymbol(positive_cn, negative_cn, point_cn) - # system.symbols = OtherSymbol(sil_cn) - return system - - -def chn2num(chinese_string, numbering_type=NUMBERING_TYPES[1]): - def get_symbol(char, system): - for u in system.units: - if char in [u.traditional, u.simplified, u.big_s, u.big_t]: - return u - for d in system.digits: - if char in [d.traditional, d.simplified, d.big_s, d.big_t, d.alt_s, d.alt_t]: - return d - for m in system.math: - if char in [m.traditional, m.simplified]: - return m - - def string2symbols(chinese_string, system): - int_string, dec_string = chinese_string, '' - for p in [system.math.point.simplified, system.math.point.traditional]: - if p in chinese_string: - int_string, dec_string = chinese_string.split(p) - break - return [get_symbol(c, system) for c in int_string], \ - [get_symbol(c, system) for c in dec_string] - - def correct_symbols(integer_symbols, system): - """ - ไธ€็™พๅ…ซ to ไธ€็™พๅ…ซๅ - ไธ€ไบฟไธ€ๅƒไธ‰็™พไธ‡ to ไธ€ไบฟ ไธ€ๅƒไธ‡ ไธ‰็™พไธ‡ - """ - - if integer_symbols and isinstance(integer_symbols[0], CNU): - if integer_symbols[0].power == 1: - integer_symbols = [system.digits[1]] + integer_symbols - - if len(integer_symbols) > 1: - if isinstance(integer_symbols[-1], CND) and isinstance(integer_symbols[-2], CNU): - integer_symbols.append( - CNU(integer_symbols[-2].power - 1, None, None, None, None)) - - result = [] - unit_count = 0 - for s in integer_symbols: - if isinstance(s, CND): - result.append(s) - unit_count = 0 - elif isinstance(s, CNU): - current_unit = CNU(s.power, None, None, None, None) - unit_count += 1 - - if unit_count == 1: - result.append(current_unit) - elif unit_count > 1: - for i in range(len(result)): - if isinstance(result[-i - 1], CNU) and result[-i - 1].power < current_unit.power: - result[-i - 1] = CNU(result[-i - 1].power + - current_unit.power, None, None, None, None) - return result - - def compute_value(integer_symbols): - """ - Compute the value. - When current unit is larger than previous unit, current unit * all previous units will be used as all previous units. - e.g. 
'ไธคๅƒไธ‡' = 2000 * 10000 not 2000 + 10000 - """ - value = [0] - last_power = 0 - for s in integer_symbols: - if isinstance(s, CND): - value[-1] = s.value - elif isinstance(s, CNU): - value[-1] *= pow(10, s.power) - if s.power > last_power: - value[:-1] = list(map(lambda v: v * - pow(10, s.power), value[:-1])) - last_power = s.power - value.append(0) - return sum(value) - - system = create_system(numbering_type) - int_part, dec_part = string2symbols(chinese_string, system) - int_part = correct_symbols(int_part, system) - int_str = str(compute_value(int_part)) - dec_str = ''.join([str(d.value) for d in dec_part]) - if dec_part: - return '{0}.{1}'.format(int_str, dec_str) - else: - return int_str - - -def num2chn(number_string, numbering_type=NUMBERING_TYPES[1], big=False, - traditional=False, alt_zero=False, alt_one=False, alt_two=True, - use_zeros=True, use_units=True): - def get_value(value_string, use_zeros=True): - - striped_string = value_string.lstrip('0') - - # record nothing if all zeros - if not striped_string: - return [] - - # record one digits - elif len(striped_string) == 1: - if use_zeros and len(value_string) != len(striped_string): - return [system.digits[0], system.digits[int(striped_string)]] - else: - return [system.digits[int(striped_string)]] - - # recursively record multiple digits - else: - result_unit = next(u for u in reversed( - system.units) if u.power < len(striped_string)) - result_string = value_string[:-result_unit.power] - return get_value(result_string) + [result_unit] + get_value(striped_string[-result_unit.power:]) - - system = create_system(numbering_type) - - int_dec = number_string.split('.') - if len(int_dec) == 1: - int_string = int_dec[0] - dec_string = "" - elif len(int_dec) == 2: - int_string = int_dec[0] - dec_string = int_dec[1] - else: - raise ValueError( - "invalid input num string with more than one dot: {}".format(number_string)) - - if use_units and len(int_string) > 1: - result_symbols = get_value(int_string) - else: - result_symbols = [system.digits[int(c)] for c in int_string] - dec_symbols = [system.digits[int(c)] for c in dec_string] - if dec_string: - result_symbols += [system.math.point] + dec_symbols - - if alt_two: - liang = CND(2, system.digits[2].alt_s, system.digits[2].alt_t, - system.digits[2].big_s, system.digits[2].big_t) - for i, v in enumerate(result_symbols): - if isinstance(v, CND) and v.value == 2: - next_symbol = result_symbols[i + - 1] if i < len(result_symbols) - 1 else None - previous_symbol = result_symbols[i - 1] if i > 0 else None - if isinstance(next_symbol, CNU) and isinstance(previous_symbol, (CNU, type(None))): - if next_symbol.power != 1 and ((previous_symbol is None) or (previous_symbol.power != 1)): - result_symbols[i] = liang - - # if big is True, 'ไธค' will not be used and `alt_two` has no impact on output - if big: - attr_name = 'big_' - if traditional: - attr_name += 't' - else: - attr_name += 's' - else: - if traditional: - attr_name = 'traditional' - else: - attr_name = 'simplified' - - result = ''.join([getattr(s, attr_name) for s in result_symbols]) - - # if not use_zeros: - # result = result.strip(getattr(system.digits[0], attr_name)) - - if alt_zero: - result = result.replace( - getattr(system.digits[0], attr_name), system.digits[0].alt_s) - - if alt_one: - result = result.replace( - getattr(system.digits[1], attr_name), system.digits[1].alt_s) - - for i, p in enumerate(POINT): - if result.startswith(p): - return CHINESE_DIGIS[0] + result - - # ^10, 11, .., 19 - if len(result) >= 2 and result[1] 
in [SMALLER_CHINESE_NUMERING_UNITS_SIMPLIFIED[0], - SMALLER_CHINESE_NUMERING_UNITS_TRADITIONAL[0]] and \ - result[0] in [CHINESE_DIGIS[1], BIG_CHINESE_DIGIS_SIMPLIFIED[1], BIG_CHINESE_DIGIS_TRADITIONAL[1]]: - result = result[1:] - - return result - - -# ================================================================================ # -# different types of rewriters -# ================================================================================ # -class Cardinal: - """ - CARDINAL็ฑป - """ - - def __init__(self, cardinal=None, chntext=None): - self.cardinal = cardinal - self.chntext = chntext - - def chntext2cardinal(self): - return chn2num(self.chntext) - - def cardinal2chntext(self): - return num2chn(self.cardinal) - - -class Digit: - """ - DIGIT็ฑป - """ - - def __init__(self, digit=None, chntext=None): - self.digit = digit - self.chntext = chntext - - # def chntext2digit(self): - # return chn2num(self.chntext) - - def digit2chntext(self): - return num2chn(self.digit, alt_two=False, use_units=False) - - -class TelePhone: - """ - TELEPHONE็ฑป - """ - - def __init__(self, telephone=None, raw_chntext=None, chntext=None): - self.telephone = telephone - self.raw_chntext = raw_chntext - self.chntext = chntext - - # def chntext2telephone(self): - # sil_parts = self.raw_chntext.split('') - # self.telephone = '-'.join([ - # str(chn2num(p)) for p in sil_parts - # ]) - # return self.telephone - - def telephone2chntext(self, fixed=False): - - if fixed: - sil_parts = self.telephone.split('-') - self.raw_chntext = ''.join([ - num2chn(part, alt_two=False, use_units=False) for part in sil_parts - ]) - self.chntext = self.raw_chntext.replace('', '') - else: - sp_parts = self.telephone.strip('+').split() - self.raw_chntext = ''.join([ - num2chn(part, alt_two=False, use_units=False) for part in sp_parts - ]) - self.chntext = self.raw_chntext.replace('', '') - return self.chntext - - -class Fraction: - """ - FRACTION็ฑป - """ - - def __init__(self, fraction=None, chntext=None): - self.fraction = fraction - self.chntext = chntext - - def chntext2fraction(self): - denominator, numerator = self.chntext.split('ๅˆ†ไน‹') - return chn2num(numerator) + '/' + chn2num(denominator) - - def fraction2chntext(self): - numerator, denominator = self.fraction.split('/') - return num2chn(denominator) + 'ๅˆ†ไน‹' + num2chn(numerator) - - -class Date: - """ - DATE็ฑป - """ - - def __init__(self, date=None, chntext=None): - self.date = date - self.chntext = chntext - - # def chntext2date(self): - # chntext = self.chntext - # try: - # year, other = chntext.strip().split('ๅนด', maxsplit=1) - # year = Digit(chntext=year).digit2chntext() + 'ๅนด' - # except ValueError: - # other = chntext - # year = '' - # if other: - # try: - # month, day = other.strip().split('ๆœˆ', maxsplit=1) - # month = Cardinal(chntext=month).chntext2cardinal() + 'ๆœˆ' - # except ValueError: - # day = chntext - # month = '' - # if day: - # day = Cardinal(chntext=day[:-1]).chntext2cardinal() + day[-1] - # else: - # month = '' - # day = '' - # date = year + month + day - # self.date = date - # return self.date - - def date2chntext(self): - date = self.date - try: - year, other = date.strip().split('ๅนด', 1) - year = Digit(digit=year).digit2chntext() + 'ๅนด' - except ValueError: - other = date - year = '' - if other: - try: - month, day = other.strip().split('ๆœˆ', 1) - month = Cardinal(cardinal=month).cardinal2chntext() + 'ๆœˆ' - except ValueError: - day = date - month = '' - if day: - day = Cardinal(cardinal=day[:-1]).cardinal2chntext() + day[-1] - else: - 
month = '' - day = '' - chntext = year + month + day - self.chntext = chntext - return self.chntext - - -class Money: - """ - MONEY็ฑป - """ - - def __init__(self, money=None, chntext=None): - self.money = money - self.chntext = chntext - - # def chntext2money(self): - # return self.money - - def money2chntext(self): - money = self.money - pattern = re.compile(r'(\d+(\.\d+)?)') - matchers = pattern.findall(money) - if matchers: - for matcher in matchers: - money = money.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext()) - self.chntext = money - return self.chntext - - -class Percentage: - """ - PERCENTAGE็ฑป - """ - - def __init__(self, percentage=None, chntext=None): - self.percentage = percentage - self.chntext = chntext - - def chntext2percentage(self): - return chn2num(self.chntext.strip().strip('็™พๅˆ†ไน‹')) + '%' - - def percentage2chntext(self): - return '็™พๅˆ†ไน‹' + num2chn(self.percentage.strip().strip('%')) - - -# ================================================================================ # -# NSW Normalizer -# ================================================================================ # -class NSWNormalizer: - def __init__(self, raw_text): - self.raw_text = '^' + raw_text + '$' - self.norm_text = '' - - def _particular(self): - text = self.norm_text - pattern = re.compile(r"(([a-zA-Z]+)ไบŒ([a-zA-Z]+))") - matchers = pattern.findall(text) - if matchers: - # print('particular') - for matcher in matchers: - text = text.replace(matcher[0], matcher[1] + '2' + matcher[2], 1) - self.norm_text = text - return self.norm_text - - def normalize(self, remove_punc=True): - text = self.raw_text - - # ่ง„่ŒƒๅŒ–ๆ—ฅๆœŸ - pattern = re.compile(r"\D+((([089]\d|(19|20)\d{2})ๅนด)?(\d{1,2}ๆœˆ(\d{1,2}[ๆ—ฅๅท])?)?)") - matchers = pattern.findall(text) - if matchers: - # print('date') - for matcher in matchers: - text = text.replace(matcher[0], Date(date=matcher[0]).date2chntext(), 1) - - # ่ง„่ŒƒๅŒ–้‡‘้’ฑ - pattern = re.compile(r"\D+((\d+(\.\d+)?)[ๅคšไฝ™ๅ‡ ]?" 
+ CURRENCY_UNITS + r"(\d" + CURRENCY_UNITS + r"?)?)")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('money')
-            for matcher in matchers:
-                text = text.replace(matcher[0], Money(money=matcher[0]).money2chntext(), 1)
-
-        # normalize landline/mobile phone numbers
-        # mobile
-        # http://www.jihaoba.com/news/show/13680
-        # China Mobile: 139ใ€138ใ€137ใ€136ใ€135ใ€134ใ€159ใ€158ใ€157ใ€150ใ€151ใ€152ใ€188ใ€187ใ€182ใ€183ใ€184ใ€178ใ€198
-        # China Unicom: 130ใ€131ใ€132ใ€156ใ€155ใ€186ใ€185ใ€176
-        # China Telecom: 133ใ€153ใ€189ใ€180ใ€181ใ€177
-        pattern = re.compile(r"\D((\+?86 ?)?1([38]\d|5[0-35-9]|7[678]|9[89])\d{8})\D")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('telephone')
-            for matcher in matchers:
-                text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(), 1)
-        # landline
-        pattern = re.compile(r"\D((0(10|2[1-3]|[3-9]\d{2})-?)?[1-9]\d{6,7})\D")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('fixed telephone')
-            for matcher in matchers:
-                text = text.replace(matcher[0], TelePhone(telephone=matcher[0]).telephone2chntext(fixed=True), 1)
-
-        # normalize fractions
-        pattern = re.compile(r"(\d+/\d+)")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('fraction')
-            for matcher in matchers:
-                text = text.replace(matcher, Fraction(fraction=matcher).fraction2chntext(), 1)
-
-        # normalize percentages
-        text = text.replace('๏ผ…', '%')
-        pattern = re.compile(r"(\d+(\.\d+)?%)")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('percentage')
-            for matcher in matchers:
-                text = text.replace(matcher[0], Percentage(percentage=matcher[0]).percentage2chntext(), 1)
-
-        # normalize cardinal + quantifier
-        pattern = re.compile(r"(\d+(\.\d+)?)[ๅคšไฝ™ๅ‡ ]?" + COM_QUANTIFIERS)
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('cardinal+quantifier')
-            for matcher in matchers:
-                text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
-
-        # normalize digit sequences (IDs, serial numbers)
-        pattern = re.compile(r"(\d{4,32})")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('digit')
-            for matcher in matchers:
-                text = text.replace(matcher, Digit(digit=matcher).digit2chntext(), 1)
-
-        # normalize plain cardinals
-        pattern = re.compile(r"(\d+(\.\d+)?)")
-        matchers = pattern.findall(text)
-        if matchers:
-            # print('cardinal')
-            for matcher in matchers:
-                text = text.replace(matcher[0], Cardinal(cardinal=matcher[0]).cardinal2chntext(), 1)
-
-        self.norm_text = text
-        self._particular()
-
-        text = self.norm_text.lstrip('^').rstrip('$')
-        if remove_punc:
-            # punctuation removal
-            old_chars = CHINESE_PUNC_LIST + string.punctuation  # includes all CN and EN punctuations
-            new_chars = ' ' * len(old_chars)
-            del_chars = ''
-            text = text.translate(str.maketrans(old_chars, new_chars, del_chars))
-        return text
-
-
-def nsw_test_case(raw_text):
-    print('I:' + raw_text)
-    print('O:' + NSWNormalizer(raw_text).normalize())
-    print('')
-
-
-def nsw_test():
-    nsw_test_case('ๅ›บ่ฏ๏ผš0595-23865596ๆˆ–23880880ใ€‚')
-    nsw_test_case('ๆ‰‹ๆœบ๏ผš+86 19859213959ๆˆ–15659451527ใ€‚')
-    nsw_test_case('ๅˆ†ๆ•ฐ๏ผš32477/76391ใ€‚')
-    nsw_test_case('็™พๅˆ†ๆ•ฐ๏ผš80.03%ใ€‚')
-    nsw_test_case('็ผ–ๅท๏ผš31520181154418ใ€‚')
-    nsw_test_case('็บฏๆ•ฐ๏ผš2983.07ๅ…‹ๆˆ–12345.60็ฑณใ€‚')
-    nsw_test_case('ๆ—ฅๆœŸ๏ผš1999ๅนด2ๆœˆ20ๆ—ฅๆˆ–09ๅนด3ๆœˆ15ๅทใ€‚')
-    nsw_test_case('้‡‘้’ฑ๏ผš12ๅ—5๏ผŒ34.5ๅ…ƒ๏ผŒ20.1ไธ‡')
-    nsw_test_case('็‰นๆฎŠ๏ผšO2Oๆˆ–B2Cใ€‚')
-    nsw_test_case('3456ไธ‡ๅจ')
-    nsw_test_case('2938ไธช')
-    nsw_test_case('938')
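    # A quick round-trip sketch of the two core converters used above
    # (expected values, assuming the module-level character tables are unchanged):
    #   num2chn('938')          -> 'ไน็™พไธ‰ๅๅ…ซ'
    #   chn2num('ไน็™พไธ‰ๅๅ…ซ')   -> '938'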
nsw_test_case('ไปŠๅคฉๅƒไบ†115ไธชๅฐ็ฌผๅŒ…231ไธช้ฆ’ๅคด') - nsw_test_case('ๆœ‰62๏ผ…็š„ๆฆ‚็އ') - - -if __name__ == '__main__': - # nsw_test() - - p = argparse.ArgumentParser() - p.add_argument('ifile', help='input filename, assume utf-8 encoding') - p.add_argument('ofile', help='output filename') - p.add_argument('--to_upper', action='store_true', help='convert to upper case') - p.add_argument('--to_lower', action='store_true', help='convert to lower case') - p.add_argument('--has_key', action='store_true', help="input text has Kaldi's key as first field.") - p.add_argument('--log_interval', type=int, default=10000, help='log interval in number of processed lines') - args = p.parse_args() - - ifile = codecs.open(args.ifile, 'r', 'utf8') - ofile = codecs.open(args.ofile, 'w+', 'utf8') - - n = 0 - for l in ifile: - key = '' - text = '' - if args.has_key: - cols = l.split(maxsplit=1) - key = cols[0] - if len(cols) == 2: - text = cols[1] - else: - text = '' - else: - text = l - - # cases - if args.to_upper and args.to_lower: - sys.stderr.write('text norm: to_upper OR to_lower?') - exit(1) - if args.to_upper: - text = text.upper() - if args.to_lower: - text = text.lower() - - # NSW(Non-Standard-Word) normalization - text = NSWNormalizer(text).normalize() - - # - if args.has_key: - ofile.write(key + '\t' + text) - else: - ofile.write(text) - - n += 1 - if n % args.log_interval == 0: - sys.stderr.write("text norm: {} lines done.\n".format(n)) - - sys.stderr.write("text norm: {} lines done in total.\n".format(n)) - - ifile.close() - ofile.close() diff --git a/spaces/Rongjiehuang/ProDiff/tasks/base_task.py b/spaces/Rongjiehuang/ProDiff/tasks/base_task.py deleted file mode 100644 index aa31903693c814af1e9a75cd64071e883dca4aa1..0000000000000000000000000000000000000000 --- a/spaces/Rongjiehuang/ProDiff/tasks/base_task.py +++ /dev/null @@ -1,355 +0,0 @@ -from itertools import chain - -from torch.utils.data import ConcatDataset -from torch.utils.tensorboard import SummaryWriter -import subprocess -import traceback -from datetime import datetime -from functools import wraps -from utils.hparams import hparams -import random -import sys -import numpy as np -from utils.trainer import Trainer -from torch import nn -import torch.utils.data -import utils -import logging -import os - -torch.multiprocessing.set_sharing_strategy(os.getenv('TORCH_SHARE_STRATEGY', 'file_system')) - -log_format = '%(asctime)s %(message)s' -logging.basicConfig(stream=sys.stdout, level=logging.INFO, - format=log_format, datefmt='%m/%d %I:%M:%S %p') - - -def data_loader(fn): - """ - Decorator to make any fx with this use the lazy property - :param fn: - :return: - """ - - wraps(fn) - attr_name = '_lazy_' + fn.__name__ - - def _get_data_loader(self): - try: - value = getattr(self, attr_name) - except AttributeError: - try: - value = fn(self) # Lazy evaluation, done only once. - except AttributeError as e: - # Guard against AttributeError suppression. (Issue #142) - traceback.print_exc() - error = f'{fn.__name__}: An AttributeError was encountered: ' + str(e) - raise RuntimeError(error) from e - setattr(self, attr_name, value) # Memoize evaluation. 
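        # Usage sketch (hypothetical subclass): decorating e.g. `train_dataloader`
        # caches the built loader on `self._lazy_train_dataloader`, so repeated
        # calls return the same object instead of rebuilding it:
        #
        #   class MyTask(BaseTask):              # hypothetical
        #       @data_loader
        #       def train_dataloader(self):
        #           return build_loader(...)     # hypothetical helper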
- return value - - return _get_data_loader - - -class BaseDataset(torch.utils.data.Dataset): - def __init__(self, shuffle): - super().__init__() - self.hparams = hparams - self.shuffle = shuffle - self.sort_by_len = hparams['sort_by_len'] - self.sizes = None - - @property - def _sizes(self): - return self.sizes - - def __getitem__(self, index): - raise NotImplementedError - - def collater(self, samples): - raise NotImplementedError - - def __len__(self): - return len(self._sizes) - - def num_tokens(self, index): - return self.size(index) - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - return min(self._sizes[index], hparams['max_frames']) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.shuffle: - indices = np.random.permutation(len(self)) - if self.sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return int(os.getenv('NUM_WORKERS', hparams['ds_workers'])) - - -class BaseConcatDataset(ConcatDataset): - def collater(self, samples): - return self.datasets[0].collater(samples) - - @property - def _sizes(self): - if not hasattr(self, 'sizes'): - self.sizes = list(chain.from_iterable([d._sizes for d in self.datasets])) - return self.sizes - - def size(self, index): - return min(self._sizes[index], hparams['max_frames']) - - def num_tokens(self, index): - return self.size(index) - - def ordered_indices(self): - """Return an ordered list of indices. Batches will be constructed based - on this order.""" - if self.datasets[0].shuffle: - indices = np.random.permutation(len(self)) - if self.datasets[0].sort_by_len: - indices = indices[np.argsort(np.array(self._sizes)[indices], kind='mergesort')] - else: - indices = np.arange(len(self)) - return indices - - @property - def num_workers(self): - return self.datasets[0].num_workers - - -class BaseTask(nn.Module): - def __init__(self, *args, **kwargs): - # dataset configs - super(BaseTask, self).__init__() - self.current_epoch = 0 - self.global_step = 0 - self.trainer = None - self.use_ddp = False - self.gradient_clip_norm = hparams['clip_grad_norm'] - self.gradient_clip_val = hparams.get('clip_grad_value', 0) - self.model = None - self.training_losses_meter = None - self.logger: SummaryWriter = None - - ###################### - # build model, dataloaders, optimizer, scheduler and tensorboard - ###################### - def build_model(self): - raise NotImplementedError - - @data_loader - def train_dataloader(self): - raise NotImplementedError - - @data_loader - def test_dataloader(self): - raise NotImplementedError - - @data_loader - def val_dataloader(self): - raise NotImplementedError - - def build_scheduler(self, optimizer): - return None - - def build_optimizer(self, model): - raise NotImplementedError - - def configure_optimizers(self): - optm = self.build_optimizer(self.model) - self.scheduler = self.build_scheduler(optm) - if isinstance(optm, (list, tuple)): - return optm - return [optm] - - def build_tensorboard(self, save_dir, name, version, **kwargs): - root_dir = os.path.join(save_dir, name) - os.makedirs(root_dir, exist_ok=True) - log_dir = os.path.join(root_dir, "version_" + str(version)) - self.logger = SummaryWriter(log_dir=log_dir, **kwargs) - - ###################### - # training - ###################### - 
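    # A minimal sketch of the subclass contract (hypothetical names; the
    # dataloader hooks above must be provided as well):
    #
    #   class MyTask(BaseTask):
    #       def build_model(self):
    #           self.model = MyNet()             # hypothetical module
    #           return self.model
    #
    #       def build_optimizer(self, model):
    #           return torch.optim.AdamW(model.parameters(), lr=hparams['lr'])
    #
    #       def _training_step(self, sample, batch_idx, optimizer_idx):
    #           loss = self.model(sample['x'], sample['y'])
    #           return loss, {'loss': loss}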
def on_train_start(self): - pass - - def on_epoch_start(self): - self.training_losses_meter = {'total_loss': utils.AvgrageMeter()} - - def _training_step(self, sample, batch_idx, optimizer_idx): - """ - - :param sample: - :param batch_idx: - :return: total loss: torch.Tensor, loss_log: dict - """ - raise NotImplementedError - - def training_step(self, sample, batch_idx, optimizer_idx=-1): - """ - - :param sample: - :param batch_idx: - :param optimizer_idx: - :return: {'loss': torch.Tensor, 'progress_bar': dict, 'tb_log': dict} - """ - loss_ret = self._training_step(sample, batch_idx, optimizer_idx) - if loss_ret is None: - return {'loss': None} - total_loss, log_outputs = loss_ret - log_outputs = utils.tensors_to_scalars(log_outputs) - for k, v in log_outputs.items(): - if k not in self.training_losses_meter: - self.training_losses_meter[k] = utils.AvgrageMeter() - if not np.isnan(v): - self.training_losses_meter[k].update(v) - self.training_losses_meter['total_loss'].update(total_loss.item()) - - if optimizer_idx >= 0: - log_outputs[f'lr_{optimizer_idx}'] = self.trainer.optimizers[optimizer_idx].param_groups[0]['lr'] - - progress_bar_log = log_outputs - tb_log = {f'tr/{k}': v for k, v in log_outputs.items()} - return { - 'loss': total_loss, - 'progress_bar': progress_bar_log, - 'tb_log': tb_log - } - - def on_before_optimization(self, opt_idx): - if self.gradient_clip_norm > 0: - torch.nn.utils.clip_grad_norm_(self.parameters(), self.gradient_clip_norm) - if self.gradient_clip_val > 0: - torch.nn.utils.clip_grad_value_(self.parameters(), self.gradient_clip_val) - - def on_after_optimization(self, epoch, batch_idx, optimizer, optimizer_idx): - if self.scheduler is not None: - self.scheduler.step(self.global_step // hparams['accumulate_grad_batches']) - - def on_epoch_end(self): - loss_outputs = {k: round(v.avg, 4) for k, v in self.training_losses_meter.items()} - print(f"Epoch {self.current_epoch} ended. Steps: {self.global_step}. {loss_outputs}") - - def on_train_end(self): - pass - - ###################### - # validation - ###################### - def validation_step(self, sample, batch_idx): - """ - - :param sample: - :param batch_idx: - :return: output: {"losses": {...}, "total_loss": float, ...} or (total loss: torch.Tensor, loss_log: dict) - """ - raise NotImplementedError - - def validation_end(self, outputs): - """ - - :param outputs: - :return: loss_output: dict - """ - all_losses_meter = {'total_loss': utils.AvgrageMeter()} - for output in outputs: - if len(output) == 0 or output is None: - continue - if isinstance(output, dict): - assert 'losses' in output, 'Key "losses" should exist in validation output.' 
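            # Two shapes are accepted per entry: a dict
            # {'losses': {...}, 'total_loss': float, 'nsamples': int} (this branch)
            # or a plain (total_loss, losses) tuple (the else-branch below).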
- n = output.pop('nsamples', 1) - losses = utils.tensors_to_scalars(output['losses']) - total_loss = output.get('total_loss', sum(losses.values())) - else: - assert len(output) == 2, 'Validation output should only consist of two elements: (total_loss, losses)' - n = 1 - total_loss, losses = output - losses = utils.tensors_to_scalars(losses) - if isinstance(total_loss, torch.Tensor): - total_loss = total_loss.item() - for k, v in losses.items(): - if k not in all_losses_meter: - all_losses_meter[k] = utils.AvgrageMeter() - all_losses_meter[k].update(v, n) - all_losses_meter['total_loss'].update(total_loss, n) - loss_output = {k: round(v.avg, 4) for k, v in all_losses_meter.items()} - print(f"| Valid results: {loss_output}") - return { - 'tb_log': {f'val/{k}': v for k, v in loss_output.items()}, - 'val_loss': loss_output['total_loss'] - } - - ###################### - # testing - ###################### - def test_start(self): - pass - - def test_step(self, sample, batch_idx): - return self.validation_step(sample, batch_idx) - - def test_end(self, outputs): - return self.validation_end(outputs) - - ###################### - # utils - ###################### - def load_ckpt(self, ckpt_base_dir, current_model_name=None, model_name='model', force=True, strict=True): - if current_model_name is None: - current_model_name = model_name - utils.load_ckpt(self.__getattr__(current_model_name), ckpt_base_dir, current_model_name, force, strict) - - ###################### - # start training/testing - ###################### - @classmethod - def start(cls): - os.environ['MASTER_PORT'] = str(random.randint(15000, 30000)) - random.seed(hparams['seed']) - np.random.seed(hparams['seed']) - work_dir = hparams['work_dir'] - trainer = Trainer( - work_dir=work_dir, - val_check_interval=hparams['val_check_interval'], - tb_log_interval=hparams['tb_log_interval'], - max_updates=hparams['max_updates'], - num_sanity_val_steps=hparams['num_sanity_val_steps'] if not hparams['validate'] else 10000, - accumulate_grad_batches=hparams['accumulate_grad_batches'], - print_nan_grads=hparams['print_nan_grads'], - resume_from_checkpoint=hparams.get('resume_from_checkpoint', 0), - amp=hparams['amp'], - # save ckpt - monitor_key=hparams['valid_monitor_key'], - monitor_mode=hparams['valid_monitor_mode'], - num_ckpt_keep=hparams['num_ckpt_keep'], - save_best=hparams['save_best'], - seed=hparams['seed'], - debug=hparams['debug'] - ) - if not hparams['inference']: # train - if len(hparams['save_codes']) > 0: - t = datetime.now().strftime('%Y%m%d%H%M%S') - code_dir = f'{work_dir}/codes/{t}' - subprocess.check_call(f'mkdir -p "{code_dir}"', shell=True) - for c in hparams['save_codes']: - if os.path.exists(c): - subprocess.check_call(f'rsync -av --exclude=__pycache__ "{c}" "{code_dir}/"', shell=True) - print(f"| Copied codes to {code_dir}.") - trainer.fit(cls) - else: - trainer.test(cls) - - def on_keyboard_interrupt(self): - pass diff --git a/spaces/ServerX/PorcoDiaz/rvc_for_realtime.py b/spaces/ServerX/PorcoDiaz/rvc_for_realtime.py deleted file mode 100644 index 55070f668c385ba0a9ba50989b282448cd75e59b..0000000000000000000000000000000000000000 --- a/spaces/ServerX/PorcoDiaz/rvc_for_realtime.py +++ /dev/null @@ -1,297 +0,0 @@ -import faiss, torch, traceback, parselmouth, numpy as np, torchcrepe, torch.nn as nn, pyworld -from fairseq import checkpoint_utils -from lib.infer_pack.models import ( - SynthesizerTrnMs256NSFsid, - SynthesizerTrnMs256NSFsid_nono, - SynthesizerTrnMs768NSFsid, - SynthesizerTrnMs768NSFsid_nono, -) -import os, sys -from 
time import time as ttime -import torch.nn.functional as F -import scipy.signal as signal - -now_dir = os.getcwd() -sys.path.append(now_dir) -from configs.config import Config -from multiprocessing import Manager as M - -mm = M() -config = Config() - - -class RVC: - def __init__( - self, key, pth_path, index_path, index_rate, n_cpu, inp_q, opt_q, device - ) -> None: - """ - ๅˆๅง‹ๅŒ– - """ - try: - global config - self.inp_q = inp_q - self.opt_q = opt_q - self.device = device - self.f0_up_key = key - self.time_step = 160 / 16000 * 1000 - self.f0_min = 50 - self.f0_max = 1100 - self.f0_mel_min = 1127 * np.log(1 + self.f0_min / 700) - self.f0_mel_max = 1127 * np.log(1 + self.f0_max / 700) - self.sr = 16000 - self.window = 160 - self.n_cpu = n_cpu - if index_rate != 0: - self.index = faiss.read_index(index_path) - self.big_npy = self.index.reconstruct_n(0, self.index.ntotal) - print("index search enabled") - self.index_rate = index_rate - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - self.model = hubert_model - cpt = torch.load(pth_path, map_location="cpu") - self.tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] - self.if_f0 = cpt.get("f0", 1) - self.version = cpt.get("version", "v1") - if self.version == "v1": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs256NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - elif self.version == "v2": - if self.if_f0 == 1: - self.net_g = SynthesizerTrnMs768NSFsid( - *cpt["config"], is_half=config.is_half - ) - else: - self.net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"]) - del self.net_g.enc_q - print(self.net_g.load_state_dict(cpt["weight"], strict=False)) - self.net_g.eval().to(device) - if config.is_half: - self.net_g = self.net_g.half() - else: - self.net_g = self.net_g.float() - self.is_half = config.is_half - except: - print(traceback.format_exc()) - - def get_f0_post(self, f0): - f0_min = self.f0_min - f0_max = self.f0_max - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int_) - return f0_coarse, f0bak - - def get_f0(self, x, f0_up_key, n_cpu, method="harvest"): - n_cpu = int(n_cpu) - if method == "crepe": - return self.get_f0_crepe(x, f0_up_key) - if method == "rmvpe": - return self.get_f0_rmvpe(x, f0_up_key) - if method == "pm": - p_len = x.shape[0] // 160 - f0 = ( - parselmouth.Sound(x, 16000) - .to_pitch_ac( - time_step=0.01, - voicing_threshold=0.6, - pitch_floor=50, - pitch_ceiling=1100, - ) - .selected_array["frequency"] - ) - - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - print(pad_size, p_len - len(f0) - pad_size) - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - if n_cpu == 1: - f0, t = pyworld.harvest( - x.astype(np.double), - fs=16000, - f0_ceil=1100, - f0_floor=50, - frame_period=10, - ) - f0 = signal.medfilt(f0, 3) - f0 *= 
pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - f0bak = np.zeros(x.shape[0] // 160, dtype=np.float64) - length = len(x) - part_length = int(length / n_cpu / 160) * 160 - ts = ttime() - res_f0 = mm.dict() - for idx in range(n_cpu): - tail = part_length * (idx + 1) + 320 - if idx == 0: - self.inp_q.put((idx, x[:tail], res_f0, n_cpu, ts)) - else: - self.inp_q.put( - (idx, x[part_length * idx - 320 : tail], res_f0, n_cpu, ts) - ) - while 1: - res_ts = self.opt_q.get() - if res_ts == ts: - break - f0s = [i[1] for i in sorted(res_f0.items(), key=lambda x: x[0])] - for idx, f0 in enumerate(f0s): - if idx == 0: - f0 = f0[:-3] - elif idx != n_cpu - 1: - f0 = f0[2:-3] - else: - f0 = f0[2:-1] - f0bak[ - part_length * idx // 160 : part_length * idx // 160 + f0.shape[0] - ] = f0 - f0bak = signal.medfilt(f0bak, 3) - f0bak *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0bak) - - def get_f0_crepe(self, x, f0_up_key): - audio = torch.tensor(np.copy(x))[None].float() - f0, pd = torchcrepe.predict( - audio, - self.sr, - 160, - self.f0_min, - self.f0_max, - "full", - batch_size=512, - device=self.device, - return_periodicity=True, - ) - pd = torchcrepe.filter.median(pd, 3) - f0 = torchcrepe.filter.mean(f0, 3) - f0[pd < 0.1] = 0 - f0 = f0[0].cpu().numpy() - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def get_f0_rmvpe(self, x, f0_up_key): - if hasattr(self, "model_rmvpe") == False: - from infer.lib.rmvpe import RMVPE - - print("loading rmvpe model") - self.model_rmvpe = RMVPE( - "rmvpe.pt", is_half=self.is_half, device=self.device - ) - # self.model_rmvpe = RMVPE("aug2_58000_half.pt", is_half=self.is_half, device=self.device) - f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03) - f0 *= pow(2, f0_up_key / 12) - return self.get_f0_post(f0) - - def infer( - self, - feats: torch.Tensor, - indata: np.ndarray, - rate1, - rate2, - cache_pitch, - cache_pitchf, - f0method, - ) -> np.ndarray: - feats = feats.view(1, -1) - if config.is_half: - feats = feats.half() - else: - feats = feats.float() - feats = feats.to(self.device) - t1 = ttime() - with torch.no_grad(): - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - inputs = { - "source": feats, - "padding_mask": padding_mask, - "output_layer": 9 if self.version == "v1" else 12, - } - logits = self.model.extract_features(**inputs) - feats = ( - self.model.final_proj(logits[0]) if self.version == "v1" else logits[0] - ) - t2 = ttime() - try: - if hasattr(self, "index") and self.index_rate != 0: - leng_replace_head = int(rate1 * feats[0].shape[0]) - npy = feats[0][-leng_replace_head:].cpu().numpy().astype("float32") - score, ix = self.index.search(npy, k=8) - weight = np.square(1 / score) - weight /= weight.sum(axis=1, keepdims=True) - npy = np.sum(self.big_npy[ix] * np.expand_dims(weight, axis=2), axis=1) - if config.is_half: - npy = npy.astype("float16") - feats[0][-leng_replace_head:] = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * self.index_rate - + (1 - self.index_rate) * feats[0][-leng_replace_head:] - ) - else: - print("index search FAIL or disabled") - except: - traceback.print_exc() - print("index search FAIL") - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t3 = ttime() - if self.if_f0 == 1: - pitch, pitchf = self.get_f0(indata, self.f0_up_key, self.n_cpu, f0method) - cache_pitch[:] = np.append(cache_pitch[pitch[:-1].shape[0] :], pitch[:-1]) - cache_pitchf[:] = np.append( - cache_pitchf[pitchf[:-1].shape[0] :], pitchf[:-1] - ) - p_len = min(feats.shape[1], 
13000, cache_pitch.shape[0]) - else: - cache_pitch, cache_pitchf = None, None - p_len = min(feats.shape[1], 13000) - t4 = ttime() - feats = feats[:, :p_len, :] - if self.if_f0 == 1: - cache_pitch = cache_pitch[:p_len] - cache_pitchf = cache_pitchf[:p_len] - cache_pitch = torch.LongTensor(cache_pitch).unsqueeze(0).to(self.device) - cache_pitchf = torch.FloatTensor(cache_pitchf).unsqueeze(0).to(self.device) - p_len = torch.LongTensor([p_len]).to(self.device) - ii = 0 # sid - sid = torch.LongTensor([ii]).to(self.device) - with torch.no_grad(): - if self.if_f0 == 1: - infered_audio = ( - self.net_g.infer( - feats, p_len, cache_pitch, cache_pitchf, sid, rate2 - )[0][0, 0] - .data.cpu() - .float() - ) - else: - infered_audio = ( - self.net_g.infer(feats, p_len, sid, rate2)[0][0, 0] - .data.cpu() - .float() - ) - t5 = ttime() - print("time->fea-index-f0-model:", t2 - t1, t3 - t2, t4 - t3, t5 - t4) - return infered_audio diff --git a/spaces/ShapeNet/shapenet-explorer/README.md b/spaces/ShapeNet/shapenet-explorer/README.md deleted file mode 100644 index b432120c81c9dc746c163dfd587619c70a2cc6f4..0000000000000000000000000000000000000000 --- a/spaces/ShapeNet/shapenet-explorer/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Shapenet Explorer -emoji: ๐ŸŒ -colorFrom: purple -colorTo: red -sdk: gradio -sdk_version: 3.9 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/modules/conditioners.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/modules/conditioners.py deleted file mode 100644 index d10ac8dc96466375379c883cd62f7c04a1bb0a73..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/modules/conditioners.py +++ /dev/null @@ -1,1411 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
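# Minimal usage sketch (illustrative values, not from the original docs):
# conditioners map raw attributes to dense (embedding, mask) pairs, e.g.
#
#   cond = T5Conditioner('t5-base', output_dim=512, finetune=False, device='cpu')
#   embeds, mask = cond(cond.tokenize(['lofi hip hop beat']))
#   # embeds: [B, T, 512]; mask zeroes out padded positions.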
- -from collections import defaultdict -from copy import deepcopy -from dataclasses import dataclass, field -from itertools import chain -import logging -import math -from pathlib import Path -import random -import re -import typing as tp -import warnings - -import einops -from num2words import num2words -import spacy -from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore -import torch -from torch import nn -import torch.nn.functional as F -from torch.nn.utils.rnn import pad_sequence - -from .chroma import ChromaExtractor -from .streaming import StreamingModule -from .transformer import create_sin_embedding -from ..data.audio import audio_read -from ..data.audio_dataset import SegmentInfo -from ..data.audio_utils import convert_audio -from ..environment import AudioCraftEnvironment -from ..quantization import ResidualVectorQuantizer -from ..utils.autocast import TorchAutocast -from ..utils.cache import EmbeddingCache -from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once - - -logger = logging.getLogger(__name__) -TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) -ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask - - -class WavCondition(tp.NamedTuple): - wav: torch.Tensor - length: torch.Tensor - sample_rate: tp.List[int] - path: tp.List[tp.Optional[str]] = [] - seek_time: tp.List[tp.Optional[float]] = [] - - -class JointEmbedCondition(tp.NamedTuple): - wav: torch.Tensor - text: tp.List[tp.Optional[str]] - length: torch.Tensor - sample_rate: tp.List[int] - path: tp.List[tp.Optional[str]] = [] - seek_time: tp.List[tp.Optional[float]] = [] - - -@dataclass -class ConditioningAttributes: - text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) - wav: tp.Dict[str, WavCondition] = field(default_factory=dict) - joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) - - def __getitem__(self, item): - return getattr(self, item) - - @property - def text_attributes(self): - return self.text.keys() - - @property - def wav_attributes(self): - return self.wav.keys() - - @property - def joint_embed_attributes(self): - return self.joint_embed.keys() - - @property - def attributes(self): - return { - "text": self.text_attributes, - "wav": self.wav_attributes, - "joint_embed": self.joint_embed_attributes, - } - - def to_flat_dict(self): - return { - **{f"text.{k}": v for k, v in self.text.items()}, - **{f"wav.{k}": v for k, v in self.wav.items()}, - **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} - } - - @classmethod - def from_flat_dict(cls, x): - out = cls() - for k, v in x.items(): - kind, att = k.split(".") - out[kind][att] = v - return out - - -class SegmentWithAttributes(SegmentInfo): - """Base class for all dataclasses that are used for conditioning. - All child classes should implement `to_condition_attributes` that converts - the existing attributes to a dataclass of type ConditioningAttributes. - """ - def to_condition_attributes(self) -> ConditioningAttributes: - raise NotImplementedError() - - -def nullify_condition(condition: ConditionType, dim: int = 1): - """Transform an input condition to a null condition. - The way it is done by converting it to a single zero vector similarly - to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. 
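    For example, a (cond, mask) pair with cond of shape [B, T, D] and dim=1 comes
    back as a zero cond of shape [B, 1, D] with an all-zero mask of shape [B, 1].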
- - Args: - condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) - dim (int): The dimension that will be truncated (should be the time dimension) - WARNING!: dim should not be the batch dimension! - Returns: - ConditionType: A tuple of null condition and mask - """ - assert dim != 0, "dim cannot be the batch dimension!" - assert isinstance(condition, tuple) and \ - isinstance(condition[0], torch.Tensor) and \ - isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" - cond, mask = condition - B = cond.shape[0] - last_dim = cond.dim() - 1 - out = cond.transpose(dim, last_dim) - out = 0. * out[..., :1] - out = out.transpose(dim, last_dim) - mask = torch.zeros((B, 1), device=out.device).int() - assert cond.dim() == out.dim() - return out, mask - - -def nullify_wav(cond: WavCondition) -> WavCondition: - """Transform a WavCondition to a nullified WavCondition. - It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. - - Args: - cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. - Returns: - WavCondition: Nullified wav condition. - """ - null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) - return WavCondition( - wav=null_wav, - length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), - sample_rate=cond.sample_rate, - path=[None] * cond.wav.shape[0], - seek_time=[None] * cond.wav.shape[0], - ) - - -def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: - """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, - and replacing metadata by dummy attributes. - - Args: - cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. - """ - null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) - return JointEmbedCondition( - wav=null_wav, text=[None] * len(embed.text), - length=torch.LongTensor([0]).to(embed.wav.device), - sample_rate=embed.sample_rate, - path=[None] * embed.wav.shape[0], - seek_time=[0] * embed.wav.shape[0], - ) - - -class Tokenizer: - """Base tokenizer implementation - (in case we want to introduce more advances tokenizers in the future). - """ - def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - raise NotImplementedError() - - -class WhiteSpaceTokenizer(Tokenizer): - """This tokenizer should be used for natural language descriptions. - For example: - ["he didn't, know he's going home.", 'shorter sentence'] => - [[78, 62, 31, 4, 78, 25, 19, 34], - [59, 77, 0, 0, 0, 0, 0, 0]] - """ - PUNCTUATION = "?:!.,;" - - def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", - lemma: bool = True, stopwords: bool = True) -> None: - self.n_bins = n_bins - self.pad_idx = pad_idx - self.lemma = lemma - self.stopwords = stopwords - try: - self.nlp = spacy.load(language) - except IOError: - spacy.cli.download(language) # type: ignore - self.nlp = spacy.load(language) - - @tp.no_type_check - def __call__(self, texts: tp.List[tp.Optional[str]], - return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Take a list of strings and convert them to a tensor of indices. - - Args: - texts (list[str]): List of strings. - return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. 
-        Returns:
-            tuple[torch.Tensor, torch.Tensor]:
-                - Indices of words in the LUT.
-                - And a mask indicating where the padding tokens are.
-        """
-        output, lengths = [], []
-        texts = deepcopy(texts)
-        for i, text in enumerate(texts):
-            # if current sample doesn't have a certain attribute, replace with pad token
-            if text is None:
-                output.append(torch.Tensor([self.pad_idx]))
-                lengths.append(0)
-                continue
-
-            # convert numbers to words
-            text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text)  # type: ignore
-            # normalize text
-            text = self.nlp(text)  # type: ignore
-            # remove stopwords
-            if self.stopwords:
-                text = [w for w in text if not w.is_stop]  # type: ignore
-            # remove punctuation
-            text = [w for w in text if w.text not in self.PUNCTUATION]  # type: ignore
-            # lemmatize if needed
-            text = [getattr(t, "lemma_" if self.lemma else "text") for t in text]  # type: ignore
-
-            texts[i] = " ".join(text)
-            lengths.append(len(text))
-            # convert to tensor
-            tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text])
-            output.append(tokens)
-
-        mask = length_to_mask(torch.IntTensor(lengths)).int()
-        padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t()
-        if return_text:
-            return padded_output, mask, texts  # type: ignore
-        return padded_output, mask
-
-
-class NoopTokenizer(Tokenizer):
-    """This tokenizer should be used for global conditioners such as: artist, genre, key, etc.
-    The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split
-    strings, so "Jeff Buckley" will get its own index. Whereas WhiteSpaceTokenizer will
-    split it into ["Jeff", "Buckley"] and return an index per word.
-
-    For example:
-    ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101]
-    ["Metal", "Rock", "Classical"] => [0, 223, 51]
-    """
-    def __init__(self, n_bins: int, pad_idx: int = 0):
-        self.n_bins = n_bins
-        self.pad_idx = pad_idx
-
-    def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]:
-        output, lengths = [], []
-        for text in texts:
-            # if current sample doesn't have a certain attribute, replace with pad token
-            if text is None:
-                output.append(self.pad_idx)
-                lengths.append(0)
-            else:
-                output.append(hash_trick(text, self.n_bins))
-                lengths.append(1)
-
-        tokens = torch.LongTensor(output).unsqueeze(1)
-        mask = length_to_mask(torch.IntTensor(lengths)).int()
-        return tokens, mask
-
-
-class BaseConditioner(nn.Module):
-    """Base model for all conditioner modules.
-    We allow the output dim to be different from the hidden dim for two reasons:
-    1) keep our LUTs small when the vocab is large;
-    2) make all condition dims consistent.
-
-    Args:
-        dim (int): Hidden dim of the model.
-        output_dim (int): Output dim of the conditioner.
-    """
-    def __init__(self, dim: int, output_dim: int):
-        super().__init__()
-        self.dim = dim
-        self.output_dim = output_dim
-        self.output_proj = nn.Linear(dim, output_dim)
-
-    def tokenize(self, *args, **kwargs) -> tp.Any:
-        """Should be any part of the processing that will lead to a synchronization
-        point, e.g. BPE tokenization with transfer to the GPU.
-
-        The returned value will be saved and returned later when calling forward().
-        """
-        raise NotImplementedError()
-
-    def forward(self, inputs: tp.Any) -> ConditionType:
-        """Gets input that should be used as conditioning (e.g., genre, description or a waveform).
-        Outputs a ConditionType, after the input data was embedded as a dense vector.
- - Returns: - ConditionType: - - A tensor of size [B, T, D] where B is the batch size, T is the length of the - output embedding and D is the dimension of the embedding. - - And a mask indicating where the padding tokens. - """ - raise NotImplementedError() - - -class TextConditioner(BaseConditioner): - ... - - -class LUTConditioner(TextConditioner): - """Lookup table TextConditioner. - - Args: - n_bins (int): Number of bins. - dim (int): Hidden dim of the model (text-encoder/LUT). - output_dim (int): Output dim of the conditioner. - tokenizer (str): Name of the tokenizer. - pad_idx (int, optional): Index for padding token. Defaults to 0. - """ - def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): - super().__init__(dim, output_dim) - self.embed = nn.Embedding(n_bins, dim) - self.tokenizer: Tokenizer - if tokenizer == 'whitespace': - self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) - elif tokenizer == 'noop': - self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) - else: - raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: - device = self.embed.weight.device - tokens, mask = self.tokenizer(x) - tokens, mask = tokens.to(device), mask.to(device) - return tokens, mask - - def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: - tokens, mask = inputs - embeds = self.embed(tokens) - embeds = self.output_proj(embeds) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class T5Conditioner(TextConditioner): - """T5-based TextConditioner. - - Args: - name (str): Name of the T5 model. - output_dim (int): Output dim of the conditioner. - finetune (bool): Whether to fine-tune T5 at train time. - device (str): Device for T5 Conditioner. - autocast_dtype (tp.Optional[str], optional): Autocast dtype. - word_dropout (float, optional): Word dropout probability. - normalize_text (bool, optional): Whether to apply text normalization. - """ - MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", - "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", - "google/flan-t5-xl", "google/flan-t5-xxl"] - MODELS_DIMS = { - "t5-small": 512, - "t5-base": 768, - "t5-large": 1024, - "t5-3b": 1024, - "t5-11b": 1024, - "google/flan-t5-small": 512, - "google/flan-t5-base": 768, - "google/flan-t5-large": 1024, - "google/flan-t5-3b": 1024, - "google/flan-t5-11b": 1024, - } - - def __init__(self, name: str, output_dim: int, finetune: bool, device: str, - autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., - normalize_text: bool = False): - assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" - super().__init__(self.MODELS_DIMS[name], output_dim) - self.device = device - self.name = name - self.finetune = finetune - self.word_dropout = word_dropout - if autocast_dtype is None or self.device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - if self.device != 'cpu': - logger.warning("T5 has no autocast, this might lead to NaN") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
- # thanks https://gist.github.com/simon-weber/7853144 - previous_level = logging.root.manager.disable - logging.disable(logging.ERROR) - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - try: - self.t5_tokenizer = T5Tokenizer.from_pretrained(name) - t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) - finally: - logging.disable(previous_level) - if finetune: - self.t5 = t5 - else: - # this makes sure that the t5 models is not part - # of the saved checkpoint - self.__dict__['t5'] = t5.to(device) - - self.normalize_text = normalize_text - if normalize_text: - self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) - - def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: - # if current sample doesn't have a certain attribute, replace with empty string - entries: tp.List[str] = [xi if xi is not None else "" for xi in x] - if self.normalize_text: - _, _, entries = self.text_normalizer(entries, return_text=True) - if self.word_dropout > 0. and self.training: - new_entries = [] - for entry in entries: - words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] - new_entries.append(" ".join(words)) - entries = new_entries - - empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) - - inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) - mask = inputs['attention_mask'] - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - return inputs - - def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: - mask = inputs['attention_mask'] - with torch.set_grad_enabled(self.finetune), self.autocast: - embeds = self.t5(**inputs).last_hidden_state - embeds = self.output_proj(embeds.to(self.output_proj.weight)) - embeds = (embeds * mask.unsqueeze(-1)) - return embeds, mask - - -class WaveformConditioner(BaseConditioner): - """Base class for all conditioners that take a waveform as input. - Classes that inherit must implement `_get_wav_embedding` that outputs - a continuous tensor, and `_downsampling_factor` that returns the down-sampling - factor of the embedding model. - - Args: - dim (int): The internal representation dimension. - output_dim (int): Output dimension. - device (tp.Union[torch.device, str]): Device. - """ - def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): - super().__init__(dim, output_dim) - self.device = device - - def tokenize(self, x: WavCondition) -> WavCondition: - wav, length, sample_rate, path, seek_time = x - assert length is not None - return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) - - def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: - """Gets as input a WavCondition and returns a dense embedding.""" - raise NotImplementedError() - - def _downsampling_factor(self): - """Returns the downsampling factor of the embedding model.""" - raise NotImplementedError() - - def forward(self, x: WavCondition) -> ConditionType: - """Extract condition embedding and mask from a waveform and its metadata. - Args: - x (WavCondition): Waveform condition containing raw waveform and metadata. 
- Returns: - ConditionType: a dense vector representing the conditioning along with its mask - """ - wav, lengths, *_ = x - with torch.no_grad(): - embeds = self._get_wav_embedding(x) - embeds = embeds.to(self.output_proj.weight) - embeds = self.output_proj(embeds) - - if lengths is not None: - lengths = lengths / self._downsampling_factor() - mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore - else: - mask = torch.ones_like(embeds) - embeds = (embeds * mask.unsqueeze(2).to(self.device)) - - return embeds, mask - - -class ChromaStemConditioner(WaveformConditioner): - """Chroma conditioner based on stems. - The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as - the drums and bass often dominate the chroma leading to the chroma features - not containing information about the melody. - - Args: - output_dim (int): Output dimension for the conditioner. - sample_rate (int): Sample rate for the chroma extractor. - n_chroma (int): Number of chroma bins for the chroma extractor. - radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). - duration (int): duration used during training. This is later used for correct padding - in case we are using chroma as prefix. - match_len_on_eval (bool, optional): if True then all chromas are padded to the training - duration. Defaults to False. - eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as - conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). - Defaults to None. - n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. - device (tp.Union[torch.device, str], optional): Device for the conditioner. - **kwargs: Additional parameters for the chroma extractor. - """ - def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, - duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, - n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, - device: tp.Union[torch.device, str] = 'cpu', **kwargs): - from demucs import pretrained - super().__init__(dim=n_chroma, output_dim=output_dim, device=device) - self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) - self.sample_rate = sample_rate - self.match_len_on_eval = match_len_on_eval - self.duration = duration - self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) - stem_sources: list = self.demucs.sources # type: ignore - self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) - self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, - radix2_exp=radix2_exp, **kwargs).to(device) - self.chroma_len = self._get_chroma_len() - self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) - self.cache = None - if cache_path is not None: - self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, - compute_embed_fn=self._get_full_chroma_for_cache, - extract_embed_fn=self._extract_chroma_chunk) - - def _downsampling_factor(self) -> int: - return self.chroma.winhop - - def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: - """Load pre-defined waveforms from a json. - These waveforms will be used for chroma extraction during evaluation. 
- This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). - """ - if path is None: - return None - - logger.info(f"Loading evaluation wavs from {path}") - from audiocraft.data.audio_dataset import AudioDataset - dataset: AudioDataset = AudioDataset.from_meta( - path, segment_duration=self.duration, min_audio_duration=self.duration, - sample_rate=self.sample_rate, channels=1) - - if len(dataset) > 0: - eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) - logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") - return eval_wavs - else: - raise ValueError("Could not find evaluation wavs, check lengths of wavs") - - def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: - self.eval_wavs = eval_wavs - - def has_eval_wavs(self) -> bool: - return self.eval_wavs is not None - - def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: - """Sample wavs from a predefined list.""" - assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." - total_eval_wavs = len(self.eval_wavs) - out = self.eval_wavs - if num_samples > total_eval_wavs: - out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) - return out[torch.randperm(len(out))][:num_samples] - - def _get_chroma_len(self) -> int: - """Get length of chroma during training.""" - dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) - dummy_chr = self.chroma(dummy_wav) - return dummy_chr.shape[1] - - @torch.no_grad() - def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: - """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" - from demucs.apply import apply_model - from demucs.audio import convert_audio - with self.autocast: - wav = convert_audio( - wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore - stems = apply_model(self.demucs, wav, device=self.device) - stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning - mix_wav = stems.sum(1) # merge extracted stems to single waveform - mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore - return mix_wav - - @torch.no_grad() - def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: - """Extract chroma features from the waveform.""" - with self.autocast: - return self.chroma(wav) - - @torch.no_grad() - def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: - """Compute wav embedding, applying stem and chroma extraction.""" - # avoid 0-size tensors when we are working with null conds - if wav.shape[-1] == 1: - return self._extract_chroma(wav) - stems = self._get_stemmed_wav(wav, sample_rate) - chroma = self._extract_chroma(stems) - return chroma - - @torch.no_grad() - def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: - """Extract chroma from the whole audio waveform at the given path.""" - wav, sr = audio_read(path) - wav = wav[None].to(self.device) - wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) - chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] - return chroma - - def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: - """Extract a chunk of chroma from the full chroma derived from the full waveform.""" - wav_length = x.wav.shape[-1] - seek_time = x.seek_time[idx] 
- assert seek_time is not None, ( - "WavCondition seek_time is required " - "when extracting chroma chunks from pre-computed chroma.") - full_chroma = full_chroma.float() - frame_rate = self.sample_rate / self._downsampling_factor() - target_length = int(frame_rate * wav_length / self.sample_rate) - index = int(frame_rate * seek_time) - out = full_chroma[index: index + target_length] - out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] - return out.to(self.device) - - @torch.no_grad() - def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: - """Get the wav embedding from the WavCondition. - The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly - or will rely on the embedding cache to load the pre-computed embedding if relevant. - """ - sampled_wav: tp.Optional[torch.Tensor] = None - if not self.training and self.eval_wavs is not None: - warn_once(logger, "Using precomputed evaluation wavs!") - sampled_wav = self._sample_eval_wavs(len(x.wav)) - - no_undefined_paths = all(p is not None for p in x.path) - no_nullified_cond = x.wav.shape[-1] > 1 - if sampled_wav is not None: - chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) - elif self.cache is not None and no_undefined_paths and no_nullified_cond: - paths = [Path(p) for p in x.path if p is not None] - chroma = self.cache.get_embed_from_cache(paths, x) - else: - assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." - chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) - - if self.match_len_on_eval: - B, T, C = chroma.shape - if T > self.chroma_len: - chroma = chroma[:, :self.chroma_len] - logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") - elif T < self.chroma_len: - n_repeat = int(math.ceil(self.chroma_len / T)) - chroma = chroma.repeat(1, n_repeat, 1) - chroma = chroma[:, :self.chroma_len] - logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") - - return chroma - - def tokenize(self, x: WavCondition) -> WavCondition: - """Apply WavConditioner tokenization and populate cache if needed.""" - x = super().tokenize(x) - no_undefined_paths = all(p is not None for p in x.path) - if self.cache is not None and no_undefined_paths: - paths = [Path(p) for p in x.path if p is not None] - self.cache.populate_embed_cache(paths, x) - return x - - -class JointEmbeddingConditioner(BaseConditioner): - """Joint embedding conditioning supporting both audio or text conditioning. - - Args: - dim (int): Dimension. - output_dim (int): Output dimension. - device (str): Device. - attribute (str): Attribute used by the conditioner. - autocast_dtype (str): Autocast for the conditioner. - quantize (bool): Whether to quantize the CLAP embedding. - n_q (int): Number of residual quantizers (used if quantize is true). - bins (int): Quantizers' codebooks size (used if quantize is true). - kwargs: Additional parameters for residual vector quantizer. 
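`_extract_chroma_chunk` above maps a seek time in seconds to a frame index in the pre-computed chroma via the feature frame rate, then zero-pads when the chunk runs past the end of the file. The same arithmetic in isolation (all values are illustrative):

```python
import torch
import torch.nn.functional as F

sample_rate, winhop = 32000, 640
frame_rate = sample_rate / winhop      # chroma frames per second
full_chroma = torch.randn(1500, 12)    # [frames, n_chroma] for the whole file

seek_time, wav_length = 28.0, 320000   # a 10 s chunk starting at 28 s
target_length = int(frame_rate * wav_length / sample_rate)
index = int(frame_rate * seek_time)

out = full_chroma[index: index + target_length]
# Zero-pad on the time axis if the file ended before the chunk did.
out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0]
print(out.shape)  # torch.Size([500, 12])
```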
- """ - def __init__(self, dim: int, output_dim: int, device: str, attribute: str, - autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, - n_q: int = 12, bins: int = 1024, **kwargs): - super().__init__(dim=dim, output_dim=output_dim) - self.device = device - self.attribute = attribute - if autocast_dtype is None or device == 'cpu': - self.autocast = TorchAutocast(enabled=False) - logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") - else: - dtype = getattr(torch, autocast_dtype) - assert isinstance(dtype, torch.dtype) - logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") - self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) - # residual vector quantizer to discretize the conditioned embedding - self.quantizer: tp.Optional[ResidualVectorQuantizer] = None - if quantize: - self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) - - def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get joint embedding in latent space from the inputs. - - Returns: - tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding - and corresponding empty indexes. - """ - raise NotImplementedError() - - def forward(self, x: JointEmbedCondition) -> ConditionType: - with self.autocast: - embed, empty_idx = self._get_embed(x) - if self.quantizer is not None: - embed = embed.view(-1, self.dim, 1) - q_res = self.quantizer(embed, frame_rate=1) - out_embed = q_res.x.view(-1, self.dim) - else: - out_embed = embed - out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) - mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) - mask[empty_idx, :] = 0 # zero-out index where the input is non-existant - out_embed = (out_embed * mask.unsqueeze(-1)) - return out_embed, mask - - def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: - return x - - -class CLAPEmbeddingConditioner(JointEmbeddingConditioner): - """Joint Embedding conditioner based on pre-trained CLAP model. - - This CLAP-based conditioner supports a caching mechanism - over the computed embeddings for faster training. - - Args: - dim (int): Dimension. - output_dim (int): Output dimension. - device (str): Device. - attribute (str): Attribute used by the conditioner. - quantize (bool): Whether to quantize the CLAP embedding. - n_q (int): Number of residual quantizers (used if quantize is true). - bins (int): Quantizers' codebooks size (used if quantize is true). - checkpoint (str): Path to CLAP checkpoint. - model_arch (str): CLAP model architecture. - enable_fusion (bool): Enable fusion for CLAP model. - sample_rate (int): Sample rate used by CLAP model. - max_audio_length (float): Maximum audio length for CLAP model. - audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. - normalize (bool): Whether to normalize the CLAP embedding. - text_p (float): Probability of using text representation instead of audio at train time. - batch_size (Optional[int]): Batch size for CLAP embedding computation. - autocast_dtype (str): Autocast for the conditioner. - cache_path (Optional[str]): Path for pre-computed embeddings caching. - kwargs: Additional parameters for residual vector quantizer. 
- """ - def __init__(self, dim: int, output_dim: int, device: str, attribute: str, - quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, - enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, - normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, - autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): - try: - import laion_clap # type: ignore - except ImportError: - raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") - checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint) - clap_tokenize = RobertaTokenizer.from_pretrained('roberta-base') - clap_model = laion_clap.CLAP_Module(enable_fusion=enable_fusion, amodel=model_arch) - load_clap_state_dict(clap_model, checkpoint) - clap_model.eval() - clap_model.to(device) - super().__init__(dim=dim, output_dim=output_dim, device=device, attribute=attribute, - autocast_dtype=autocast_dtype, quantize=quantize, n_q=n_q, bins=bins, - **kwargs) - self.checkpoint = checkpoint - self.enable_fusion = enable_fusion - self.model_arch = model_arch - self.clap: laion_clap.CLAP_Module - self.clap_tokenize: RobertaTokenizer - self.clap_sample_rate = sample_rate - self.clap_max_frames = int(self.clap_sample_rate * max_audio_length) - self.clap_stride = int(self.clap_sample_rate * audio_stride) - self.batch_size = batch_size or 1 - self.normalize = normalize - self.text_p = text_p - self.__dict__['clap_tokenize'] = clap_tokenize - self.__dict__['clap'] = clap_model - self.wav_cache, self.text_cache = None, None - if cache_path is not None: - self.wav_cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, - compute_embed_fn=self._get_wav_embedding_for_cache, - extract_embed_fn=self._extract_wav_embedding_chunk) - self.text_cache = EmbeddingCache(Path(cache_path) / 'text', self.device, - compute_embed_fn=self._get_text_embedding_for_cache) - - def _tokenizer(self, texts: tp.Union[str, tp.List[str]]) -> dict: - # we use the default params from CLAP module here as well - return self.clap_tokenize(texts, padding="max_length", truncation=True, max_length=77, return_tensors="pt") - - def _compute_text_embedding(self, text: tp.List[str]) -> torch.Tensor: - """Compute text embedding from CLAP model on a given a batch of text. - - Args: - text (list[str]): List of text for the batch, with B items. - Returns: - torch.Tensor: CLAP embedding derived from text, of shape [B, 1, D], with D the CLAP embedding dimension. - """ - with torch.no_grad(): - embed = self.clap.get_text_embedding(text, tokenizer=self._tokenizer, use_tensor=True) - return embed.view(embed.size(0), 1, embed.size(-1)) - - def _get_text_embedding_for_cache(self, path: tp.Union[Path, str], - x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Get text embedding function for the cache.""" - text = x.text[idx] - text = text if text is not None else "" - return self._compute_text_embedding([text])[0] - - def _preprocess_wav(self, wav: torch.Tensor, length: torch.Tensor, sample_rates: tp.List[int]) -> torch.Tensor: - """Preprocess wav to expected format by CLAP model. - - Args: - wav (torch.Tensor): Audio wav, of shape [B, C, T]. - length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. - sample_rates (list[int]): Sample rates for each sample in the batch - Returns: - torch.Tensor: Audio wav of shape [B, T]. 
- """ - assert wav.dim() == 3, "Expecting wav to be [B, C, T]" - if sample_rates is not None: - _wav = [] - for i, audio in enumerate(wav): - sr = sample_rates[i] - audio = convert_audio(audio, from_rate=sr, to_rate=self.clap_sample_rate, to_channels=1) - _wav.append(audio) - wav = torch.stack(_wav, dim=0) - wav = wav.mean(dim=1) - return wav - - def _compute_wav_embedding(self, wav: torch.Tensor, length: torch.Tensor, - sample_rates: tp.List[int], reduce_mean: bool = False) -> torch.Tensor: - """Compute audio wave embedding from CLAP model. - - Since CLAP operates on a fixed sequence length audio inputs and we need to process longer audio sequences, - we calculate the wav embeddings on `clap_max_frames` windows with `clap_stride`-second stride and - average the resulting embeddings. - - Args: - wav (torch.Tensor): Audio wav, of shape [B, C, T]. - length (torch.Tensor): Actual length of the audio for each item in the batch, of shape [B]. - sample_rates (list[int]): Sample rates for each sample in the batch. - reduce_mean (bool): Whether to get the average tensor. - Returns: - torch.Tensor: Audio embedding of shape [B, F, D], F being the number of chunks, D the dimension. - """ - with torch.no_grad(): - wav = self._preprocess_wav(wav, length, sample_rates) - B, T = wav.shape - if T >= self.clap_max_frames: - wav = wav.unfold(-1, self.clap_max_frames, self.clap_stride) # [B, F, T] - else: - wav = wav.view(-1, 1, T) # [B, F, T] with F=1 - wav = einops.rearrange(wav, 'b f t -> (b f) t') - embed_list = [] - for i in range(0, wav.size(0), self.batch_size): - _wav = wav[i:i+self.batch_size, ...] - _embed = self.clap.get_audio_embedding_from_data(_wav, use_tensor=True) - embed_list.append(_embed) - embed = torch.cat(embed_list, dim=0) - embed = einops.rearrange(embed, '(b f) d -> b f d', b=B) - if reduce_mean: - embed = embed.mean(dim=1, keepdim=True) - return embed # [B, F, D] with F=1 if reduce_mean is True - - def _get_wav_embedding_for_cache(self, path: tp.Union[str, Path], - x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Compute audio wave embedding for the cache. - The embedding is computed on a given audio read from file. - - Args: - path (str or Path): Path to the full audio file. - Returns: - torch.Tensor: Single-item tensor of shape [F, D], F being the number of chunks, D the dimension. - """ - wav, sr = audio_read(path) # [C, T] - wav = wav.unsqueeze(0).to(self.device) # [1, C, T] - wav_len = torch.LongTensor([wav.shape[-1]]).to(self.device) - embed = self._compute_wav_embedding(wav, wav_len, [sr], reduce_mean=False) # [B, F, D] - return embed.squeeze(0) # [F, D] - - def _extract_wav_embedding_chunk(self, full_embed: torch.Tensor, x: JointEmbedCondition, idx: int) -> torch.Tensor: - """Extract the chunk of embedding matching the seek_time and length from the full CLAP audio embedding. - - Args: - full_embed (torch.Tensor): CLAP embedding computed on the full wave, of shape [F, D]. - x (JointEmbedCondition): Joint embedding condition for the full batch. - idx (int): Index considered for the given embedding to extract. - Returns: - torch.Tensor: Wav embedding averaged on sliding window, of shape [1, D]. - """ - sample_rate = x.sample_rate[idx] - seek_time = x.seek_time[idx] - seek_time = 0. 
if seek_time is None else seek_time - clap_stride = int(self.clap_stride / self.clap_sample_rate) * sample_rate - end_seek_time = seek_time + self.clap_max_frames / self.clap_sample_rate - start_offset = int(seek_time * sample_rate // clap_stride) - end_offset = int(end_seek_time * sample_rate // clap_stride) - wav_embed = full_embed[start_offset:end_offset, ...] - wav_embed = wav_embed.mean(dim=0, keepdim=True) - return wav_embed.to(self.device) # [F, D] - - def _get_text_embedding(self, x: JointEmbedCondition) -> torch.Tensor: - """Get CLAP embedding from a batch of text descriptions.""" - no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout - if self.text_cache is not None and no_nullified_cond: - assert all(p is not None for p in x.path), "Cache requires all JointEmbedCondition paths to be provided" - paths = [Path(p) for p in x.path if p is not None] - embed = self.text_cache.get_embed_from_cache(paths, x) - else: - text = [xi if xi is not None else "" for xi in x.text] - embed = self._compute_text_embedding(text) - if self.normalize: - embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1) - return embed - - def _get_wav_embedding(self, x: JointEmbedCondition) -> torch.Tensor: - """Get CLAP embedding from a batch of audio tensors (and corresponding sample rates).""" - no_undefined_paths = all(p is not None for p in x.path) - no_nullified_cond = x.wav.shape[-1] > 1 # we don't want to read from cache when condition dropout - if self.wav_cache is not None and no_undefined_paths and no_nullified_cond: - paths = [Path(p) for p in x.path if p is not None] - embed = self.wav_cache.get_embed_from_cache(paths, x) - else: - embed = self._compute_wav_embedding(x.wav, x.length, x.sample_rate, reduce_mean=True) - if self.normalize: - embed = torch.nn.functional.normalize(embed, p=2.0, dim=-1) - return embed - - def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: - # Trying to limit as much as possible sync points when the cache is warm. - no_undefined_paths = all(p is not None for p in x.path) - if self.wav_cache is not None and no_undefined_paths: - assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided" - paths = [Path(p) for p in x.path if p is not None] - self.wav_cache.populate_embed_cache(paths, x) - if self.text_cache is not None and no_undefined_paths: - assert all([p is not None for p in x.path]), "Cache requires all JointEmbedCondition paths to be provided" - paths = [Path(p) for p in x.path if p is not None] - self.text_cache.populate_embed_cache(paths, x) - return x - - def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Extract shared latent representation from either the wav or the text using CLAP.""" - # decide whether to use text embedding at train time or not - use_text_embed = random.random() < self.text_p - if self.training and not use_text_embed: - embed = self._get_wav_embedding(x) - empty_idx = torch.LongTensor([]) # we assume we always have the audio wav - else: - embed = self._get_text_embedding(x) - empty_idx = torch.LongTensor([i for i, xi in enumerate(x.text) if xi is None or xi == ""]) - return embed, empty_idx - - -def dropout_condition(sample: ConditioningAttributes, condition_type: str, condition: str) -> ConditioningAttributes: - """Utility function for nullifying an attribute inside an ConditioningAttributes object. - If the condition is of type "wav", then nullify it using `nullify_condition` function. 
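`_compute_wav_embedding` above slices a long waveform into fixed-size windows with `Tensor.unfold`, embeds each window in mini-batches, and averages. Here is the windowing step in isolation, with illustrative CLAP-like sizes (the 48 kHz rate and 10 s window are assumptions, not values from this file):

```python
import torch

sample_rate = 48000            # assumed CLAP rate
max_frames = 10 * sample_rate  # 10 s window
stride = 5 * sample_rate       # 5 s hop

wav = torch.randn(2, 25 * sample_rate)        # [B, T], 25 s per item
windows = wav.unfold(-1, max_frames, stride)  # [B, F, max_frames]
print(windows.shape)                          # torch.Size([2, 4, 480000])
# Flatten batch and window dims, embed in mini-batches, un-flatten, then
# average over F -- the rearrange/mean dance done in the method above.
flat = windows.reshape(-1, max_frames)        # [(B * F), max_frames]
```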
- If the condition is of any other type, set its value to None. - Works in-place. - """ - if condition_type not in ['text', 'wav', 'joint_embed']: - raise ValueError( - "dropout_condition got an unexpected condition type!" - f" expected 'text', 'wav' or 'joint_embed' but got '{condition_type}'" - ) - - if condition not in getattr(sample, condition_type): - raise ValueError( - "dropout_condition received an unexpected condition!" - f" expected wav={sample.wav.keys()} and text={sample.text.keys()}" - f" but got '{condition}' of type '{condition_type}'!" - ) - - if condition_type == 'wav': - wav_cond = sample.wav[condition] - sample.wav[condition] = nullify_wav(wav_cond) - elif condition_type == 'joint_embed': - embed = sample.joint_embed[condition] - sample.joint_embed[condition] = nullify_joint_embed(embed) - else: - sample.text[condition] = None - - return sample - - -class DropoutModule(nn.Module): - """Base module for all dropout modules.""" - def __init__(self, seed: int = 1234): - super().__init__() - self.rng = torch.Generator() - self.rng.manual_seed(seed) - - -class AttributeDropout(DropoutModule): - """Dropout with a given probability per attribute. - This is different from the behavior of ClassifierFreeGuidanceDropout as this allows for attributes - to be dropped out separately. For example, "artist" can be dropped while "genre" remains. - This is in contrast to ClassifierFreeGuidanceDropout where if "artist" is dropped "genre" - must also be dropped. - - Args: - p (tp.Dict[str, float]): A dict mapping between attributes and dropout probability. For example: - ... - "genre": 0.1, - "artist": 0.5, - "wav": 0.25, - ... - active_on_eval (bool, optional): Whether the dropout is active at eval. Default to False. - seed (int, optional): Random seed. - """ - def __init__(self, p: tp.Dict[str, tp.Dict[str, float]], active_on_eval: bool = False, seed: int = 1234): - super().__init__(seed=seed) - self.active_on_eval = active_on_eval - # construct dict that return the values from p otherwise 0 - self.p = {} - for condition_type, probs in p.items(): - self.p[condition_type] = defaultdict(lambda: 0, probs) - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (list[ConditioningAttributes]): List of conditions. - Returns: - list[ConditioningAttributes]: List of conditions after certain attributes were set to None. - """ - if not self.training and not self.active_on_eval: - return samples - - samples = deepcopy(samples) - for condition_type, ps in self.p.items(): # for condition types [text, wav] - for condition, p in ps.items(): # for attributes of each type (e.g., [artist, genre]) - if torch.rand(1, generator=self.rng).item() < p: - for sample in samples: - dropout_condition(sample, condition_type, condition) - return samples - - def __repr__(self): - return f"AttributeDropout({dict(self.p)})" - - -class ClassifierFreeGuidanceDropout(DropoutModule): - """Classifier Free Guidance dropout. - All attributes are dropped with the same probability. - - Args: - p (float): Probability to apply condition dropout during training. - seed (int): Random seed. - """ - def __init__(self, p: float, seed: int = 1234): - super().__init__(seed=seed) - self.p = p - - def forward(self, samples: tp.List[ConditioningAttributes]) -> tp.List[ConditioningAttributes]: - """ - Args: - samples (list[ConditioningAttributes]): List of conditions. - Returns: - list[ConditioningAttributes]: List of conditions after all attributes were set to None. 
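`ClassifierFreeGuidanceDropout` above trains the model to also behave sensibly with every condition nulled, which is what enables classifier-free guidance at sampling time: one forward pass with the real conditions, one with the dropped-out ones, then extrapolate. The guidance combination itself is standard CFG rather than code from this file; a generic sketch:

```python
import torch

def cfg_logits(model, tokens, cond, null_cond, scale: float = 3.0):
    # Two passes: real conditions vs. the nullified ones produced by dropout.
    logits_cond = model(tokens, cond)
    logits_uncond = model(tokens, null_cond)
    # Push away from the unconditional prediction, towards the conditional one.
    return logits_uncond + scale * (logits_cond - logits_uncond)

# Toy model: the condition is just an additive bias over an 8-way vocabulary.
model = lambda tokens, cond: torch.zeros(1, 8) + cond
print(cfg_logits(model, None, torch.ones(1, 8), torch.zeros(1, 8)))  # all 3.0
```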
- """ - if not self.training: - return samples - - # decide on which attributes to drop in a batched fashion - drop = torch.rand(1, generator=self.rng).item() < self.p - if not drop: - return samples - - # nullify conditions of all attributes - samples = deepcopy(samples) - for condition_type in ["wav", "text"]: - for sample in samples: - for condition in sample.attributes[condition_type]: - dropout_condition(sample, condition_type, condition) - return samples - - def __repr__(self): - return f"ClassifierFreeGuidanceDropout(p={self.p})" - - -class ConditioningProvider(nn.Module): - """Prepare and provide conditions given all the supported conditioners. - - Args: - conditioners (dict): Dictionary of conditioners. - device (torch.device or str, optional): Device for conditioners and output condition types. - """ - def __init__(self, conditioners: tp.Dict[str, BaseConditioner], device: tp.Union[torch.device, str] = "cpu"): - super().__init__() - self.device = device - self.conditioners = nn.ModuleDict(conditioners) - - @property - def joint_embed_conditions(self): - return [m.attribute for m in self.conditioners.values() if isinstance(m, JointEmbeddingConditioner)] - - @property - def has_joint_embed_conditions(self): - return len(self.joint_embed_conditions) > 0 - - @property - def text_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, TextConditioner)] - - @property - def wav_conditions(self): - return [k for k, v in self.conditioners.items() if isinstance(v, WaveformConditioner)] - - @property - def has_wav_condition(self): - return len(self.wav_conditions) > 0 - - def tokenize(self, inputs: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.Any]: - """Match attributes/wavs with existing conditioners in self, and compute tokenize them accordingly. - This should be called before starting any real GPU work to avoid synchronization points. - This will return a dict matching conditioner names to their arbitrary tokenized representations. - - Args: - inputs (list[ConditioningAttributes]): List of ConditioningAttributes objects containing - text and wav conditions. - """ - assert all([isinstance(x, ConditioningAttributes) for x in inputs]), ( - "Got unexpected types input for conditioner! should be tp.List[ConditioningAttributes]", - f" but types were {set([type(x) for x in inputs])}" - ) - - output = {} - text = self._collate_text(inputs) - wavs = self._collate_wavs(inputs) - joint_embeds = self._collate_joint_embeds(inputs) - - assert set(text.keys() | wavs.keys() | joint_embeds.keys()).issubset(set(self.conditioners.keys())), ( - f"Got an unexpected attribute! Expected {self.conditioners.keys()}, ", - f"got {text.keys(), wavs.keys(), joint_embeds.keys()}" - ) - - for attribute, batch in chain(text.items(), wavs.items(), joint_embeds.items()): - output[attribute] = self.conditioners[attribute].tokenize(batch) - return output - - def forward(self, tokenized: tp.Dict[str, tp.Any]) -> tp.Dict[str, ConditionType]: - """Compute pairs of `(embedding, mask)` using the configured conditioners and the tokenized representations. - The output is for example: - { - "genre": (torch.Tensor([B, 1, D_genre]), torch.Tensor([B, 1])), - "description": (torch.Tensor([B, T_desc, D_desc]), torch.Tensor([B, T_desc])), - ... - } - - Args: - tokenized (dict): Dict of tokenized representations as returned by `tokenize()`. 
- """ - output = {} - for attribute, inputs in tokenized.items(): - condition, mask = self.conditioners[attribute](inputs) - output[attribute] = (condition, mask) - return output - - def _collate_text(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, tp.List[tp.Optional[str]]]: - """Given a list of ConditioningAttributes objects, compile a dictionary where the keys - are the attributes and the values are the aggregated input per attribute. - For example: - Input: - [ - ConditioningAttributes(text={"genre": "Rock", "description": "A rock song with a guitar solo"}, wav=...), - ConditioningAttributes(text={"genre": "Hip-hop", "description": "A hip-hop verse"}, wav=...), - ] - Output: - { - "genre": ["Rock", "Hip-hop"], - "description": ["A rock song with a guitar solo", "A hip-hop verse"] - } - - Args: - samples (list of ConditioningAttributes): List of ConditioningAttributes samples. - Returns: - dict[str, list[str, optional]]: A dictionary mapping an attribute name to text batch. - """ - out: tp.Dict[str, tp.List[tp.Optional[str]]] = defaultdict(list) - texts = [x.text for x in samples] - for text in texts: - for condition in self.text_conditions: - out[condition].append(text[condition]) - return out - - def _collate_wavs(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, WavCondition]: - """Generate a dict where the keys are attributes by which we fetch similar wavs, - and the values are Tensors of wavs according to said attributes. - - *Note*: by the time the samples reach this function, each sample should have some waveform - inside the "wav" attribute. It should be either: - 1. A real waveform - 2. A null waveform due to the sample having no similar waveforms (nullified by the dataset) - 3. A null waveform due to it being dropped in a dropout module (nullified by dropout) - - Args: - samples (list of ConditioningAttributes): List of ConditioningAttributes samples. - Returns: - dict[str, WavCondition]: A dictionary mapping an attribute name to wavs. - """ - wavs = defaultdict(list) - lengths = defaultdict(list) - sample_rates = defaultdict(list) - paths = defaultdict(list) - seek_times = defaultdict(list) - out: tp.Dict[str, WavCondition] = {} - - for sample in samples: - for attribute in self.wav_conditions: - wav, length, sample_rate, path, seek_time = sample.wav[attribute] - assert wav.dim() == 3, f"Got wav with dim={wav.dim()}, but expected 3 [1, C, T]" - assert wav.size(0) == 1, f"Got wav [B, C, T] with shape={wav.shape}, but expected B == 1" - # mono-channel conditioning - wav = wav.mean(1, keepdim=True) # [1, 1, T] - wavs[attribute].append(wav.flatten()) # [T] - lengths[attribute].append(length) - sample_rates[attribute].extend(sample_rate) - paths[attribute].extend(path) - seek_times[attribute].extend(seek_time) - - # stack all wavs to a single tensor - for attribute in self.wav_conditions: - stacked_wav, _ = collate(wavs[attribute], dim=0) - out[attribute] = WavCondition( - stacked_wav.unsqueeze(1), torch.cat(lengths[attribute]), sample_rates[attribute], - paths[attribute], seek_times[attribute]) - - return out - - def _collate_joint_embeds(self, samples: tp.List[ConditioningAttributes]) -> tp.Dict[str, JointEmbedCondition]: - """Generate a dict where the keys are attributes by which we compute joint embeddings, - and the values are Tensors of pre-computed embeddings and the corresponding text attributes. - - Args: - samples (list[ConditioningAttributes]): List of ConditioningAttributes samples. 
- Returns: - A dictionary mapping an attribute name to joint embeddings. - """ - texts = defaultdict(list) - wavs = defaultdict(list) - lengths = defaultdict(list) - sample_rates = defaultdict(list) - paths = defaultdict(list) - seek_times = defaultdict(list) - channels: int = 0 - - out = {} - for sample in samples: - for attribute in self.joint_embed_conditions: - wav, text, length, sample_rate, path, seek_time = sample.joint_embed[attribute] - assert wav.dim() == 3 - if channels == 0: - channels = wav.size(1) - else: - assert channels == wav.size(1), "not all audio has same number of channels in batch" - assert wav.size(0) == 1, "Expecting single-wav batch in the collate method" - wav = einops.rearrange(wav, "b c t -> (b c t)") # [1, C, T] => [C * T] - wavs[attribute].append(wav) - texts[attribute].extend(text) - lengths[attribute].append(length) - sample_rates[attribute].extend(sample_rate) - paths[attribute].extend(path) - seek_times[attribute].extend(seek_time) - - for attribute in self.joint_embed_conditions: - stacked_texts = texts[attribute] - stacked_paths = paths[attribute] - stacked_seek_times = seek_times[attribute] - stacked_wavs = pad_sequence(wavs[attribute]).to(self.device) - stacked_wavs = einops.rearrange(stacked_wavs, "(c t) b -> b c t", c=channels) - stacked_sample_rates = sample_rates[attribute] - stacked_lengths = torch.cat(lengths[attribute]).to(self.device) - assert stacked_lengths.size(0) == stacked_wavs.size(0) - assert len(stacked_sample_rates) == stacked_wavs.size(0) - assert len(stacked_texts) == stacked_wavs.size(0) - out[attribute] = JointEmbedCondition( - text=stacked_texts, wav=stacked_wavs, - length=stacked_lengths, sample_rate=stacked_sample_rates, - path=stacked_paths, seek_time=stacked_seek_times) - - return out - - -class ConditionFuser(StreamingModule): - """Condition fuser handles the logic to combine the different conditions - to the actual model input. - - Args: - fuse2cond (tp.Dict[str, str]): A dictionary that says how to fuse - each condition. For example: - { - "prepend": ["description"], - "sum": ["genre", "bpm"], - "cross": ["description"], - } - cross_attention_pos_emb (bool, optional): Use positional embeddings in cross attention. - cross_attention_pos_emb_scale (int): Scale for positional embeddings in cross attention if used. - """ - FUSING_METHODS = ["sum", "prepend", "cross", "input_interpolate"] - - def __init__(self, fuse2cond: tp.Dict[str, tp.List[str]], cross_attention_pos_emb: bool = False, - cross_attention_pos_emb_scale: float = 1.0): - super().__init__() - assert all( - [k in self.FUSING_METHODS for k in fuse2cond.keys()] - ), f"Got invalid fuse method, allowed methods: {self.FUSING_METHODS}" - self.cross_attention_pos_emb = cross_attention_pos_emb - self.cross_attention_pos_emb_scale = cross_attention_pos_emb_scale - self.fuse2cond: tp.Dict[str, tp.List[str]] = fuse2cond - self.cond2fuse: tp.Dict[str, str] = {} - for fuse_method, conditions in fuse2cond.items(): - for condition in conditions: - self.cond2fuse[condition] = fuse_method - - def forward( - self, - input: torch.Tensor, - conditions: tp.Dict[str, ConditionType] - ) -> tp.Tuple[torch.Tensor, tp.Optional[torch.Tensor]]: - """Fuse the conditions to the provided model input. - - Args: - input (torch.Tensor): Transformer input. - conditions (dict[str, ConditionType]): Dict of conditions. - Returns: - tuple[torch.Tensor, torch.Tensor]: The first tensor is the transformer input - after the conditions have been fused. 
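`ConditionFuser` inverts the `fuse2cond` mapping at construction so each condition can be dispatched by name in `forward` below. The fusing ops themselves reduce to simple tensor operations; a minimal sketch that ignores streaming state and masks:

```python
import torch

def fuse(input, cond, op):
    # input: [B, T, D] transformer input; cond: [B, S, D] condition embedding.
    if op == "sum":
        return input + cond, None              # needs S == T (or broadcastable)
    if op == "prepend":
        return torch.cat([cond, input], dim=1), None  # sequence grows by S
    if op == "cross":
        return input, cond                     # routed to cross-attention instead
    raise ValueError(f"unknown op ({op})")

x, c = torch.randn(2, 10, 64), torch.randn(2, 3, 64)
print(fuse(x, c, "prepend")[0].shape)  # torch.Size([2, 13, 64])
```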
The second output tensor is the tensor - used for cross-attention or None if no cross attention inputs exist. - """ - B, T, _ = input.shape - - if 'offsets' in self._streaming_state: - first_step = False - offsets = self._streaming_state['offsets'] - else: - first_step = True - offsets = torch.zeros(input.shape[0], dtype=torch.long, device=input.device) - - assert set(conditions.keys()).issubset(set(self.cond2fuse.keys())), \ - f"given conditions contain unknown attributes for fuser, " \ - f"expected {self.cond2fuse.keys()}, got {conditions.keys()}" - cross_attention_output = None - for cond_type, (cond, cond_mask) in conditions.items(): - op = self.cond2fuse[cond_type] - if op == 'sum': - input += cond - elif op == 'input_interpolate': - cond = einops.rearrange(cond, "b t d -> b d t") - cond = F.interpolate(cond, size=input.shape[1]) - input += einops.rearrange(cond, "b d t -> b t d") - elif op == 'prepend': - if first_step: - input = torch.cat([cond, input], dim=1) - elif op == 'cross': - if cross_attention_output is not None: - cross_attention_output = torch.cat([cross_attention_output, cond], dim=1) - else: - cross_attention_output = cond - else: - raise ValueError(f"unknown op ({op})") - - if self.cross_attention_pos_emb and cross_attention_output is not None: - positions = torch.arange( - cross_attention_output.shape[1], - device=cross_attention_output.device - ).view(1, -1, 1) - pos_emb = create_sin_embedding(positions, cross_attention_output.shape[-1]) - cross_attention_output = cross_attention_output + self.cross_attention_pos_emb_scale * pos_emb - - if self._is_streaming: - self._streaming_state['offsets'] = offsets + T - - return input, cross_attention_output diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_daemon_thread.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_daemon_thread.py deleted file mode 100644 index 87295974d99611efb233f66e32cac9eebde2e8bc..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_daemon_thread.py +++ /dev/null @@ -1,193 +0,0 @@ -from _pydev_bundle._pydev_saved_modules import threading -from _pydev_bundle import _pydev_saved_modules -from _pydevd_bundle.pydevd_utils import notify_about_gevent_if_needed -import weakref -from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_IRONPYTHON, \ - PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS -from _pydev_bundle.pydev_log import exception as pydev_log_exception -import sys -from _pydev_bundle import pydev_log -import pydevd_tracing -from _pydevd_bundle.pydevd_collect_bytecode_info import iter_instructions - -if IS_JYTHON: - import org.python.core as JyCore # @UnresolvedImport - - -class PyDBDaemonThread(threading.Thread): - - def __init__(self, py_db, target_and_args=None): - ''' - :param target_and_args: - tuple(func, args, kwargs) if this should be a function and args to run. - -- Note: use through run_as_pydevd_daemon_thread(). 
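The `target_and_args` tuple above lets the same thread class either be subclassed (overriding `_on_run`) or run an arbitrary function, which is how `run_as_pydevd_daemon_thread` at the bottom of this file is implemented. Stripped of the pydevd specifics, the pattern is just:

```python
import threading

def run_as_daemon(func, *args, **kwargs):
    # Generic shape of run_as_pydevd_daemon_thread: wrap the callable in a
    # daemon thread so it can never block interpreter shutdown.
    t = threading.Thread(target=func, args=args, kwargs=kwargs)
    t.name = '%s (daemon thread)' % func.__name__
    t.daemon = True
    t.start()
    return t

run_as_daemon(print, "running on a daemon thread").join()
```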
- ''' - threading.Thread.__init__(self) - notify_about_gevent_if_needed() - self._py_db = weakref.ref(py_db) - self._kill_received = False - mark_as_pydevd_daemon_thread(self) - self._target_and_args = target_and_args - - @property - def py_db(self): - return self._py_db() - - def run(self): - created_pydb_daemon = self.py_db.created_pydb_daemon_threads - created_pydb_daemon[self] = 1 - try: - try: - if IS_JYTHON and not isinstance(threading.current_thread(), threading._MainThread): - # we shouldn't update sys.modules for the main thread, cause it leads to the second importing 'threading' - # module, and the new instance of main thread is created - ss = JyCore.PySystemState() - # Note: Py.setSystemState() affects only the current thread. - JyCore.Py.setSystemState(ss) - - self._stop_trace() - self._on_run() - except: - if sys is not None and pydev_log_exception is not None: - pydev_log_exception() - finally: - del created_pydb_daemon[self] - - def _on_run(self): - if self._target_and_args is not None: - target, args, kwargs = self._target_and_args - target(*args, **kwargs) - else: - raise NotImplementedError('Should be reimplemented by: %s' % self.__class__) - - def do_kill_pydev_thread(self): - if not self._kill_received: - pydev_log.debug('%s received kill signal', self.name) - self._kill_received = True - - def _stop_trace(self): - if self.pydev_do_not_trace: - pydevd_tracing.SetTrace(None) # no debugging on this thread - - -def _collect_load_names(func): - found_load_names = set() - for instruction in iter_instructions(func.__code__): - if instruction.opname in ('LOAD_GLOBAL', 'LOAD_ATTR', 'LOAD_METHOD'): - found_load_names.add(instruction.argrepr) - return found_load_names - - -def _patch_threading_to_hide_pydevd_threads(): - ''' - Patches the needed functions on the `threading` module so that the pydevd threads are hidden. - - Note that we patch the functions __code__ to avoid issues if some code had already imported those - variables prior to the patching. - ''' - found_load_names = _collect_load_names(threading.enumerate) - # i.e.: we'll only apply the patching if the function seems to be what we expect. - - new_threading_enumerate = None - - if found_load_names in ( - {'_active_limbo_lock', '_limbo', '_active', 'values', 'list'}, - {'_active_limbo_lock', '_limbo', '_active', 'values', 'NULL + list'} - ): - pydev_log.debug('Applying patching to hide pydevd threads (Py3 version).') - - def new_threading_enumerate(): - with _active_limbo_lock: - ret = list(_active.values()) + list(_limbo.values()) - - return [t for t in ret if not getattr(t, 'is_pydev_daemon_thread', False)] - - elif found_load_names == set(('_active_limbo_lock', '_limbo', '_active', 'values')): - pydev_log.debug('Applying patching to hide pydevd threads (Py2 version).') - - def new_threading_enumerate(): - with _active_limbo_lock: - ret = _active.values() + _limbo.values() - - return [t for t in ret if not getattr(t, 'is_pydev_daemon_thread', False)] - - else: - pydev_log.info('Unable to hide pydevd threads. Found names in threading.enumerate: %s', found_load_names) - - if new_threading_enumerate is not None: - - def pydevd_saved_threading_enumerate(): - with threading._active_limbo_lock: - return list(threading._active.values()) + list(threading._limbo.values()) - - _pydev_saved_modules.pydevd_saved_threading_enumerate = pydevd_saved_threading_enumerate - - threading.enumerate.__code__ = new_threading_enumerate.__code__ - - # We also need to patch the active count (to match what we have in the enumerate). 
- def new_active_count(): - # Note: as this will be executed in the `threading` module, `enumerate` will - # actually be threading.enumerate. - return len(enumerate()) - - threading.active_count.__code__ = new_active_count.__code__ - - # When shutting down, Python (on some versions) may do something as: - # - # def _pickSomeNonDaemonThread(): - # for t in enumerate(): - # if not t.daemon and t.is_alive(): - # return t - # return None - # - # But in this particular case, we do want threads with `is_pydev_daemon_thread` to appear - # explicitly due to the pydevd `CheckAliveThread` (because we want the shutdown to wait on it). - # So, it can't rely on the `enumerate` for that anymore as it's patched to not return pydevd threads. - if hasattr(threading, '_pickSomeNonDaemonThread'): - - def new_pick_some_non_daemon_thread(): - with _active_limbo_lock: - # Ok for py2 and py3. - threads = list(_active.values()) + list(_limbo.values()) - - for t in threads: - if not t.daemon and t.is_alive(): - return t - return None - - threading._pickSomeNonDaemonThread.__code__ = new_pick_some_non_daemon_thread.__code__ - - -_patched_threading_to_hide_pydevd_threads = False - - -def mark_as_pydevd_daemon_thread(thread): - if not IS_JYTHON and not IS_IRONPYTHON and PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS: - global _patched_threading_to_hide_pydevd_threads - if not _patched_threading_to_hide_pydevd_threads: - # When we mark the first thread as a pydevd daemon thread, we also change the threading - # functions to hide pydevd threads. - # Note: we don't just "hide" the pydevd threads from the threading module by not using it - # (i.e.: just using the `thread.start_new_thread` instead of `threading.Thread`) - # because there's 1 thread (the `CheckAliveThread`) which is a pydevd thread but - # isn't really a daemon thread (so, we need CPython to wait on it for shutdown, - # in which case it needs to be in `threading` and the patching would be needed anyways). - _patched_threading_to_hide_pydevd_threads = True - try: - _patch_threading_to_hide_pydevd_threads() - except: - pydev_log.exception('Error applying patching to hide pydevd threads.') - - thread.pydev_do_not_trace = True - thread.is_pydev_daemon_thread = True - thread.daemon = True - - -def run_as_pydevd_daemon_thread(py_db, func, *args, **kwargs): - ''' - Runs a function as a pydevd daemon thread (without any tracing in place). - ''' - t = PyDBDaemonThread(py_db, target_and_args=(func, args, kwargs)) - t.name = '%s (pydevd daemon thread)' % (func.__name__,) - t.start() - return t diff --git a/spaces/Sunilkumarkanugula/SunilChatBot/app.py b/spaces/Sunilkumarkanugula/SunilChatBot/app.py deleted file mode 100644 index d4e8c682c1696197371641afad14c940b3d5ab15..0000000000000000000000000000000000000000 --- a/spaces/Sunilkumarkanugula/SunilChatBot/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a tech-savvy computer science student who spends countless hours coding, building apps, and keeping up with the latest tech trends. You enjoy discussing programming languages, AI, and gadgets and are always ready to troubleshoot tech-related problems. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/export/api.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/export/api.py deleted file mode 100644 index cf1a27a4806ca83d97f5cd8c27726ec29f4e7e50..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/export/api.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import os -import torch -from caffe2.proto import caffe2_pb2 -from torch import nn - -from annotator.oneformer.detectron2.config import CfgNode -from annotator.oneformer.detectron2.utils.file_io import PathManager - -from .caffe2_inference import ProtobufDetectionModel -from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format -from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph - -__all__ = [ - "Caffe2Model", - "Caffe2Tracer", -] - - -class Caffe2Tracer: - """ - Make a detectron2 model traceable with Caffe2 operators. - This class creates a traceable version of a detectron2 model which: - - 1. Rewrite parts of the model using ops in Caffe2. Note that some ops do - not have GPU implementation in Caffe2. - 2. Remove post-processing and only produce raw layer outputs - - After making a traceable model, the class provide methods to export such a - model to different deployment formats. - Exported graph produced by this class take two input tensors: - - 1. (1, C, H, W) float "data" which is an image (usually in [0, 255]). - (H, W) often has to be padded to multiple of 32 (depend on the model - architecture). - 2. 1x3 float "im_info", each row of which is (height, width, 1.0). - Height and width are true image shapes before padding. - - The class currently only supports models using builtin meta architectures. - Batch inference is not supported, and contributions are welcome. - """ - - def __init__(self, cfg: CfgNode, model: nn.Module, inputs): - """ - Args: - cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model. - model (nn.Module): An original pytorch model. Must be among a few official models - in detectron2 that can be converted to become caffe2-compatible automatically. - Weights have to be already loaded to this model. - inputs: sample inputs that the given model takes for inference. - Will be used to trace the model. For most models, random inputs with - no detected objects will not work as they lead to wrong traces. 
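The two-tensor input contract described above (a padded `data` image plus an `im_info` row holding the true height and width) can be prepared with a few lines of plain PyTorch. A sketch assuming one RGB image in `[0, 255]` and the usual multiple-of-32 padding:

```python
import torch
import torch.nn.functional as F

def to_caffe2_inputs(img_chw: torch.Tensor, size_divisibility: int = 32):
    # img_chw: (C, H, W) float tensor in [0, 255].
    _, h, w = img_chw.shape
    pad_h = (size_divisibility - h % size_divisibility) % size_divisibility
    pad_w = (size_divisibility - w % size_divisibility) % size_divisibility
    data = F.pad(img_chw, (0, pad_w, 0, pad_h)).unsqueeze(0)  # (1, C, H', W')
    im_info = torch.tensor([[float(h), float(w), 1.0]])       # true shape row
    return data, im_info

data, im_info = to_caffe2_inputs(torch.rand(3, 481, 600) * 255)
print(data.shape, im_info)  # torch.Size([1, 3, 512, 608]) tensor([[481., 600., 1.]])
```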
- """ - assert isinstance(cfg, CfgNode), cfg - assert isinstance(model, torch.nn.Module), type(model) - - # TODO make it support custom models, by passing in c2 model directly - C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE] - self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model)) - self.inputs = inputs - self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs) - - def export_caffe2(self): - """ - Export the model to Caffe2's protobuf format. - The returned object can be saved with its :meth:`.save_protobuf()` method. - The result can be loaded and executed using Caffe2 runtime. - - Returns: - :class:`Caffe2Model` - """ - from .caffe2_export import export_caffe2_detection_model - - predict_net, init_net = export_caffe2_detection_model( - self.traceable_model, self.traceable_inputs - ) - return Caffe2Model(predict_net, init_net) - - def export_onnx(self): - """ - Export the model to ONNX format. - Note that the exported model contains custom ops only available in caffe2, therefore it - cannot be directly executed by other runtime (such as onnxruntime or TensorRT). - Post-processing or transformation passes may be applied on the model to accommodate - different runtimes, but we currently do not provide support for them. - - Returns: - onnx.ModelProto: an onnx model. - """ - from .caffe2_export import export_onnx_model as export_onnx_model_impl - - return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,)) - - def export_torchscript(self): - """ - Export the model to a ``torch.jit.TracedModule`` by tracing. - The returned object can be saved to a file by ``.save()``. - - Returns: - torch.jit.TracedModule: a torch TracedModule - """ - logger = logging.getLogger(__name__) - logger.info("Tracing the model with torch.jit.trace ...") - with torch.no_grad(): - return torch.jit.trace(self.traceable_model, (self.traceable_inputs,)) - - -class Caffe2Model(nn.Module): - """ - A wrapper around the traced model in Caffe2's protobuf format. - The exported graph has different inputs/outputs from the original Pytorch - model, as explained in :class:`Caffe2Tracer`. This class wraps around the - exported graph to simulate the same interface as the original Pytorch model. - It also provides functions to save/load models in Caffe2's format.' - - Examples: - :: - c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2() - inputs = [{"image": img_tensor_CHW}] - outputs = c2_model(inputs) - orig_outputs = torch_model(inputs) - """ - - def __init__(self, predict_net, init_net): - super().__init__() - self.eval() # always in eval mode - self._predict_net = predict_net - self._init_net = init_net - self._predictor = None - - __init__.__HIDE_SPHINX_DOC__ = True - - @property - def predict_net(self): - """ - caffe2.core.Net: the underlying caffe2 predict net - """ - return self._predict_net - - @property - def init_net(self): - """ - caffe2.core.Net: the underlying caffe2 init net - """ - return self._init_net - - def save_protobuf(self, output_dir): - """ - Save the model as caffe2's protobuf format. - It saves the following files: - - * "model.pb": definition of the graph. Can be visualized with - tools like `netron `_. - * "model_init.pb": model parameters - * "model.pbtxt": human-readable definition of the graph. Not - needed for deployment. - - Args: - output_dir (str): the output directory to save protobuf files. 
- """ - logger = logging.getLogger(__name__) - logger.info("Saving model to {} ...".format(output_dir)) - if not PathManager.exists(output_dir): - PathManager.mkdirs(output_dir) - - with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f: - f.write(self._predict_net.SerializeToString()) - with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f: - f.write(str(self._predict_net)) - with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f: - f.write(self._init_net.SerializeToString()) - - def save_graph(self, output_file, inputs=None): - """ - Save the graph as SVG format. - - Args: - output_file (str): a SVG file - inputs: optional inputs given to the model. - If given, the inputs will be used to run the graph to record - shape of every tensor. The shape information will be - saved together with the graph. - """ - from .caffe2_export import run_and_save_graph - - if inputs is None: - save_graph(self._predict_net, output_file, op_only=False) - else: - size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0) - device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii") - inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device) - inputs = [x.cpu().numpy() for x in inputs] - run_and_save_graph(self._predict_net, self._init_net, inputs, output_file) - - @staticmethod - def load_protobuf(dir): - """ - Args: - dir (str): a directory used to save Caffe2Model with - :meth:`save_protobuf`. - The files "model.pb" and "model_init.pb" are needed. - - Returns: - Caffe2Model: the caffe2 model loaded from this directory. - """ - predict_net = caffe2_pb2.NetDef() - with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f: - predict_net.ParseFromString(f.read()) - - init_net = caffe2_pb2.NetDef() - with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f: - init_net.ParseFromString(f.read()) - - return Caffe2Model(predict_net, init_net) - - def __call__(self, inputs): - """ - An interface that wraps around a Caffe2 model and mimics detectron2's models' - input/output format. See details about the format at :doc:`/tutorials/models`. - This is used to compare the outputs of caffe2 model with its original torch model. - - Due to the extra conversion between Pytorch/Caffe2, this method is not meant for - benchmark. Because of the conversion, this method also has dependency - on detectron2 in order to convert to detectron2's output format. 
- """ - if self._predictor is None: - self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net) - return self._predictor(inputs) diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/evaluation/detection_coco_evaluator.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/evaluation/detection_coco_evaluator.py deleted file mode 100644 index 8ea65b7f2226bfea3a884a5f4aa37f6e658b7e83..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/oneformer/oneformer/evaluation/detection_coco_evaluator.py +++ /dev/null @@ -1,723 +0,0 @@ -# ------------------------------------------------------------------------------ -# Reference: https://github.com/facebookresearch/detectron2/blob/main/detectron2/evaluation/coco_evaluation.py -# Modified by Jitesh Jain (https://github.com/praeclarumjj3) -# ------------------------------------------------------------------------------ - -import contextlib -import copy -import io -import itertools -import json -import logging -import numpy as np -import os -import pickle -from collections import OrderedDict -import annotator.oneformer.pycocotools.mask as mask_util -import torch -from annotator.oneformer.pycocotools.coco import COCO -from annotator.oneformer.pycocotools.cocoeval import COCOeval -from tabulate import tabulate - -import annotator.oneformer.detectron2.utils.comm as comm -from annotator.oneformer.detectron2.config import CfgNode -from annotator.oneformer.detectron2.data import MetadataCatalog -from annotator.oneformer.detectron2.data.datasets.coco import convert_to_coco_json -from annotator.oneformer.detectron2.structures import Boxes, BoxMode, pairwise_iou -from annotator.oneformer.detectron2.utils.file_io import PathManager -from annotator.oneformer.detectron2.utils.logger import create_small_table - -from .evaluator import DatasetEvaluator - -try: - from annotator.oneformer.detectron2.evaluation.fast_eval_api import COCOeval_opt -except ImportError: - COCOeval_opt = COCOeval - - -class DetectionCOCOEvaluator(DatasetEvaluator): - """ - Evaluate AR for object proposals, AP for instance detection/segmentation, AP - for keypoint detection outputs using COCO's metrics. - See http://cocodataset.org/#detection-eval and - http://cocodataset.org/#keypoints-eval to understand its metrics. - The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means - the metric cannot be computed (e.g. due to no predictions made). - - In addition to COCO, this evaluator is able to support any bounding box detection, - instance segmentation, or keypoint detection dataset. - """ - - def __init__( - self, - dataset_name, - tasks=None, - distributed=True, - output_dir=None, - *, - max_dets_per_image=None, - use_fast_impl=True, - kpt_oks_sigmas=(), - allow_cached_coco=True, - ): - """ - Args: - dataset_name (str): name of the dataset to be evaluated. - It must have either the following corresponding metadata: - - "json_file": the path to the COCO format annotation - - Or it must be in detectron2's standard dataset format - so it can be converted to COCO format automatically. - tasks (tuple[str]): tasks that can be evaluated under the given - configuration. A task is one of "bbox", "segm", "keypoints". - By default, will infer this automatically from predictions. - distributed (True): if True, will collect results from all ranks and run evaluation - in the main process. - Otherwise, will only evaluate the results in the current process. 
- output_dir (str): optional, an output directory to dump all - results predicted on the dataset. The dump contains two files: - - 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and - contains all the results in the format they are produced by the model. - 2. "coco_instances_results.json" a json file in COCO's result format. - max_dets_per_image (int): limit on the maximum number of detections per image. - By default in COCO, this limit is to 100, but this can be customized - to be greater, as is needed in evaluation metrics AP fixed and AP pool - (see https://arxiv.org/pdf/2102.01066.pdf) - This doesn't affect keypoint evaluation. - use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. - Although the results should be very close to the official implementation in COCO - API, it is still recommended to compute results with the official API for use in - papers. The faster implementation also uses more RAM. - kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS. - See http://cocodataset.org/#keypoints-eval - When empty, it will use the defaults in COCO. - Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS. - allow_cached_coco (bool): Whether to use cached coco json from previous validation - runs. You should set this to False if you need to use different validation data. - Defaults to True. - """ - self._logger = logging.getLogger(__name__) - self._distributed = distributed - self._output_dir = output_dir - - if use_fast_impl and (COCOeval_opt is COCOeval): - self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.") - use_fast_impl = False - self._use_fast_impl = use_fast_impl - - # COCOeval requires the limit on the number of detections per image (maxDets) to be a list - # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the - # 3rd element (100) is used as the limit on the number of detections per image when - # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval, - # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults. - if max_dets_per_image is None: - max_dets_per_image = [1, 10, 100] - else: - max_dets_per_image = [1, 10, max_dets_per_image] - self._max_dets_per_image = max_dets_per_image - - if tasks is not None and isinstance(tasks, CfgNode): - kpt_oks_sigmas = ( - tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas - ) - self._logger.warn( - "COCO Evaluator instantiated using config, this is deprecated behavior." - " Please pass in explicit arguments instead." - ) - self._tasks = None # Infering it from predictions should be better - else: - self._tasks = tasks - - self._cpu_device = torch.device("cpu") - - self._metadata = MetadataCatalog.get(dataset_name) - if not hasattr(self._metadata, "json_file"): - if output_dir is None: - raise ValueError( - "output_dir must be provided to COCOEvaluator " - "for datasets not in COCO format." 
- ) - self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...") - - cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json") - self._metadata.json_file = cache_path - convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco) - - json_file = PathManager.get_local_path(self._metadata.json_file) - with contextlib.redirect_stdout(io.StringIO()): - self._coco_api = COCO(json_file) - - # Test set json files do not contain annotations (evaluation must be - # performed using the COCO evaluation server). - self._do_evaluation = "annotations" in self._coco_api.dataset - if self._do_evaluation: - self._kpt_oks_sigmas = kpt_oks_sigmas - - def reset(self): - self._predictions = [] - - def process(self, inputs, outputs): - """ - Args: - inputs: the inputs to a COCO model (e.g., GeneralizedRCNN). - It is a list of dict. Each dict corresponds to an image and - contains keys like "height", "width", "file_name", "image_id". - outputs: the outputs of a COCO model. It is a list of dicts with key - "box_instances" that contains :class:`Instances`. - """ - for input, output in zip(inputs, outputs): - prediction = {"image_id": input["image_id"]} - - if "box_instances" in output: - instances = output["box_instances"].to(self._cpu_device) - prediction["box_instances"] = instances_to_coco_json(instances, input["image_id"]) - if "proposals" in output: - prediction["proposals"] = output["proposals"].to(self._cpu_device) - if len(prediction) > 1: - self._predictions.append(prediction) - - def evaluate(self, img_ids=None): - """ - Args: - img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset - """ - if self._distributed: - comm.synchronize() - predictions = comm.gather(self._predictions, dst=0) - predictions = list(itertools.chain(*predictions)) - - if not comm.is_main_process(): - return {} - else: - predictions = self._predictions - - if len(predictions) == 0: - self._logger.warning("[COCOEvaluator] Did not receive valid predictions.") - return {} - - if self._output_dir: - PathManager.mkdirs(self._output_dir) - file_path = os.path.join(self._output_dir, "instances_predictions.pth") - with PathManager.open(file_path, "wb") as f: - torch.save(predictions, f) - - self._results = OrderedDict() - if "proposals" in predictions[0]: - self._eval_box_proposals(predictions) - if "box_instances" in predictions[0]: - self._eval_predictions(predictions, img_ids=img_ids) - # Copy so the caller can do whatever with results - return copy.deepcopy(self._results) - - def _tasks_from_predictions(self, predictions): - """ - Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions. - """ - tasks = {"bbox"} - for pred in predictions: - if "keypoints" in pred: - tasks.add("keypoints") - return sorted(tasks) - - def _eval_predictions(self, predictions, img_ids=None): - """ - Evaluate predictions. Fill self._results with the metrics of the tasks. 
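
        Example (editor's sketch, not part of the original file) of the
        end-to-end flow that feeds this method; ``model`` and ``data_loader``
        are assumed to follow detectron2's usual inference contract, with
        outputs keyed by "box_instances" as above::

            evaluator = DetectionCOCOEvaluator("coco_2017_val", output_dir="./eval_out")
            evaluator.reset()
            with torch.no_grad():
                for inputs in data_loader:            # list[dict] with "image_id", ...
                    evaluator.process(inputs, model(inputs))
            metrics = evaluator.evaluate()            # e.g. {"bbox": {"AP": ..., "AP50": ...}}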
- """ - self._logger.info("Preparing results for COCO format ...") - coco_results = list(itertools.chain(*[x["box_instances"] for x in predictions])) - tasks = self._tasks or self._tasks_from_predictions(coco_results) - - # unmap the category ids for COCO - if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"): - dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id - all_contiguous_ids = list(dataset_id_to_contiguous_id.values()) - num_classes = len(all_contiguous_ids) - assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1 - - reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()} - for result in coco_results: - category_id = result["category_id"] - assert category_id < num_classes, ( - f"A prediction has class={category_id}, " - f"but the dataset only has {num_classes} classes and " - f"predicted class id should be in [0, {num_classes - 1}]." - ) - result["category_id"] = reverse_id_mapping[category_id] - - if self._output_dir: - file_path = os.path.join(self._output_dir, "coco_instances_results.json") - self._logger.info("Saving results to {}".format(file_path)) - with PathManager.open(file_path, "w") as f: - f.write(json.dumps(coco_results)) - f.flush() - - if not self._do_evaluation: - self._logger.info("Annotations are not available for evaluation.") - return - - self._logger.info( - "Evaluating predictions with {} COCO API...".format( - "unofficial" if self._use_fast_impl else "official" - ) - ) - for task in sorted(tasks): - assert task in {"bbox", "keypoints"}, f"Got unknown task: {task}!" - coco_eval = ( - _evaluate_predictions_on_coco( - self._coco_api, - coco_results, - task, - kpt_oks_sigmas=self._kpt_oks_sigmas, - use_fast_impl=self._use_fast_impl, - img_ids=img_ids, - max_dets_per_image=self._max_dets_per_image, - ) - if len(coco_results) > 0 - else None # cocoapi does not handle empty results very well - ) - - res = self._derive_coco_results( - coco_eval, task, class_names=self._metadata.get("thing_classes") - ) - self._results[task] = res - - def _eval_box_proposals(self, predictions): - """ - Evaluate the box proposals in predictions. - Fill self._results with the metrics for "box_proposals" task. - """ - if self._output_dir: - # Saving generated box proposals to file. - # Predicted box_proposals are in XYXY_ABS mode. 
- bbox_mode = BoxMode.XYXY_ABS.value - ids, boxes, objectness_logits = [], [], [] - for prediction in predictions: - ids.append(prediction["image_id"]) - boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy()) - objectness_logits.append(prediction["proposals"].objectness_logits.numpy()) - - proposal_data = { - "boxes": boxes, - "objectness_logits": objectness_logits, - "ids": ids, - "bbox_mode": bbox_mode, - } - with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f: - pickle.dump(proposal_data, f) - - if not self._do_evaluation: - self._logger.info("Annotations are not available for evaluation.") - return - - self._logger.info("Evaluating bbox proposals ...") - res = {} - areas = {"all": "", "small": "s", "medium": "m", "large": "l"} - for limit in [100, 1000]: - for area, suffix in areas.items(): - stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit) - key = "AR{}@{:d}".format(suffix, limit) - res[key] = float(stats["ar"].item() * 100) - self._logger.info("Proposal metrics: \n" + create_small_table(res)) - self._results["box_proposals"] = res - - def _derive_coco_results(self, coco_eval, iou_type, class_names=None): - """ - Derive the desired score numbers from summarized COCOeval. - - Args: - coco_eval (None or COCOEval): None represents no predictions from model. - iou_type (str): - class_names (None or list[str]): if provided, will use it to predict - per-category AP. - - Returns: - a dict of {metric name: score} - """ - - metrics = { - "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], - "keypoints": ["AP", "AP50", "AP75", "APm", "APl"], - }[iou_type] - - if coco_eval is None: - self._logger.warn("No predictions from the model!") - return {metric: float("nan") for metric in metrics} - - # the standard metrics - results = { - metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan") - for idx, metric in enumerate(metrics) - } - self._logger.info( - "Evaluation results for {}: \n".format(iou_type) + create_small_table(results) - ) - if not np.isfinite(sum(results.values())): - self._logger.info("Some metrics cannot be computed and is shown as NaN.") - - if class_names is None or len(class_names) <= 1: - return results - # Compute per-category AP - # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa - precisions = coco_eval.eval["precision"] - # precision has dims (iou, recall, cls, area range, max dets) - assert len(class_names) == precisions.shape[2] - - results_per_category = [] - for idx, name in enumerate(class_names): - # area range index 0: all area ranges - # max dets index -1: typically 100 per image - precision = precisions[:, :, idx, 0, -1] - precision = precision[precision > -1] - ap = np.mean(precision) if precision.size else float("nan") - results_per_category.append(("{}".format(name), float(ap * 100))) - - # tabulate it - N_COLS = min(6, len(results_per_category) * 2) - results_flatten = list(itertools.chain(*results_per_category)) - results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)]) - table = tabulate( - results_2d, - tablefmt="pipe", - floatfmt=".3f", - headers=["category", "AP"] * (N_COLS // 2), - numalign="left", - ) - self._logger.info("Per-category {} AP: \n".format(iou_type) + table) - - results.update({"AP-" + name: ap for name, ap in results_per_category}) - return results - - -def instances_to_coco_json(instances, 
img_id): - """ - Dump an "Instances" object to a COCO-format json that's used for evaluation. - - Args: - instances (Instances): - img_id (int): the image id - - Returns: - list[dict]: list of json annotations in COCO format. - """ - num_instance = len(instances) - if num_instance == 0: - return [] - - boxes = instances.pred_boxes.tensor.numpy() - boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS) - boxes = boxes.tolist() - scores = instances.scores.tolist() - classes = instances.pred_classes.tolist() - - has_mask = instances.has("pred_masks") - if has_mask: - # use RLE to encode the masks, because they are too large and takes memory - # since this evaluator stores outputs of the entire dataset - rles = [ - mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0] - for mask in instances.pred_masks - ] - for rle in rles: - # "counts" is an array encoded by mask_util as a byte-stream. Python3's - # json writer which always produces strings cannot serialize a bytestream - # unless you decode it. Thankfully, utf-8 works out (which is also what - # the annotator.oneformer.pycocotools/_mask.pyx does). - rle["counts"] = rle["counts"].decode("utf-8") - - has_keypoints = instances.has("pred_keypoints") - if has_keypoints: - keypoints = instances.pred_keypoints - - results = [] - for k in range(num_instance): - result = { - "image_id": img_id, - "category_id": classes[k], - "bbox": boxes[k], - "score": scores[k], - } - if has_mask: - result["segmentation"] = rles[k] - if has_keypoints: - # In COCO annotations, - # keypoints coordinates are pixel indices. - # However our predictions are floating point coordinates. - # Therefore we subtract 0.5 to be consistent with the annotation format. - # This is the inverse of data loading logic in `datasets/coco.py`. - keypoints[k][:, :2] -= 0.5 - result["keypoints"] = keypoints[k].flatten().tolist() - results.append(result) - return results - - -# inspired from Detectron: -# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa -def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None): - """ - Evaluate detection proposal recall metrics. This function is a much - faster alternative to the official COCO API recall evaluation code. However, - it produces slightly different results. 
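
    Example (editor's sketch, not part of the original file; ``predictions`` and
    ``coco_api`` as passed in from ``_eval_box_proposals`` above)::

        stats = _evaluate_box_proposals(predictions, coco_api, area="all", limit=100)
        ar_at_100 = float(stats["ar"].item() * 100)   # the "AR@100" number reported above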
- """ - # Record max overlap value for each gt box - # Return vector of overlap values - areas = { - "all": 0, - "small": 1, - "medium": 2, - "large": 3, - "96-128": 4, - "128-256": 5, - "256-512": 6, - "512-inf": 7, - } - area_ranges = [ - [0**2, 1e5**2], # all - [0**2, 32**2], # small - [32**2, 96**2], # medium - [96**2, 1e5**2], # large - [96**2, 128**2], # 96-128 - [128**2, 256**2], # 128-256 - [256**2, 512**2], # 256-512 - [512**2, 1e5**2], - ] # 512-inf - assert area in areas, "Unknown area range: {}".format(area) - area_range = area_ranges[areas[area]] - gt_overlaps = [] - num_pos = 0 - - for prediction_dict in dataset_predictions: - predictions = prediction_dict["proposals"] - - # sort predictions in descending order - # TODO maybe remove this and make it explicit in the documentation - inds = predictions.objectness_logits.sort(descending=True)[1] - predictions = predictions[inds] - - ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"]) - anno = coco_api.loadAnns(ann_ids) - gt_boxes = [ - BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) - for obj in anno - if obj["iscrowd"] == 0 - ] - gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes - gt_boxes = Boxes(gt_boxes) - gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) - - if len(gt_boxes) == 0 or len(predictions) == 0: - continue - - valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) - gt_boxes = gt_boxes[valid_gt_inds] - - num_pos += len(gt_boxes) - - if len(gt_boxes) == 0: - continue - - if limit is not None and len(predictions) > limit: - predictions = predictions[:limit] - - overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes) - - _gt_overlaps = torch.zeros(len(gt_boxes)) - for j in range(min(len(predictions), len(gt_boxes))): - # find which proposal box maximally covers each gt box - # and get the iou amount of coverage for each gt box - max_overlaps, argmax_overlaps = overlaps.max(dim=0) - - # find which gt box is 'best' covered (i.e. 'best' = most iou) - gt_ovr, gt_ind = max_overlaps.max(dim=0) - assert gt_ovr >= 0 - # find the proposal box that covers the best covered gt box - box_ind = argmax_overlaps[gt_ind] - # record the iou coverage of this gt box - _gt_overlaps[j] = overlaps[box_ind, gt_ind] - assert _gt_overlaps[j] == gt_ovr - # mark the proposal box and the gt box as used - overlaps[box_ind, :] = -1 - overlaps[:, gt_ind] = -1 - - # append recorded iou coverage level - gt_overlaps.append(_gt_overlaps) - gt_overlaps = ( - torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32) - ) - gt_overlaps, _ = torch.sort(gt_overlaps) - - if thresholds is None: - step = 0.05 - thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) - recalls = torch.zeros_like(thresholds) - # compute recall for each iou threshold - for i, t in enumerate(thresholds): - recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) - # ar = 2 * np.trapz(recalls, thresholds) - ar = recalls.mean() - return { - "ar": ar, - "recalls": recalls, - "thresholds": thresholds, - "gt_overlaps": gt_overlaps, - "num_pos": num_pos, - } - - -def _evaluate_predictions_on_coco( - coco_gt, - coco_results, - iou_type, - kpt_oks_sigmas=None, - use_fast_impl=True, - img_ids=None, - max_dets_per_image=None, -): - """ - Evaluate the coco results using COCOEval API. 
- """ - assert len(coco_results) > 0 - - if iou_type == "segm": - coco_results = copy.deepcopy(coco_results) - # When evaluating mask AP, if the results contain bbox, cocoapi will - # use the box area as the area of the instance, instead of the mask area. - # This leads to a different definition of small/medium/large. - # We remove the bbox field to let mask AP use mask area. - for c in coco_results: - c.pop("bbox", None) - - coco_dt = coco_gt.loadRes(coco_results) - coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type) - # For COCO, the default max_dets_per_image is [1, 10, 100]. - if max_dets_per_image is None: - max_dets_per_image = [1, 10, 100] # Default from COCOEval - else: - assert ( - len(max_dets_per_image) >= 3 - ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3" - # In the case that user supplies a custom input for max_dets_per_image, - # apply COCOevalMaxDets to evaluate AP with the custom input. - if max_dets_per_image[2] != 100: - coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type) - if iou_type != "keypoints": - coco_eval.params.maxDets = max_dets_per_image - - if img_ids is not None: - coco_eval.params.imgIds = img_ids - - if iou_type == "keypoints": - # Use the COCO default keypoint OKS sigmas unless overrides are specified - if kpt_oks_sigmas: - assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "annotator.oneformer.pycocotools is too old!" - coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas) - # COCOAPI requires every detection and every gt to have keypoints, so - # we just take the first entry from both - num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3 - num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3 - num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas) - assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, ( - f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. " - f"Ground truth contains {num_keypoints_gt} keypoints. " - f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. " - "They have to agree with each other. For meaning of OKS, please refer to " - "http://cocodataset.org/#keypoints-eval." 
- ) - - coco_eval.evaluate() - coco_eval.accumulate() - coco_eval.summarize() - - return coco_eval - - -class COCOevalMaxDets(COCOeval): - """ - Modified version of COCOeval for evaluating AP with a custom - maxDets (by default for COCO, maxDets is 100) - """ - - def summarize(self): - """ - Compute and display summary metrics for evaluation results given - a custom value for max_dets_per_image - """ - - def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100): - p = self.params - iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}" - titleStr = "Average Precision" if ap == 1 else "Average Recall" - typeStr = "(AP)" if ap == 1 else "(AR)" - iouStr = ( - "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1]) - if iouThr is None - else "{:0.2f}".format(iouThr) - ) - - aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] - mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] - if ap == 1: - # dimension of precision: [TxRxKxAxM] - s = self.eval["precision"] - # IoU - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, :, aind, mind] - else: - # dimension of recall: [TxKxAxM] - s = self.eval["recall"] - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, aind, mind] - if len(s[s > -1]) == 0: - mean_s = -1 - else: - mean_s = np.mean(s[s > -1]) - print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s)) - return mean_s - - def _summarizeDets(): - stats = np.zeros((12,)) - # Evaluate AP using the custom limit on maximum detections per image - stats[0] = _summarize(1, maxDets=self.params.maxDets[2]) - stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2]) - stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2]) - stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2]) - stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2]) - stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2]) - stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) - stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) - stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) - stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2]) - stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2]) - stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2]) - return stats - - def _summarizeKps(): - stats = np.zeros((10,)) - stats[0] = _summarize(1, maxDets=20) - stats[1] = _summarize(1, maxDets=20, iouThr=0.5) - stats[2] = _summarize(1, maxDets=20, iouThr=0.75) - stats[3] = _summarize(1, maxDets=20, areaRng="medium") - stats[4] = _summarize(1, maxDets=20, areaRng="large") - stats[5] = _summarize(0, maxDets=20) - stats[6] = _summarize(0, maxDets=20, iouThr=0.5) - stats[7] = _summarize(0, maxDets=20, iouThr=0.75) - stats[8] = _summarize(0, maxDets=20, areaRng="medium") - stats[9] = _summarize(0, maxDets=20, areaRng="large") - return stats - - if not self.eval: - raise Exception("Please run accumulate() first") - iouType = self.params.iouType - if iouType == "segm" or iouType == "bbox": - summarize = _summarizeDets - elif iouType == "keypoints": - summarize = _summarizeKps - self.stats = summarize() - - def __str__(self): - self.summarize() \ No newline at end of file diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/utils/weight_init.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/utils/weight_init.py 
deleted file mode 100644 index 38141ba3d61f64ddfc0a31574b4648cbad96d7dd..0000000000000000000000000000000000000000 --- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/utils/weight_init.py +++ /dev/null @@ -1,62 +0,0 @@ -"""Modified from https://github.com/rwightman/pytorch-image- -models/blob/master/timm/models/layers/drop.py.""" - -import math -import warnings - -import torch - - -def _no_grad_trunc_normal_(tensor, mean, std, a, b): - """Reference: https://people.sc.fsu.edu/~jburkardt/presentations - /truncated_normal.pdf""" - - def norm_cdf(x): - # Computes standard normal cumulative distribution function - return (1. + math.erf(x / math.sqrt(2.))) / 2. - - if (mean < a - 2 * std) or (mean > b + 2 * std): - warnings.warn( - 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. ' - 'The distribution of values may be incorrect.', - stacklevel=2) - - with torch.no_grad(): - # Values are generated by using a truncated uniform distribution and - # then using the inverse CDF for the normal distribution. - # Get upper and lower cdf values - lower_bound = norm_cdf((a - mean) / std) - upper_bound = norm_cdf((b - mean) / std) - - # Uniformly fill tensor with values from [l, u], then translate to - # [2l-1, 2u-1]. - tensor.uniform_(2 * lower_bound - 1, 2 * upper_bound - 1) - - # Use inverse cdf transform for normal distribution to get truncated - # standard normal - tensor.erfinv_() - - # Transform to proper mean, std - tensor.mul_(std * math.sqrt(2.)) - tensor.add_(mean) - - # Clamp to ensure it's in the proper range - tensor.clamp_(min=a, max=b) - return tensor - - -def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.): - r"""Fills the input Tensor with values drawn from a truncated - normal distribution. The values are effectively drawn from the - normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` - with values outside :math:`[a, b]` redrawn until they are within - the bounds. The method used for generating the random values works - best when :math:`a \leq \text{mean} \leq b`. - Args: - tensor (``torch.Tensor``): an n-dimensional `torch.Tensor` - mean (float): the mean of the normal distribution - std (float): the standard deviation of the normal distribution - a (float): the minimum cutoff value - b (float): the maximum cutoff value - """ - return _no_grad_trunc_normal_(tensor, mean, std, a, b) diff --git a/spaces/TFanon/TFanon/README.md b/spaces/TFanon/TFanon/README.md deleted file mode 100644 index cf2e7752268f9fe20678df9aa1791afc43c272b5..0000000000000000000000000000000000000000 --- a/spaces/TFanon/TFanon/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: TFanon -emoji: ๐Ÿจ -colorFrom: yellow -colorTo: red -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py deleted file mode 100644 index 5aa9ecbb80cf08255f7e678432313b10b0a5f5ce..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/pygments/filters/__init__.py +++ /dev/null @@ -1,940 +0,0 @@ -""" - pygments.filters - ~~~~~~~~~~~~~~~~ - - Module containing filter lookup functions and default - filters. - - :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. 
- :license: BSD, see LICENSE for details. -""" - -import re - -from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ - string_to_tokentype -from pip._vendor.pygments.filter import Filter -from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ - get_choice_opt, ClassNotFound, OptionError -from pip._vendor.pygments.plugin import find_plugin_filters - - -def find_filter_class(filtername): - """Lookup a filter by name. Return None if not found.""" - if filtername in FILTERS: - return FILTERS[filtername] - for name, cls in find_plugin_filters(): - if name == filtername: - return cls - return None - - -def get_filter_by_name(filtername, **options): - """Return an instantiated filter. - - Options are passed to the filter initializer if wanted. - Raise a ClassNotFound if not found. - """ - cls = find_filter_class(filtername) - if cls: - return cls(**options) - else: - raise ClassNotFound('filter %r not found' % filtername) - - -def get_all_filters(): - """Return a generator of all filter names.""" - yield from FILTERS - for name, _ in find_plugin_filters(): - yield name - - -def _replace_special(ttype, value, regex, specialttype, - replacefunc=lambda x: x): - last = 0 - for match in regex.finditer(value): - start, end = match.start(), match.end() - if start != last: - yield ttype, value[last:start] - yield specialttype, replacefunc(value[start:end]) - last = end - if last != len(value): - yield ttype, value[last:] - - -class CodeTagFilter(Filter): - """Highlight special code tags in comments and docstrings. - - Options accepted: - - `codetags` : list of strings - A list of strings that are flagged as code tags. The default is to - highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``. - - .. versionchanged:: 2.13 - Now recognizes ``FIXME`` by default. - """ - - def __init__(self, **options): - Filter.__init__(self, **options) - tags = get_list_opt(options, 'codetags', - ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE']) - self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ - re.escape(tag) for tag in tags if tag - ])) - - def filter(self, lexer, stream): - regex = self.tag_re - for ttype, value in stream: - if ttype in String.Doc or \ - ttype in Comment and \ - ttype not in Comment.Preproc: - yield from _replace_special(ttype, value, regex, Comment.Special) - else: - yield ttype, value - - -class SymbolFilter(Filter): - """Convert mathematical symbols such as \\ in Isabelle - or \\longrightarrow in LaTeX into Unicode characters. - - This is mostly useful for HTML or console output when you want to - approximate the source rendering you'd see in an IDE. - - Options accepted: - - `lang` : string - The symbol language. Must be one of ``'isabelle'`` or - ``'latex'``. The default is ``'isabelle'``. 
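
    Example (editor's sketch, not part of the original file; any lexer works,
    ``TexLexer`` is only an illustration)::

        from pip._vendor.pygments.lexers import TexLexer

        lexer = TexLexer()
        lexer.add_filter(SymbolFilter(lang='latex'))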
- """ - - latex_symbols = { - '\\alpha' : '\U000003b1', - '\\beta' : '\U000003b2', - '\\gamma' : '\U000003b3', - '\\delta' : '\U000003b4', - '\\varepsilon' : '\U000003b5', - '\\zeta' : '\U000003b6', - '\\eta' : '\U000003b7', - '\\vartheta' : '\U000003b8', - '\\iota' : '\U000003b9', - '\\kappa' : '\U000003ba', - '\\lambda' : '\U000003bb', - '\\mu' : '\U000003bc', - '\\nu' : '\U000003bd', - '\\xi' : '\U000003be', - '\\pi' : '\U000003c0', - '\\varrho' : '\U000003c1', - '\\sigma' : '\U000003c3', - '\\tau' : '\U000003c4', - '\\upsilon' : '\U000003c5', - '\\varphi' : '\U000003c6', - '\\chi' : '\U000003c7', - '\\psi' : '\U000003c8', - '\\omega' : '\U000003c9', - '\\Gamma' : '\U00000393', - '\\Delta' : '\U00000394', - '\\Theta' : '\U00000398', - '\\Lambda' : '\U0000039b', - '\\Xi' : '\U0000039e', - '\\Pi' : '\U000003a0', - '\\Sigma' : '\U000003a3', - '\\Upsilon' : '\U000003a5', - '\\Phi' : '\U000003a6', - '\\Psi' : '\U000003a8', - '\\Omega' : '\U000003a9', - '\\leftarrow' : '\U00002190', - '\\longleftarrow' : '\U000027f5', - '\\rightarrow' : '\U00002192', - '\\longrightarrow' : '\U000027f6', - '\\Leftarrow' : '\U000021d0', - '\\Longleftarrow' : '\U000027f8', - '\\Rightarrow' : '\U000021d2', - '\\Longrightarrow' : '\U000027f9', - '\\leftrightarrow' : '\U00002194', - '\\longleftrightarrow' : '\U000027f7', - '\\Leftrightarrow' : '\U000021d4', - '\\Longleftrightarrow' : '\U000027fa', - '\\mapsto' : '\U000021a6', - '\\longmapsto' : '\U000027fc', - '\\relbar' : '\U00002500', - '\\Relbar' : '\U00002550', - '\\hookleftarrow' : '\U000021a9', - '\\hookrightarrow' : '\U000021aa', - '\\leftharpoondown' : '\U000021bd', - '\\rightharpoondown' : '\U000021c1', - '\\leftharpoonup' : '\U000021bc', - '\\rightharpoonup' : '\U000021c0', - '\\rightleftharpoons' : '\U000021cc', - '\\leadsto' : '\U0000219d', - '\\downharpoonleft' : '\U000021c3', - '\\downharpoonright' : '\U000021c2', - '\\upharpoonleft' : '\U000021bf', - '\\upharpoonright' : '\U000021be', - '\\restriction' : '\U000021be', - '\\uparrow' : '\U00002191', - '\\Uparrow' : '\U000021d1', - '\\downarrow' : '\U00002193', - '\\Downarrow' : '\U000021d3', - '\\updownarrow' : '\U00002195', - '\\Updownarrow' : '\U000021d5', - '\\langle' : '\U000027e8', - '\\rangle' : '\U000027e9', - '\\lceil' : '\U00002308', - '\\rceil' : '\U00002309', - '\\lfloor' : '\U0000230a', - '\\rfloor' : '\U0000230b', - '\\flqq' : '\U000000ab', - '\\frqq' : '\U000000bb', - '\\bot' : '\U000022a5', - '\\top' : '\U000022a4', - '\\wedge' : '\U00002227', - '\\bigwedge' : '\U000022c0', - '\\vee' : '\U00002228', - '\\bigvee' : '\U000022c1', - '\\forall' : '\U00002200', - '\\exists' : '\U00002203', - '\\nexists' : '\U00002204', - '\\neg' : '\U000000ac', - '\\Box' : '\U000025a1', - '\\Diamond' : '\U000025c7', - '\\vdash' : '\U000022a2', - '\\models' : '\U000022a8', - '\\dashv' : '\U000022a3', - '\\surd' : '\U0000221a', - '\\le' : '\U00002264', - '\\ge' : '\U00002265', - '\\ll' : '\U0000226a', - '\\gg' : '\U0000226b', - '\\lesssim' : '\U00002272', - '\\gtrsim' : '\U00002273', - '\\lessapprox' : '\U00002a85', - '\\gtrapprox' : '\U00002a86', - '\\in' : '\U00002208', - '\\notin' : '\U00002209', - '\\subset' : '\U00002282', - '\\supset' : '\U00002283', - '\\subseteq' : '\U00002286', - '\\supseteq' : '\U00002287', - '\\sqsubset' : '\U0000228f', - '\\sqsupset' : '\U00002290', - '\\sqsubseteq' : '\U00002291', - '\\sqsupseteq' : '\U00002292', - '\\cap' : '\U00002229', - '\\bigcap' : '\U000022c2', - '\\cup' : '\U0000222a', - '\\bigcup' : '\U000022c3', - '\\sqcup' : '\U00002294', - '\\bigsqcup' : '\U00002a06', - 
'\\sqcap' : '\U00002293',
-        '\\Bigsqcap' : '\U00002a05',
-        '\\setminus' : '\U00002216',
-        '\\propto' : '\U0000221d',
-        '\\uplus' : '\U0000228e',
-        '\\bigplus' : '\U00002a04',
-        '\\sim' : '\U0000223c',
-        '\\doteq' : '\U00002250',
-        '\\simeq' : '\U00002243',
-        '\\approx' : '\U00002248',
-        '\\asymp' : '\U0000224d',
-        '\\cong' : '\U00002245',
-        '\\equiv' : '\U00002261',
-        '\\Join' : '\U000022c8',
-        '\\bowtie' : '\U00002a1d',
-        '\\prec' : '\U0000227a',
-        '\\succ' : '\U0000227b',
-        '\\preceq' : '\U0000227c',
-        '\\succeq' : '\U0000227d',
-        '\\parallel' : '\U00002225',
-        '\\mid' : '\U000000a6',
-        '\\pm' : '\U000000b1',
-        '\\mp' : '\U00002213',
-        '\\times' : '\U000000d7',
-        '\\div' : '\U000000f7',
-        '\\cdot' : '\U000022c5',
-        '\\star' : '\U000022c6',
-        '\\circ' : '\U00002218',
-        '\\dagger' : '\U00002020',
-        '\\ddagger' : '\U00002021',
-        '\\lhd' : '\U000022b2',
-        '\\rhd' : '\U000022b3',
-        '\\unlhd' : '\U000022b4',
-        '\\unrhd' : '\U000022b5',
-        '\\triangleleft' : '\U000025c3',
-        '\\triangleright' : '\U000025b9',
-        '\\triangle' : '\U000025b3',
-        '\\triangleq' : '\U0000225c',
-        '\\oplus' : '\U00002295',
-        '\\bigoplus' : '\U00002a01',
-        '\\otimes' : '\U00002297',
-        '\\bigotimes' : '\U00002a02',
-        '\\odot' : '\U00002299',
-        '\\bigodot' : '\U00002a00',
-        '\\ominus' : '\U00002296',
-        '\\oslash' : '\U00002298',
-        '\\dots' : '\U00002026',
-        '\\cdots' : '\U000022ef',
-        '\\sum' : '\U00002211',
-        '\\prod' : '\U0000220f',
-        '\\coprod' : '\U00002210',
-        '\\infty' : '\U0000221e',
-        '\\int' : '\U0000222b',
-        '\\oint' : '\U0000222e',
-        '\\clubsuit' : '\U00002663',
-        '\\diamondsuit' : '\U00002662',
-        '\\heartsuit' : '\U00002661',
-        '\\spadesuit' : '\U00002660',
-        '\\aleph' : '\U00002135',
-        '\\emptyset' : '\U00002205',
-        '\\nabla' : '\U00002207',
-        '\\partial' : '\U00002202',
-        '\\flat' : '\U0000266d',
-        '\\natural' : '\U0000266e',
-        '\\sharp' : '\U0000266f',
-        '\\angle' : '\U00002220',
-        '\\copyright' : '\U000000a9',
-        '\\textregistered' : '\U000000ae',
-        '\\textonequarter' : '\U000000bc',
-        '\\textonehalf' : '\U000000bd',
-        '\\textthreequarters' : '\U000000be',
-        '\\textordfeminine' : '\U000000aa',
-        '\\textordmasculine' : '\U000000ba',
-        '\\euro' : '\U000020ac',
-        '\\pounds' : '\U000000a3',
-        '\\yen' : '\U000000a5',
-        '\\textcent' : '\U000000a2',
-        '\\textcurrency' : '\U000000a4',
-        '\\textdegree' : '\U000000b0',
-    }
-
-    isabelle_symbols = {
-        '\\<zero>' : '\U0001d7ec',
-        '\\<one>' : '\U0001d7ed',
-        '\\<two>' : '\U0001d7ee',
-        '\\<three>' : '\U0001d7ef',
-        '\\<four>' : '\U0001d7f0',
-        '\\<five>' : '\U0001d7f1',
-        '\\<six>' : '\U0001d7f2',
-        '\\<seven>' : '\U0001d7f3',
-        '\\<eight>' : '\U0001d7f4',
-        '\\<nine>' : '\U0001d7f5',
-        '\\<A>' : '\U0001d49c',
-        '\\<B>' : '\U0000212c',
-        '\\<C>' : '\U0001d49e',
-        '\\<D>' : '\U0001d49f',
-        '\\<E>' : '\U00002130',
-        '\\<F>' : '\U00002131',
-        '\\<G>' : '\U0001d4a2',
-        '\\<H>' : '\U0000210b',
-        '\\<I>' : '\U00002110',
-        '\\<J>' : '\U0001d4a5',
-        '\\<K>' : '\U0001d4a6',
-        '\\<L>' : '\U00002112',
-        '\\<M>' : '\U00002133',
-        '\\<N>' : '\U0001d4a9',
-        '\\<O>' : '\U0001d4aa',
-        '\\<P>' : '\U0001d4ab',
-        '\\<Q>' : '\U0001d4ac',
-        '\\<R>' : '\U0000211b',
-        '\\<S>' : '\U0001d4ae',
-        '\\<T>' : '\U0001d4af',
-        '\\<U>' : '\U0001d4b0',
-        '\\<V>' : '\U0001d4b1',
-        '\\<W>' : '\U0001d4b2',
-        '\\<X>' : '\U0001d4b3',
-        '\\<Y>' : '\U0001d4b4',
-        '\\<Z>' : '\U0001d4b5',
-        '\\<a>' : '\U0001d5ba',
-        '\\<b>' : '\U0001d5bb',
-        '\\<c>' : '\U0001d5bc',
-        '\\<d>' : '\U0001d5bd',
-        '\\<e>' : '\U0001d5be',
-        '\\<f>' : '\U0001d5bf',
-        '\\<g>' : '\U0001d5c0',
-        '\\<h>' : '\U0001d5c1',
-        '\\<i>' : '\U0001d5c2',
-        '\\<j>' : '\U0001d5c3',
-        '\\<k>' : '\U0001d5c4',
-        '\\<l>' : '\U0001d5c5',
-        '\\<m>' : '\U0001d5c6',
-        '\\<n>' : '\U0001d5c7',
-        '\\<o>' : '\U0001d5c8',
-        '\\<p>' : '\U0001d5c9',
-        '\\<q>' : '\U0001d5ca',
-        '\\<r>' : '\U0001d5cb',
-        '\\<s>' : '\U0001d5cc',
-        '\\<t>' : '\U0001d5cd',
-        '\\<u>' : '\U0001d5ce',
-        '\\<v>' : '\U0001d5cf',
-        '\\<w>' : '\U0001d5d0',
-        '\\<x>' : '\U0001d5d1',
-        '\\<y>' : '\U0001d5d2',
-        '\\<z>' : '\U0001d5d3',
-        '\\<AA>' : '\U0001d504',
-        '\\<BB>' : '\U0001d505',
-        '\\<CC>' : '\U0000212d',
-        '\\<DD>' : '\U0001d507',
-        '\\<EE>' : '\U0001d508',
-        '\\<FF>' : '\U0001d509',
-        '\\<GG>' : '\U0001d50a',
-        '\\<HH>' : '\U0000210c',
-        '\\<II>' : '\U00002111',
-        '\\<JJ>' : '\U0001d50d',
-        '\\<KK>' : '\U0001d50e',
-        '\\<LL>' : '\U0001d50f',
-        '\\<MM>' : '\U0001d510',
-        '\\<NN>' : '\U0001d511',
-        '\\<OO>' : '\U0001d512',
-        '\\<PP>' : '\U0001d513',
-        '\\<QQ>' : '\U0001d514',
-        '\\<RR>' : '\U0000211c',
-        '\\<SS>' : '\U0001d516',
-        '\\<TT>' : '\U0001d517',
-        '\\<UU>' : '\U0001d518',
-        '\\<VV>' : '\U0001d519',
-        '\\<WW>' : '\U0001d51a',
-        '\\<XX>' : '\U0001d51b',
-        '\\<YY>' : '\U0001d51c',
-        '\\<ZZ>' : '\U00002128',
-        '\\<aa>' : '\U0001d51e',
-        '\\<bb>' : '\U0001d51f',
-        '\\<cc>' : '\U0001d520',
-        '\\<dd>' : '\U0001d521',
-        '\\<ee>' : '\U0001d522',
-        '\\<ff>' : '\U0001d523',
-        '\\<gg>' : '\U0001d524',
-        '\\<hh>' : '\U0001d525',
-        '\\<ii>' : '\U0001d526',
-        '\\<jj>' : '\U0001d527',
-        '\\<kk>' : '\U0001d528',
-        '\\<ll>' : '\U0001d529',
-        '\\<mm>' : '\U0001d52a',
-        '\\<nn>' : '\U0001d52b',
-        '\\<oo>' : '\U0001d52c',
-        '\\<pp>' : '\U0001d52d',
-        '\\<qq>' : '\U0001d52e',
-        '\\<rr>' : '\U0001d52f',
-        '\\<ss>' : '\U0001d530',
-        '\\<tt>' : '\U0001d531',
-        '\\<uu>' : '\U0001d532',
-        '\\<vv>' : '\U0001d533',
-        '\\<ww>' : '\U0001d534',
-        '\\<xx>' : '\U0001d535',
-        '\\<yy>' : '\U0001d536',
-        '\\<zz>' : '\U0001d537',
-        '\\<alpha>' : '\U000003b1',
-        '\\<beta>' : '\U000003b2',
-        '\\<gamma>' : '\U000003b3',
-        '\\<delta>' : '\U000003b4',
-        '\\<epsilon>' : '\U000003b5',
-        '\\<zeta>' : '\U000003b6',
-        '\\<eta>' : '\U000003b7',
-        '\\<theta>' : '\U000003b8',
-        '\\<iota>' : '\U000003b9',
-        '\\<kappa>' : '\U000003ba',
-        '\\<lambda>' : '\U000003bb',
-        '\\<mu>' : '\U000003bc',
-        '\\<nu>' : '\U000003bd',
-        '\\<xi>' : '\U000003be',
-        '\\<pi>' : '\U000003c0',
-        '\\<rho>' : '\U000003c1',
-        '\\<sigma>' : '\U000003c3',
-        '\\<tau>' : '\U000003c4',
-        '\\<upsilon>' : '\U000003c5',
-        '\\<phi>' : '\U000003c6',
-        '\\<chi>' : '\U000003c7',
-        '\\<psi>' : '\U000003c8',
-        '\\<omega>' : '\U000003c9',
-        '\\<Gamma>' : '\U00000393',
-        '\\<Delta>' : '\U00000394',
-        '\\<Theta>' : '\U00000398',
-        '\\<Lambda>' : '\U0000039b',
-        '\\<Xi>' : '\U0000039e',
-        '\\<Pi>' : '\U000003a0',
-        '\\<Sigma>' : '\U000003a3',
-        '\\<Upsilon>' : '\U000003a5',
-        '\\<Phi>' : '\U000003a6',
-        '\\<Psi>' : '\U000003a8',
-        '\\<Omega>' : '\U000003a9',
-        '\\<bool>' : '\U0001d539',
-        '\\<complex>' : '\U00002102',
-        '\\<nat>' : '\U00002115',
-        '\\<rat>' : '\U0000211a',
-        '\\<real>' : '\U0000211d',
-        '\\<int>' : '\U00002124',
-        '\\<leftarrow>' : '\U00002190',
-        '\\<longleftarrow>' : '\U000027f5',
-        '\\<rightarrow>' : '\U00002192',
-        '\\<longrightarrow>' : '\U000027f6',
-        '\\<Leftarrow>' : '\U000021d0',
-        '\\<Longleftarrow>' : '\U000027f8',
-        '\\<Rightarrow>' : '\U000021d2',
-        '\\<Longrightarrow>' : '\U000027f9',
-        '\\<leftrightarrow>' : '\U00002194',
-        '\\<longleftrightarrow>' : '\U000027f7',
-        '\\<Leftrightarrow>' : '\U000021d4',
-        '\\<Longleftrightarrow>' : '\U000027fa',
-        '\\<mapsto>' : '\U000021a6',
-        '\\<longmapsto>' : '\U000027fc',
-        '\\<midarrow>' : '\U00002500',
-        '\\<Midarrow>' : '\U00002550',
-        '\\<hookleftarrow>' : '\U000021a9',
-        '\\<hookrightarrow>' : '\U000021aa',
-        '\\<leftharpoondown>' : '\U000021bd',
-        '\\<rightharpoondown>' : '\U000021c1',
-        '\\<leftharpoonup>' : '\U000021bc',
-        '\\<rightharpoonup>' : '\U000021c0',
-        '\\<rightleftharpoons>' : '\U000021cc',
-        '\\<leadsto>' : '\U0000219d',
-        '\\<downharpoonleft>' : '\U000021c3',
-        '\\<downharpoonright>' : '\U000021c2',
-        '\\<upharpoonleft>' : '\U000021bf',
-        '\\<upharpoonright>' : '\U000021be',
-        '\\<restriction>' : '\U000021be',
-        '\\<Colon>' : '\U00002237',
-        '\\<up>' : '\U00002191',
-        '\\<Up>' : '\U000021d1',
-        '\\<down>' : '\U00002193',
-        '\\<Down>' : '\U000021d3',
-        '\\<updown>' : '\U00002195',
-        '\\<Updown>' : '\U000021d5',
-        '\\<langle>' : '\U000027e8',
-        '\\<rangle>' : '\U000027e9',
-        '\\<lceil>' : '\U00002308',
-        '\\<rceil>' : '\U00002309',
-        '\\<lfloor>' : '\U0000230a',
-        '\\<rfloor>' : '\U0000230b',
-        '\\<lparr>' : '\U00002987',
-        '\\<rparr>' : '\U00002988',
-        '\\<llbracket>' : '\U000027e6',
-        '\\<rrbracket>' : '\U000027e7',
-        '\\<lbrace>' : '\U00002983',
-        '\\<rbrace>' : '\U00002984',
-        '\\<guillemotleft>' : '\U000000ab',
-        '\\<guillemotright>' : '\U000000bb',
-        '\\<bottom>' : '\U000022a5',
-        '\\<top>' : '\U000022a4',
-        '\\<and>' : '\U00002227',
-        '\\<And>' : '\U000022c0',
-        '\\<or>' : '\U00002228',
-        '\\<Or>' : '\U000022c1',
-        '\\<forall>' : '\U00002200',
-        '\\<exists>' : '\U00002203',
-        '\\<nexists>' : '\U00002204',
-        '\\<not>' : '\U000000ac',
-        '\\<box>' : '\U000025a1',
-        '\\<diamond>' : '\U000025c7',
-        '\\<turnstile>' : '\U000022a2',
-        '\\<Turnstile>' : '\U000022a8',
-        '\\<tturnstile>' : '\U000022a9',
-        '\\<TTurnstile>' : '\U000022ab',
-        '\\<stileturn>' : '\U000022a3',
-        '\\<surd>' : '\U0000221a',
-        '\\<le>' : '\U00002264',
-        '\\<ge>' : '\U00002265',
-        '\\<lless>' : '\U0000226a',
-        '\\<ggreater>' : '\U0000226b',
-        '\\<lesssim>' : '\U00002272',
-        '\\<greatersim>' : '\U00002273',
-        '\\<lessapprox>' : '\U00002a85',
-        '\\<greaterapprox>' : '\U00002a86',
-        '\\<in>' : '\U00002208',
-        '\\<notin>' : '\U00002209',
-        '\\<subset>' : '\U00002282',
-        '\\<supset>' : '\U00002283',
-        '\\<subseteq>' : '\U00002286',
-        '\\<supseteq>' : '\U00002287',
-        '\\<sqsubset>' : '\U0000228f',
-        '\\<sqsupset>' : '\U00002290',
-        '\\<sqsubseteq>' : '\U00002291',
-        '\\<sqsupseteq>' : '\U00002292',
-        '\\<inter>' : '\U00002229',
-        '\\<Inter>' : '\U000022c2',
-        '\\<union>' : '\U0000222a',
-        '\\<Union>' : '\U000022c3',
-        '\\<squnion>' : '\U00002294',
-        '\\<Squnion>' : '\U00002a06',
-        '\\<sqinter>' : '\U00002293',
-        '\\<Sqinter>' : '\U00002a05',
-        '\\<setminus>' : '\U00002216',
-        '\\<propto>' : '\U0000221d',
-        '\\<uplus>' : '\U0000228e',
-        '\\<Uplus>' : '\U00002a04',
-        '\\<noteq>' : '\U00002260',
-        '\\<sim>' : '\U0000223c',
-        '\\<doteq>' : '\U00002250',
-        '\\<simeq>' : '\U00002243',
-        '\\<approx>' : '\U00002248',
-        '\\<asymp>' : '\U0000224d',
-        '\\<cong>' : '\U00002245',
-        '\\<smile>' : '\U00002323',
-        '\\<equiv>' : '\U00002261',
-        '\\<frown>' : '\U00002322',
-        '\\<Join>' : '\U000022c8',
-        '\\<bowtie>' : '\U00002a1d',
-        '\\<prec>' : '\U0000227a',
-        '\\<succ>' : '\U0000227b',
-        '\\<preceq>' : '\U0000227c',
-        '\\<succeq>' : '\U0000227d',
-        '\\<parallel>' : '\U00002225',
-        '\\<bar>' : '\U000000a6',
-        '\\<plusminus>' : '\U000000b1',
-        '\\<minusplus>' : '\U00002213',
-        '\\<times>' : '\U000000d7',
-        '\\<div>' : '\U000000f7',
-        '\\<cdot>' : '\U000022c5',
-        '\\<star>' : '\U000022c6',
-        '\\<bullet>' : '\U00002219',
-        '\\<circ>' : '\U00002218',
-        '\\<dagger>' : '\U00002020',
-        '\\<ddagger>' : '\U00002021',
-        '\\<lhd>' : '\U000022b2',
-        '\\<rhd>' : '\U000022b3',
-        '\\<unlhd>' : '\U000022b4',
-        '\\<unrhd>' : '\U000022b5',
-        '\\<triangleleft>' : '\U000025c3',
-        '\\<triangleright>' : '\U000025b9',
-        '\\<triangle>' : '\U000025b3',
-        '\\<triangleq>' : '\U0000225c',
-        '\\<oplus>' : '\U00002295',
-        '\\<Oplus>' : '\U00002a01',
-        '\\<otimes>' : '\U00002297',
-        '\\<Otimes>' : '\U00002a02',
-        '\\<odot>' : '\U00002299',
-        '\\<Odot>' : '\U00002a00',
-        '\\<ominus>' : '\U00002296',
-        '\\<oslash>' : '\U00002298',
-        '\\<dots>' : '\U00002026',
-        '\\<cdots>' : '\U000022ef',
-        '\\<Sum>' : '\U00002211',
-        '\\<Prod>' : '\U0000220f',
-        '\\<Coprod>' : '\U00002210',
-        '\\<infinity>' : '\U0000221e',
-        '\\<integral>' : '\U0000222b',
-        '\\<ointegral>' : '\U0000222e',
-        '\\<clubsuit>' : '\U00002663',
-        '\\<diamondsuit>' : '\U00002662',
-        '\\<heartsuit>' : '\U00002661',
-        '\\<spadesuit>' : '\U00002660',
-        '\\<aleph>' : '\U00002135',
-        '\\<emptyset>' : '\U00002205',
-        '\\<nabla>' : '\U00002207',
-        '\\<partial>' : '\U00002202',
-        '\\<flat>' : '\U0000266d',
-        '\\<natural>' : '\U0000266e',
-        '\\<sharp>' : '\U0000266f',
-        '\\<angle>' : '\U00002220',
-        '\\<copyright>' : '\U000000a9',
-        '\\<registered>' : '\U000000ae',
-        '\\<hyphen>' : '\U000000ad',
-        '\\<inverse>' : '\U000000af',
-        '\\<onequarter>' : '\U000000bc',
-        '\\<onehalf>' : '\U000000bd',
-        '\\<threequarters>' : '\U000000be',
-        '\\<ordfeminine>' : '\U000000aa',
-        '\\<ordmasculine>' : '\U000000ba',
-        '\\<section>' : '\U000000a7',
-        '\\<paragraph>' : '\U000000b6',
-        '\\<exclamdown>' : '\U000000a1',
-        '\\<questiondown>' : '\U000000bf',
-        '\\<euro>' : '\U000020ac',
-        '\\<pounds>' : '\U000000a3',
-        '\\<yen>' : '\U000000a5',
-        '\\<cent>' : '\U000000a2',
-        '\\<currency>' : '\U000000a4',
-        '\\<degree>' : '\U000000b0',
-        '\\<amalg>' : '\U00002a3f',
-        '\\<mho>' : '\U00002127',
-        '\\<lozenge>' : '\U000025ca',
-        '\\<wp>' : '\U00002118',
-        '\\<wrong>' : '\U00002240',
-        '\\<struct>' : '\U000022c4',
-        '\\<acute>' : '\U000000b4',
-        '\\<index>' : '\U00000131',
-        '\\<dieresis>' : '\U000000a8',
-        '\\<cedilla>' : '\U000000b8',
-        '\\<hungarumlaut>' : '\U000002dd',
-        '\\<some>' : '\U000003f5',
-        '\\<newline>' : '\U000023ce',
-        '\\<open>' : '\U00002039',
-        '\\<close>' : '\U0000203a',
-        '\\<here>' : '\U00002302',
-        '\\<^sub>' : '\U000021e9',
-        '\\<^sup>' : '\U000021e7',
-        '\\<^bold>' : '\U00002759',
-        '\\<^bsub>' : '\U000021d8',
-        '\\<^esub>' : '\U000021d9',
-        '\\<^bsup>' : '\U000021d7',
-        '\\<^esup>' : '\U000021d6',
-    }
-
-    lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        lang = get_choice_opt(options, 'lang',
-                              ['isabelle', 'latex'], 'isabelle')
-        self.symbols = self.lang_map[lang]
-
-    def filter(self, lexer, stream):
-        for ttype, value in stream:
-            if value in self.symbols:
-                yield ttype, self.symbols[value]
-            else:
-                yield ttype, value
-
-
-class KeywordCaseFilter(Filter):
-    """Convert keywords to lowercase or uppercase or capitalize them, which
-    means first letter uppercase, rest lowercase.
-
-    This can be useful e.g. if you highlight Pascal code and want to adapt the
-    code to your styleguide.
-
-    Options accepted:
-
-    `case` : string
-        The casing to convert keywords to. Must be one of ``'lower'``,
-        ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        case = get_choice_opt(options, 'case',
-                              ['lower', 'upper', 'capitalize'], 'lower')
-        self.convert = getattr(str, case)
-
-    def filter(self, lexer, stream):
-        for ttype, value in stream:
-            if ttype in Keyword:
-                yield ttype, self.convert(value)
-            else:
-                yield ttype, value
-
-
-class NameHighlightFilter(Filter):
-    """Highlight a normal Name (and Name.*) token with a different token type.
-
-    Example::
-
-        filter = NameHighlightFilter(
-            names=['foo', 'bar', 'baz'],
-            tokentype=Name.Function,
-        )
-
-    This would highlight the names "foo", "bar" and "baz"
-    as functions. `Name.Function` is the default token type.
-
-    Options accepted:
-
-    `names` : list of strings
-        A list of names that should be given the different token type.
-        There is no default.
-    `tokentype` : TokenType or string
-        A token type or a string containing a token type name that is
-        used for highlighting the strings in `names`. The default is
-        `Name.Function`.
-    """
-
-    def __init__(self, **options):
-        Filter.__init__(self, **options)
-        self.names = set(get_list_opt(options, 'names', []))
-        tokentype = options.get('tokentype')
-        if tokentype:
-            self.tokentype = string_to_tokentype(tokentype)
-        else:
-            self.tokentype = Name.Function
-
-    def filter(self, lexer, stream):
-        for ttype, value in stream:
-            if ttype in Name and value in self.names:
-                yield self.tokentype, value
-            else:
-                yield ttype, value
-
-
-class ErrorToken(Exception):
-    pass
-
-
-class RaiseOnErrorTokenFilter(Filter):
-    """Raise an exception when the lexer generates an error token.
-
-    Options accepted:
-
-    `excclass` : Exception class
-        The exception class to raise.
-        The default is `pygments.filters.ErrorToken`.
-
-    ..
versionadded:: 0.8 - """ - - def __init__(self, **options): - Filter.__init__(self, **options) - self.exception = options.get('excclass', ErrorToken) - try: - # issubclass() will raise TypeError if first argument is not a class - if not issubclass(self.exception, Exception): - raise TypeError - except TypeError: - raise OptionError('excclass option is not an exception class') - - def filter(self, lexer, stream): - for ttype, value in stream: - if ttype is Error: - raise self.exception(value) - yield ttype, value - - -class VisibleWhitespaceFilter(Filter): - """Convert tabs, newlines and/or spaces to visible characters. - - Options accepted: - - `spaces` : string or bool - If this is a one-character string, spaces will be replaces by this string. - If it is another true value, spaces will be replaced by ``ยท`` (unicode - MIDDLE DOT). If it is a false value, spaces will not be replaced. The - default is ``False``. - `tabs` : string or bool - The same as for `spaces`, but the default replacement character is ``ยป`` - (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value - is ``False``. Note: this will not work if the `tabsize` option for the - lexer is nonzero, as tabs will already have been expanded then. - `tabsize` : int - If tabs are to be replaced by this filter (see the `tabs` option), this - is the total number of characters that a tab should be expanded to. - The default is ``8``. - `newlines` : string or bool - The same as for `spaces`, but the default replacement character is ``ยถ`` - (unicode PILCROW SIGN). The default value is ``False``. - `wstokentype` : bool - If true, give whitespace the special `Whitespace` token type. This allows - styling the visible whitespace differently (e.g. greyed out), but it can - disrupt background colors. The default is ``True``. - - .. versionadded:: 0.8 - """ - - def __init__(self, **options): - Filter.__init__(self, **options) - for name, default in [('spaces', 'ยท'), - ('tabs', 'ยป'), - ('newlines', 'ยถ')]: - opt = options.get(name, False) - if isinstance(opt, str) and len(opt) == 1: - setattr(self, name, opt) - else: - setattr(self, name, (opt and default or '')) - tabsize = get_int_opt(options, 'tabsize', 8) - if self.tabs: - self.tabs += ' ' * (tabsize - 1) - if self.newlines: - self.newlines += '\n' - self.wstt = get_bool_opt(options, 'wstokentype', True) - - def filter(self, lexer, stream): - if self.wstt: - spaces = self.spaces or ' ' - tabs = self.tabs or '\t' - newlines = self.newlines or '\n' - regex = re.compile(r'\s') - - def replacefunc(wschar): - if wschar == ' ': - return spaces - elif wschar == '\t': - return tabs - elif wschar == '\n': - return newlines - return wschar - - for ttype, value in stream: - yield from _replace_special(ttype, value, regex, Whitespace, - replacefunc) - else: - spaces, tabs, newlines = self.spaces, self.tabs, self.newlines - # simpler processing - for ttype, value in stream: - if spaces: - value = value.replace(' ', spaces) - if tabs: - value = value.replace('\t', tabs) - if newlines: - value = value.replace('\n', newlines) - yield ttype, value - - -class GobbleFilter(Filter): - """Gobbles source code lines (eats initial characters). - - This filter drops the first ``n`` characters off every line of code. This - may be useful when the source code fed to the lexer is indented by a fixed - amount of space that isn't desired in the output. - - Options accepted: - - `n` : int - The number of characters to gobble. - - .. 
versionadded:: 1.2 - """ - def __init__(self, **options): - Filter.__init__(self, **options) - self.n = get_int_opt(options, 'n', 0) - - def gobble(self, value, left): - if left < len(value): - return value[left:], 0 - else: - return '', left - len(value) - - def filter(self, lexer, stream): - n = self.n - left = n # How many characters left to gobble. - for ttype, value in stream: - # Remove ``left`` tokens from first line, ``n`` from all others. - parts = value.split('\n') - (parts[0], left) = self.gobble(parts[0], left) - for i in range(1, len(parts)): - (parts[i], left) = self.gobble(parts[i], n) - value = '\n'.join(parts) - - if value != '': - yield ttype, value - - -class TokenMergeFilter(Filter): - """Merges consecutive tokens with the same token type in the output - stream of a lexer. - - .. versionadded:: 1.2 - """ - def __init__(self, **options): - Filter.__init__(self, **options) - - def filter(self, lexer, stream): - current_type = None - current_value = None - for ttype, value in stream: - if ttype is current_type: - current_value += value - else: - if current_type is not None: - yield current_type, current_value - current_type = ttype - current_value = value - if current_type is not None: - yield current_type, current_value - - -FILTERS = { - 'codetagify': CodeTagFilter, - 'keywordcase': KeywordCaseFilter, - 'highlight': NameHighlightFilter, - 'raiseonerror': RaiseOnErrorTokenFilter, - 'whitespace': VisibleWhitespaceFilter, - 'gobble': GobbleFilter, - 'tokenmerge': TokenMergeFilter, - 'symbols': SymbolFilter, -} diff --git a/spaces/Temptingchina/Real-CUGAN/upcunet_v3.py b/spaces/Temptingchina/Real-CUGAN/upcunet_v3.py deleted file mode 100644 index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000 --- a/spaces/Temptingchina/Real-CUGAN/upcunet_v3.py +++ /dev/null @@ -1,714 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F -import os, sys -import numpy as np - -root_path = os.path.abspath('.') -sys.path.append(root_path) - - -class SEBlock(nn.Module): - def __init__(self, in_channels, reduction=8, bias=False): - super(SEBlock, self).__init__() - self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias) - self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias) - - def forward(self, x): - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half() - else: - x0 = torch.mean(x, dim=(2, 3), keepdim=True) - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - def forward_mean(self, x, x0): - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - -class UNetConv(nn.Module): - def __init__(self, in_channels, mid_channels, out_channels, se): - super(UNetConv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - nn.Conv2d(mid_channels, out_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - ) - if se: - self.seblock = SEBlock(out_channels, reduction=8, bias=True) - else: - self.seblock = None - - def forward(self, x): - z = self.conv(x) - if self.seblock is not None: - z = self.seblock(z) - return z - - -class UNet1(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 
64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet1x3(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1x3, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet2(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet2, self).__init__() - - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 64, 128, se=True) - self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0) - self.conv3 = UNetConv(128, 256, 128, se=True) - self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0) - self.conv4 = UNetConv(128, 64, 64, se=True) - self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv5 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = 
nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3(x3) - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4(x2 + x3) - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - def forward_a(self, x): # conv234็ป“ๅฐพๆœ‰se - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x2): # conv234็ป“ๅฐพๆœ‰se - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3.conv(x3) - return x3 - - def forward_c(self, x2, x3): # conv234็ป“ๅฐพๆœ‰se - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4.conv(x2 + x3) - return x4 - - def forward_d(self, x1, x4): # conv234็ป“ๅฐพๆœ‰se - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - -class UpCunet2x(nn.Module): # ๅฎŒ็พŽtile๏ผŒๅ…จ็จ‹ๆ— ๆŸ - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet2x, self).__init__() - self.unet1 = UNet1(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # ไธtile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # ้œ€่ฆไฟ่ฏ่ขซ2ๆ•ด้™ค - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2] - return x - elif (tile_mode == 1): # ๅฏน้•ฟ่พนๅ‡ๅŠ - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # ๅ‡ๅŠๅŽ่ƒฝ่ขซ2ๆ•ด้™ค๏ผŒๆ‰€ไปฅ่ฆๅ…ˆ่ขซ4ๆ•ด้™ค - crop_size_h = (h0 - 1) // 2 * 2 + 2 # ่ƒฝ่ขซ2ๆ•ด้™ค - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # ๅ‡ๅŠๅŽ่ƒฝ่ขซ2ๆ•ด้™ค๏ผŒๆ‰€ไปฅ่ฆๅ…ˆ่ขซ4ๆ•ด้™ค - crop_size_w = (w0 - 1) // 2 * 2 + 2 # ่ƒฝ่ขซ2ๆ•ด้™ค - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw้ƒฝๅ‡ๅŠ - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw้ƒฝไธ‰ๅˆ†ไน‹ไธ€ - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G - elif (tile_mode == 4): # hw้ƒฝๅ››ๅˆ†ไน‹ไธ€ - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if 
("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 36, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 36, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0ๆ˜ฏunet2็š„ๆœ€็ปˆ่พ“ๅ‡บ - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = 
res[:, :, :h0 * 2, :w0 * 2] - return res # - - -class UpCunet3x(nn.Module): # ๅฎŒ็พŽtile๏ผŒๅ…จ็จ‹ๆ— ๆŸ - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet3x, self).__init__() - self.unet1 = UNet1x3(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # ไธtile - ph = ((h0 - 1) // 4 + 1) * 4 - pw = ((w0 - 1) // 4 + 1) * 4 - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # ้œ€่ฆไฟ่ฏ่ขซ2ๆ•ด้™ค - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3] - return x - elif (tile_mode == 1): # ๅฏน้•ฟ่พนๅ‡ๅŠ - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # ๅ‡ๅŠๅŽ่ƒฝ่ขซ4ๆ•ด้™ค๏ผŒๆ‰€ไปฅ่ฆๅ…ˆ่ขซ8ๆ•ด้™ค - crop_size_h = (h0 - 1) // 4 * 4 + 4 # ่ƒฝ่ขซ4ๆ•ด้™ค - else: - crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # ๅ‡ๅŠๅŽ่ƒฝ่ขซ4ๆ•ด้™ค๏ผŒๆ‰€ไปฅ่ฆๅ…ˆ่ขซ8ๆ•ด้™ค - crop_size_w = (w0 - 1) // 4 * 4 + 4 # ่ƒฝ่ขซ4ๆ•ด้™ค - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw้ƒฝๅ‡ๅŠ - crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G - elif (tile_mode == 3): # hw้ƒฝไธ‰ๅˆ†ไน‹ไธ€ - crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G - elif (tile_mode == 4): # hw้ƒฝๅ››ๅˆ†ไน‹ไธ€ - crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 28, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = 
self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 28, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0ๆ˜ฏunet2็š„ๆœ€็ปˆ่พ“ๅ‡บ - opt_res_dict[i][j] = x_crop # - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3] - return res - - -class UpCunet4x(nn.Module): # ๅฎŒ็พŽtile๏ผŒๅ…จ็จ‹ๆ— ๆŸ - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet4x, self).__init__() - self.unet1 = UNet1(in_channels, 64, deconv=True) - self.unet2 = UNet2(64, 64, deconv=False) - self.ps = nn.PixelShuffle(2) - self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True) - - def forward(self, x, tile_mode): - n, c, h0, w0 = x.shape - x00 = x - if (tile_mode == 0): # ไธtile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # ้œ€่ฆไฟ่ฏ่ขซ2ๆ•ด้™ค - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - x = self.conv_final(x) - x = F.pad(x, (-1, -1, -1, -1)) - x = self.ps(x) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4] - x += F.interpolate(x00, scale_factor=4, mode='nearest') - return x - elif (tile_mode == 1): # ๅฏน้•ฟ่พนๅ‡ๅŠ - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # ๅ‡ๅŠๅŽ่ƒฝ่ขซ2ๆ•ด้™ค๏ผŒๆ‰€ไปฅ่ฆๅ…ˆ่ขซ4ๆ•ด้™ค - crop_size_h = (h0 - 1) // 2 * 2 + 2 # ่ƒฝ่ขซ2ๆ•ด้™ค - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # ๅ‡ๅŠๅŽ่ƒฝ่ขซ2ๆ•ด้™ค๏ผŒๆ‰€ไปฅ่ฆๅ…ˆ่ขซ4ๆ•ด้™ค - crop_size_w = (w0 - 1) // 2 * 2 + 2 # ่ƒฝ่ขซ2ๆ•ด้™ค - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw้ƒฝๅ‡ๅŠ - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw้ƒฝไธ‰ๅˆ†ไน‹ไธ€ - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G - elif (tile_mode == 4): # hw้ƒฝๅ››ๅˆ†ไน‹ไธ€ - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, 
((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 38, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 38, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0ๆ˜ฏunet2็š„ๆœ€็ปˆ่พ“ๅ‡บ - x_crop = self.conv_final(x_crop) - x_crop = F.pad(x_crop, (-1, -1, -1, -1)) - x_crop = self.ps(x_crop) - opt_res_dict[i][j] = x_crop - del tmp_dict 
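# ---------------------------------------------------------------------------
# NOTE (illustrative annotation on the tiled branch above, not a definitive
# description): this is the "lossless tiling" the class comments promise
# (ๅฎŒ็พŽtile๏ผŒๅ…จ็จ‹ๆ— ๆŸ -- "perfect tiling, lossless throughout"). Each SE
# (squeeze-and-excitation) stage is split into two passes:
#   pass 1 only accumulates the channel-wise spatial mean of every tile:
#       se_mean += tile.mean(dim=(2, 3), keepdim=True); se_mean /= n_patch
#   pass 2 re-applies the block via seblock.forward_mean(tile, se_mean),
#       so every tile is modulated by the same, approximately global, stats.
# Because all crops share identical SE statistics, the stitched result below
# is seam-free. The hand-off repeats once per SE stage: unet1.conv2, then
# unet2.conv2, unet2.conv3 and unet2.conv4.
# ---------------------------------------------------------------------------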
-        torch.cuda.empty_cache()
-        res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device)
-        if ("Half" in x.type()):
-            res = res.half()
-        for i in range(0, h - 38, crop_size[0]):
-            for j in range(0, w - 38, crop_size[1]):
-                # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape)
-                res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j]
-        del opt_res_dict
-        torch.cuda.empty_cache()
-        if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4]
-        res += F.interpolate(x00, scale_factor=4, mode='nearest')
-        return res
-
-
-class RealWaifuUpScaler(object):
-    def __init__(self, scale, weight_path, half, device):
-        weight = torch.load(weight_path, map_location="cpu")
-        self.model = eval("UpCunet%sx" % scale)()
-        if (half == True):
-            self.model = self.model.half().to(device)
-        else:
-            self.model = self.model.to(device)
-        self.model.load_state_dict(weight, strict=True)
-        self.model.eval()
-        self.half = half
-        self.device = device
-
-    def np2tensor(self, np_frame):
-        if (self.half == False):
-            return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255
-        else:
-            return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255
-
-    def tensor2np(self, tensor):
-        if (self.half == False):
-            return (
-                np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0)))
-        else:
-            return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(),
-                                 (1, 2, 0)))
-
-    def __call__(self, frame, tile_mode):
-        with torch.no_grad():
-            tensor = self.np2tensor(frame)
-            result = self.tensor2np(self.model(tensor, tile_mode))
-        return result
-
-
-if __name__ == "__main__":
-    ########### inference_img
-    import os, time, cv2, sys  # os was missing from the original imports; it is used below
-    from time import time as ttime
-
-    root_path = os.path.abspath(".")  # base directory (left undefined in the original file)
-    for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3),
-                               ("weights_v3/up4x-latest-denoise3x.pth", 4)]:
-        for tile_mode in [0, 1, 2, 3, 4]:
-            upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0")
-            input_dir = "%s/input_dir1" % root_path
-            output_dir = "%s/opt-dir-all-test" % root_path
-            os.makedirs(output_dir, exist_ok=True)
-            for name in os.listdir(input_dir):
-                print(name)
-                tmp = name.split(".")
-                inp_path = os.path.join(input_dir, name)
-                suffix = tmp[-1]
-                prefix = ".".join(tmp[:-1])
-                tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
-                print(inp_path, tmp_path)
-                # link into a temp path so non-ASCII (e.g. Chinese) input paths are supported
-                # os.link(inp_path, tmp_path)  # on Windows, use a hard link
-                os.symlink(inp_path, tmp_path)  # on Linux, use a symlink
-                frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]]
-                t0 = ttime()
-                result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1]
-                t1 = ttime()
-                print(prefix, "done", t1 - t0)
-                tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix))
-                cv2.imwrite(tmp_opt_path, result)
-                n = 0
-                while (1):
-                    if (n == 0):
-                        suffix = "_%sx_tile%s.png" % (scale, tile_mode)
-                    else:
-                        suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n)
-                    if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False):
-                        break
-                    else:
-                        n += 1
-                final_opt_path = os.path.join(output_dir, prefix + suffix)
-                os.rename(tmp_opt_path, final_opt_path)
-                os.remove(tmp_path)
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/builtin_datasets.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/builtin_datasets.md
deleted file mode 100644
index 0eb44cc3b23beeb1755ab8d12002d26f13434235..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/builtin_datasets.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# Use Builtin Datasets
-
-A dataset can be used by accessing [DatasetCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.DatasetCatalog)
-for its data, or [MetadataCatalog](https://detectron2.readthedocs.io/modules/data.html#detectron2.data.MetadataCatalog) for its metadata (class names, etc.).
-This document explains how to set up the builtin datasets so they can be used by the above APIs.
-[Use Custom Datasets](https://detectron2.readthedocs.io/tutorials/datasets.html) gives a deeper dive on how to use `DatasetCatalog` and `MetadataCatalog`,
-and how to add new datasets to them.
-
-Detectron2 has builtin support for a few datasets.
-The datasets are assumed to exist in a directory specified by the environment variable
-`DETECTRON2_DATASETS`.
-Under this directory, detectron2 will look for datasets in the structure described below, if needed.
-```
-$DETECTRON2_DATASETS/
-  coco/
-  lvis/
-  cityscapes/
-  VOC20{07,12}/
-```
-
-You can set the location for builtin datasets by `export DETECTRON2_DATASETS=/path/to/datasets`.
-If left unset, the default is `./datasets` relative to your current working directory.
-
-The [model zoo](https://github.com/facebookresearch/detectron2/blob/master/MODEL_ZOO.md)
-contains configs and models that use these builtin datasets.
-
-## Expected dataset structure for [COCO instance/keypoint detection](https://cocodataset.org/#download):
-
-```
-coco/
-  annotations/
-    instances_{train,val}2017.json
-    person_keypoints_{train,val}2017.json
-  {train,val}2017/
-    # image files that are mentioned in the corresponding json
-```
-
-You can use the 2014 version of the dataset as well.
-
-Some of the builtin tests (`dev/run_*_tests.sh`) use a tiny version of the COCO dataset,
-which you can download with `./datasets/prepare_for_tests.sh`.
-
-## Expected dataset structure for PanopticFPN:
-
-Extract panoptic annotations from the [COCO website](https://cocodataset.org/#download)
-into the following structure:
-```
-coco/
-  annotations/
-    panoptic_{train,val}2017.json
-  panoptic_{train,val}2017/  # png annotations
-  panoptic_stuff_{train,val}2017/  # generated by the script mentioned below
-```
-
-Install panopticapi by:
-```
-pip install git+https://github.com/cocodataset/panopticapi.git
-```
-Then run `python datasets/prepare_panoptic_fpn.py` to extract semantic annotations from panoptic annotations.
-
-## Expected dataset structure for [LVIS instance segmentation](https://www.lvisdataset.org/dataset):
-```
-coco/
-  {train,val,test}2017/
-lvis/
-  lvis_v0.5_{train,val}.json
-  lvis_v0.5_image_info_test.json
-  lvis_v1_{train,val}.json
-  lvis_v1_image_info_test{,_challenge}.json
-```
-
-Install lvis-api by:
-```
-pip install git+https://github.com/lvis-dataset/lvis-api.git
-```
-
-To evaluate models trained on the COCO dataset using LVIS annotations,
-run `python datasets/prepare_cocofied_lvis.py` to prepare "cocofied" LVIS annotations.
-
-## Expected dataset structure for [cityscapes](https://www.cityscapes-dataset.com/downloads/):
-```
-cityscapes/
-  gtFine/
-    train/
-      aachen/
-        color.png, instanceIds.png, labelIds.png, polygons.json,
-        labelTrainIds.png
-      ...
-    val/
-    test/
-    # below are generated Cityscapes panoptic annotations
-    cityscapes_panoptic_train.json
-    cityscapes_panoptic_train/
-    cityscapes_panoptic_val.json
-    cityscapes_panoptic_val/
-    cityscapes_panoptic_test.json
-    cityscapes_panoptic_test/
-  leftImg8bit/
-    train/
-    val/
-    test/
-```
-Install cityscapes scripts by:
-```
-pip install git+https://github.com/mcordts/cityscapesScripts.git
-```
-
-Note: to create labelTrainIds.png, first prepare the above structure, then run the cityscapesScripts preparation script with:
-```
-CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createTrainIdLabelImgs.py
-```
-These files are not needed for instance segmentation.
-
-Note: to generate the Cityscapes panoptic dataset, run the cityscapesScripts preparation script with:
-```
-CITYSCAPES_DATASET=/path/to/abovementioned/cityscapes python cityscapesscripts/preparation/createPanopticImgs.py
-```
-These files are not needed for semantic and instance segmentation.
-
-## Expected dataset structure for [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html):
-```
-VOC20{07,12}/
-  Annotations/
-  ImageSets/
-    Main/
-      trainval.txt
-      test.txt
-      # train.txt or val.txt, if you use these splits
-  JPEGImages/
-```
-
-## Expected dataset structure for [ADE20k Scene Parsing](http://sceneparsing.csail.mit.edu/):
-```
-ADEChallengeData2016/
-  annotations/
-  annotations_detectron2/
-  images/
-  objectInfo150.txt
-```
-The directory `annotations_detectron2` is generated by running `python datasets/prepare_ade20k_sem_seg.py`.
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/getting_started.md b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/getting_started.md
deleted file mode 100644
index 404b0c8f467264d1adf61e8274e5f864e24018e8..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/docs/tutorials/getting_started.md
+++ /dev/null
@@ -1,79 +0,0 @@
-## Getting Started with Detectron2
-
-This document provides a brief intro to the builtin command-line tools in detectron2.
-
-For a tutorial that involves actual coding with the API,
-see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-which covers how to run inference with an
-existing model, and how to train a builtin model on a custom dataset.
-
-
-### Inference Demo with Pre-trained Models
-
-1. Pick a model and its config file from
-  [model zoo](MODEL_ZOO.md),
-  for example, `mask_rcnn_R_50_FPN_3x.yaml`.
-2. We provide `demo.py`, which can run inference with builtin configs. Run it with:
-```
-cd demo/
-python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \
-  --input input1.jpg input2.jpg \
-  [--other-options]
-  --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl
-```
-The configs are made for training, so we need to point `MODEL.WEIGHTS` to a model from the model zoo for evaluation.
-This command will run the inference and show visualizations in an OpenCV window.
-
-For details of the command line arguments, see `demo.py -h` or look at its source code
-to understand its behavior. Some common arguments are:
-* To run __on your webcam__, replace `--input files` with `--webcam`.
-* To run __on a video__, replace `--input files` with `--video-input video.mp4`.
-* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`.
-* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`.
-
-
-### Training & Evaluation in Command Line
-
-We provide two scripts, "tools/plain_train_net.py" and "tools/train_net.py",
-that can train all the configs provided in detectron2. You may want to
-use them as a reference to write your own training script.
-
-Compared to "train_net.py", "plain_train_net.py" supports fewer default
-features. It also includes fewer abstractions and is therefore easier to extend
-with custom logic.
-
-To train a model with "train_net.py", first
-set up the corresponding datasets following
-[datasets/README.md](./datasets/README.md),
-then run:
-```
-cd tools/
-./train_net.py --num-gpus 8 \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
-```
-
-The configs are made for 8-GPU training.
-To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.:
-```
-./train_net.py \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
-  --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025
-```
-
-To evaluate a model's performance, use
-```
-./train_net.py \
-  --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \
-  --eval-only MODEL.WEIGHTS /path/to/checkpoint_file
-```
-For more options, see `./train_net.py -h`.
-
-### Use Detectron2 APIs in Your Code
-
-See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5)
-to learn how to use detectron2 APIs to:
-1. run inference with an existing model (a minimal sketch follows right after this file)
-2. train a builtin model on a custom dataset
-
-See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/main/projects)
-for more ways to build your project on detectron2.
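As a minimal sketch of item 1 above, the snippet below runs inference through the Python API instead of `demo.py`. It assumes detectron2 is installed and that a local `input1.jpg` exists; the config and weight names reuse the same model-zoo entry referenced earlier in this document.

```
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

# Build a config from the same model-zoo entry used by the demo above.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE = "cpu"  # same effect as passing `MODEL.DEVICE cpu` via --opts

predictor = DefaultPredictor(cfg)              # handles preprocessing and weight loading
outputs = predictor(cv2.imread("input1.jpg"))  # expects a BGR numpy image
print(outputs["instances"].pred_classes)       # per-instance COCO class ids
print(outputs["instances"].pred_boxes)         # per-instance boxes (XYXY, absolute)
```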
diff --git a/spaces/Tetel/secondbing/EdgeGPT/utilities.py b/spaces/Tetel/secondbing/EdgeGPT/utilities.py deleted file mode 100644 index cd2be0fcd998e12db495ef3d02a68344caa23018..0000000000000000000000000000000000000000 --- a/spaces/Tetel/secondbing/EdgeGPT/utilities.py +++ /dev/null @@ -1,39 +0,0 @@ -import json -import locale -import random -import sys -from typing import Union - -from .constants import DELIMITER -from .locale import LocationHint - - -def append_identifier(msg: dict) -> str: - # Convert dict to json string - return json.dumps(msg, ensure_ascii=False) + DELIMITER - - -def get_ran_hex(length: int = 32) -> str: - return "".join(random.choice("0123456789abcdef") for _ in range(length)) - - -def get_location_hint_from_locale(locale: str) -> Union[dict, None]: - locale = locale.lower() - if locale == "en-gb": - hint = LocationHint.UK.value - elif locale == "en-ie": - hint = LocationHint.EU.value - elif locale == "zh-cn": - hint = LocationHint.CHINA.value - else: - hint = LocationHint.USA.value - return hint.get("LocationHint") - - -def guess_locale() -> str: - if sys.platform.startswith("win"): - return "en-us" - loc, _ = locale.getlocale() - if not loc: - return "en-us" - return loc.replace("_", "-") diff --git a/spaces/Theivaprakasham/yolov6/configs/yolov6s.py b/spaces/Theivaprakasham/yolov6/configs/yolov6s.py deleted file mode 100644 index 8b281bf612fd4d309a2fd174f936c40f06451bba..0000000000000000000000000000000000000000 --- a/spaces/Theivaprakasham/yolov6/configs/yolov6s.py +++ /dev/null @@ -1,53 +0,0 @@ -# YOLOv6s model -model = dict( - type='YOLOv6s', - pretrained=None, - depth_multiple=0.33, - width_multiple=0.50, - backbone=dict( - type='EfficientRep', - num_repeats=[1, 6, 12, 18, 6], - out_channels=[64, 128, 256, 512, 1024], - ), - neck=dict( - type='RepPAN', - num_repeats=[12, 12, 12, 12], - out_channels=[256, 128, 128, 256, 256, 512], - ), - head=dict( - type='EffiDeHead', - in_channels=[128, 256, 512], - num_layers=3, - begin_indices=24, - anchors=1, - out_indices=[17, 20, 23], - strides=[8, 16, 32], - iou_type='siou' - ) -) - -solver = dict( - optim='SGD', - lr_scheduler='Cosine', - lr0=0.01, - lrf=0.01, - momentum=0.937, - weight_decay=0.0005, - warmup_epochs=3.0, - warmup_momentum=0.8, - warmup_bias_lr=0.1 -) - -data_aug = dict( - hsv_h=0.015, - hsv_s=0.7, - hsv_v=0.4, - degrees=0.0, - translate=0.1, - scale=0.5, - shear=0.0, - flipud=0.0, - fliplr=0.5, - mosaic=1.0, - mixup=0.0, -) diff --git a/spaces/Uday007/House-Price-Predictor/app.py b/spaces/Uday007/House-Price-Predictor/app.py deleted file mode 100644 index 9ebbcd35f919f081b619bf1d2641832b3e0b3d6c..0000000000000000000000000000000000000000 --- a/spaces/Uday007/House-Price-Predictor/app.py +++ /dev/null @@ -1,77 +0,0 @@ -import importlib -from tkinter import Label -import gradio as gd -import pandas as pd -import numpy as np -from joblib import load - -def predict_price( - id,date,bedrooms,bathrooms,sqft_living, - sqft_lot,floors,waterfront,view,condition, - grade,sqft_above,sqft_basement,yr_built,yr_renovated, - zipcode,lat,long,sqft_living15,sqft_lot15 -): - model=load("housedata.jb") - - # Create dict array from parameters - data={ - 'id':[id], - 'date':[date], - 'bedrooms':[bedrooms], - 'bathrooms':[bathrooms], - 'sqft_living':[sqft_living], - 'sqft_lot':[sqft_lot], - 'floors':[floors], - 'waterfront':[waterfront], - 'view':[view], - 'condition':[condition], - 'grade':[grade], - 'sqft_above':[sqft_above], - 'sqft_basement':[sqft_basement], - 'yr_built':[yr_built], - 'yr_renovated':[yr_renovated], - 
'zipcode':[zipcode], - 'lat':[lat], - 'long':[long], - 'sqft_living15':[sqft_living15], - 'sqft_lot15':[sqft_lot15] - } - - xin=pd.DataFrame(data) - Price=model.predict(xin) - return Price[0] - -ui=gd.Interface( - fn=predict_price, - inputs=[ - gd.inputs.Textbox(type="text", placeholder="id",label="ID"), - gd.inputs.Textbox(type="text", placeholder="date",label="DATE"), - gd.inputs.Textbox(type="text", placeholder="bedrooms",numeric=True,label="BEDROOMS"), - gd.inputs.Textbox(type="text", placeholder="bathrooms",numeric=True,label="BATHROOMS"), - gd.inputs.Textbox(type="text", placeholder="sqft_living",numeric=True,label="SQFT_LIVING"), - gd.inputs.Textbox(type="text", placeholder="sqft_lot",numeric=True,label="SQFT_LOT"), - gd.Dropdown([1. , 2. , 1.5, 3. , 2.5, 3.5],label="FLOORS"), - gd.Dropdown([0,1],label="WATERFRONT"), - gd.Dropdown([0, 1, 2, 3, 4],label="VIEW"), - gd.Dropdown([1,2,3,4,5],label="CONDITION"), - gd.inputs.Textbox(type="text", placeholder="grade",numeric=True,label="GRADE"), - gd.inputs.Textbox(type="text", placeholder="sqft_above",numeric=True,label="SQFT_ABOVE"), - gd.inputs.Textbox(type="text", placeholder="sqft_basement",numeric=True,label="SQFT_BASEMENT"), - gd.inputs.Textbox(type="text", placeholder="yr_built",numeric=True,label="YR_BUILT"), - gd.inputs.Textbox(type="text", placeholder="yr_renovated",numeric=True,label="YR_RENOVATED"), - gd.inputs.Textbox(type="text", placeholder="zipcode",label="ZIPCODE"), - gd.inputs.Textbox(type="text", placeholder="lat",numeric=True,label="LATITUDE"), - gd.inputs.Textbox(type="text", placeholder="long",numeric=True,label="LONGITUDE"), - gd.inputs.Textbox(type="text", placeholder="sqft_living15",numeric=True,label="SQFT_LIVING15"), - gd.inputs.Textbox(type="text", placeholder="sqft_lot15",numeric=True,label="SQFT_LOT15"), - ], - - title="HOUSE PRICE PREDICTOR", - outputs="text", - examples=[["7129300520","20141013T000000",3,1,1180,5650,"1",0,0,3,7,1180,0,1955,0,"98178",47.5112,-122.257,1340,5650], - ["9297300055","20150124T000000",4,3,2950,5000,"2",0,3,3,9,1980,970,1979,0,"98126",47.5714,-122.375,2140,4000], - ["0065000400","20141022T000000",4,3,1490,6766,"1.5",0,1,5,7,1490,0,1915,0,"98136",47.5446,-122.382,1990,6526]] -) - -if __name__=="__main__": - ui.launch() \ No newline at end of file diff --git a/spaces/Vegecken/sovits4dzl/onnx/onnx_export.py b/spaces/Vegecken/sovits4dzl/onnx/onnx_export.py deleted file mode 100644 index 976bfe97a213d1390bdc044b5d86cab84d10e63b..0000000000000000000000000000000000000000 --- a/spaces/Vegecken/sovits4dzl/onnx/onnx_export.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -import time -import numpy as np -import onnx -from onnxsim import simplify -import onnxruntime as ort -import onnxoptimizer -import torch -from model_onnx import SynthesizerTrn -import utils -from hubert import hubert_model_onnx - -def main(HubertExport,NetExport): - - path = "NyaruTaffy" - - if(HubertExport): - device = torch.device("cuda") - hubert_soft = utils.get_hubert_model() - test_input = torch.rand(1, 1, 16000) - input_names = ["source"] - output_names = ["embed"] - torch.onnx.export(hubert_soft.to(device), - test_input.to(device), - "hubert3.0.onnx", - dynamic_axes={ - "source": { - 2: "sample_length" - } - }, - verbose=False, - opset_version=13, - input_names=input_names, - output_names=output_names) - if(NetExport): - device = torch.device("cuda") - hps = utils.get_hparams_from_file(f"checkpoints/{path}/config.json") - SVCVITS = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // 
hps.data.hop_length, - **hps.model) - _ = utils.load_checkpoint(f"checkpoints/{path}/model.pth", SVCVITS, None) - _ = SVCVITS.eval().to(device) - for i in SVCVITS.parameters(): - i.requires_grad = False - test_hidden_unit = torch.rand(1, 50, 256) - test_lengths = torch.LongTensor([50]) - test_pitch = torch.rand(1, 50) - test_sid = torch.LongTensor([0]) - input_names = ["hidden_unit", "lengths", "pitch", "sid"] - output_names = ["audio", ] - SVCVITS.eval() - torch.onnx.export(SVCVITS, - ( - test_hidden_unit.to(device), - test_lengths.to(device), - test_pitch.to(device), - test_sid.to(device) - ), - f"checkpoints/{path}/model.onnx", - dynamic_axes={ - "hidden_unit": [0, 1], - "pitch": [1] - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names) - - -if __name__ == '__main__': - main(False,True) diff --git a/spaces/Xintao/GFPGAN/README.md b/spaces/Xintao/GFPGAN/README.md deleted file mode 100644 index 6249a4e054d04ce358af1ea08fa77160dfcb7870..0000000000000000000000000000000000000000 --- a/spaces/Xintao/GFPGAN/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GFPGAN -emoji: ๐Ÿ˜ -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.26.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/XzJosh/Bella-Bert-VITS2/bert_gen.py b/spaces/XzJosh/Bella-Bert-VITS2/bert_gen.py deleted file mode 100644 index 44814715396ffc3abe84a12c74d66293c356eb4f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Bella-Bert-VITS2/bert_gen.py +++ /dev/null @@ -1,53 +0,0 @@ -import torch -from torch.utils.data import DataLoader -from multiprocessing import Pool -import commons -import utils -from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate -from tqdm import tqdm -import warnings - -from text import cleaned_text_to_sequence, get_bert - -config_path = 'configs/config.json' -hps = utils.get_hparams_from_file(config_path) - -def process_line(line): - _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|") - phone = phones.split(" ") - tone = [int(i) for i in tone.split(" ")] - word2ph = [int(i) for i in word2ph.split(" ")] - w2pho = [i for i in word2ph] - word2ph = [i for i in word2ph] - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - wav_path = f'{_id}' - - bert_path = wav_path.replace(".wav", ".bert.pt") - try: - bert = torch.load(bert_path) - assert bert.shape[-1] == len(phone) - except: - bert = get_bert(text, word2ph, language_str) - assert bert.shape[-1] == len(phone) - torch.save(bert, bert_path) - - -if __name__ == '__main__': - lines = [] - with open(hps.data.training_files, encoding='utf-8' ) as f: - lines.extend(f.readlines()) - - with open(hps.data.validation_files, encoding='utf-8' ) as f: - lines.extend(f.readlines()) - - with Pool(processes=12) as pool: #A100 40GB suitable config,if coom,please decrease the processess number. 
- for _ in tqdm(pool.imap_unordered(process_line, lines)): - pass diff --git a/spaces/XzJosh/JM-Bert-VITS2/server.py b/spaces/XzJosh/JM-Bert-VITS2/server.py deleted file mode 100644 index c736ca4f95fec853950eef6654ef79856beffc0a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/JM-Bert-VITS2/server.py +++ /dev/null @@ -1,123 +0,0 @@ -from flask import Flask, request, Response -from io import BytesIO -import torch -from av import open as avopen - -import commons -import utils -from models import SynthesizerTrn -from text.symbols import symbols -from text import cleaned_text_to_sequence, get_bert -from text.cleaner import clean_text -from scipy.io import wavfile - -# Flask Init -app = Flask(__name__) -app.config['JSON_AS_ASCII'] = False -def get_text(text, language_str, hps): - norm_text, phone, tone, word2ph = clean_text(text, language_str) - print([f"{p}{t}" for p, t in zip(phone, tone)]) - phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str) - - if hps.data.add_blank: - phone = commons.intersperse(phone, 0) - tone = commons.intersperse(tone, 0) - language = commons.intersperse(language, 0) - for i in range(len(word2ph)): - word2ph[i] = word2ph[i] * 2 - word2ph[0] += 1 - bert = get_bert(norm_text, word2ph, language_str) - - assert bert.shape[-1] == len(phone) - - phone = torch.LongTensor(phone) - tone = torch.LongTensor(tone) - language = torch.LongTensor(language) - - return bert, phone, tone, language - -def infer(text, sdp_ratio, noise_scale, noise_scale_w,length_scale,sid): - bert, phones, tones, lang_ids = get_text(text,"ZH", hps,) - with torch.no_grad(): - x_tst=phones.to(dev).unsqueeze(0) - tones=tones.to(dev).unsqueeze(0) - lang_ids=lang_ids.to(dev).unsqueeze(0) - bert = bert.to(dev).unsqueeze(0) - x_tst_lengths = torch.LongTensor([phones.size(0)]).to(dev) - speakers = torch.LongTensor([hps.data.spk2id[sid]]).to(dev) - audio = net_g.infer(x_tst, x_tst_lengths, speakers, tones, lang_ids,bert, sdp_ratio=sdp_ratio - , noise_scale=noise_scale, noise_scale_w=noise_scale_w, length_scale=length_scale)[0][0,0].data.cpu().float().numpy() - return audio - -def replace_punctuation(text, i=2): - punctuation = "๏ผŒใ€‚๏ผŸ๏ผ" - for char in punctuation: - text = text.replace(char, char * i) - return text - -def wav2(i, o, format): - inp = avopen(i, 'rb') - out = avopen(o, 'wb', format=format) - if format == "ogg": format = "libvorbis" - - ostream = out.add_stream(format) - - for frame in inp.decode(audio=0): - for p in ostream.encode(frame): out.mux(p) - - for p in ostream.encode(None): out.mux(p) - - out.close() - inp.close() - -# Load Generator -hps = utils.get_hparams_from_file("./configs/config.json") - -dev='cuda' -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - n_speakers=hps.data.n_speakers, - **hps.model).to(dev) -_ = net_g.eval() - -_ = utils.load_checkpoint("logs/G_649000.pth", net_g, None,skip_optimizer=True) - -@app.route("/",methods=['GET','POST']) -def main(): - if request.method == 'GET': - try: - speaker = request.args.get('speaker') - text = request.args.get('text').replace("/n","") - sdp_ratio = float(request.args.get("sdp_ratio", 0.2)) - noise = float(request.args.get("noise", 0.5)) - noisew = float(request.args.get("noisew", 0.6)) - length = float(request.args.get("length", 1.2)) - if length >= 2: - return "Too big length" - if len(text) >=200: - return "Too long text" - fmt = request.args.get("format", "wav") - if None in (speaker, text): - return "Missing 
Parameter" - if fmt not in ("mp3", "wav", "ogg"): - return "Invalid Format" - except: - return "Invalid Parameter" - - with torch.no_grad(): - audio = infer(text, sdp_ratio=sdp_ratio, noise_scale=noise, noise_scale_w=noisew, length_scale=length, sid=speaker) - - with BytesIO() as wav: - wavfile.write(wav, hps.data.sampling_rate, audio) - torch.cuda.empty_cache() - if fmt == "wav": - return Response(wav.getvalue(), mimetype="audio/wav") - wav.seek(0, 0) - with BytesIO() as ofp: - wav2(wav, ofp, fmt) - return Response( - ofp.getvalue(), - mimetype="audio/mpeg" if fmt == "mp3" else "audio/ogg" - ) diff --git a/spaces/XzJosh/XingTong-Bert-VITS2/utils.py b/spaces/XzJosh/XingTong-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/XingTong-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for 
k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time 
-- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/YUANAI/DiffspeechResearch/mfa_usr/run_mfa_train_align.sh b/spaces/YUANAI/DiffspeechResearch/mfa_usr/run_mfa_train_align.sh deleted file mode 100644 index ed3d8265f2d4dbfd83d7314d8aad734bc20ea657..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/mfa_usr/run_mfa_train_align.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -set -e - -NUM_JOB=${NUM_JOB:-36} -echo "| Training MFA using ${NUM_JOB} cores." 
-BASE_DIR=data/processed/$CORPUS -MODEL_NAME=${MODEL_NAME:-"mfa_model"} -PRETRAIN_MODEL_NAME=${PRETRAIN_MODEL_NAME:-"mfa_model_pretrain"} -MFA_INPUTS=${MFA_INPUTS:-"mfa_inputs"} -MFA_OUTPUTS=${MFA_OUTPUTS:-"mfa_outputs"} -MFA_CMD=${MFA_CMD:-"train"} -rm -rf $BASE_DIR/mfa_outputs_tmp -if [ "$MFA_CMD" = "train" ]; then - mfa train $BASE_DIR/$MFA_INPUTS $BASE_DIR/mfa_dict.txt $BASE_DIR/mfa_outputs_tmp -t $BASE_DIR/mfa_tmp -o $BASE_DIR/$MODEL_NAME.zip --clean -j $NUM_JOB --config_path mfa_usr/mfa_train_config.yaml -elif [ "$MFA_CMD" = "adapt" ]; then - python mfa_usr/mfa.py adapt \ - $BASE_DIR/$MFA_INPUTS \ - $BASE_DIR/mfa_dict.txt \ - $BASE_DIR/$PRETRAIN_MODEL_NAME.zip \ - $BASE_DIR/$MODEL_NAME.zip \ - $BASE_DIR/mfa_outputs_tmp \ - -t $BASE_DIR/mfa_tmp --clean -j $NUM_JOB -fi -rm -rf $BASE_DIR/mfa_tmp $BASE_DIR/$MFA_OUTPUTS -mkdir -p $BASE_DIR/$MFA_OUTPUTS -find $BASE_DIR/mfa_outputs_tmp -regex ".*\.TextGrid" -print0 | xargs -0 -i mv {} $BASE_DIR/$MFA_OUTPUTS/ -if [ -e "$BASE_DIR/mfa_outputs_tmp/unaligned.txt" ]; then - cp $BASE_DIR/mfa_outputs_tmp/unaligned.txt $BASE_DIR/ -fi -rm -rf $BASE_DIR/mfa_outputs_tmp \ No newline at end of file diff --git a/spaces/Yiqin/ChatVID/model/fastchat/eval/qa_baseline_gpt35.py b/spaces/Yiqin/ChatVID/model/fastchat/eval/qa_baseline_gpt35.py deleted file mode 100644 index f0f9f5fbc9a20f3acfee58569b210d1e0572c7b9..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/fastchat/eval/qa_baseline_gpt35.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Generate answers with GPT-3.5""" -# Note: you need to be using OpenAI Python v0.27.0 for the code below to work -import argparse -import json -import os -import time -import concurrent.futures - -import openai -import tqdm -import shortuuid - -MODEL = "gpt-3.5-turbo" -MODEL_ID = "gpt-3.5-turbo:20230327" - - -def get_answer(question_id: int, question: str, max_tokens: int): - ans = { - "answer_id": shortuuid.uuid(), - "question_id": question_id, - "model_id": MODEL_ID, - } - for _ in range(3): - try: - response = openai.ChatCompletion.create( - model=MODEL, - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - { - "role": "user", - "content": question, - }, - ], - max_tokens=max_tokens, - ) - ans["text"] = response["choices"][0]["message"]["content"] - return ans - except Exception as e: - print("[ERROR]", e) - ans["text"] = "#ERROR#" - time.sleep(1) - return ans - - -if __name__ == "__main__": - parser = argparse.ArgumentParser(description="ChatGPT answer generation.") - parser.add_argument("-q", "--question") - parser.add_argument("-o", "--output") - parser.add_argument( - "--max-tokens", - type=int, - default=1024, - help="maximum number of tokens produced in the output", - ) - args = parser.parse_args() - - questions_dict = {} - with open(os.path.expanduser(args.question)) as f: - for line in f: - if not line: - continue - q = json.loads(line) - questions_dict[q["question_id"]] = q["text"] - - answers = [] - - with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor: - futures = [] - for qid, question in questions_dict.items(): - future = executor.submit(get_answer, qid, question, args.max_tokens) - futures.append(future) - - for future in tqdm.tqdm( - concurrent.futures.as_completed(futures), total=len(futures) - ): - answers.append(future.result()) - - answers.sort(key=lambda x: x["question_id"]) - - with open(os.path.expanduser(args.output), "w") as f: - table = [json.dumps(ans) for ans in answers] - f.write("\n".join(table)) diff --git 
a/spaces/Yiqin/ChatVID/model/vision/grit_src/grit/data/datasets/grit_coco.py b/spaces/Yiqin/ChatVID/model/vision/grit_src/grit/data/datasets/grit_coco.py deleted file mode 100644 index fea81f7dd8ad2c27dac8438753b845ab64cef81e..0000000000000000000000000000000000000000 --- a/spaces/Yiqin/ChatVID/model/vision/grit_src/grit/data/datasets/grit_coco.py +++ /dev/null @@ -1,112 +0,0 @@ -import logging -import os -from fvcore.common.timer import Timer -from detectron2.structures import BoxMode -from fvcore.common.file_io import PathManager -from detectron2.data import DatasetCatalog, MetadataCatalog -from lvis import LVIS - -logger = logging.getLogger(__name__) - -__all__ = ["load_GRiTcoco_json", "register_GRiTcoco_instances"] - - -def register_GRiTcoco_instances(name, metadata, json_file, image_root): - """ - """ - DatasetCatalog.register(name, lambda: load_GRiTcoco_json( - json_file, image_root, name)) - MetadataCatalog.get(name).set( - json_file=json_file, image_root=image_root, - evaluator_type="coco", **metadata - ) - - -def get_GRiTcoco_meta(): - categories = [{'supercategory': 'object', 'id': 1, 'name': 'object'}] - categories = sorted(categories, key=lambda x: x["id"]) - thing_classes = [k["name"] for k in categories] - meta = {"thing_classes": thing_classes} - return meta - - -def load_GRiTcoco_json(json_file, image_root, dataset_name=None): - ''' - Load COCO class name text for object description for GRiT - ''' - - json_file = PathManager.get_local_path(json_file) - - timer = Timer() - lvis_api = LVIS(json_file) - if timer.seconds() > 1: - logger.info("Loading {} takes {:.2f} seconds.".format( - json_file, timer.seconds())) - - class_names = {} - sort_cat = sorted(lvis_api.dataset['categories'], key=lambda x: x['id']) - for x in sort_cat: - class_names[x['id']] = x['name'] - - img_ids = sorted(lvis_api.imgs.keys()) - imgs = lvis_api.load_imgs(img_ids) - anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids] - - ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] - assert len(set(ann_ids)) == len(ann_ids), \ - "Annotation ids in '{}' are not unique".format(json_file) - - imgs_anns = list(zip(imgs, anns)) - logger.info("Loaded {} images in the LVIS v1 format from {}".format( - len(imgs_anns), json_file)) - - dataset_dicts = [] - - for (img_dict, anno_dict_list) in imgs_anns: - record = {} - if "file_name" in img_dict: - file_name = img_dict["file_name"] - record["file_name"] = os.path.join(image_root, file_name) - - record["height"] = int(img_dict["height"]) - record["width"] = int(img_dict["width"]) - image_id = record["image_id"] = img_dict["id"] - - objs = [] - for anno in anno_dict_list: - assert anno["image_id"] == image_id - if anno.get('iscrowd', 0) > 0: - continue - obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS} - obj["category_id"] = 0 - obj["object_description"] = class_names[anno['category_id']] - if 'segmentation' in anno: - segm = anno["segmentation"] - valid_segm = [poly for poly in segm \ - if len(poly) % 2 == 0 and len(poly) >= 6] - if not len(segm) == len(valid_segm): - print('Annotation contains an invalid polygon with < 3 points') - assert len(segm) > 0 - obj["segmentation"] = segm - objs.append(obj) - record["annotations"] = objs - if len(record["annotations"]) == 0: - continue - record["task"] = "ObjectDet" - dataset_dicts.append(record) - - return dataset_dicts - - -_CUSTOM_SPLITS_LVIS = { - "GRiT_coco2017_train": ("coco/train2017/", "coco/annotations/instances_train2017.json"), -} - - -for key, (image_root, json_file) in 
_CUSTOM_SPLITS_LVIS.items():
-    register_GRiTcoco_instances(
-        key,
-        get_GRiTcoco_meta(),
-        os.path.join("datasets", json_file) if "://" not in json_file else json_file,
-        os.path.join("datasets", image_root),
-    )
\ No newline at end of file
diff --git a/spaces/ZeroTwo3/WavJourney/wavjourney_cli.py b/spaces/ZeroTwo3/WavJourney/wavjourney_cli.py
deleted file mode 100644
index c898cdc6d136d09ab8b697a18abf1a103f56f589..0000000000000000000000000000000000000000
--- a/spaces/ZeroTwo3/WavJourney/wavjourney_cli.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import time
-import argparse
-
-import utils
-import pipeline
-
-parser = argparse.ArgumentParser()
-parser.add_argument('-f', '--full', action='store_true', help='Go through the full process')
-parser.add_argument('--input-text', type=str, default='', help='input text or text file')
-parser.add_argument('--session-id', type=str, default='', help='session id; if set to empty, the system will allocate an id')
-args = parser.parse_args()
-
-if args.full:
-    input_text = args.input_text
-
-    start_time = time.time()
-    session_id = pipeline.init_session(args.session_id)
-    api_key = utils.get_api_key()
-
-    assert api_key is not None, "Please set your openai_key in the environment variable."
-
-    print(f"Session {session_id} is created.")
-
-    pipeline.full_steps(session_id, input_text, api_key)
-    end_time = time.time()
-
-    print(f"WavJourney took {end_time - start_time:.2f} seconds to complete.")
diff --git a/spaces/abdvl/datahub_qa_bot/docs/schema-history.md b/spaces/abdvl/datahub_qa_bot/docs/schema-history.md
deleted file mode 100644
index 9fc9ec1af52bbcc42218a47c839ed65b49bff50a..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/schema-history.md
+++ /dev/null
@@ -1,55 +0,0 @@
-import FeatureAvailability from '@site/src/components/FeatureAvailability';
-
-# About DataHub Schema History
-
-<FeatureAvailability/>
-
-Schema History is a valuable tool for understanding how a Dataset changes over time. It gives insight into the following
-cases, and it tells Data Practitioners when these changes happened.
-
-- A new field is added
-- An existing field is removed
-- An existing field changes type
-
-Schema History uses DataHub's [Timeline API](https://datahubproject.io/docs/dev-guides/timeline/) to compute schema changes.
-
-## Schema History Setup, Prerequisites, and Permissions
-
-Schema History is viewable in the DataHub UI for any Dataset that has had at least one schema change. To view a Dataset, a user
-must have the **View Entity Page** privilege, or be assigned to **any** DataHub Role.
-
-## Using Schema History
-
-You can view the Schema History for a Dataset by navigating to that Dataset's Schema Tab. As long as that Dataset has more than
-one version, you can view what the Dataset looked like at any given version by using the version selector.
-Here's an example from DataHub's official Demo environment with the
-[Snowflake pets dataset](https://demo.datahubproject.io/dataset/urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.adoption.pets,PROD)/Schema?is_lineage_mode=false).
-
-![](./imgs/schema-history-latest-version.png)
-
-If you click on an older version in the selector, you'll be able to see what the schema looked like back then. Notice
-the changes here to the glossary terms for the `status` field, and to the descriptions for the `created_at` and `updated_at`
-fields.
-
-![](./imgs/schema-history-older-version.png)
-
-In addition, you can toggle the Audit view, which shows you when the most recent changes were made to each field.
-You can activate it by clicking on the Audit icon above the top right of the table.
-
-![](./imgs/schema-history-audit-activated.png)
-
-You can see here that some of these fields were added at the oldest dataset version, while some were added only at this latest
-version. Some fields were even modified and had a type change at the latest version!
-
-### GraphQL
-
-* [getSchemaBlame](../graphql/queries.md#getSchemaBlame)
-* [getSchemaVersionList](../graphql/queries.md#getSchemaVersionList)
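As a quick illustration of calling these queries programmatically, here is a minimal sketch that posts
`getSchemaVersionList` to a DataHub deployment's GraphQL endpoint from Python. It is a sketch only: the host, the token
placeholder, and the input/result field names are assumptions, so check the query reference linked above for the
authoritative shapes.

```python
# Hypothetical sketch; the input and result field names are assumptions,
# not the authoritative GraphQL schema.
import requests

DATAHUB_HOST = "http://localhost:9002"  # assumed DataHub frontend host
URN = ("urn:li:dataset:(urn:li:dataPlatform:snowflake,"
       "long_tail_companions.adoption.pets,PROD)")
QUERY = """
query ($urn: String!) {
  getSchemaVersionList(input: { datasetUrn: $urn }) {
    latestVersion { semanticVersion }
  }
}
"""

resp = requests.post(
    f"{DATAHUB_HOST}/api/graphql",
    json={"query": QUERY, "variables": {"urn": URN}},
    headers={"Authorization": "Bearer <personal-access-token>"},
)
print(resp.json())
```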
-
-## FAQ and Troubleshooting
-
-**What updates are planned for the Schema History feature?**
-
-In the future, we plan to add the following features:
-- Supporting a linear timeline view where you can see what changes were made to various schema fields over time
-- Adding a diff viewer that highlights the differences between two versions of a Dataset
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/coder/tblr_bbox_coder.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/coder/tblr_bbox_coder.py
deleted file mode 100644
index edaffaf1fa252857e1a660ea14a613e2466fb52c..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmdet/core/bbox/coder/tblr_bbox_coder.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import mmcv
-import torch
-
-from ..builder import BBOX_CODERS
-from .base_bbox_coder import BaseBBoxCoder
-
-
-@BBOX_CODERS.register_module()
-class TBLRBBoxCoder(BaseBBoxCoder):
-    """TBLR BBox coder.
-
-    Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,
-    this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
-    right) and decodes them back to the original.
-
-    Args:
-        normalizer (list | float): Normalization factor to be
-            divided with when coding the coordinates. If it is a list, it should
-            have length of 4 indicating normalization factor in tblr dims.
-            Otherwise it is a unified float factor for all dims. Default: 4.0
-        clip_border (bool, optional): Whether to clip the objects outside the
-            border of the image. Defaults to True.
-    """
-
-    def __init__(self, normalizer=4.0, clip_border=True):
-        super(BaseBBoxCoder, self).__init__()
-        self.normalizer = normalizer
-        self.clip_border = clip_border
-
-    def encode(self, bboxes, gt_bboxes):
-        """Get box regression transformation deltas that can be used to
-        transform the ``bboxes`` into the ``gt_bboxes`` in the (top, bottom,
-        left, right) order.
-
-        Args:
-            bboxes (torch.Tensor): source boxes, e.g., object proposals.
-            gt_bboxes (torch.Tensor): target of the transformation, e.g.,
-                ground truth boxes.
-
-        Returns:
-            torch.Tensor: Box transformation deltas
-        """
-        assert bboxes.size(0) == gt_bboxes.size(0)
-        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
-        encoded_bboxes = bboxes2tblr(
-            bboxes, gt_bboxes, normalizer=self.normalizer)
-        return encoded_bboxes
-
-    def decode(self, bboxes, pred_bboxes, max_shape=None):
-        """Apply transformation `pred_bboxes` to `boxes`.
-
-        Args:
-            bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
-            pred_bboxes (torch.Tensor): Encoded boxes with shape
-                (B, N, 4) or (N, 4)
-            max_shape (Sequence[int] or torch.Tensor or Sequence[
-                Sequence[int]], optional): Maximum bounds for boxes, specifies
-                (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
-                the max_shape should be a Sequence[Sequence[int]]
-                and the length of max_shape should also be B.
-
-        Returns:
-            torch.Tensor: Decoded boxes.
-        """
-        decoded_bboxes = tblr2bboxes(
-            bboxes,
-            pred_bboxes,
-            normalizer=self.normalizer,
-            max_shape=max_shape,
-            clip_border=self.clip_border)
-
-        return decoded_bboxes
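# Illustrative usage of TBLRBBoxCoder (an added sketch, assuming 2D (N, 4)
# inputs); the numbers follow the worked example under `bboxes2tblr` below.
import torch

coder = TBLRBBoxCoder(normalizer=4.0)
priors = torch.tensor([[0., 0., 100., 100.]])  # one prior box (x1, y1, x2, y2)
gts = torch.tensor([[20., 30., 80., 90.]])     # its matched ground-truth box

deltas = coder.encode(priors, gts)             # tensor([[0.0500, 0.1000, 0.0750, 0.0750]])
decoded = coder.decode(priors, deltas, max_shape=(120, 120))
assert torch.allclose(decoded, gts)            # the round trip restores the gt box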
-
-
-@mmcv.jit(coderize=True)
-def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
-    """Encode ground truth boxes to tblr coordinate.
-
-    It first converts the gt coordinates to tblr format,
-    (top, bottom, left, right), relative to prior box centers.
-    The tblr coordinate may be normalized by the side length of prior bboxes
-    if `normalize_by_wh` is specified as True, and it is then normalized by
-    the `normalizer` factor.
-
-    Args:
-        priors (Tensor): Prior boxes in point form
-            Shape: (num_proposals, 4).
-        gts (Tensor): Coords of ground truth for each prior in point-form
-            Shape: (num_proposals, 4).
-        normalizer (Sequence[float] | float): normalization parameter of
-            encoded boxes. If it is a list, it has to have length = 4.
-            Default: 4.0
-        normalize_by_wh (bool): Whether to normalize tblr coordinate by the
-            side length (wh) of prior bboxes.
-
-    Return:
-        encoded boxes (Tensor), Shape: (num_proposals, 4)
-    """
-
-    # distance between the matched gt box edges and the prior's center
-    if not isinstance(normalizer, float):
-        normalizer = torch.tensor(normalizer, device=priors.device)
-        assert len(normalizer) == 4, 'Normalizer must have length = 4'
-    assert priors.size(0) == gts.size(0)
-    prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2
-    xmin, ymin, xmax, ymax = gts.split(1, dim=1)
-    top = prior_centers[:, 1].unsqueeze(1) - ymin
-    bottom = ymax - prior_centers[:, 1].unsqueeze(1)
-    left = prior_centers[:, 0].unsqueeze(1) - xmin
-    right = xmax - prior_centers[:, 0].unsqueeze(1)
-    loc = torch.cat((top, bottom, left, right), dim=1)
-    if normalize_by_wh:
-        # Normalize tblr by anchor width and height
-        wh = priors[:, 2:4] - priors[:, 0:2]
-        w, h = torch.split(wh, 1, dim=1)
-        loc[:, :2] /= h  # tb is normalized by h
-        loc[:, 2:] /= w  # lr is normalized by w
-    # Normalize tblr by the given normalization factor
-    return loc / normalizer
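# Worked example for `bboxes2tblr` above, using the default normalizer (4.0):
#   prior = (0, 0, 100, 100)  ->  center (50, 50), w = h = 100
#   gt    = (20, 30, 80, 90)
#   top  = 50 - 30 = 20        bottom = 90 - 50 = 40
#   left = 50 - 20 = 30        right  = 80 - 50 = 30
#   divide (top, bottom) by h and (left, right) by w  ->  (0.2, 0.4, 0.3, 0.3)
#   divide by the normalizer 4.0                      ->  (0.05, 0.1, 0.075, 0.075)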
-
-
-@mmcv.jit(coderize=True)
-def tblr2bboxes(priors,
-                tblr,
-                normalizer=4.0,
-                normalize_by_wh=True,
-                max_shape=None,
-                clip_border=True):
-    """Decode tblr outputs to prediction boxes.
-
-    The process includes 3 steps: 1) De-normalize tblr coordinates by
-    multiplying them with `normalizer`; 2) De-normalize tblr coordinates by the
-    prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert
-    tblr (top, bottom, left, right) pair relative to the center of priors back
-    to (xmin, ymin, xmax, ymax) coordinate.
-
-    Args:
-        priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
-            Shape: (N, 4) or (B, N, 4).
-        tblr (Tensor): Coords of network output in tblr form
-            Shape: (N, 4) or (B, N, 4).
-        normalizer (Sequence[float] | float): Normalization parameter of
-            encoded boxes. By list, it represents the normalization factors at
-            tblr dims. By float, it is the unified normalization factor at all
-            dims. Default: 4.0
-        normalize_by_wh (bool): Whether the tblr coordinates have been
-            normalized by the side length (wh) of prior bboxes.
-        max_shape (Sequence[int] or torch.Tensor or Sequence[
-            Sequence[int]], optional): Maximum bounds for boxes, specifies
-            (H, W, C) or (H, W). If priors shape is (B, N, 4), then
-            the max_shape should be a Sequence[Sequence[int]]
-            and the length of max_shape should also be B.
-        clip_border (bool, optional): Whether to clip the objects outside the
-            border of the image. Defaults to True.
-
-    Return:
-        decoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
-    """
-    if not isinstance(normalizer, float):
-        normalizer = torch.tensor(normalizer, device=priors.device)
-        assert len(normalizer) == 4, 'Normalizer must have length = 4'
-    assert priors.size(0) == tblr.size(0)
-    if priors.ndim == 3:
-        assert priors.size(1) == tblr.size(1)
-
-    loc_decode = tblr * normalizer
-    prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
-    if normalize_by_wh:
-        wh = priors[..., 2:4] - priors[..., 0:2]
-        w, h = torch.split(wh, 1, dim=-1)
-        # In-place operations on a slice would fail when exporting to ONNX
-        th = h * loc_decode[..., :2]  # tb
-        tw = w * loc_decode[..., 2:]  # lr
-        loc_decode = torch.cat([th, tw], dim=-1)
-    # loc_decode.split(1, dim=-1) cannot be exported with ONNX
-    top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
-    xmin = prior_centers[..., 0].unsqueeze(-1) - left
-    xmax = prior_centers[..., 0].unsqueeze(-1) + right
-    ymin = prior_centers[..., 1].unsqueeze(-1) - top
-    ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
-
-    bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
-
-    if clip_border and max_shape is not None:
-        if not isinstance(max_shape, torch.Tensor):
-            max_shape = priors.new_tensor(max_shape)
-        max_shape = max_shape[..., :2].type_as(priors)
-        if max_shape.ndim == 2:
-            assert bboxes.ndim == 3
-            assert max_shape.size(0) == bboxes.size(0)
-
-        min_xy = priors.new_tensor(0)
-        max_xy = torch.cat([max_shape, max_shape],
-                           dim=-1).flip(-1).unsqueeze(-2)
-        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
-        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
-
-    return bboxes
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/utils/trace.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/utils/trace.py
deleted file mode 100644
index 5ca99dc3eda05ef980d9a4249b50deca8273b6cc..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/utils/trace.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import warnings
-
-import torch
-
-from annotator.uniformer.mmcv.utils import digit_version
-
-
-def is_jit_tracing() -> bool:
-    if (torch.__version__ != 'parrots'
-            and digit_version(torch.__version__) >= digit_version('1.6.0')):
-        on_trace = torch.jit.is_tracing()
-        # In PyTorch 1.6, torch.jit.is_tracing has a bug.
-        # Refers to https://github.com/pytorch/pytorch/issues/42448
-        if isinstance(on_trace, bool):
-            return on_trace
-        else:
-            return torch._C._is_tracing()
-    else:
-        warnings.warn(
-            'torch.jit.is_tracing is only supported after v1.6.0. '
-            'Therefore is_tracing returns False automatically. Please '
-            'set on_trace manually if you are using trace.', UserWarning)
-        return False
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/font/fontconfig.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/font/fontconfig.py
deleted file mode 100644
index a15f617d246e908373fa50cb73e6581b4157087f..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/font/fontconfig.py
+++ /dev/null
@@ -1,362 +0,0 @@
-"""
-Wrapper around the Linux FontConfig library. Used to find available fonts.
-""" - -from collections import OrderedDict -from ctypes import * - -import pyglet.lib -from pyglet.util import asbytes, asstr -from pyglet.font.base import FontException - - -# fontconfig library definitions - -(FcResultMatch, - FcResultNoMatch, - FcResultTypeMismatch, - FcResultNoId, - FcResultOutOfMemory) = range(5) -FcResult = c_int - -FC_FAMILY = asbytes('family') -FC_SIZE = asbytes('size') -FC_SLANT = asbytes('slant') -FC_WEIGHT = asbytes('weight') -FC_FT_FACE = asbytes('ftface') -FC_FILE = asbytes('file') - -FC_WEIGHT_REGULAR = 80 -FC_WEIGHT_BOLD = 200 - -FC_SLANT_ROMAN = 0 -FC_SLANT_ITALIC = 100 - -(FcTypeVoid, - FcTypeInteger, - FcTypeDouble, - FcTypeString, - FcTypeBool, - FcTypeMatrix, - FcTypeCharSet, - FcTypeFTFace, - FcTypeLangSet) = range(9) -FcType = c_int - -(FcMatchPattern, - FcMatchFont) = range(2) -FcMatchKind = c_int - - -class _FcValueUnion(Union): - _fields_ = [ - ('s', c_char_p), - ('i', c_int), - ('b', c_int), - ('d', c_double), - ('m', c_void_p), - ('c', c_void_p), - ('f', c_void_p), - ('p', c_void_p), - ('l', c_void_p), - ] - - -class FcValue(Structure): - _fields_ = [ - ('type', FcType), - ('u', _FcValueUnion) - ] - -# End of library definitions - - -class FontConfig: - def __init__(self): - self._fontconfig = self._load_fontconfig_library() - self._search_cache = OrderedDict() - self._cache_size = 20 - - def dispose(self): - while len(self._search_cache) > 0: - self._search_cache.popitem().dispose() - - self._fontconfig.FcFini() - self._fontconfig = None - - def create_search_pattern(self): - return FontConfigSearchPattern(self._fontconfig) - - def find_font(self, name, size=12, bold=False, italic=False): - result = self._get_from_search_cache(name, size, bold, italic) - if result: - return result - - search_pattern = self.create_search_pattern() - search_pattern.name = name - search_pattern.size = size - search_pattern.bold = bold - search_pattern.italic = italic - - result = search_pattern.match() - self._add_to_search_cache(search_pattern, result) - search_pattern.dispose() - return result - - def have_font(self, name): - result = self.find_font(name) - if result: - # Check the name matches, fontconfig can return a default - if name and result.name and result.name.lower() != name.lower(): - return False - return True - else: - return False - - def char_index(self, ft_face, character): - return self._fontconfig.FcFreeTypeCharIndex(ft_face, ord(character)) - - def _add_to_search_cache(self, search_pattern, result_pattern): - self._search_cache[(search_pattern.name, - search_pattern.size, - search_pattern.bold, - search_pattern.italic)] = result_pattern - if len(self._search_cache) > self._cache_size: - self._search_cache.popitem(last=False)[1].dispose() - - def _get_from_search_cache(self, name, size, bold, italic): - result = self._search_cache.get((name, size, bold, italic), None) - - if result and result.is_valid: - return result - else: - return None - - @staticmethod - def _load_fontconfig_library(): - fontconfig = pyglet.lib.load_library('fontconfig') - fontconfig.FcInit() - - fontconfig.FcPatternBuild.restype = c_void_p - fontconfig.FcPatternCreate.restype = c_void_p - fontconfig.FcFontMatch.restype = c_void_p - fontconfig.FcFreeTypeCharIndex.restype = c_uint - - fontconfig.FcPatternAddDouble.argtypes = [c_void_p, c_char_p, c_double] - fontconfig.FcPatternAddInteger.argtypes = [c_void_p, c_char_p, c_int] - fontconfig.FcPatternAddString.argtypes = [c_void_p, c_char_p, c_char_p] - fontconfig.FcConfigSubstitute.argtypes = [c_void_p, c_void_p, c_int] - 
fontconfig.FcDefaultSubstitute.argtypes = [c_void_p] - fontconfig.FcFontMatch.argtypes = [c_void_p, c_void_p, c_void_p] - fontconfig.FcPatternDestroy.argtypes = [c_void_p] - - fontconfig.FcPatternGetFTFace.argtypes = [c_void_p, c_char_p, c_int, c_void_p] - fontconfig.FcPatternGet.argtypes = [c_void_p, c_char_p, c_int, c_void_p] - - return fontconfig - - -class FontConfigPattern: - def __init__(self, fontconfig, pattern=None): - self._fontconfig = fontconfig - self._pattern = pattern - - @property - def is_valid(self): - return self._fontconfig and self._pattern - - def _create(self): - assert not self._pattern - assert self._fontconfig - self._pattern = self._fontconfig.FcPatternCreate() - - def _destroy(self): - assert self._pattern - assert self._fontconfig - self._fontconfig.FcPatternDestroy(self._pattern) - self._pattern = None - - @staticmethod - def _bold_to_weight(bold): - return FC_WEIGHT_BOLD if bold else FC_WEIGHT_REGULAR - - @staticmethod - def _italic_to_slant(italic): - return FC_SLANT_ITALIC if italic else FC_SLANT_ROMAN - - def _set_string(self, name, value): - assert self._pattern - assert name - assert self._fontconfig - - if not value: - return - - value = value.encode('utf8') - - self._fontconfig.FcPatternAddString(self._pattern, name, asbytes(value)) - - def _set_double(self, name, value): - assert self._pattern - assert name - assert self._fontconfig - - if not value: - return - - self._fontconfig.FcPatternAddDouble(self._pattern, name, c_double(value)) - - def _set_integer(self, name, value): - assert self._pattern - assert name - assert self._fontconfig - - if not value: - return - - self._fontconfig.FcPatternAddInteger(self._pattern, name, c_int(value)) - - def _get_value(self, name): - assert self._pattern - assert name - assert self._fontconfig - - value = FcValue() - result = self._fontconfig.FcPatternGet(self._pattern, name, 0, byref(value)) - if _handle_fcresult(result): - return value - else: - return None - - def _get_string(self, name): - value = self._get_value(name) - - if value and value.type == FcTypeString: - return asstr(value.u.s) - else: - return None - - def _get_face(self, name): - value = self._get_value(name) - - if value and value.type == FcTypeFTFace: - return value.u.f - else: - return None - - def _get_integer(self, name): - value = self._get_value(name) - - if value and value.type == FcTypeInteger: - return value.u.i - else: - return None - - def _get_double(self, name): - value = self._get_value(name) - - if value and value.type == FcTypeDouble: - return value.u.d - else: - return None - - -class FontConfigSearchPattern(FontConfigPattern): - def __init__(self, fontconfig): - super(FontConfigSearchPattern, self).__init__(fontconfig) - - self.name = None - self.bold = False - self.italic = False - self.size = None - - def match(self): - self._prepare_search_pattern() - result_pattern = self._get_match() - - if result_pattern: - return FontConfigSearchResult(self._fontconfig, result_pattern) - else: - return None - - def _prepare_search_pattern(self): - self._create() - self._set_string(FC_FAMILY, self.name) - self._set_double(FC_SIZE, self.size) - self._set_integer(FC_WEIGHT, self._bold_to_weight(self.bold)) - self._set_integer(FC_SLANT, self._italic_to_slant(self.italic)) - - self._substitute_defaults() - - def _substitute_defaults(self): - assert self._pattern - assert self._fontconfig - - self._fontconfig.FcConfigSubstitute(None, self._pattern, FcMatchPattern) - self._fontconfig.FcDefaultSubstitute(self._pattern) - - def _get_match(self): 
- assert self._pattern - assert self._fontconfig - - match_result = FcResult() - match_pattern = self._fontconfig.FcFontMatch(0, self._pattern, byref(match_result)) - - if _handle_fcresult(match_result.value): - return match_pattern - else: - return None - - def dispose(self): - self._destroy() - - -class FontConfigSearchResult(FontConfigPattern): - def __init__(self, fontconfig, result_pattern): - super(FontConfigSearchResult, self).__init__(fontconfig, result_pattern) - - @property - def name(self): - return self._get_string(FC_FAMILY) - - @property - def size(self): - return self._get_double(FC_SIZE) - - @property - def bold(self): - return self._get_integer(FC_WEIGHT) == FC_WEIGHT_BOLD - - @property - def italic(self): - return self._get_integer(FC_SLANT) == FC_SLANT_ITALIC - - @property - def face(self): - return self._get_face(FC_FT_FACE) - - @property - def file(self): - return self._get_string(FC_FILE) - - def dispose(self): - self._destroy() - - -def _handle_fcresult(result): - if result == FcResultMatch: - return True - elif result in (FcResultNoMatch, FcResultTypeMismatch, FcResultNoId): - return False - elif result == FcResultOutOfMemory: - raise FontException('FontConfig ran out of memory.') - - -_fontconfig_instance = None - - -def get_fontconfig(): - global _fontconfig_instance - if not _fontconfig_instance: - _fontconfig_instance = FontConfig() - return _fontconfig_instance diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/input/controller_db.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/input/controller_db.py deleted file mode 100644 index 61b70ae9c6764fc10dda0364979d98ed9ea6e30a..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/input/controller_db.py +++ /dev/null @@ -1,837 +0,0 @@ -from pyglet import compat_platform - - -# This file is automatically generated by 'pyglet/tools/gen_controller_db.py' -# Generated on: Wed Jan 18 14:06:46 2023 - -if compat_platform.startswith("linux"): - mapping_list = [ -"xinput,*,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000c82d00000090000011010000,8BitDo FC30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000090000011010000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00001038000000010000,8BitDo FC30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00001038000000010000,8BitDo FC30 
Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000650000011010000,8BitDo M30 Gamepad,a:b0,b:b1,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a5,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00005106000000010000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001590000011010000,8BitDo N30 Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001590000011010000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00006528000000010000,8BitDo N30 Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00006528000000010000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000012ab000010010000,8BitDo NES30 Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000012ab000010010000,8BitDo NES30 Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000022000000090000011010000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000022000000090000011010000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000190000011010000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000190000011010000,8BitDo NES30 
Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000203800000900000000010000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000203800000900000000010000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00002038000000010000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00002038000000010000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000660000011010000,8BitDo Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000660000011010000,8BitDo Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00000660000000010000,8BitDo Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00000660000000010000,8BitDo Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00000061000000010000,8BitDo SF30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00000061000000010000,8BitDo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000102800000900000000010000,8BitDo SFC30 
Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000102800000900000000010000,8BitDo SFC30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00003028000000010000,8BitDo SFC30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00003028000000010000,8BitDo SFC30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000260000011010000,8BitDo SN30 Pro+,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000260000011010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000160000011010000,8BitDo SN30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000160000011010000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000020ab000010010000,8BitDo SNES30 Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000020ab000010010000,8BitDo SNES30 Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000202800000900000000010000,8BitDo SNES30 Gamepad,a:b0,b:b1,back:b10,dpdown:b122,dpleft:b119,dpright:b120,dpup:b117,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000202800000900000000010000,8BitDo SNES30 
Gamepad,a:b1,b:b0,back:b10,dpdown:b122,dpleft:b119,dpright:b120,dpup:b117,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001130000011010000,8BitDo Ultimate Wired Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b26,paddle1:b24,paddle2:b25,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000c82d00001330000011010000,8BitDo Ultimate Wireless Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b26,paddle1:b23,paddle2:b19,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000c82d00001890000011010000,8BitDo Zero 2,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001890000011010000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00003032000000010000,8BitDo Zero 2,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00003032000000010000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000a00500003232000001000000,8BitDo Zero Gamepad,a:b0,b:b1,back:b10,dpdown:b122,dpleft:b119,dpright:b120,dpup:b117,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000a00500003232000001000000,8BitDo Zero Gamepad,a:b1,b:b0,back:b10,dpdown:b122,dpleft:b119,dpright:b120,dpup:b117,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000a00500003232000008010000,8BitDo Zero Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000a00500003232000008010000,8BitDo Zero Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000031000011010000,8Bitdo Receiver,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,", -"03000000c82d00001290000011010000,8Bitdo SN30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001290000011010000,8Bitdo SN30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00006228000000010000,8Bitdo SN30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000c82d00006228000000010000,8Bitdo SN30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"05000000050b00000045000031000000,ASUS 
Gamepad,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b10,x:b2,y:b3,", -"05000000050b00000045000040000000,ASUS Gamepad,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b10,x:b2,y:b3,", -"03000000050b00000579000011010000,ASUS ROG Kunai 3 Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b36,paddle1:b52,paddle2:b53,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"05000000050b00000679000000010000,ASUS ROG Kunai 3 Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b21,paddle1:b22,paddle2:b23,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"030000006f0e00003901000020060000,Afterglow Controller for Xbox One,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006f0e00003901000000430000,Afterglow Prismatic Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006f0e00001302000000010000,Afterglow,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000100000008200000011010000,Akishop Customs PS360+ v1.66,a:b1,b:b2,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"05000000491900000204000021000000,Amazon Fire Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b17,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b12,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000491900001904000011010000,Amazon Luna Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b7,x:b2,y:b3,", -"05000000710100001904000000010000,Amazon Luna Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b11,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,", -"03000000790000003018000011010000,Arcade Fightstick F300,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"03000000503200000110000000000000,Atari Classic Controller,a:b0,back:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b4,start:b3,x:b1,", -"05000000503200000110000000000000,Atari 
Classic Controller,a:b0,back:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b4,start:b3,x:b1,", -"03000000503200000210000000000000,Atari Game Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a4,rightx:a2,righty:a3,start:b8,x:b2,y:b3,", -"05000000503200000210000000000000,Atari Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b2,", -"030000005e0400008e02000047010000,Atari Xbox 360 Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000c62400001b89000011010000,BDA MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000d62000002a79000011010000,BDA PS4 Fightpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000120c0000f70e000011010000,Brook Universal Fighting Board,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:,lefty:,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:,righty:,start:b9,x:b0,y:b3,", -"03000000b40400000a01000000010000,CYPRESS USB Gamepad,a:b0,b:b1,back:b5,guide:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b8,x:b3,y:b4,", -"03000000ffff0000ffff000000010000,Chinese-made Xbox Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,", -"03000000e82000006058000001010000,Cideko AK08b,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000260900008888000000010000,Cyber Gadget GameCube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a5,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,", -"03000000a306000022f6000011010000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,", -"03000000790000000600000010010000,DragonRise Inc. 
Generic USB Joystick,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,", -"030000006f0e00003001000001010000,EA Sports PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000b40400001124000011010000,Flydigi Vader 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b12,lefttrigger:a5,leftx:a0,lefty:a1,paddle1:b2,paddle2:b5,paddle4:b17,rightshoulder:b7,rightstick:b13,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"05000000151900004000000001000000,Flydigi Vader 2,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"0300000079000000d418000000010000,GPD Win 2 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"0500000047532067616d657061640000,GS Gamepad,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000341a000005f7000010010000,GameCube {HuiJia USB box},a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,", -"03000000bc2000000055000011010000,GameSir G3w,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"0500000049190000020400001b010000,GameSir T4 Pro,crc:8283,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b23,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000ac0500001a06000011010000,GameSir-T3 2.02,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"0500000047532047616d657061640000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000c01100000140000011010000,GameStop PS4 Fun Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000006f0e00000104000000010000,Gamestop Logic3 
Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000008f0e00000800000010010000,Gasia Co. Ltd PS(R) Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"030000006f0e00001304000000010000,Generic X-Box pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000f0250000c183000010010000,Goodbetterbest Ltd USB Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000d11800000094000011010000,Google Stadia Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"03000000280400000140000000010000,Gravis Gamepad Pro USB ,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000008f0e00000610000000010000,GreenAsia Electronics 4Axes 12Keys Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a3,righty:a2,start:b11,x:b3,y:b0,", -"030000008f0e00001200000010010000,GreenAsia Inc. USB Joystick,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,", -"03000000c9110000f055000011010000,HJC Game GAMEPAD,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"030000000d0f00001000000011010000,HORI CO. LTD. FIGHTING STICK 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000000d0f00002200000011010000,HORI CO. LTD. REAL ARCADE Pro.V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000000d0f00006a00000011010000,HORI CO. LTD. Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00006b00000011010000,HORI CO. LTD. 
Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00005001000009040000,HORI Fighting Commander OCTA,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000000d0f00008400000011010000,HORI Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00008500000010010000,HORI Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00008800000011010000,HORI Fighting Stick mini 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000000d0f00008700000011010000,HORI Fighting Stick mini 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,rightshoulder:b5,rightstick:b11,righttrigger:a4,start:b9,x:b0,y:b3,", -"030000000d0f0000d800000072056800,HORI Real Arcade Pro S,a:b0,b:b1,back:b4,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b5,leftshoulder:b9,leftstick:b7,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a5,rightx:a2,righty:a3,start:b6,x:b2,y:b3,", -"030000000d0f0000aa00000011010000,HORI Real Arcade Pro,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"030000000d0f00006e00000011010000,HORIPAD 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00006600000011010000,HORIPAD 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00006700000001010000,HORIPAD ONE,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"06000000adde0000efbe000002010000,Hidromancer Game Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000d81400000862000011010000,HitBox (PS3/PC) Analog Mode,a:b1,b:b2,back:b8,guide:b9,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b12,x:b0,y:b3,", -"030000000d0f00005f00000011010000,Hori Fighting 
Commander 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00005e00000011010000,Hori Fighting Commander 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00008600000002010000,Hori Fighting Commander,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000ad1b000001f5000033050000,Hori Pad EX Turbo 2,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000008f0e00001330000010010000,HuiJia SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b9,x:b3,y:b0,", -"03000000242e00008816000001010000,Hyperkin X91,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000d80400008200000003000000,IMS PCU#0 Gamepad Interface,a:b1,b:b0,back:b4,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,start:b5,x:b3,y:b2,", -"03000000fd0500000030000000010000,InterAct GoPad I-73000 (Fighting Game Layout),a:b3,b:b4,back:b6,leftx:a0,lefty:a1,rightshoulder:b2,righttrigger:b5,start:b7,x:b0,y:b1,", -"05000000491900000204000000000000,Ipega PG-9087S,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"030000006e0500000320000010010000,JC-U3613M - DirectInput Mode,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b0,y:b1,", -"03000000300f00001001000010010000,Jess Tech Dual Analog Rumble Pad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,", -"03000000ba2200002010000001010000,Jess Technology USB Game Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,", -"030000006f0e00000103000000020000,Logic3 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006d04000019c2000010010000,Logitech Cordless RumblePad 2,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000016c2000010010000,Logitech Dual 
Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000016c2000011010000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d0400001dc2000014400000,Logitech F310 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006d0400001ec2000020200000,Logitech F510 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006d04000019c2000011010000,Logitech F710 Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d0400001fc2000005030000,Logitech F710 Gamepad (XInput),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006d04000018c2000010010000,Logitech RumblePad 2,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000011c2000010010000,Logitech WingMan Cordless RumblePad,a:b0,b:b1,back:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b5,leftshoulder:b6,lefttrigger:b9,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b10,rightx:a3,righty:a4,start:b8,x:b3,y:b4,", -"03000000c62400002b89000011010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"05000000c62400002a89000000010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b22,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"05000000c62400001a89000000010000,MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000250900006688000000010000,MP-8866 Super Dual Box,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,", -"05000000380700006652000025010000,Mad Catz C.T.R.L.R 
,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700005032000011010000,Mad Catz FightPad PRO (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700005082000011010000,Mad Catz FightPad PRO (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000380700008433000011010000,Mad Catz FightStick TE S+ (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700008483000011010000,Mad Catz FightStick TE S+ (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000ad1b00002ef0000090040000,Mad Catz Fightpad SFxT,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:a2,rightshoulder:b5,righttrigger:a5,start:b7,x:b2,y:b3,", -"03000000380700003847000090040000,Mad Catz Wired Xbox 360 Controller (SFIV),a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000380700001647000010040000,Mad Catz Wired Xbox 360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000ad1b000016f0000090040000,Mad Catz Xbox 360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000380700008034000011010000,Mad Catz fightstick (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700008084000011010000,Mad Catz fightstick (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000380700001888000010010000,MadCatz PC USB Wired Stick 8818,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700003888000010010000,MadCatz PC USB Wired Stick 8838,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:a0,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", 
-"03000000790000004418000010010000,Mayflash GameCube Controller,a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,", -"03000000780000000600000010010000,Microntek USB Joystick,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,", -"030000005e0400000e00000000010000,Microsoft SideWinder,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,rightshoulder:b7,start:b8,x:b3,y:b4,", -"030000005e0400008e02000004010000,Microsoft X-Box 360 pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e0400008e02000062230000,Microsoft X-Box 360 pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000d102000003020000,Microsoft X-Box One pad v2,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000d102000001010000,Microsoft X-Box One pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e0400008502000000010000,Microsoft X-Box pad (Japan),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,", -"030000005e0400008902000021010000,Microsoft X-Box pad v2 (US),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,", -"030000005e0400008902000020010000,Microsoft Xbox Controller S,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,", -"05000000d6200000ad0d000001000000,Moga Pro,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,", -"030000006b140000010c000010010000,NACON GC-400ES,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"030000001008000001e5000010010000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b6,start:b9,x:b3,y:b0,", -"03000000550900001472000011010000,NVIDIA Controller v01.04,a:b0,b:b1,back:b14,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b17,leftshoulder:b4,leftstick:b7,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a5,start:b6,x:b2,y:b3,", 
-"03000000550900001072000011010000,NVIDIA Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b13,leftshoulder:b4,leftstick:b8,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"030000004b120000014d000000010000,NYKO AIRFLO EX,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,", -"03000000451300000830000010010000,NYKO CORE,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000790000004318000010010000,Nintendo GameCube Controller,a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000790000004318000010010000,Nintendo GameCube Controller,a:b1,b:b0,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b2,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000007e0500003703000000016800,Nintendo GameCube Controller,a:b0,b:b2,dpdown:b6,dpleft:b4,dpright:b5,dpup:b7,lefttrigger:a4,leftx:a0,lefty:a1~,rightshoulder:b9,righttrigger:a5,rightx:a2,righty:a3~,start:b8,x:b1,y:b3,", -"050000007e0500000620000001800000,Nintendo Switch Joy-Con (L),a:b15,b:b16,guide:b4,leftshoulder:b6,leftstick:b12,leftx:a1,lefty:a0~,rightshoulder:b8,start:b9,x:b17,y:b14,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000620000001800000,Nintendo Switch Joy-Con (L),a:b16,b:b15,guide:b4,leftshoulder:b6,leftstick:b12,leftx:a1,lefty:a0~,rightshoulder:b8,start:b9,x:b14,y:b17,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"060000007e0500000620000000000000,Nintendo Switch Joy-Con (L/R),a:b1,b:b0,back:b9,dpdown:b15,dpleft:b16,dpright:b17,dpup:b14,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"060000007e0500000620000000000000,Nintendo Switch Joy-Con (L/R),a:b0,b:b1,back:b9,dpdown:b15,dpleft:b16,dpright:b17,dpup:b14,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"060000007e0500000820000000000000,Nintendo Switch Joy-Con (L/R),a:b1,b:b0,back:b9,dpdown:b15,dpleft:b16,dpright:b17,dpup:b14,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"060000007e0500000820000000000000,Nintendo Switch Joy-Con (L/R),a:b0,b:b1,back:b9,dpdown:b15,dpleft:b16,dpright:b17,dpup:b14,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000720000001800000,Nintendo Switch Joy-Con 
(R),a:b2,b:b1,guide:b9,leftshoulder:b4,leftstick:b10,leftx:a1~,lefty:a0,rightshoulder:b6,start:b8,x:b3,y:b0,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000720000001800000,Nintendo Switch Joy-Con (R),a:b1,b:b2,guide:b9,leftshoulder:b4,leftstick:b10,leftx:a1~,lefty:a0,rightshoulder:b6,start:b8,x:b0,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000d620000013a7000011010000,Nintendo Switch PowerA Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000d620000013a7000011010000,Nintendo Switch PowerA Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000d620000011a7000011010000,Nintendo Switch PowerA Core Plus Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000d620000011a7000011010000,Nintendo Switch PowerA Core Plus Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000007e0500000920000011810000,Nintendo Switch Pro Controller,a:b1,b:b0,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,misc1:b4,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000007e0500000920000011810000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,misc1:b4,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000004c69632050726f20436f6e00,Nintendo Switch Pro Controller,crc:15b7,a:b1,b:b0,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000004c69632050726f20436f6e00,Nintendo Switch Pro Controller,crc:15b7,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000920000001000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", 
-"050000007e0500000920000001000000,Nintendo Switch Pro Controller,a:b1,b:b0,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000920000001800000,Nintendo Switch Pro Controller,a:b1,b:b0,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000920000001800000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b11,leftshoulder:b5,leftstick:b12,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b13,righttrigger:b8,rightx:a2,righty:a3,start:b10,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000603000000060000,Nintendo Wii Remote Classic Controller,crc:0d8a,a:b0,b:b1,back:b10,dpdown:b14,dpleft:b12,dpright:b13,dpup:b11,guide:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,start:b9,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000603000000060000,Nintendo Wii Remote Classic Controller,crc:0d8a,a:b1,b:b0,back:b10,dpdown:b14,dpleft:b12,dpright:b13,dpup:b11,guide:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,start:b9,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500003003000001000000,Nintendo Wii Remote Pro Controller,a:b1,b:b0,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500003003000001000000,Nintendo Wii Remote Pro Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e0500000603000000060000,Nintendo Wii Remote,crc:60be,a:b1,b:b0,back:b4,dpdown:b8,dpleft:b6,dpright:b7,dpup:b5,guide:b2,start:b3,x:b9,y:b10,", -"05000000010000000100000003000000,Nintendo Wiimote,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"030000000d0500000308000010010000,Nostromo n45 Dual Analog Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,leftstick:b12,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b10,x:b2,y:b3,", -"05000000362800000100000002010000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,x:b1,y:b2,", -"05000000362800000100000003010000,OUYA Game 
Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,x:b1,y:b2,", -"030000005e0400000202000000010000,Old Xbox pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b5,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b2,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b3,y:b4,", -"03000000ff1100003133000010010000,PC Game Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"030000006f0e00006401000001010000,PDP Battlefield One,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006f0e00000901000011010000,PDP Versus Fighting Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"03000000ff1100004133000010010000,PS2 Controller,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,", -"03000000341a00003608000011010000,PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000004c0500006802000010010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"030000004c0500006802000010810000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"030000004c0500006802000011010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:a12,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:a13,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"030000004c0500006802000011810000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"030000006f0e00001402000011010000,PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000008f0e00000300000010010000,PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"050000004c0500006802000000010000,PS3 
Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:a12,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:a13,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"050000004c0500006802000000800000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"050000004c0500006802000000810000,PS3 Controller,a:b0,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b16,dpup:b13,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"05000000504c415953544154494f4e00,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"060000004c0500006802000000010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"030000004c050000a00b000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000a00b000011810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"030000004c050000c405000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000c405000011810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"030000004c050000cc09000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000cc09000011010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000cc09000011810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"050000004c050000c405000000010000,PS4 
Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"050000004c050000c405000000810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"050000004c050000cc09000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"050000004c050000cc09000000810000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"050000004c050000cc09000001800000,PS4 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"030000004c050000e60c000000010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000e60c000011010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000e60c000011810000,PS5 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"050000004c050000e60c000000010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"050000004c050000e60c000000810000,PS5 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b2,", -"030000004c050000da0c000011010000,Playstation Controller,a:b2,b:b1,back:b8,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b3,y:b0,", -"03000000c62400003a54000001010000,PowerA XBox One Controller,a:b0,b:b1,back:b6,dpdown:h0.7,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000c62400000053000000010000,PowerA,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000300f00001211000011010000,QanBa Arcade 
JoyStick,a:b2,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b5,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b6,start:b9,x:b1,y:b3,", -"03000000222c00000225000011010000,Qanba Dragon Arcade Joystick (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000222c00000025000011010000,Qanba Dragon Arcade Joystick (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000222c00000020000011010000,Qanba Drone Arcade Joystick (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,rightshoulder:b5,righttrigger:a4,start:b9,x:b0,y:b3,", -"03000000222c00000223000011010000,Qanba Obsidian Arcade Joystick (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000222c00000023000011010000,Qanba Obsidian Arcade Joystick (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000008916000001fd000024010000,Razer Onza Classic Edition,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000321500000204000011010000,Razer Panthera (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000321500000104000011010000,Razer Panthera (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000321500000010000011010000,Razer RAIJU,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000321500000507000000010000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b21,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000321500000011000011010000,Razer Raion Fightpad for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000008916000000fe000024010000,Razer 
Sabertooth,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000c6240000045d000024010000,Razer Sabertooth,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000c6240000045d000025010000,Razer Sabertooth,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000321500000009000011010000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"050000003215000000090000163a0000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"0300000032150000030a000001010000,Razer Wildcat,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"0300000000f000000300000000010000,RetroPad,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,", -"03000000790000001100000010010000,Retrolink SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,", -"030000006b140000130d000011010000,Revolution Pro Controller 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000006b140000010d000011010000,Revolution Pro Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000006f0e00001e01000011010000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006f0e00004601000001010000,Rock Candy Xbox One Controller,a:b0,b:b1,back:b6,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000006f0e00001f01000000010000,Rock Candy,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000632500007505000010010000,SHANWAN PS3/PC 
Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000341a00000908000010010000,SL-6566,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000457500002211000010010000,SZMY-POWER PC Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000a306000023f6000011010000,Saitek Cyborg V.1 Game Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,", -"03000000a30600000cff000010010000,Saitek P2500 Force Rumble Pad,a:b2,b:b3,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,x:b0,y:b1,", -"03000000a30600000c04000011010000,Saitek P2900 Wireless Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b12,x:b0,y:b3,", -"03000000a30600000901000000010000,Saitek P880,a:b2,b:b3,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,x:b0,y:b1,", -"03000000a30600000b04000000010000,Saitek P990 Dual Analog Pad,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b8,x:b0,y:b3,", -"03000000a306000018f5000010010000,Saitek PLC Saitek P3200 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,", -"03000000c01600008704000011010000,Serial/Keyboard/Mouse/Joystick,a:b12,b:b10,back:b4,dpdown:b2,dpleft:b3,dpright:b1,dpup:b0,leftshoulder:b9,leftstick:b14,lefttrigger:b6,leftx:a1,lefty:a0,rightshoulder:b8,rightstick:b15,righttrigger:b7,rightx:a2,righty:a3,start:b5,x:b13,y:b11,", -"03000000f025000021c1000010010000,ShanWan Gioteck PS3 Wired Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000632500002305000010010000,ShanWan USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000250900000500000000010000,Sony PS2 pad with SmartJoy 
adapter,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,", -"030000005e0400008e02000020200000,SpeedLink XEOX Pro Analog Gamepad pad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e0400008e02000073050000,Speedlink TORID Wireless Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000de2800000112000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,", -"03000000de2800000112000011010000,Steam Controller,a:b2,b:b3,back:b10,dpdown:+a5,dpleft:-a4,dpright:+a4,dpup:-a5,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a7,leftx:a0,lefty:a1,paddle1:b15,paddle2:b16,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a3,start:b11,x:b4,y:b5,", -"03000000de2800000211000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,", -"03000000de2800000211000011010000,Steam Controller,a:b2,b:b3,back:b10,dpdown:+a5,dpleft:-a4,dpright:+a4,dpup:-a5,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a7,leftx:a0,lefty:a1,paddle1:b15,paddle2:b16,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a3,start:b11,x:b4,y:b5,", -"03000000de2800004211000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,", -"03000000de2800004211000011010000,Steam Controller,a:b2,b:b3,back:b10,dpdown:+a5,dpleft:-a4,dpright:+a4,dpup:-a5,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a7,leftx:a0,lefty:a1,paddle1:b15,paddle2:b16,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a3,start:b11,x:b4,y:b5,", -"03000000de280000fc11000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"05000000de2800000212000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,", -"05000000de2800000511000001000000,Steam Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,", -"05000000de2800000611000001000000,Steam 
Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,paddle1:b11,paddle2:b10,rightshoulder:b5,righttrigger:a3,start:b7,x:b2,y:b3,", -"03000000de280000ff11000001000000,Steam Virtual Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"0500000011010000311400001b010000,SteelSeries Stratus Duo,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b32,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"05000000110100001914000009010000,SteelSeries Stratus XL,a:b0,b:b1,back:b17,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b18,leftshoulder:b6,leftstick:b13,lefttrigger:+a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:+a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000ad1b000038f0000090040000,Street Fighter IV FightStick TE,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000666600000488000000010000,Super Joy Box 5 Pro,a:b2,b:b1,back:b9,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,", -"0300000000f00000f100000000010000,Super RetroPort,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,", -"030000004f0400000ed0000011010000,ThrustMaster eSwap PRO Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004f04000020b3000010010000,Thrustmaster 2 in 1 DT,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,", -"030000004f04000015b3000001010000,Thrustmaster Dual Analog 3.2,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,", -"030000004f04000015b3000010010000,Thrustmaster Dual Analog 4,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,", -"030000004f04000023b3000000010000,Thrustmaster Dual Trigger 3-in-1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004f04000000b3000010010000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,", -"030000004f04000009d0000000010000,Thrustmaster Run N Drive Wireless 
PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000004f04000008d0000000010000,Thrustmaster Run N Drive Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000bd12000015d0000010010000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,", -"03000000d814000007cd000011010000,Toodles 2008 Chimp PC/PS3,a:b0,b:b1,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b3,y:b2,", -"03000000100800000100000010010000,Twin USB PS2 Adapter,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,", -"03000000100800000300000010010000,USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,", -"03000000790000000600000007010000,USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,", -"03000000790000001100000000010000,USB Gamepad1,a:b2,b:b1,back:b8,dpdown:a0,dpleft:a1,dpright:a2,dpup:a4,start:b9,", -"05000000ac0500003232000001000000,VR-BOX,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b2,y:b3,", -"030000006f0e00000302000011010000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000006f0e00000702000011010000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000005e0400008e02000010010000,X360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e0400008e02000014010000,X360 Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e0400001907000000010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e0400009102000007010000,X360 Wireless 
Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000a102000000010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000a102000007010000,X360 Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"03000000450c00002043000010010000,XEOX Gamepad SL-6556-BK,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"0000000058626f782033363020576900,Xbox 360 Wireless Controller,a:b0,b:b1,back:b14,dpdown:b11,dpleft:b12,dpright:b13,dpup:b10,guide:b7,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b6,x:b2,y:b3,", -"030000005e040000a102000014010000,Xbox 360 Wireless Receiver (XBOX),a:b0,b:b1,back:b6,dpdown:b14,dpleft:b11,dpright:b12,dpup:b13,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"0000000058626f782047616d65706100,Xbox Gamepad (userspace driver),a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"050000005e040000050b000002090000,Xbox One Elite Series 2,a:b0,b:b1,back:b136,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a6,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"050000005e040000050b000003090000,Xbox One Elite Series 2,a:b0,b:b1,back:b121,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"050000005e040000e302000002090000,Xbox One Elite,a:b0,b:b1,back:b136,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a6,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"030000005e040000ea02000000000000,Xbox One Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000ea02000001030000,Xbox One Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"050000005e040000e002000003090000,Xbox One Wireless 
Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"050000005e040000fd02000003090000,Xbox One Wireless Controller,a:b0,b:b1,back:b15,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,guide:b16,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"050000005e040000130b000007050000,Xbox Series X Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b15,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"050000005e040000130b000011050000,Xbox Series X Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b15,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"05000000172700004431000029010000,XiaoMi Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b20,leftshoulder:b6,leftstick:b13,lefttrigger:a7,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a5,start:b11,x:b3,y:b4,", -"03000000c0160000e105000001010000,Xin-Mo Xin-Mo Dual Arcade,a:b4,b:b3,back:b6,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b9,leftshoulder:b2,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b1,y:b0,", -"03000000120c0000100e000011010000,ZEROPLUS P4 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000120c0000101e000011010000,ZEROPLUS P4 Wired Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000666600006706000000010000,boom PSX to PC Converter,a:b2,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a2,righty:a3,start:b11,x:b3,y:b0,", -"030000000d0f00000d00000000010000,hori,a:b0,b:b6,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b3,leftx:b4,lefty:b5,rightshoulder:b7,start:b9,x:b1,y:b2,", -"03000000830500006020000010010000,iBuffalo SNES Controller,a:b0,b:b1,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000830500006020000010010000,iBuffalo SNES Controller,a:b1,b:b0,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000006964726f69643a636f6e0000,idroid:con,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", 
-"03000000b50700001503000010010000,impact,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,", -"030000009b2800008000000020020000,raphnet technologies 1-player WUSBMote v2.2,a:b1,b:b4,back:b2,dpdown:b13,dpleft:b14,dpright:b15,dpup:b12,leftshoulder:b6,rightshoulder:b7,start:b3,x:b0,y:b5,", -"030000009b2800000300000001010000,raphnet.net 4nes4snes v1.5,a:b0,b:b4,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b1,y:b5,", -] -elif compat_platform.startswith("darwin"): - mapping_list = [ -"03000000c82d00000090000001000000,8BitDo FC30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000090000001000000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001038000000010000,8BitDo FC30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001038000000010000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000650000001000000,8BitDo M30 Gamepad,a:b0,b:b1,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a5,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000650000001000000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a5,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00005106000000010000,8BitDo M30 Gamepad,a:b0,b:b1,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00005106000000010000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001590000001000000,8BitDo N30 Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001590000001000000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", 
-"03000000c82d00006528000000010000,8BitDo N30 Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00006528000000010000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000012ab000001000000,8BitDo NES30 Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000012ab000001000000,8BitDo NES30 Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000022000000090000001000000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000022000000090000001000000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000203800000900000000010000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000203800000900000000010000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000660000000020000,8BitDo Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000660000000020000,8BitDo Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000102800000900000000000000,8BitDo SFC30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000102800000900000000000000,8BitDo SFC30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001290000001000000,8BitDo SN30 
Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001290000001000000,8BitDo SN30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000260000001000000,8BitDo SN30 Pro+,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000260000001000000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000261000000010000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000160000001000000,8BitDo SN30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000160000001000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a5,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001130000000020000,8BitDo Ultimate Wired Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b26,paddle1:b24,paddle2:b25,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000c82d00001330000000020000,8BitDo Ultimate Wireless Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b26,paddle1:b23,paddle2:b19,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000c82d00001890000001000000,8BitDo Zero 2,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001890000001000000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00003032000000010000,8BitDo Zero 2,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00003032000000010000,8BitDo Zero 
2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000a00500003232000008010000,8BitDo Zero Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000a00500003232000008010000,8BitDo Zero Gamepad,a:b1,b:b2,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000a00500003232000009010000,8BitDo Zero Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000a00500003232000009010000,8BitDo Zero Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000050b00000579000000010000,ASUS ROG Kunai 3 Gamepad,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b14,leftshoulder:b6,leftstick:b15,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b42,paddle1:b9,paddle2:b11,rightshoulder:b7,rightstick:b16,righttrigger:a4,rightx:a2,righty:a3,start:b13,x:b3,y:b4,", -"03000000050b00000679000000010000,ASUS ROG Kunai 3 Gamepad,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b14,leftshoulder:b6,leftstick:b15,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b23,rightshoulder:b7,rightstick:b16,righttrigger:a4,rightx:a2,righty:a3,start:b13,x:b3,y:b4,", -"03000000491900001904000001010000,Amazon Luna Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b7,x:b2,y:b3,", -"03000000710100001904000000010000,Amazon Luna Controller,a:b0,b:b1,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b7,lefttrigger:a5,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a3,start:b6,x:b2,y:b3,", -"03000000c62400001a89000000010000,BDA MOGA XP5-X Plus,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b14,leftshoulder:b6,leftstick:b15,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b16,righttrigger:a4,rightx:a2,righty:a3,start:b13,x:b3,y:b4,", -"03000000c62400001b89000000010000,BDA MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000d62000002a79000000010000,BDA PS4 Fightpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000008305000031b0000000000000,Cideko AK08b,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000260900008888000088020000,Cyber Gadget GameCube 
Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a5,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,", -"03000000a306000022f6000001030000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,", -"030000000d0f00008400000000010000,Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00008500000000010000,Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000151900004000000001000000,Flydigi Vader 2,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000b40400001124000000000000,Flydigi Vader 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b12,lefttrigger:b8,leftx:a0,lefty:a1,paddle1:b4,paddle2:b5,paddle3:b17,rightshoulder:b7,rightstick:b13,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b2,y:b3,", -"03000000790000000600000000000000,G-Shark GS-GP702,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000ac0500001a06000002020000,GameSir-T3 2.02,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"0500000047532047616d657061640000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"03000000c01100000140000000010000,GameStop PS4 Fun Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000ad1b000001f9000000000000,Gamestop BB-070 X360 Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"03000000d11800000094000000010000,Google Stadia Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"030000000d0f00005f00000000000000,HORI Fighting Commander 4 
PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00005e00000000000000,HORI Fighting Commander 4 PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00008800000000010000,HORI Fighting Stick mini 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000000d0f00008700000000010000,HORI Fighting Stick mini 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000000d0f00004d00000000000000,HORI Gem Pad 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f0000aa00000072050000,HORI Real Arcade Pro,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"030000000d0f00006e00000000010000,HORIPAD 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00006600000000010000,HORIPAD 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00006600000000000000,HORIPAD FPS PLUS 4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000000d0f00005f00000000010000,Hori Fighting Commander 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000000d0f00005e00000000010000,Hori Fighting Commander 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000008f0e00001330000011010000,HuiJia SNES Controller,a:b4,b:b2,back:b16,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b12,rightshoulder:b14,start:b18,x:b6,y:b0,", -"030000006d04000016c2000000020000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000016c2000000030000,Logitech Dual 
Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000016c2000014040000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000016c2000000000000,Logitech F310 Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d04000018c2000000000000,Logitech F510 Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000006d0400001fc2000000000000,Logitech F710 Gamepad (XInput),a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"030000006d04000019c2000000000000,Logitech Wireless Gamepad (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000d8140000cecf000000000000,MC Cthulhu,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"03000000c62400002a89000000010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b21,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000c62400002b89000000010000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000380700005032000000010000,Mad Catz FightPad PRO (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700005082000000010000,Mad Catz FightPad PRO (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000380700008433000000010000,Mad Catz FightStick TE S+ (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000380700008483000000010000,Mad Catz FightStick TE S+ 
(PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000790000004418000000010000,Mayflash GameCube Controller,a:b1,b:b2,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,", -"0300000025090000e803000000000000,Mayflash Wii Classic Controller,a:b1,b:b0,back:b8,dpdown:b13,dpleft:b12,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,", -"03000000790000000018000000000000,Mayflash WiiU Pro Game Controller Adapter (DInput),a:b4,b:b8,back:b32,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b16,leftstick:b40,lefttrigger:b24,leftx:a0,lefty:a4,rightshoulder:b20,rightstick:b44,righttrigger:b28,rightx:a8,righty:a12,start:b36,x:b0,y:b12,", -"030000001008000001e5000006010000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b6,start:b9,x:b3,y:b0,", -"03000000550900001472000025050000,NVIDIA Controller v01.04,a:b0,b:b1,back:b17,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b4,leftstick:b7,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b8,righttrigger:a4,rightx:a2,righty:a5,start:b6,x:b2,y:b3,", -"030000004b120000014d000000010000,NYKO AIRFLO EX,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,", -"030000007e0500000920000000000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,", -"050000007e05000009200000ff070000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b10,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"050000007e05000009200000ff070000,Nintendo Switch Pro Controller,a:b1,b:b0,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b10,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000006f0e00000901000002010000,PDP Versus Fighting Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000004c0500006802000000000000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"030000004c0500006802000000010000,PS3 Controller,a:b14,b:b13,back:b0,dpdown:b6,dpleft:b7,dpright:b5,dpup:b4,guide:b16,leftshoulder:b10,leftstick:b1,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b11,rightstick:b2,righttrigger:b9,rightx:a2,righty:a3,start:b3,x:b15,y:b12,", -"030000004c050000a00b000000010000,PS4 
Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000c405000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000c405000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000cc09000000010000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"050000004c050000e60c000000010000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000008f0e00000300000000000000,Piranha xtreme,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,", -"03000000222c00000225000000010000,Qanba Dragon Arcade Joystick (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"030000008916000000fd000000000000,Razer Onza TE,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"03000000321500000204000000010000,Razer Panthera (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000321500000104000000010000,Razer Panthera (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000321500000010000000010000,Razer RAIJU,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000321500000507000001010000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b21,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000321500000011000000010000,Razer Raion Fightpad for 
PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000321500000009000000020000,Razer Serval,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b9,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:a4,rightx:a2,righty:a3,start:b7,x:b2,y:b3,", -"0300000032150000030a000000000000,Razer Wildcat,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"03000000790000001100000000000000,Retrolink Classic Controller,a:b2,b:b1,back:b8,leftshoulder:b4,leftx:a3,lefty:a4,rightshoulder:b5,start:b9,x:b3,y:b0,", -"03000000790000001100000006010000,Retrolink SNES Controller,a:b2,b:b1,back:b8,dpdown:+a4,dpleft:-a3,dpright:+a3,dpup:-a4,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,", -"030000006b140000130d000000010000,Revolution Pro Controller 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000006b140000010d000000010000,Revolution Pro Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000003512000021ab000000000000,SFC30 Joystick,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,", -"03000000457500002211000000010000,SZMY-POWER PC Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,", -"03000000b40400000a01000000000000,Sega Saturn USB Gamepad,a:b0,b:b1,back:b5,guide:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b8,x:b3,y:b4,", -"03000000811700007e05000000000000,Sega Saturn,a:b2,b:b4,dpdown:b16,dpleft:b15,dpright:b14,dpup:b17,leftshoulder:b8,lefttrigger:a5,leftx:a0,lefty:a2,rightshoulder:b9,righttrigger:a4,start:b13,x:b0,y:b6,", -"030000004c050000cc09000000000000,Sony DualShock 4 V2,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004c050000a00b000000000000,Sony DualShock 4 Wireless Adaptor,a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000005e0400008e02000001000000,Steam Virtual Gamepad,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"050000004e696d6275732b0000000000,SteelSeries 
Nimbus+,a:b0,b:b1,back:b15,dpdown:b11,dpleft:b13,dpright:b12,dpup:b10,guide:b16,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3~,start:b14,x:b2,y:b3,", -"03000000110100002014000000000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b12,x:b2,y:b3,", -"03000000110100002014000001000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,x:b2,y:b3,", -"03000000381000002014000001000000,SteelSeries Nimbus,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,x:b2,y:b3,", -"03000000110100001714000000000000,SteelSeries Stratus XL,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,start:b12,x:b2,y:b3,", -"03000000110100001714000020010000,SteelSeries Stratus XL,a:b0,b:b1,dpdown:b9,dpleft:b11,dpright:b10,dpup:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1~,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3~,start:b12,x:b2,y:b3,", -"030000004f0400000ed0000000020000,ThrustMaster eSwap PRO Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"030000004f04000015b3000000000000,Thrustmaster Dual Analog 3.2,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,", -"030000004f04000000b3000000000000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,", -"03000000bd12000015d0000000000000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b9,x:b3,y:b0,", -"03000000bd12000015d0000000010000,Tomee SNES USB Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,", -"03000000100800000100000000000000,Twin USB Joystick,a:b4,b:b2,back:b16,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b12,leftstick:b20,lefttrigger:b8,leftx:a0,lefty:a2,rightshoulder:b14,rightstick:b22,righttrigger:b10,rightx:a6,righty:a4,start:b18,x:b6,y:b0,", -"030000006f0e00000302000025040000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"030000006f0e00000702000003060000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,", -"050000005769696d6f74652028303000,Wii Remote,a:b4,b:b5,back:b7,dpdown:b3,dpleft:b0,dpright:b1,dpup:b2,guide:b8,leftshoulder:b11,lefttrigger:b12,leftx:a0,lefty:a1,start:b6,x:b10,y:b9,", 
-"050000005769696d6f74652028313800,Wii U Pro Controller,a:b16,b:b15,back:b7,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b8,leftshoulder:b19,leftstick:b23,lefttrigger:b21,leftx:a0,lefty:a1,rightshoulder:b20,rightstick:b24,righttrigger:b22,rightx:a2,righty:a3,start:b6,x:b18,y:b17,", -"030000005e0400008e02000000000000,X360 Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"03000000c6240000045d000000000000,Xbox 360 Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"030000005e040000050b000003090000,Xbox Elite Wireless Controller,a:b0,b:b1,back:b38,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"030000005e040000d102000000000000,Xbox One Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"030000005e040000dd02000000000000,Xbox One Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"030000005e040000e302000000000000,Xbox One Wired Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"030000005e040000200b000011050000,Xbox Wireless Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"030000005e040000e002000000000000,Xbox Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000e002000003090000,Xbox Wireless Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a5,rightx:a3,righty:a4,start:b7,x:b2,y:b3,", -"030000005e040000ea02000000000000,Xbox Wireless Controller,a:b0,b:b1,back:b9,dpdown:b12,dpleft:b13,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a5,rightx:a3,righty:a4,start:b8,x:b2,y:b3,", -"030000005e040000fd02000003090000,Xbox Wireless Controller,a:b0,b:b1,back:b16,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a4,rightx:a2,righty:a3,start:b11,x:b3,y:b4,", -"03000000172700004431000029010000,XiaoMi Game 
Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a6,rightx:a2,righty:a5,start:b11,x:b3,y:b4,", -"03000000120c0000100e000000010000,ZEROPLUS P4 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000120c0000101e000000010000,ZEROPLUS P4 Wired Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000830500006020000000010000,iBuffalo SNES Controller,a:b0,b:b1,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000830500006020000000010000,iBuffalo SNES Controller,a:b1,b:b0,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000830500006020000000000000,iBuffalo USB 2-axis 8-button Gamepad,a:b1,b:b0,back:b6,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b7,x:b3,y:b2,", -] -elif compat_platform.startswith("win"): - mapping_list = [ -"03000000fa2d00000100000000000000,3DRUDDER,leftx:a0,lefty:a1,rightx:a5,righty:a2,", -"03000000c82d00000090000000000000,8BitDo FC30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000090000000000000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001038000000000000,8BitDo FC30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001038000000000000,8BitDo FC30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000650000000000000,8BitDo M30 Gamepad,a:b0,b:b1,back:b10,guide:b2,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000650000000000000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00005106000000000000,8BitDo M30 Gamepad,a:b0,b:b1,back:b10,guide:b2,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", 
-"03000000c82d00005106000000000000,8BitDo M30 Gamepad,a:b1,b:b0,back:b10,guide:b2,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001590000000000000,8BitDo N30 Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001590000000000000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00006528000000000000,8BitDo N30 Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00006528000000000000,8BitDo N30 Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000012ab000000000000,8BitDo NES30 Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000003512000012ab000000000000,8BitDo NES30 Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000022000000090000000000000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000022000000090000000000000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000203800000900000000000000,8BitDo NES30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000203800000900000000000000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00002038000000000000,8BitDo NES30 
Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00002038000000000000,8BitDo NES30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000660000000000000,8BitDo Pro 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000660000000000000,8BitDo Pro 2,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000060000000000000,8BitDo SF30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000060000000000000,8BitDo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000061000000000000,8BitDo SF30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00000061000000000000,8BitDo SF30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000102800000900000000000000,8BitDo SFC30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000102800000900000000000000,8BitDo SFC30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001290000000000000,8BitDo SN30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00001290000000000000,8BitDo SN30 Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00006228000000000000,8BitDo SN30 Gamepad,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000c82d00006228000000000000,8BitDo SN30 
Gamepad,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00000260000000000000,8BitDo SN30 Pro+,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00000260000000000000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00000261000000000000,8BitDo SN30 Pro+,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00000261000000000000,8BitDo SN30 Pro+,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b2,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00000160000000000000,8BitDo SN30 Pro,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00000160000000000000,8BitDo SN30 Pro,a:b1,b:b0,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"030000003512000020ab000000000000,8BitDo SNES30 Gamepad,a:b0,b:b1,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"030000003512000020ab000000000000,8BitDo SNES30 Gamepad,a:b1,b:b0,back:b10,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00001130000000000000,8BitDo Ultimate Wired Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,misc1:b26,paddle1:b24,paddle2:b25,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000c82d00001330000000000000,8BitDo Ultimate Wireless Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,misc1:b26,paddle1:b23,paddle2:b19,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000c82d00001890000000000000,8BitDo Zero 2,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00001890000000000000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00003032000000000000,8BitDo Zero 2,a:b0,b:b1,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000c82d00003032000000000000,8BitDo Zero 2,a:b1,b:b0,back:b10,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000a00500003232000000000000,8BitDo Zero Gamepad,a:b0,b:b1,back:b10,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b6,rightshoulder:b7,start:b11,x:b3,y:b4,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000a00500003232000000000000,8BitDo Zero Gamepad,a:b1,b:b0,back:b10,dpdown:+a2,dpleft:-a0,dpright:+a0,dpup:-a2,leftshoulder:b6,rightshoulder:b7,start:b11,x:b4,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000050b00000579000000000000,ASUS ROG Kunai 3 Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000050b00000679000000000000,ASUS ROG Kunai 3 Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,misc1:b15,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"030000008f0e00001200000000000000,Acme GA-02,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,",
-"03000000fa190000f0ff000000000000,Acteck AGJ-3200,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000341a00003608000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00000263000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00001101000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00001401000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00001402000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00001901000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00001a01000000000000,Afterglow PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000d62000001d57000000000000,Airflo PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000491900001904000000000000,Amazon Luna Controller,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b9,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b7,x:b2,y:b3,",
-"03000000d62000002a79000000000000,BDA PS4 Fightpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000d81d00000b00000000000000,BUFFALO BSGP1601 Series ,a:b5,b:b3,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b9,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b13,x:b4,y:b2,",
-"03000000d6200000e557000000000000,Batarang,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000c01100001352000000000000,Battalife Joystick,a:b6,b:b7,back:b2,leftshoulder:b0,leftx:a0,lefty:a1,rightshoulder:b1,start:b3,x:b4,y:b5,",
-"030000006f0e00003201000000000000,Battlefield 4 PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000bc2000006012000000000000,Betop 2126F,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000bc2000000055000000000000,Betop BFM Gamepad,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000bc2000006312000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000bc2000006412000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000c01100000555000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000c01100000655000000000000,Betop Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000790000000700000000000000,Betop Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,",
-"03000000808300000300000000000000,Betop Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,",
-"030000006b1400000055000000000000,Bigben PS3 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"030000006b1400000103000000000000,Bigben PS3 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,",
-"0300000066f700000500000000000000,BrutalLegendTest,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,",
-"03000000e82000006058000000000000,Cideko AK08b,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000260900008888000000000000,Cyber Gadget GameCube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a5,leftx:a0,lefty:a1,rightshoulder:b6,righttrigger:a4,rightx:a2,righty:a3~,start:b7,x:b2,y:b3,",
-"03000000a306000022f6000000000000,Cyborg V.3 Rumble Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:+a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:-a3,rightx:a2,righty:a4,start:b9,x:b0,y:b3,",
-"03000000451300000830000000000000,Defender Game Racer X7,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000791d00000103000000000000,Dual Box WII,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000bd12000002e0000000000000,Dual USB Vibration Joystick,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a3,righty:a2,start:b11,x:b3,y:b0,",
-"030000006f0e00003001000000000000,EA SPORTS PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000341a00000108000000000000,EXEQ RF USB Gamepad 8206,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"030000008f0e00000f31000000000000,EXEQ,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,",
-"03000000b80500000410000000000000,Elecom Gamepad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b1,",
-"03000000b80500000610000000000000,Elecom Gamepad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b1,",
-"03000000852100000201000000000000,FF-GP1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00002700000000000000,FIGHTING STICK V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"03000000151900004000000000000000,Flydigi Vader 2,a:b11,b:b10,back:b3,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b7,leftstick:b1,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b0,righttrigger:b4,rightx:a3,righty:a4,start:b2,x:b9,y:b8,",
-"03000000b40400001124000000000000,Flydigi Vader 2,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b12,lefttrigger:b8,leftx:a0,lefty:a1,paddle1:b4,paddle2:b5,paddle4:b17,rightshoulder:b7,rightstick:b13,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b2,y:b3,",
-"03000000790000000600000000000000,G-Shark GS-GP702,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"030000008f0e00000d31000000000000,GAMEPAD 3 TURBO,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000300f00000b01000000000000,GGE909 Recoil Pad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,",
-"03000000790000002201000000000000,Game Controller for PC,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"0300000066f700000100000000000000,Game VIB Joystick,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a3,righty:a2,start:b11,x:b0,y:b1,",
-"03000000491900000204000000000000,GameSir T4 Pro,crc:1aa4,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000ac0500003d03000000000000,GameSir,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000ac0500004d04000000000000,GameSir,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000ac0500001a06000000000000,GameSir-T3 2.02,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000ffff00000000000000000000,GameStop Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000c01100000140000000000000,GameStop PS4 Fun Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000260900002625000000000000,Gamecube Controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b6,lefttrigger:a4,leftx:a0,lefty:a1,righttrigger:a5,rightx:a2,righty:a3,start:b7,x:b2,y:b3,",
-"03000000280400000140000000000000,Gamepad Pro USB,a:b1,b:b2,back:b8,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000005c1a00003330000000000000,Genius MaxFire Grandias 12V,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b4,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b2,y:b3,",
-"030000008305000031b0000000000000,Genius Maxfire Blaze 3,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000451300000010000000000000,Genius Maxfire Grandias 12,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"030000008305000009a0000000000000,Genius,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000f025000021c1000000000000,Gioteck PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000f0250000c383000000000000,Gioteck VX2 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000f0250000c483000000000000,Gioteck VX2 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000f0250000c283000000000000,Gioteck,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000d11800000094000000000000,Google Stadia Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:b12,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:b11,rightx:a3,righty:a4,start:b9,x:b2,y:b3,",
-"03000000632500002605000000000000,HJD-X,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"030000000d0f00008400000000000000,HORI Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f00008500000000000000,HORI Fighting Commander,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00008800000000000000,HORI Fighting Stick mini 4 (PS3),a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b8,x:b0,y:b3,",
-"030000000d0f00008700000000000000,HORI Fighting Stick mini 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000000d0f00006e00000000000000,HORIPAD 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00006600000000000000,HORIPAD 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f0000ee00000000000000,HORIPAD mini4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000250900000017000000000000,HRAP2 on PS/SS/N64 Joypad to USB BOX,a:b2,b:b1,back:b9,leftshoulder:b5,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b6,start:b8,x:b3,y:b0,",
-"03000000341a00000302000000000000,Hama Scorpad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00004900000000000000,Hatsune Miku Sho Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000d81400000862000000000000,HitBox Edition Cthulhu+,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,lefttrigger:b4,rightshoulder:b7,righttrigger:b6,start:b9,x:b0,y:b3,",
-"030000000d0f00005f00000000000000,Hori Fighting Commander 4 (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00005e00000000000000,Hori Fighting Commander 4 (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f00004000000000000000,Hori Fighting Stick Mini 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,lefttrigger:b4,rightshoulder:b7,righttrigger:b6,start:b9,x:b0,y:b3,",
-"030000000d0f00000900000000000000,Hori Pad 3 Turbo,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00005400000000000000,Hori Pad 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00004d00000000000000,Hori Pad A,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f0000c100000000000000,Horipad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000008f0e00001330000000000000,HuiJia SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b6,rightshoulder:b7,start:b9,x:b3,y:b0,",
-"030000006f0e00002401000000000000,INJUSTICE FightStick PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"03000000ac0500002c02000000000000,IPEGA,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b8,leftstick:b13,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b9,rightstick:b14,righttrigger:b7,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000b50700001403000000000000,Impact Black,a:b2,b:b3,back:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,",
-"03000000491900000204000000000000,Ipega PG-9023,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"030000006e0500000520000000000000,JC-P301U,a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b0,y:b1,",
-"030000006e0500000320000000000000,JC-U3613M (DInput),a:b2,b:b3,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,start:b11,x:b0,y:b1,",
-"030000006e0500000720000000000000,JC-W01U,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b1,",
-"03000000790000000200000000000000,King PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,",
-"030000006d040000d1ca000000000000,Logitech ChillStream,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006d040000d2ca000000000000,Logitech Cordless Precision,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006d04000011c2000000000000,Logitech Cordless Wingman,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b9,leftstick:b5,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b2,righttrigger:b7,rightx:a3,righty:a4,x:b4,",
-"030000006d04000016c2000000000000,Logitech Dual Action,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006d04000018c2000000000000,Logitech F510 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006d04000019c2000000000000,Logitech F710 Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006d0400001ac2000000000000,Logitech Precision Gamepad,a:b1,b:b2,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"03000000380700008081000000000000,MADCATZ SFV Arcade FightStick Alpha PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000380700006382000000000000,MLG Gamepad PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000c62400002a89000000000000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b15,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000c62400002b89000000000000,MOGA XP5-A Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000c62400001a89000000000000,MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000c62400001b89000000000000,MOGA XP5-X Plus,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000250900006688000000000000,MP-8866 Super Dual Box,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,",
-"03000000380700006652000000000000,Mad Catz C.T.R.L.R,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,",
-"03000000380700005032000000000000,Mad Catz FightPad PRO (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000380700005082000000000000,Mad Catz FightPad PRO (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000380700008433000000000000,Mad Catz FightStick TE S+ (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000380700008483000000000000,Mad Catz FightStick TE S+ (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000380700008134000000000000,Mad Catz FightStick TE2+ PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b7,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b4,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000380700008184000000000000,Mad Catz FightStick TE2+ PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,leftstick:b10,lefttrigger:a4,leftx:a0,lefty:a1,rightshoulder:b4,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000380700006252000000000000,Mad Catz Micro C.T.R.L.R,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,",
-"03000000380700008034000000000000,Mad Catz TE2 PS3 Fightstick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000380700008084000000000000,Mad Catz TE2 PS4 Fightstick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000380700001888000000000000,MadCatz SFIV FightStick PS3,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b5,lefttrigger:b7,leftx:a0,lefty:a1,rightshoulder:b4,righttrigger:b6,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000380700008532000000000000,Madcatz Arcade Fightstick TE S PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000380700003888000000000000,Madcatz Arcade Fightstick TE S+ PS3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000002a0600001024000000000000,Matricom,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b2,y:b3,",
-"03000000250900000128000000000000,Mayflash Arcade Stick,a:b1,b:b2,back:b8,leftshoulder:b0,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b3,righttrigger:b7,start:b9,x:b5,y:b6,",
-"03000000790000004418000000000000,Mayflash GameCube Controller,a:b1,b:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,",
-"030000008f0e00001030000000000000,Mayflash USB Adapter for original Sega Saturn controller,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,lefttrigger:b5,rightshoulder:b2,righttrigger:b7,start:b9,x:b3,y:b4,",
-"0300000025090000e803000000000000,Mayflash Wii Classic Controller,a:b1,b:b0,back:b8,dpdown:b13,dpleft:b12,dpright:b14,dpup:b11,guide:b10,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,",
-"03000000790000000018000000000000,Mayflash WiiU Pro Game Controller Adapter (DInput),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000efbe0000edfe000000000000,Monect Virtual Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a5,rightx:a3,righty:a4,start:b9,x:b3,y:b0,",
-"030000006b140000010c000000000000,NACON GC-400ES,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"030000001008000001e5000000000000,NEXT SNES Controller,a:b2,b:b1,back:b8,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b6,start:b9,x:b3,y:b0,",
-"03000000152000000182000000000000,NGDS,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b3,y:b0,",
-"030000005509000000b4000000000000,NVIDIA Virtual Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,",
-"030000004b120000014d000000000000,NYKO AIRFLO EX,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b11,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b12,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b2,y:b3,",
-"03000000790000004318000000000000,Nintendo GameCube Controller,a:b1,b:b2,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b0,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000790000004318000000000000,Nintendo GameCube Controller,a:b1,b:b0,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b2,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"03000000bd12000015d0000000000000,Nintendo Retrolink USB Super SNES Classic Controller,a:b2,b:b1,back:b8,leftshoulder:b4,leftx:a0,lefty:a1,rightshoulder:b5,start:b9,x:b3,y:b0,",
-"030000007e0500000920000000000000,Nintendo Switch Pro Controller,a:b1,b:b0,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b2,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"030000007e0500000920000000000000,Nintendo Switch Pro Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,",
-"030000000d0500000308000000000000,Nostromo N45,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,leftstick:b12,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b10,x:b2,y:b3,",
-"03000000d62000006d57000000000000,OPP PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000362800000100000000000000,OUYA Game Controller,a:b0,b:b3,dpdown:b9,dpleft:b10,dpright:b11,dpup:b8,guide:b14,leftshoulder:b4,leftstick:b6,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:b13,rightx:a3,righty:a4,x:b1,y:b2,",
-"03000000782300000a10000000000000,Onlive Wireless Controller,a:b15,b:b14,back:b7,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b5,leftshoulder:b11,leftstick:b9,lefttrigger:a2,leftx:a0,lefty:a1,rightshoulder:b10,rightstick:b8,righttrigger:a5,rightx:a3,righty:a4,start:b6,x:b13,y:b12,",
-"030000006b14000001a1000000000000,Orange Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b6,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b7,righttrigger:a4,rightx:a5,righty:a2,start:b9,x:b2,y:b3,",
-"03000000120c0000f60e000000000000,P4 Wired Gamepad,a:b1,b:b2,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b5,lefttrigger:b7,rightshoulder:b4,righttrigger:b6,start:b9,x:b0,y:b3,",
-"030000006f0e00000901000000000000,PDP Versus Fighting Pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"03000000632500002306000000000000,PS Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a2,righty:a3,start:b11,x:b3,y:b4,",
-"03000000e30500009605000000000000,PS to USB convert cable,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,",
-"03000000100800000100000000000000,PS1 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,",
-"030000008f0e00007530000000000000,PS1 Controller,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b1,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000100800000300000000000000,PS2 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a4,righty:a2,start:b9,x:b3,y:b0,",
-"03000000250900008888000000000000,PS2 Controller,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,",
-"03000000666600006706000000000000,PS2 Controller,a:b2,b:b1,back:b8,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b9,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b10,righttrigger:b5,rightx:a2,righty:a3,start:b11,x:b3,y:b0,",
-"030000006b1400000303000000000000,PS2 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"030000009d0d00001330000000000000,PS2 Controller,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000250900000500000000000000,PS3 Controller,a:b2,b:b1,back:b9,dpdown:h0.8,dpleft:h0.4,dpright:h0.2,dpup:h0.1,guide:,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b0,y:b3,",
-"030000004c0500006802000000000000,PS3 Controller,a:b2,b:b1,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b10,lefttrigger:a3~,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:a4~,rightx:a2,righty:a5,start:b8,x:b3,y:b0,",
-"03000000632500007505000000000000,PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000888800000803000000000000,PS3 Controller,a:b2,b:b1,back:b8,dpdown:h0.8,dpleft:h0.4,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b9,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b10,righttrigger:b7,rightx:a3,righty:a4,start:b11,x:b0,y:b3,",
-"030000008f0e00001431000000000000,PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000003807000056a8000000000000,PS3 RF pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000100000008200000000000000,PS360+ v1.66,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:h0.4,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000004c050000a00b000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000004c050000c405000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,touchpad:b13,x:b0,y:b3,",
-"030000004c050000cc09000000000000,PS4 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000004c050000e60c000000000000,PS5 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,misc1:b13,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000008f0e00000300000000000000,Piranha xtreme,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,",
-"03000000d62000006dca000000000000,PowerA Pro Ex,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000d62000009557000000000000,Pro Elite PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000d62000009f31000000000000,Pro Ex mini PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000d6200000c757000000000000,Pro Ex mini PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000222c00000020000000000000,QANBA DRONE ARCADE JOYSTICK,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:a3,rightshoulder:b5,righttrigger:a4,start:b9,x:b0,y:b3,",
-"03000000300f00000011000000000000,QanBa Arcade JoyStick 1008,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,start:b10,x:b0,y:b3,",
-"03000000300f00001611000000000000,QanBa Arcade JoyStick 4018,a:b1,b:b2,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b9,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b8,x:b0,y:b3,",
-"03000000300f00001210000000000000,QanBa Joystick Plus,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,rightshoulder:b5,start:b9,x:b2,y:b3,",
-"03000000341a00000104000000000000,QanBa Joystick Q4RAF,a:b5,b:b6,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b0,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b3,righttrigger:b7,start:b9,x:b1,y:b2,",
-"03000000222c00000025000000000000,Qanba Dragon Arcade Joystick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000222c00000223000000000000,Qanba Obsidian Arcade Joystick (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000222c00000023000000000000,Qanba Obsidian Arcade Joystick (PS4),a:b1,b:b2,back:b13,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f00001100000000000000,REAL ARCADE PRO.3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000000d0f00007000000000000000,REAL ARCADE PRO.4 VLX,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,rightstick:b11,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000000d0f00002200000000000000,REAL ARCADE Pro.V3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000050b00005819000000000000,ROG Chakram Core,a:b1,b:b0,leftx:a0,lefty:a1,x:b2,y:b3,",
-"03000000050b0000181a000000000000,ROG Chakram X,a:b1,b:b0,leftx:a0,lefty:a1,x:b2,y:b3,",
-"03000000050b00001a1a000000000000,ROG Chakram X,a:b1,b:b0,leftx:a0,lefty:a1,x:b2,y:b3,",
-"03000000050b00001c1a000000000000,ROG Chakram X,a:b1,b:b0,leftx:a0,lefty:a1,x:b2,y:b3,",
-"03000000050b0000e318000000000000,ROG Chakram,a:b1,b:b0,leftx:a0,lefty:a1,x:b2,y:b3,",
-"03000000050b0000e518000000000000,ROG Chakram,a:b1,b:b0,leftx:a0,lefty:a1,x:b2,y:b3,",
-"03000000321500000003000000000000,Razer Hydra,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,",
-"03000000321500000204000000000000,Razer Panthera (PS3),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000321500000104000000000000,Razer Panthera (PS4),a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000321500000507000000000000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000321500000707000000000000,Razer Raiju Mobile,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000321500000011000000000000,Razer Raion Fightpad for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"03000000321500000009000000000000,Razer Serval,+lefty:+a2,-lefty:-a1,a:b0,b:b1,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,leftx:a0,rightshoulder:b5,rightstick:b9,rightx:a3,righty:a4,start:b7,x:b2,y:b3,",
-"030000000d0f00006a00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f00006b00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00008a00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f00008b00000000000000,Real Arcade Pro.4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000000d0f00005b00000000000000,Real Arcade Pro.V4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000000d0f00005c00000000000000,Real Arcade Pro.V4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"0300000000f000000300000000000000,RetroUSB.com RetroPad,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,",
-"0300000000f00000f100000000000000,RetroUSB.com Super RetroPort,a:b1,b:b5,back:b2,leftshoulder:b6,leftx:a0,lefty:a1,rightshoulder:b7,start:b3,x:b0,y:b4,",
-"03000000790000001100000000000000,Retrolink SNES Controller,a:b2,b:b1,back:b8,dpdown:+a4,dpleft:-a3,dpright:+a3,dpup:-a4,leftshoulder:b4,rightshoulder:b5,start:b9,x:b3,y:b0,",
-"030000006b140000130d000000000000,Revolution Pro Controller 3,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000006b140000010d000000000000,Revolution Pro Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000006f0e00001e01000000000000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00002801000000000000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000006f0e00002f01000000000000,Rock Candy PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000341a00000208000000000000,SL-6555-SBK,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:-a4,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:a4,rightx:a3,righty:a2,start:b7,x:b2,y:b3,",
-"03000000341a00000908000000000000,SL-6566,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000790000001c18000000000000,STK-7024X,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000ff1100003133000000000000,SVEN X-PAD,a:b2,b:b3,back:b4,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b9,rightx:a2,righty:a4,start:b5,x:b0,y:b1,",
-"03000000457500002211000000000000,SZMY-POWER PC Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000a306000023f6000000000000,Saitek Cyborg V.1 Game pad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,",
-"03000000a30600001af5000000000000,Saitek Cyborg,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a4,start:b9,x:b0,y:b3,",
-"03000000300f00001201000000000000,Saitek Dual Analog Pad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,",
-"03000000a30600000cff000000000000,Saitek P2500 Force Rumble Pad,a:b2,b:b3,back:b11,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b10,leftshoulder:b4,leftstick:b8,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:b7,rightx:a2,righty:a3,x:b0,y:b1,",
-"03000000a30600000c04000000000000,Saitek P2900,a:b1,b:b2,back:b12,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,",
-"03000000300f00001001000000000000,Saitek P480 Rumble Pad,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,",
-"03000000a30600000b04000000010000,Saitek P990 Dual Analog Pad,a:b1,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b8,x:b0,y:b3,",
-"03000000a30600000b04000000000000,Saitek P990,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b3,",
-"03000000a30600002106000000000000,Saitek PS1000,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,",
-"03000000a306000020f6000000000000,Saitek PS2700,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b0,y:b3,",
-"03000000300f00001101000000000000,Saitek Rumble Pad,a:b2,b:b3,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,",
-"0300000000050000289b000000000000,Saturn_Adapter_2.0,a:b1,b:b2,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b0,y:b3,",
-"030000009b2800000500000000000000,Saturn_Adapter_2.0,a:b1,b:b2,leftshoulder:b6,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,righttrigger:b5,start:b9,x:b0,y:b3,",
-"030000008f0e00000800000000000000,SpeedLink Strike FX,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000c01100000591000000000000,Speedlink Torid,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000de280000ff11000000000000,Steam Virtual Gamepad,a:b0,b:b1,back:b6,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b8,lefttrigger:+a2,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b9,righttrigger:-a2,rightx:a3,righty:a4,start:b7,x:b2,y:b3,",
-"03000000110100003114000000000000,SteelSeries Stratus Duo,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000381000001814000000000000,SteelSeries Stratus XL,a:b0,b:b1,back:b18,dpdown:b13,dpleft:b14,dpright:b15,dpup:b12,guide:b19,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b2,y:b3,",
-"03000000110100001914000000000000,SteelSeries,a:b0,b:b1,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:,leftstick:b13,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:,rightstick:b14,righttrigger:b7,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000d620000011a7000000000000,Switch,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000004f04000007d0000000000000,T Mini Wireless,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000fa1900000706000000000000,Team 5,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000b50700001203000000000000,Techmobility X6-38V,a:b2,b:b3,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b0,y:b1,",
-"030000004f0400000ed0000000000000,ThrustMaster eSwap PRO Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000004f04000015b3000000000000,Thrustmaster Dual Analog 4,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,",
-"030000004f04000023b3000000000000,Thrustmaster Dual Trigger 3-in-1,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a5,start:b9,x:b0,y:b3,",
-"030000004f04000004b3000000000000,Thrustmaster Firestorm Dual Power 3,a:b0,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b1,y:b3,",
-"030000004f04000000b3000000000000,Thrustmaster Firestorm Dual Power,a:b0,b:b2,back:b9,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b8,leftshoulder:b4,leftstick:b11,lefttrigger:b5,leftx:a0,lefty:a1,rightshoulder:b6,rightstick:b12,righttrigger:b7,rightx:a2,righty:a3,start:b10,x:b1,y:b3,",
-"03000000666600000488000000000000,TigerGame PS/PS2 Game Controller Adapter,a:b2,b:b1,back:b9,dpdown:b14,dpleft:b15,dpright:b13,dpup:b12,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b8,x:b3,y:b0,",
-"03000000d62000006000000000000000,Tournament PS3 Controller,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"030000005f140000c501000000000000,Trust Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000b80500000210000000000000,Trust Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000d90400000200000000000000,TwinShock PS2,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a3,righty:a2,start:b9,x:b3,y:b0,",
-"03000000300f00000701000000000000,USB 4-Axis 12-Button Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a3,righty:a2,start:b9,x:b3,y:b0,",
-"03000000341a00002308000000000000,USB Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"030000006b1400000203000000000000,USB Gamepad,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000790000000a00000000000000,USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a4,start:b9,x:b3,y:b0,",
-"03000000f0250000c183000000000000,USB Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b0,y:b3,",
-"03000000ff1100004133000000000000,USB Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a4,righty:a2,start:b9,x:b3,y:b0,",
-"03000000632500002305000000000000,USB Vibration Joystick (BM),a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,",
-"03000000790000001b18000000000000,Venom Arcade Joystick,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000006f0e00000302000000000000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"030000006f0e00000702000000000000,Victrix Pro Fight Stick for PS4,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,rightshoulder:b5,righttrigger:b7,start:b9,x:b0,y:b3,",
-"03000000450c00002043000000000000,XEOX Gamepad SL-6556-BK,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000341a00000608000000000000,Xeox,a:b0,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b2,y:b3,",
-"03000000172700004431000000000000,XiaoMi Game Controller,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b20,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:a7,rightx:a2,righty:a5,start:b11,x:b3,y:b4,",
-"03000000790000004f18000000000000,ZD-T Android,a:b0,b:b1,back:b10,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b6,leftstick:b13,lefttrigger:b8,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b14,righttrigger:b9,rightx:a3,righty:a4,start:b11,x:b3,y:b4,",
-"03000000120c0000101e000000000000,ZEROPLUS P4 Wired
Gamepad,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b12,leftshoulder:b4,leftstick:b10,lefttrigger:a3,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:a4,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000d81d00000f00000000000000,iBUFFALO BSGP1204 Series,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000d81d00001000000000000000,iBUFFALO BSGP1204P Series,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b6,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:b7,rightstick:b11,righttrigger:b5,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -"03000000830500006020000000000000,iBuffalo SNES Controller,a:b0,b:b1,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b2,y:b3,hint:SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"03000000830500006020000000000000,iBuffalo SNES Controller,a:b1,b:b0,back:b6,dpdown:+a1,dpleft:-a0,dpright:+a0,dpup:-a1,leftshoulder:b4,rightshoulder:b5,start:b7,x:b3,y:b2,hint:!SDL_GAMECONTROLLER_USE_BUTTON_LABELS:=1,", -"030000004f04000003d0000000000000,run'n'drive,a:b1,b:b2,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,guide:b7,leftshoulder:a3,leftstick:b10,lefttrigger:b4,leftx:a0,lefty:a1,rightshoulder:a4,rightstick:b11,righttrigger:b5,rightx:a2,righty:a5,start:b9,x:b0,y:b3,", -"03000000101c0000171c000000000000,uRage Gamepad,a:b2,b:b1,back:b8,dpdown:h0.4,dpleft:h0.8,dpright:h0.2,dpup:h0.1,leftshoulder:b4,leftstick:b10,lefttrigger:b6,leftx:a0,lefty:a1,rightshoulder:b5,rightstick:b11,righttrigger:b7,rightx:a2,righty:a3,start:b9,x:b3,y:b0,", -] -else: - mapping_list = [] diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/tests/unit/test_egl.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/tests/unit/test_egl.py deleted file mode 100644 index e2f4bef39e33c2794e6837b5a1bb127d8d4dba06..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/tests/unit/test_egl.py +++ /dev/null @@ -1,16 +0,0 @@ -# from pyrender.platforms import egl - - -def tmp_test_default_device(): - egl.get_default_device() - - -def tmp_test_query_device(): - devices = egl.query_devices() - assert len(devices) > 0 - - -def tmp_test_init_context(): - device = egl.query_devices()[0] - platform = egl.EGLPlatform(128, 128, device=device) - platform.init_context() diff --git a/spaces/akhaliq/JoJoGAN/e4e/datasets/gt_res_dataset.py b/spaces/akhaliq/JoJoGAN/e4e/datasets/gt_res_dataset.py deleted file mode 100644 index c0beacfee5335aa10aa7e8b7cabe206d7f9a56f7..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/JoJoGAN/e4e/datasets/gt_res_dataset.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python -# encoding: utf-8 -import os -from torch.utils.data import Dataset -from PIL import Image -import torch - -class GTResDataset(Dataset): - - def __init__(self, root_path, gt_dir=None, transform=None, transform_train=None): - self.pairs = [] - for f in os.listdir(root_path): - image_path = os.path.join(root_path, f) - gt_path = os.path.join(gt_dir, f) - if f.endswith(".jpg") or f.endswith(".png"): - self.pairs.append([image_path, gt_path.replace('.png', '.jpg'), None]) - self.transform = transform - self.transform_train = transform_train - - def __len__(self): - return len(self.pairs) - - def __getitem__(self, index): - from_path, to_path, _ = self.pairs[index] - 
from_im = Image.open(from_path).convert('RGB') - to_im = Image.open(to_path).convert('RGB') - - if self.transform: - to_im = self.transform(to_im) - from_im = self.transform(from_im) - - return from_im, to_im diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/template_multi_spk/voc1/path.sh b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/template_multi_spk/voc1/path.sh deleted file mode 100644 index b0ca27c615f70aa29e240222ec370f8ad4e7b45a..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/egs/template_multi_spk/voc1/path.sh +++ /dev/null @@ -1,33 +0,0 @@ -# cuda related -export CUDA_HOME=/usr/local/cuda-10.0 -export LD_LIBRARY_PATH="${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}" - -# path related -export PRJ_ROOT="${PWD}/../../.." -if [ -e "${PRJ_ROOT}/tools/venv/bin/activate" ]; then - # shellcheck disable=SC1090 - . "${PRJ_ROOT}/tools/venv/bin/activate" -fi - -# python related -export OMP_NUM_THREADS=1 -export PYTHONIOENCODING=UTF-8 -export MPL_BACKEND=Agg - -# check installation -if ! command -v parallel-wavegan-train > /dev/null; then - echo "Error: It seems setup is not finished." >&2 - echo "Error: Please setup your environment by following README.md" >&2 - return 1 -fi -if ! command -v jq > /dev/null; then - echo "Error: It seems jq is not installed." >&2 - echo "Error: Please install via \`sudo apt-get install jq\`." >&2 - echo "Error: If you do not have sudo, please download from https://stedolan.github.io/jq/download/." >&2 - return 1 -fi -if ! command -v yq > /dev/null; then - echo "Error: It seems yq is not installed." >&2 - echo "Error: Please install via \`pip install yq\`." >&2 - return 1 -fi diff --git a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/utils/run.pl b/spaces/akhaliq/VQMIVC/ParallelWaveGAN/utils/run.pl deleted file mode 100644 index f23bb8dc0b0ea53af01b52cac86bc4a451f52018..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/VQMIVC/ParallelWaveGAN/utils/run.pl +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env perl -use warnings; #sed replacement for -w perl parameter - -# In general, doing -# run.pl some.log a b c is like running the command a b c in -# the bash shell, and putting the standard error and output into some.log. -# To run parallel jobs (backgrounded on the host machine), you can do (e.g.) -# run.pl JOB=1:4 some.JOB.log a b c JOB is like running the command a b c JOB -# and putting it in some.JOB.log, for each one. [Note: JOB can be any identifier]. -# If any of the jobs fails, this script will fail. - -# A typical example is: -# run.pl some.log my-prog "--opt=foo bar" foo \| other-prog baz -# and run.pl will run something like: -# ( my-prog '--opt=foo bar' foo | other-prog baz ) >& some.log -# -# Basically it takes the command-line arguments, quotes them -# as necessary to preserve spaces, and evaluates them with bash. -# In addition it puts the command line at the top of the log, and -# the start and end times of the command at the beginning and end. -# The reason why this is useful is so that we can create a different -# version of this program that uses a queueing system instead. - -# use Data::Dumper; - -@ARGV < 2 && die "usage: run.pl log-file command-line arguments..."; - - -$max_jobs_run = -1; -$jobstart = 1; -$jobend = 1; -$ignored_opts = ""; # These will be ignored. - -# First parse an option like JOB=1:4, and any -# options that would normally be given to -# queue.pl, which we will just discard. 
- -for (my $x = 1; $x <= 2; $x++) { # This for-loop is to - # allow the JOB=1:n option to be interleaved with the - # options to qsub. - while (@ARGV >= 2 && $ARGV[0] =~ m:^-:) { - # parse any options that would normally go to qsub, but which will be ignored here. - my $switch = shift @ARGV; - if ($switch eq "-V") { - $ignored_opts .= "-V "; - } elsif ($switch eq "--max-jobs-run" || $switch eq "-tc") { - # we do support the option --max-jobs-run n, and its GridEngine form -tc n. - $max_jobs_run = shift @ARGV; - if (! ($max_jobs_run > 0)) { - die "run.pl: invalid option --max-jobs-run $max_jobs_run"; - } - } else { - my $argument = shift @ARGV; - if ($argument =~ m/^--/) { - print STDERR "run.pl: WARNING: suspicious argument '$argument' to $switch; starts with '-'\n"; - } - if ($switch eq "-sync" && $argument =~ m/^[yY]/) { - $ignored_opts .= "-sync "; # Note: in the - # corresponding code in queue.pl it says instead, just "$sync = 1;". - } elsif ($switch eq "-pe") { # e.g. -pe smp 5 - my $argument2 = shift @ARGV; - $ignored_opts .= "$switch $argument $argument2 "; - } elsif ($switch eq "--gpu") { - $using_gpu = $argument; - } else { - # Ignore option. - $ignored_opts .= "$switch $argument "; - } - } - } - if ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+):(\d+)$/) { # e.g. JOB=1:20 - $jobname = $1; - $jobstart = $2; - $jobend = $3; - shift; - if ($jobstart > $jobend) { - die "run.pl: invalid job range $ARGV[0]"; - } - if ($jobstart <= 0) { - die "run.pl: invalid job range $ARGV[0], start must be strictly positive (this is required for GridEngine compatibility)."; - } - } elsif ($ARGV[0] =~ m/^([\w_][\w\d_]*)+=(\d+)$/) { # e.g. JOB=1. - $jobname = $1; - $jobstart = $2; - $jobend = $2; - shift; - } elsif ($ARGV[0] =~ m/.+\=.*\:.*$/) { - print STDERR "run.pl: Warning: suspicious first argument to run.pl: $ARGV[0]\n"; - } -} - -# Users found this message confusing so we are removing it. -# if ($ignored_opts ne "") { -# print STDERR "run.pl: Warning: ignoring options \"$ignored_opts\"\n"; -# } - -if ($max_jobs_run == -1) { # If --max-jobs-run option not set, - # then work out the number of processors if possible, - # and set it based on that. - $max_jobs_run = 0; - if ($using_gpu) { - if (open(P, "nvidia-smi -L |")) { - $max_jobs_run++ while (

    ); - close(P); - } - if ($max_jobs_run == 0) { - $max_jobs_run = 1; - print STDERR "run.pl: Warning: failed to detect number of GPUs from nvidia-smi, using ${max_jobs_run}\n"; - } - } elsif (open(P, ") { if (m/^processor/) { $max_jobs_run++; } } - if ($max_jobs_run == 0) { - print STDERR "run.pl: Warning: failed to detect any processors from /proc/cpuinfo\n"; - $max_jobs_run = 10; # reasonable default. - } - close(P); - } elsif (open(P, "sysctl -a |")) { # BSD/Darwin - while (

    ) { - if (m/hw\.ncpu\s*[:=]\s*(\d+)/) { # hw.ncpu = 4, or hw.ncpu: 4 - $max_jobs_run = $1; - last; - } - } - close(P); - if ($max_jobs_run == 0) { - print STDERR "run.pl: Warning: failed to detect any processors from sysctl -a\n"; - $max_jobs_run = 10; # reasonable default. - } - } else { - # allow at most 32 jobs at once, on non-UNIX systems; change this code - # if you need to change this default. - $max_jobs_run = 32; - } - # The just-computed value of $max_jobs_run is just the number of processors - # (or our best guess); and if it happens that the number of jobs we need to - # run is just slightly above $max_jobs_run, it will make sense to increase - # $max_jobs_run to equal the number of jobs, so we don't have a small number - # of leftover jobs. - $num_jobs = $jobend - $jobstart + 1; - if (!$using_gpu && - $num_jobs > $max_jobs_run && $num_jobs < 1.4 * $max_jobs_run) { - $max_jobs_run = $num_jobs; - } -} - -$logfile = shift @ARGV; - -if (defined $jobname && $logfile !~ m/$jobname/ && - $jobend > $jobstart) { - print STDERR "run.pl: you are trying to run a parallel job but " - . "you are putting the output into just one log file ($logfile)\n"; - exit(1); -} - -$cmd = ""; - -foreach $x (@ARGV) { - if ($x =~ m/^\S+$/) { $cmd .= $x . " "; } - elsif ($x =~ m:\":) { $cmd .= "'$x' "; } - else { $cmd .= "\"$x\" "; } -} - -#$Data::Dumper::Indent=0; -$ret = 0; -$numfail = 0; -%active_pids=(); - -use POSIX ":sys_wait_h"; -for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) { - if (scalar(keys %active_pids) >= $max_jobs_run) { - - # Lets wait for a change in any child's status - # Then we have to work out which child finished - $r = waitpid(-1, 0); - $code = $?; - if ($r < 0 ) { die "run.pl: Error waiting for child process"; } # should never happen. - if ( defined $active_pids{$r} ) { - $jid=$active_pids{$r}; - $fail[$jid]=$code; - if ($code !=0) { $numfail++;} - delete $active_pids{$r}; - # print STDERR "Finished: $r/$jid " . Dumper(\%active_pids) . "\n"; - } else { - die "run.pl: Cannot find the PID of the chold process that just finished."; - } - - # In theory we could do a non-blocking waitpid over all jobs running just - # to find out if only one or more jobs finished during the previous waitpid() - # However, we just omit this and will reap the next one in the next pass - # through the for(;;) cycle - } - $childpid = fork(); - if (!defined $childpid) { die "run.pl: Error forking in run.pl (writing to $logfile)"; } - if ($childpid == 0) { # We're in the child... this branch - # executes the job and returns (possibly with an error status). - if (defined $jobname) { - $cmd =~ s/$jobname/$jobid/g; - $logfile =~ s/$jobname/$jobid/g; - } - system("mkdir -p `dirname $logfile` 2>/dev/null"); - open(F, ">$logfile") || die "run.pl: Error opening log file $logfile"; - print F "# " . $cmd . "\n"; - print F "# Started at " . `date`; - $starttime = `date +'%s'`; - print F "#\n"; - close(F); - - # Pipe into bash.. make sure we're not using any other shell. - open(B, "|bash") || die "run.pl: Error opening shell command"; - print B "( " . $cmd . ") 2>>$logfile >> $logfile"; - close(B); # If there was an error, exit status is in $? - $ret = $?; - - $lowbits = $ret & 127; - $highbits = $ret >> 8; - if ($lowbits != 0) { $return_str = "code $highbits; signal $lowbits" } - else { $return_str = "code $highbits"; } - - $endtime = `date +'%s'`; - open(F, ">>$logfile") || die "run.pl: Error opening log file $logfile (again)"; - $enddate = `date`; - chop $enddate; - print F "# Accounting: time=" . 
($endtime - $starttime) . " threads=1\n"; - print F "# Ended ($return_str) at " . $enddate . ", elapsed time " . ($endtime-$starttime) . " seconds\n"; - close(F); - exit($ret == 0 ? 0 : 1); - } else { - $pid[$jobid] = $childpid; - $active_pids{$childpid} = $jobid; - # print STDERR "Queued: " . Dumper(\%active_pids) . "\n"; - } -} - -# Now we have submitted all the jobs, lets wait until all the jobs finish -foreach $child (keys %active_pids) { - $jobid=$active_pids{$child}; - $r = waitpid($pid[$jobid], 0); - $code = $?; - if ($r == -1) { die "run.pl: Error waiting for child process"; } # should never happen. - if ($r != 0) { $fail[$jobid]=$code; $numfail++ if $code!=0; } # Completed successfully -} - -# Some sanity checks: -# The $fail array should not contain undefined codes -# The number of non-zeros in that array should be equal to $numfail -# We cannot do foreach() here, as the JOB ids do not necessarily start by zero -$failed_jids=0; -for ($jobid = $jobstart; $jobid <= $jobend; $jobid++) { - $job_return = $fail[$jobid]; - if (not defined $job_return ) { - # print Dumper(\@fail); - - die "run.pl: Sanity check failed: we have indication that some jobs are running " . - "even after we waited for all jobs to finish" ; - } - if ($job_return != 0 ){ $failed_jids++;} -} -if ($failed_jids != $numfail) { - die "run.pl: Sanity check failed: cannot find out how many jobs failed ($failed_jids x $numfail)." -} -if ($numfail > 0) { $ret = 1; } - -if ($ret != 0) { - $njobs = $jobend - $jobstart + 1; - if ($njobs == 1) { - if (defined $jobname) { - $logfile =~ s/$jobname/$jobstart/; # only one numbered job, so replace name with - # that job. - } - print STDERR "run.pl: job failed, log is in $logfile\n"; - if ($logfile =~ m/JOB/) { - print STDERR "run.pl: probably you forgot to put JOB=1:\$nj in your script."; - } - } - else { - $logfile =~ s/$jobname/*/g; - print STDERR "run.pl: $numfail / $njobs failed, log is in $logfile\n"; - } -} - - -exit ($ret); diff --git a/spaces/akhaliq/anything-v3.0/app.py b/spaces/akhaliq/anything-v3.0/app.py deleted file mode 100644 index 684b93502cfe79d7ed90c2034f21460eb135d035..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/anything-v3.0/app.py +++ /dev/null @@ -1,276 +0,0 @@ -from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler -import gradio as gr -import torch -from PIL import Image -import utils -import datetime -import time -import psutil - -start_time = time.time() -is_colab = utils.is_google_colab() - -class Model: - def __init__(self, name, path="", prefix=""): - self.name = name - self.path = path - self.prefix = prefix - self.pipe_t2i = None - self.pipe_i2i = None - -models = [ - Model("anything v3", "Linaqruf/anything-v3.0", "anything v3 style"), - ] - # Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "), - # Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "), - # Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "), - # Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ") - #Model("Pokรฉmon", "lambdalabs/sd-pokemon-diffusers", ""), - #Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""), - #Model("Robo Diffusion", "nousr/robo-diffusion", ""), - -scheduler = DPMSolverMultistepScheduler( - beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - num_train_timesteps=1000, - trained_betas=None, - predict_epsilon=True, - 
thresholding=False, - algorithm_type="dpmsolver++", - solver_type="midpoint", - lower_order_final=True, -) - -custom_model = None -if is_colab: - models.insert(0, Model("Custom model")) - custom_model = models[0] - -last_mode = "txt2img" -current_model = models[1] if is_colab else models[0] -current_model_path = current_model.path - -if is_colab: - pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False)) - -else: # download all models - print(f"{datetime.datetime.now()} Downloading vae...") - vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16) - for model in models: - try: - print(f"{datetime.datetime.now()} Downloading {model.name} model...") - unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16) - model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler) - model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler) - except Exception as e: - print(f"{datetime.datetime.now()} Failed to load model " + model.name + ": " + str(e)) - models.remove(model) - pipe = models[0].pipe_t2i - -if torch.cuda.is_available(): - pipe = pipe.to("cuda") - -device = "GPU ๐Ÿ”ฅ" if torch.cuda.is_available() else "CPU ๐Ÿฅถ" - -def error_str(error, title="Error"): - return f"""#### {title} - {error}""" if error else "" - -def custom_model_changed(path): - models[0].path = path - global current_model - current_model = models[0] - -def on_model_change(model_name): - - prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!" 
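-    # For the bundled models the matching prefix is prepended automatically in
-    # txt_to_img/img_to_img, so the placeholder only tells the user which prefix
-    # applies; for the "Custom model" entry the prefix is unknown, so the user
-    # is reminded to type it into the prompt themselves.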
- - return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix) - -def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""): - - print(psutil.virtual_memory()) # print memory usage - - global current_model - for model in models: - if model.name == model_name: - current_model = model - model_path = current_model.path - - generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None - - try: - if img is not None: - return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None - else: - return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator), None - except Exception as e: - return None, error_str(e) - -def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator): - - print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "txt2img": - current_model_path = model_path - - if is_colab or current_model == custom_model: - pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False)) - else: - pipe = pipe.to("cpu") - pipe = current_model.pipe_t2i - - if torch.cuda.is_available(): - pipe = pipe.to("cuda") - last_mode = "txt2img" - - prompt = current_model.prefix + prompt - result = pipe( - prompt, - negative_prompt = neg_prompt, - # num_images_per_prompt=n_images, - num_inference_steps = int(steps), - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return replace_nsfw_images(result) - -def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator): - - print(f"{datetime.datetime.now()} img_to_img, model: {model_path}") - - global last_mode - global pipe - global current_model_path - if model_path != current_model_path or last_mode != "img2img": - current_model_path = model_path - - if is_colab or current_model == custom_model: - pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False)) - else: - pipe = pipe.to("cpu") - pipe = current_model.pipe_i2i - - if torch.cuda.is_available(): - pipe = pipe.to("cuda") - last_mode = "img2img" - - prompt = current_model.prefix + prompt - ratio = min(height / img.height, width / img.width) - img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS) - result = pipe( - prompt, - negative_prompt = neg_prompt, - # num_images_per_prompt=n_images, - init_image = img, - num_inference_steps = int(steps), - strength = strength, - guidance_scale = guidance, - width = width, - height = height, - generator = generator) - - return replace_nsfw_images(result) - -def replace_nsfw_images(results): - - if is_colab: - return results.images[0] - - for i in range(len(results.images)): - if results.nsfw_content_detected[i]: - results.images[i] = Image.open("nsfw.png") - return results.images[0] - -css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div 
p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
-    gr.HTML(
-        f"""
-            <div class="finetuned-diffusion-div"><div><h1>Anything V3</h1></div><p>Running on {device}</p></div>
    - """ - ) - with gr.Row(): - - with gr.Column(scale=55): - with gr.Group(): - model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name) - with gr.Box(visible=False) as custom_model_group: - custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True) - gr.HTML("
Custom models have to be downloaded first, so give it some time.
    ") - - with gr.Row(): - prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False) - generate = gr.Button(value="Generate").style(rounded=(False, True, True, False)) - - - image_out = gr.Image(height=512) - # gallery = gr.Gallery( - # label="Generated images", show_label=False, elem_id="gallery" - # ).style(grid=[1], height="auto") - error_output = gr.Markdown() - - with gr.Column(scale=45): - with gr.Tab("Options"): - with gr.Group(): - neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image") - - # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1) - - with gr.Row(): - guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15) - steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1) - - with gr.Row(): - width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8) - height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8) - - seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1) - - with gr.Tab("Image to image"): - with gr.Group(): - image = gr.Image(label="Image", height=256, tool="editor", type="pil") - strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5) - - if is_colab: - model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False) - custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None) - # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery) - - inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt] - outputs = [image_out, error_output] - prompt.submit(inference, inputs=inputs, outputs=outputs) - generate.click(inference, inputs=inputs, outputs=outputs) - - ex = gr.Examples([ - [models[0].name, "iron man", 7.5, 50], - - ], inputs=[model_name, prompt, guidance, steps, seed], outputs=outputs, fn=inference, cache_examples=False) - - gr.HTML(""" -
-    Model by Linaqruf
    - """) - -print(f"Space built in {time.time() - start_time:.2f} seconds") - -if not is_colab: - demo.queue(concurrency_count=1) -demo.launch(debug=is_colab, share=is_colab) \ No newline at end of file diff --git a/spaces/akhaliq/lama/bin/split_tar.py b/spaces/akhaliq/lama/bin/split_tar.py deleted file mode 100644 index ac1692addbb4191200c8c871fe356bb80d534c44..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/lama/bin/split_tar.py +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env python3 - - -import tqdm -import webdataset as wds - - -def main(args): - input_dataset = wds.Dataset(args.infile) - output_dataset = wds.ShardWriter(args.outpattern) - for rec in tqdm.tqdm(input_dataset): - output_dataset.write(rec) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('infile', type=str) - aparser.add_argument('outpattern', type=str) - - main(aparser.parse_args()) diff --git a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/fma.py b/spaces/akhaliq/stylegan3_clip/torch_utils/ops/fma.py deleted file mode 100644 index 8e9b3e9f6e3dd8afae6ccd7ea2b66b5e13ec0dcf..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/fma.py +++ /dev/null @@ -1,60 +0,0 @@ -๏ปฟ# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`.""" - -import torch - -#---------------------------------------------------------------------------- - -def fma(a, b, c): # => a * b + c - return _FusedMultiplyAdd.apply(a, b, c) - -#---------------------------------------------------------------------------- - -class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c - @staticmethod - def forward(ctx, a, b, c): # pylint: disable=arguments-differ - out = torch.addcmul(c, a, b) - ctx.save_for_backward(a, b) - ctx.c_shape = c.shape - return out - - @staticmethod - def backward(ctx, dout): # pylint: disable=arguments-differ - a, b = ctx.saved_tensors - c_shape = ctx.c_shape - da = None - db = None - dc = None - - if ctx.needs_input_grad[0]: - da = _unbroadcast(dout * b, a.shape) - - if ctx.needs_input_grad[1]: - db = _unbroadcast(dout * a, b.shape) - - if ctx.needs_input_grad[2]: - dc = _unbroadcast(dout, c_shape) - - return da, db, dc - -#---------------------------------------------------------------------------- - -def _unbroadcast(x, shape): - extra_dims = x.ndim - len(shape) - assert extra_dims >= 0 - dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)] - if len(dim): - x = x.sum(dim=dim, keepdim=True) - if extra_dims: - x = x.reshape(-1, *x.shape[extra_dims+1:]) - assert x.shape == shape - return x - -#---------------------------------------------------------------------------- diff --git a/spaces/alankabisov/youtube-video-summary/README.md b/spaces/alankabisov/youtube-video-summary/README.md deleted file mode 100644 index d595991cc471805053502ae9b6116f85cc039166..0000000000000000000000000000000000000000 --- a/spaces/alankabisov/youtube-video-summary/README.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: Youtube Video 
Summary -emoji: ๐Ÿ“ -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -models: [t5-small] -pinned: false ---- - -# YouTube Video Summary ๐Ÿ“ -Extracts transcripts for given video URL and creates summary. Uses [T5-small](https://huggingface.co/t5-small) model -under the hood since it have shown the best results on general purpose tasks. - -**Online demo:** [๐Ÿค— Spaces](https://huggingface.co/spaces/alankabisov/youtube-video-summary) - -### Requirements -``` -torch -transformers -youtube_transcript_api -tqdm -stqdm -streamlit==1.10.0 -``` - diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/_distutils_hack/override.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/_distutils_hack/override.py deleted file mode 100644 index 2cc433a4a55e3b41fa31089918fb62096092f89f..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/_distutils_hack/override.py +++ /dev/null @@ -1 +0,0 @@ -__import__('_distutils_hack').do_override() diff --git a/spaces/aliabid94/AutoGPT/autogpt/__init__.py b/spaces/aliabid94/AutoGPT/autogpt/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/allknowingroger/Image-Models-Test144/app.py b/spaces/allknowingroger/Image-Models-Test144/app.py deleted file mode 100644 index 9048512ea50ec590d4634e9a2db607034956b4c9..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test144/app.py +++ /dev/null @@ -1,144 +0,0 @@ -import gradio as gr -# import os -# import sys -# from pathlib import Path -import time - -models =[ - "Yntec/aMovieX", - "Leekp/toonmaker3", - "sakiotsu/zarema", - "shantanudave/shantanuimagessept10", - "tensor-diffusion/chilloutmix-NI", - "miittnnss/miittnnss-lora", - "rmarion/phtmejhn-xl", - "shantanudave/autotrain-adv-15sept", - "Yntec/photoMovieXFinal", -] - - -model_functions = {} -model_idx = 1 -for model_path in models: - try: - model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False) - except Exception as error: - def the_fn(txt): - return None - model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"]) - model_idx+=1 - - -def send_it_idx(idx): - def send_it_fn(prompt): - output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt) - return output - return send_it_fn - -def get_prompts(prompt_text): - return prompt_text - -def clear_it(val): - if int(val) != 0: - val = 0 - else: - val = 0 - pass - return val - -def all_task_end(cnt,t_stamp): - to = t_stamp + 60 - et = time.time() - if et > to and t_stamp != 0: - d = gr.update(value=0) - tog = gr.update(value=1) - #print(f'to: {to} et: {et}') - else: - if cnt != 0: - d = gr.update(value=et) - else: - d = gr.update(value=0) - tog = gr.update(value=0) - #print (f'passing: to: {to} et: {et}') - pass - return d, tog - -def all_task_start(): - print("\n\n\n\n\n\n\n") - t = time.gmtime() - t_stamp = time.time() - current_time = time.strftime("%H:%M:%S", t) - return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0) - -def clear_fn(): - nn = len(models) - return tuple([None, *[None for _ in range(nn)]]) - - - -with gr.Blocks(title="SD Models") as my_interface: - with gr.Column(scale=12): - # with gr.Row(): - # gr.Markdown("""- Primary prompt: ไฝ ๆƒณ็”ป็š„ๅ†…ๅฎน(่‹ฑๆ–‡ๅ•่ฏ๏ผŒๅฆ‚ a cat, ๅŠ ่‹ฑๆ–‡้€—ๅทๆ•ˆๆžœๆ›ดๅฅฝ๏ผ›็‚น Improve 
ๆŒ‰้’ฎ่ฟ›่กŒๅฎŒๅ–„)\n- Real prompt: ๅฎŒๅ–„ๅŽ็š„ๆ็คบ่ฏ๏ผŒๅ‡บ็ŽฐๅŽๅ†็‚นๅณ่พน็š„ Run ๆŒ‰้’ฎๅผ€ๅง‹่ฟ่กŒ""") - with gr.Row(): - with gr.Row(scale=6): - primary_prompt=gr.Textbox(label="Prompt", value="") - # real_prompt=gr.Textbox(label="Real prompt") - with gr.Row(scale=6): - # improve_prompts_btn=gr.Button("Improve") - with gr.Row(): - run=gr.Button("Run",variant="primary") - clear_btn=gr.Button("Clear") - with gr.Row(): - sd_outputs = {} - model_idx = 1 - for model_path in models: - with gr.Column(scale=3, min_width=320): - with gr.Box(): - sd_outputs[model_idx] = gr.Image(label=model_path) - pass - model_idx += 1 - pass - pass - - with gr.Row(visible=False): - start_box=gr.Number(interactive=False) - end_box=gr.Number(interactive=False) - tog_box=gr.Textbox(value=0,interactive=False) - - start_box.change( - all_task_end, - [start_box, end_box], - [start_box, tog_box], - every=1, - show_progress=False) - - primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box]) - run.click(all_task_start, None, [start_box, end_box, tog_box]) - runs_dict = {} - model_idx = 1 - for model_path in models: - runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]]) - model_idx += 1 - pass - pass - - # improve_prompts_btn_clicked=improve_prompts_btn.click( - # get_prompts, - # inputs=[primary_prompt], - # outputs=[primary_prompt], - # cancels=list(runs_dict.values())) - clear_btn.click( - clear_fn, - None, - [primary_prompt, *list(sd_outputs.values())], - cancels=[*list(runs_dict.values())]) - tog_box.change( - clear_it, - tog_box, - tog_box, - cancels=[*list(runs_dict.values())]) - -my_interface.queue(concurrency_count=600, status_update_rate=1) -my_interface.launch(inline=True, show_api=False) - \ No newline at end of file diff --git a/spaces/almakedon/faster-whisper-webui/README.md b/spaces/almakedon/faster-whisper-webui/README.md deleted file mode 100644 index b3c10b031523b15b2b5efc4df4b1c40121ca0bea..0000000000000000000000000000000000000000 --- a/spaces/almakedon/faster-whisper-webui/README.md +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: Faster Whisper Webui -emoji: ๐Ÿš€ -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: aadnk/faster-whisper-webui ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference - -# Running Locally - -To run this program locally, first install Python 3.9+ and Git. Then install Pytorch 10.1+ and all the other dependencies: -``` -pip install -r requirements.txt -``` - -You can find detailed instructions for how to install this on Windows 10/11 [here (PDF)](docs/windows/install_win10_win11.pdf). 
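-
-As a minimal sketch (not part of the original instructions), you may want to do the
-`pip install` inside a virtual environment first:
-```
-python -m venv venv
-source venv/bin/activate  # on Windows: venv\Scripts\activate
-pip install -r requirements.txt
-```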
- -Finally, run the full version (no audio length restrictions) of the app with parallel CPU/GPU enabled: -``` -python app.py --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` - -You can also run the CLI interface, which is similar to Whisper's own CLI but also supports the following additional arguments: -``` -python cli.py \ -[--vad {none,silero-vad,silero-vad-skip-gaps,silero-vad-expand-into-gaps,periodic-vad}] \ -[--vad_merge_window VAD_MERGE_WINDOW] \ -[--vad_max_merge_size VAD_MAX_MERGE_SIZE] \ -[--vad_padding VAD_PADDING] \ -[--vad_prompt_window VAD_PROMPT_WINDOW] -[--vad_cpu_cores NUMBER_OF_CORES] -[--vad_parallel_devices COMMA_DELIMITED_DEVICES] -[--auto_parallel BOOLEAN] -``` -In addition, you may also use URL's in addition to file paths as input. -``` -python cli.py --model large --vad silero-vad --language Japanese "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Rather than supplying arguments to `app.py` or `cli.py`, you can also use the configuration file [config.json5](config.json5). See that file for more information. -If you want to use a different configuration file, you can use the `WHISPER_WEBUI_CONFIG` environment variable to specify the path to another file. - -### Multiple Files - -You can upload multiple files either through the "Upload files" option, or as a playlist on YouTube. -Each audio file will then be processed in turn, and the resulting SRT/VTT/Transcript will be made available in the "Download" section. -When more than one file is processed, the UI will also generate a "All_Output" zip file containing all the text output files. - -## Whisper Implementation - -You can choose between using `whisper` or `faster-whisper`. [Faster Whisper](https://github.com/guillaumekln/faster-whisper) as a drop-in replacement for the -default Whisper which achieves up to a 4x speedup and 2x reduction in memory usage. - -You can install the requirements for a specific Whisper implementation in `requirements-fasterWhisper.txt` -or `requirements-whisper.txt`: -``` -pip install -r requirements-fasterWhisper.txt -``` -And then run the App or the CLI with the `--whisper_implementation faster-whisper` flag: -``` -python app.py --whisper_implementation faster-whisper --input_audio_max_duration -1 --server_name 127.0.0.1 --auto_parallel True -``` -You can also select the whisper implementation in `config.json5`: -```json5 -{ - "whisper_implementation": "faster-whisper" -} -``` -### GPU Acceleration - -In order to use GPU acceleration with Faster Whisper, both CUDA 11.2 and cuDNN 8 must be installed. You may want to install it in a virtual environment like Anaconda. - -## Google Colab - -You can also run this Web UI directly on [Google Colab](https://colab.research.google.com/drive/1qeTSvi7Bt_5RMm88ipW4fkcsMOKlDDss?usp=sharing), if you haven't got a GPU powerful enough to run the larger models. - -See the [colab documentation](docs/colab.md) for more information. - -## Parallel Execution - -You can also run both the Web-UI or the CLI on multiple GPUs in parallel, using the `vad_parallel_devices` option. This takes a comma-delimited list of -device IDs (0, 1, etc.) that Whisper should be distributed to and run on concurrently: -``` -python cli.py --model large --vad silero-vad --language Japanese \ ---vad_parallel_devices 0,1 "https://www.youtube.com/watch?v=4cICErqqRSM" -``` - -Note that this requires a VAD to function properly, otherwise only the first GPU will be used. 
Though you could use `period-vad` to avoid taking the hit -of running Silero-Vad, at a slight cost to accuracy. - -This is achieved by creating N child processes (where N is the number of selected devices), where Whisper is run concurrently. In `app.py`, you can also -set the `vad_process_timeout` option. This configures the number of seconds until a process is killed due to inactivity, freeing RAM and video memory. -The default value is 30 minutes. - -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 -``` - -To execute the Silero VAD itself in parallel, use the `vad_cpu_cores` option: -``` -python app.py --input_audio_max_duration -1 --vad_parallel_devices 0,1 --vad_process_timeout 3600 --vad_cpu_cores 4 -``` - -You may also use `vad_process_timeout` with a single device (`--vad_parallel_devices 0`), if you prefer to always free video memory after a period of time. - -### Auto Parallel - -You can also set `auto_parallel` to `True`. This will set `vad_parallel_devices` to use all the GPU devices on the system, and `vad_cpu_cores` to be equal to the number of -cores (up to 8): -``` -python app.py --input_audio_max_duration -1 --auto_parallel True -``` - -# Docker - -To run it in Docker, first install Docker and optionally the NVIDIA Container Toolkit in order to use the GPU. -Then either use the GitLab hosted container below, or check out this repository and build an image: -``` -sudo docker build -t whisper-webui:1 . -``` - -You can then start the WebUI with GPU support like so: -``` -sudo docker run -d --gpus=all -p 7860:7860 whisper-webui:1 -``` - -Leave out "--gpus=all" if you don't have access to a GPU with enough memory, and are fine with running it on the CPU only: -``` -sudo docker run -d -p 7860:7860 whisper-webui:1 -``` - -# GitLab Docker Registry - -This Docker container is also hosted on GitLab: - -``` -sudo docker run -d --gpus=all -p 7860:7860 registry.gitlab.com/aadnk/whisper-webui:latest -``` - -## Custom Arguments - -You can also pass custom arguments to `app.py` in the Docker container, for instance to be able to use all the GPUs in parallel (replace administrator with your user): -``` -sudo docker run -d --gpus all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---restart=on-failure:15 registry.gitlab.com/aadnk/whisper-webui:latest \ -app.py --input_audio_max_duration -1 --server_name 0.0.0.0 --auto_parallel True \ ---default_vad silero-vad --default_model_name large -``` - -You can also call `cli.py` the same way: -``` -sudo docker run --gpus all \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ ---mount type=bind,source=/home/administrator/.cache/huggingface,target=/root/.cache/huggingface \ ---mount type=bind,source=${PWD},target=/app/data \ -registry.gitlab.com/aadnk/whisper-webui:latest \ -cli.py --model large --auto_parallel True --vad silero-vad \ ---output_dir /app/data /app/data/YOUR-FILE-HERE.mp4 -``` - -## Caching - -Note that the models themselves are currently not included in the Docker images, and will be downloaded on the demand. -To avoid this, bind the directory /root/.cache/whisper to some directory on the host (for instance /home/administrator/.cache/whisper), where you can (optionally) -prepopulate the directory with the different Whisper models. 
-``` -sudo docker run -d --gpus=all -p 7860:7860 \ ---mount type=bind,source=/home/administrator/.cache/whisper,target=/root/.cache/whisper \ -registry.gitlab.com/aadnk/whisper-webui:latest -``` \ No newline at end of file diff --git a/spaces/alvanlii/FROMAGe/README.md b/spaces/alvanlii/FROMAGe/README.md deleted file mode 100644 index 725b868b7f1968fb908e169cd09705fcf4315ff8..0000000000000000000000000000000000000000 --- a/spaces/alvanlii/FROMAGe/README.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: FROMAGe -emoji: ๐Ÿง€ -colorFrom: pink -colorTo: red -sdk: docker -tags: - - making-demos ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_write_stop.c b/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_write_stop.c deleted file mode 100644 index 855923f465e2b1128765ced62e9f27d60e21690e..0000000000000000000000000000000000000000 --- a/spaces/amarchheda/ChordDuplicate/portaudio/test/patest_write_stop.c +++ /dev/null @@ -1,165 +0,0 @@ -/** @file patest_write_stop.c - @brief Play a few seconds of silence followed by a few cycles of a sine wave. Tests to make sure that pa_StopStream() completes playback in blocking I/O - @author Bjorn Roche of XO Audio (www.xoaudio.com) - @author Ross Bencina - @author Phil Burk -*/ -/* - * $Id$ - * - * This program uses the PortAudio Portable Audio Library. - * For more information see: http://www.portaudio.com/ - * Copyright (c) 1999-2000 Ross Bencina and Phil Burk - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. 
- */
-
-#include <stdio.h>
-#include <math.h>
-#include "portaudio.h"
-
-#define NUM_SECONDS (5)
-#define SAMPLE_RATE (44100)
-#define FRAMES_PER_BUFFER (1024)
-
-#ifndef M_PI
-#define M_PI (3.14159265)
-#endif
-
-#define TABLE_SIZE (200)
-
-
-int main(void);
-int main(void)
-{
-    PaStreamParameters outputParameters;
-    PaStream *stream;
-    PaError err;
-    float buffer[FRAMES_PER_BUFFER][2]; /* stereo output buffer */
-    float sine[TABLE_SIZE]; /* sine wavetable */
-    int left_phase = 0;
-    int right_phase = 0;
-    int left_inc = 1;
-    int right_inc = 3; /* higher pitch so we can distinguish left and right. */
-    int i, j;
-    int bufferCount;
-    const int framesBy2 = FRAMES_PER_BUFFER >> 1;
-    const float framesBy2f = (float) framesBy2 ;
-
-
-    printf( "PortAudio Test: output silence, followed by one buffer of a ramped sine wave. SR = %d, BufSize = %d\n",
-            SAMPLE_RATE, FRAMES_PER_BUFFER);
-
-    /* initialise sinusoidal wavetable */
-    for( i=0; i<TABLE_SIZE; i++ )
-    {
-        sine[i] = (float) sin( ((double)i/(double)TABLE_SIZE) * M_PI * 2. );
-    }
-
-    err = Pa_Initialize();
-    if( err != paNoError ) goto error;
-
-    outputParameters.device = Pa_GetDefaultOutputDevice(); /* default output device */
-    if (outputParameters.device == paNoDevice) {
-        fprintf(stderr,"Error: No default output device.\n");
-        goto error;
-    }
-    outputParameters.channelCount = 2; /* stereo output */
-    outputParameters.sampleFormat = paFloat32; /* 32 bit floating point output */
-    outputParameters.suggestedLatency = Pa_GetDeviceInfo( outputParameters.device )->defaultHighOutputLatency * 5;
-    outputParameters.hostApiSpecificStreamInfo = NULL;
-
-    /* open the stream */
-    err = Pa_OpenStream(
-        &stream,
-        NULL, /* no input */
-        &outputParameters,
-        SAMPLE_RATE,
-        FRAMES_PER_BUFFER,
-        paClipOff, /* we won't output out of range samples so don't bother clipping them */
-        NULL, /* no callback, use blocking API */
-        NULL ); /* no callback, so no callback userData */
-    if( err != paNoError ) goto error;
-
-    /* start the stream */
-    err = Pa_StartStream( stream );
-    if( err != paNoError ) goto error;
-
-    printf("Playing %d seconds of silence followed by one buffer of a ramped sinusoid.\n", NUM_SECONDS );
-
-    bufferCount = ((NUM_SECONDS * SAMPLE_RATE) / FRAMES_PER_BUFFER);
-
-    /* clear buffer */
-    for( j=0; j < FRAMES_PER_BUFFER; j++ )
-    {
-        buffer[j][0] = 0; /* left */
-        buffer[j][1] = 0; /* right */
-    }
-    /* play the silent buffer a bunch o' times */
-    for( i=0; i < bufferCount; i++ )
-    {
-        err = Pa_WriteStream( stream, buffer, FRAMES_PER_BUFFER );
-        if( err != paNoError ) goto error;
-    }
-    /* play a non-silent buffer once */
-    for( j=0; j < FRAMES_PER_BUFFER; j++ )
-    {
-        float ramp = 1;
-        if( j < framesBy2 )
-            ramp = j / framesBy2f;
-        else
-            ramp = (FRAMES_PER_BUFFER - j) / framesBy2f ;
-
-        buffer[j][0] = sine[left_phase] * ramp; /* left */
-        buffer[j][1] = sine[right_phase] * ramp; /* right */
-        left_phase += left_inc;
-        if( left_phase >= TABLE_SIZE ) left_phase -= TABLE_SIZE;
-        right_phase += right_inc;
-        if( right_phase >= TABLE_SIZE ) right_phase -= TABLE_SIZE;
-    }
-    err = Pa_WriteStream( stream, buffer, FRAMES_PER_BUFFER );
-    if( err != paNoError ) goto error;
-
-    /* stop stream, close, and terminate */
-    err = Pa_StopStream( stream );
-    if( err != paNoError ) goto error;
-
-    err = Pa_CloseStream( stream );
-    if( err != paNoError ) goto error;
-
-    Pa_Terminate();
-    printf("Test finished.\n");
-
-    return err;
-error:
-    Pa_Terminate();
-    fprintf( stderr, "An error occurred while using the portaudio stream\n" );
-    fprintf( stderr, "Error number: %d\n", err );
-    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
-    return err;
-}
diff --git a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/inference/infer_tool.py b/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/inference/infer_tool.py
deleted file mode 100644
index 17781828effcb228794624e23659f83b53b239d0..0000000000000000000000000000000000000000
---
a/spaces/aquaaaaaaaaaaaa/AI-minato_aqua/inference/infer_tool.py +++ /dev/null @@ -1,327 +0,0 @@ -import hashlib -import json -import logging -import os -import time -from pathlib import Path - -import librosa -import maad -import numpy as np -# import onnxruntime -import parselmouth -import soundfile -import torch -import torchaudio - -from hubert import hubert_model -import utils -from models import SynthesizerTrn - -logging.getLogger('matplotlib').setLevel(logging.WARNING) - - -def read_temp(file_name): - if not os.path.exists(file_name): - with open(file_name, "w") as f: - f.write(json.dumps({"info": "temp_dict"})) - return {} - else: - try: - with open(file_name, "r") as f: - data = f.read() - data_dict = json.loads(data) - if os.path.getsize(file_name) > 50 * 1024 * 1024: - f_name = file_name.split("/")[-1] - print(f"clean {f_name}") - for wav_hash in list(data_dict.keys()): - if int(time.time()) - int(data_dict[wav_hash]["time"]) > 14 * 24 * 3600: - del data_dict[wav_hash] - except Exception as e: - print(e) - print(f"{file_name} error,auto rebuild file") - data_dict = {"info": "temp_dict"} - return data_dict - - -def write_temp(file_name, data): - with open(file_name, "w") as f: - f.write(json.dumps(data)) - - -def timeit(func): - def run(*args, **kwargs): - t = time.time() - res = func(*args, **kwargs) - print('executing \'%s\' costed %.3fs' % (func.__name__, time.time() - t)) - return res - - return run - - -def format_wav(audio_path): - if Path(audio_path).suffix == '.wav': - return - raw_audio, raw_sample_rate = librosa.load(audio_path, mono=True, sr=None) - soundfile.write(Path(audio_path).with_suffix(".wav"), raw_audio, raw_sample_rate) - - -def get_end_file(dir_path, end): - file_lists = [] - for root, dirs, files in os.walk(dir_path): - files = [f for f in files if f[0] != '.'] - dirs[:] = [d for d in dirs if d[0] != '.'] - for f_file in files: - if f_file.endswith(end): - file_lists.append(os.path.join(root, f_file).replace("\\", "/")) - return file_lists - - -def get_md5(content): - return hashlib.new("md5", content).hexdigest() - - -def resize2d_f0(x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp(np.arange(0, len(source) * target_len, len(source)) / target_len, np.arange(0, len(source)), - source) - res = np.nan_to_num(target) - return res - -def get_f0(x, p_len,f0_up_key=0): - - time_step = 160 / 16000 * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - - f0 = parselmouth.Sound(x, 16000).to_pitch_ac( - time_step=time_step / 1000, voicing_threshold=0.6, - pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency'] - - pad_size=(p_len - len(f0) + 1) // 2 - if(pad_size>0 or p_len - len(f0) - pad_size>0): - f0 = np.pad(f0,[[pad_size,p_len - len(f0) - pad_size]], mode='constant') - - f0 *= pow(2, f0_up_key / 12) - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0 - -def clean_pitch(input_pitch): - num_nan = np.sum(input_pitch == 1) - if num_nan / len(input_pitch) > 0.9: - input_pitch[input_pitch != 1] = 1 - return input_pitch - - -def plt_pitch(input_pitch): - input_pitch = input_pitch.astype(float) - input_pitch[input_pitch == 1] = np.nan - return input_pitch - - -def f0_to_pitch(ff): - f0_pitch = 69 + 12 * np.log2(ff / 440) - return 
f0_pitch - - -def fill_a_to_b(a, b): - if len(a) < len(b): - for _ in range(0, len(b) - len(a)): - a.append(a[0]) - - -def mkdir(paths: list): - for path in paths: - if not os.path.exists(path): - os.mkdir(path) - - -class Svc(object): - def __init__(self, net_g_path, config_path, hubert_path="hubert/hubert-soft-0d54a1f4.pt", - onnx=False): - self.onnx = onnx - self.net_g_path = net_g_path - self.hubert_path = hubert_path - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - self.net_g_ms = None - self.hps_ms = utils.get_hparams_from_file(config_path) - self.target_sample = self.hps_ms.data.sampling_rate - self.hop_size = self.hps_ms.data.hop_length - self.speakers = {} - for spk, sid in self.hps_ms.spk.items(): - self.speakers[sid] = spk - self.spk2id = self.hps_ms.spk - # load hubert - self.hubert_soft = hubert_model.hubert_soft(hubert_path) - if torch.cuda.is_available(): - self.hubert_soft = self.hubert_soft.cuda() - self.load_model() - - def load_model(self): - # load the model configuration - if self.onnx: - raise NotImplementedError - # self.net_g_ms = SynthesizerTrnForONNX( - # 178, - # self.hps_ms.data.filter_length // 2 + 1, - # self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - # n_speakers=self.hps_ms.data.n_speakers, - # **self.hps_ms.model) - # _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - else: - self.net_g_ms = SynthesizerTrn( - self.hps_ms.data.filter_length // 2 + 1, - self.hps_ms.train.segment_size // self.hps_ms.data.hop_length, - **self.hps_ms.model) - _ = utils.load_checkpoint(self.net_g_path, self.net_g_ms, None) - if "half" in self.net_g_path and torch.cuda.is_available(): - _ = self.net_g_ms.half().eval().to(self.dev) - else: - _ = self.net_g_ms.eval().to(self.dev) - - def get_units(self, source, sr): - - source = source.unsqueeze(0).to(self.dev) - with torch.inference_mode(): - start = time.time() - units = self.hubert_soft.units(source) - use_time = time.time() - start - print("hubert use time:{}".format(use_time)) - return units - - - def get_unit_pitch(self, in_path, tran): - source, sr = torchaudio.load(in_path) - source = torchaudio.functional.resample(source, sr, 16000) - if len(source.shape) == 2 and source.shape[1] >= 2: - source = torch.mean(source, dim=0).unsqueeze(0) - soft = self.get_units(source, sr).squeeze(0).cpu().numpy() - f0_coarse, f0 = get_f0(source.cpu().numpy()[0], soft.shape[0]*2, tran) - f0 = resize2d_f0(f0, soft.shape[0]*3) - return soft, f0 - - def infer(self, speaker_id, tran, raw_path): - if type(speaker_id) == str: - speaker_id = self.spk2id[speaker_id] - sid = torch.LongTensor([int(speaker_id)]).to(self.dev).unsqueeze(0) - soft, pitch = self.get_unit_pitch(raw_path, tran) - f0 = torch.FloatTensor(clean_pitch(pitch)).unsqueeze(0).to(self.dev) - if "half" in self.net_g_path and torch.cuda.is_available(): - stn_tst = torch.HalfTensor(soft) - else: - stn_tst = torch.FloatTensor(soft) - with torch.no_grad(): - x_tst = stn_tst.unsqueeze(0).to(self.dev) - start = time.time() - x_tst = torch.repeat_interleave(x_tst, repeats=3, dim=1).transpose(1, 2) - audio = self.net_g_ms.infer(x_tst, f0=f0, g=sid)[0,0].data.float() - use_time = time.time() - start - print("vits use time:{}".format(use_time)) - return audio, audio.shape[-1]
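# Example usage of Svc (a sketch; the checkpoint/config paths and the speaker
# name below are placeholders, not files shipped with this repo):
#   svc = Svc("logs/G_10000.pth", "configs/config.json")
#   audio, length = svc.infer("speaker_name", 0, "raw/input.wav")
#   # args: speaker, pitch shift in semitones, input wav path
#   soundfile.write("results/out.wav", audio.cpu().numpy(), svc.target_sample)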
- - -# class SvcONNXInferModel(object): -# def __init__(self, hubert_onnx, vits_onnx, config_path): -# self.config_path = config_path -# self.vits_onnx = vits_onnx -# self.hubert_onnx = hubert_onnx -# self.hubert_onnx_session = onnxruntime.InferenceSession(hubert_onnx, providers=['CUDAExecutionProvider', ]) -# self.inspect_onnx(self.hubert_onnx_session) -# self.vits_onnx_session = onnxruntime.InferenceSession(vits_onnx, providers=['CUDAExecutionProvider', ]) -# self.inspect_onnx(self.vits_onnx_session) -# self.hps_ms = utils.get_hparams_from_file(self.config_path) -# self.target_sample = self.hps_ms.data.sampling_rate -# self.feature_input = FeatureInput(self.hps_ms.data.sampling_rate, self.hps_ms.data.hop_length) -# -# @staticmethod -# def inspect_onnx(session): -# for i in session.get_inputs(): -# print("name:{}\tshape:{}\tdtype:{}".format(i.name, i.shape, i.type)) -# for i in session.get_outputs(): -# print("name:{}\tshape:{}\tdtype:{}".format(i.name, i.shape, i.type)) -# -# def infer(self, speaker_id, tran, raw_path): -# sid = np.array([int(speaker_id)], dtype=np.int64) -# soft, pitch = self.get_unit_pitch(raw_path, tran) -# pitch = np.expand_dims(pitch, axis=0).astype(np.int64) -# stn_tst = soft -# x_tst = np.expand_dims(stn_tst, axis=0) -# x_tst_lengths = np.array([stn_tst.shape[0]], dtype=np.int64) -# # run inference with ONNX Runtime -# start = time.time() -# audio = self.vits_onnx_session.run(output_names=["audio"], -# input_feed={ -# "hidden_unit": x_tst, -# "lengths": x_tst_lengths, -# "pitch": pitch, -# "sid": sid, -# })[0][0, 0] -# use_time = time.time() - start -# print("vits_onnx_session.run time:{}".format(use_time)) -# audio = torch.from_numpy(audio) -# return audio, audio.shape[-1] -# -# def get_units(self, source, sr): -# source = torchaudio.functional.resample(source, sr, 16000) -# if len(source.shape) == 2 and source.shape[1] >= 2: -# source = torch.mean(source, dim=0).unsqueeze(0) -# source = source.unsqueeze(0) -# # run inference with ONNX Runtime -# start = time.time() -# units = self.hubert_onnx_session.run(output_names=["embed"], -# input_feed={"source": source.numpy()})[0] -# use_time = time.time() - start -# print("hubert_onnx_session.run time:{}".format(use_time)) -# return units -# -# def transcribe(self, source, sr, length, transform): -# feature_pit = self.feature_input.compute_f0(source, sr) -# feature_pit = feature_pit * 2 ** (transform / 12) -# feature_pit = resize2d_f0(feature_pit, length) -# coarse_pit = self.feature_input.coarse_f0(feature_pit) -# return coarse_pit -# -# def get_unit_pitch(self, in_path, tran): -# source, sr = torchaudio.load(in_path) -# soft = self.get_units(source, sr).squeeze(0) -# input_pitch = self.transcribe(source.numpy()[0], sr, soft.shape[0], tran) -# return soft, input_pitch - - -class RealTimeVC: - def __init__(self): - self.last_chunk = None - self.last_o = None - self.chunk_len = 16000 # chunk length - self.pre_len = 3840 # crossfade length, a multiple of 640 - - """Input and output are both 1-D numpy audio waveform arrays""" - - def process(self, svc_model, speaker_id, f_pitch_change, input_wav_path): - audio, sr = torchaudio.load(input_wav_path) - audio = audio.cpu().numpy()[0] - temp_wav = io.BytesIO() - if self.last_chunk is None: - input_wav_path.seek(0) - audio, sr = svc_model.infer(speaker_id, f_pitch_change, input_wav_path) - audio = audio.cpu().numpy() - self.last_chunk = audio[-self.pre_len:] - self.last_o = audio - return audio[-self.chunk_len:] - else: - audio = np.concatenate([self.last_chunk, audio]) - soundfile.write(temp_wav, audio, sr, format="wav") - temp_wav.seek(0) - audio, sr = svc_model.infer(speaker_id, f_pitch_change, temp_wav) - audio = audio.cpu().numpy() - ret = maad.util.crossfade(self.last_o, audio, self.pre_len) - self.last_chunk = audio[-self.pre_len:] - self.last_o = 
audio - return ret[self.chunk_len:2 * self.chunk_len] diff --git a/spaces/arch-123/bingo/src/components/external-link.tsx b/spaces/arch-123/bingo/src/components/external-link.tsx deleted file mode 100644 index 011265f364d5a64a770f4c7e9c65c5ade21d623a..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/src/components/external-link.tsx +++ /dev/null @@ -1,30 +0,0 @@ -export function ExternalLink({ - href, - children -}: { - href: string - children: React.ReactNode -}) { - return ( - <a href={href} target="_blank" rel="noreferrer"> - {children} - </a> - ) -} diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/configs/shared_configs.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/configs/shared_configs.py deleted file mode 100644 index 74164a744452a00c7f318fbdcc55438cddcc70be..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/configs/shared_configs.py +++ /dev/null @@ -1,155 +0,0 @@ -from dataclasses import asdict, dataclass, field -from typing import Dict, List - -from coqpit import Coqpit, check_argument - -from TTS.config import BaseAudioConfig, BaseDatasetConfig, BaseTrainingConfig - - -@dataclass -class BaseVCConfig(BaseTrainingConfig): - """Shared parameters among all the VC models. - - Args: - - audio (BaseAudioConfig): - Audio processor config object instance. - - batch_group_size (int): - Size of the batch groups used for bucketing. By default, the dataloader orders samples by the sequence - length for a more efficient and stable training. If `batch_group_size > 1` then it performs bucketing to - prevent using the same batches for each epoch. - - loss_masking (bool): - enable / disable masking loss values against padded segments of samples in a batch. - - min_text_len (int): - Minimum length of input text to be used. All shorter samples will be ignored. Defaults to 0. - - max_text_len (int): - Maximum length of input text to be used. All longer samples will be ignored. Defaults to float("inf"). - - min_audio_len (int): - Minimum length of input audio to be used. All shorter samples will be ignored. Defaults to 0. - - max_audio_len (int): - Maximum length of input audio to be used. All longer samples will be ignored. The maximum length in the - dataset defines the VRAM used in the training. Hence, pay attention to this value if you encounter an - OOM error in training. Defaults to float("inf"). - - compute_f0 (int): - (Not in use yet). - - compute_energy (int): - (Not in use yet). - - compute_linear_spec (bool): - If True data loader computes and returns linear spectrograms alongside the other data. - - precompute_num_workers (int): - Number of workers to precompute features. Defaults to 0. - - use_noise_augment (bool): - Augment the input audio with random noise. - - start_by_longest (bool): - If True, the data loader will start loading the longest batch first. It is useful for checking OOM issues. - Defaults to False. - - shuffle (bool): - If True, the data loader will shuffle the dataset when there is not sampler defined. Defaults to True. - - drop_last (bool): - If True, the data loader will drop the last batch if it is not complete. It helps to prevent - issues that emerge from the partial batch statistics. Defaults to True. - - add_blank (bool): - Add blank characters between each other two characters. It improves performance for some models at expense - of slower run-time due to the longer input sequence. - - datasets (List[BaseDatasetConfig]): - List of datasets used for training. 
If multiple datasets are provided, they are merged and used together - for training. - - optimizer (str): - Optimizer used for the training. Set one from `torch.optim.Optimizer` or `TTS.utils.training`. - Defaults to ``. - - optimizer_params (dict): - Optimizer kwargs. Defaults to `{"betas": [0.8, 0.99], "weight_decay": 0.0}` - - lr_scheduler (str): - Learning rate scheduler for the training. Use one from `torch.optim.Scheduler` schedulers or - `TTS.utils.training`. Defaults to ``. - - lr_scheduler_params (dict): - Parameters for the generator learning rate scheduler. Defaults to `{"warmup": 4000}`. - - test_sentences (List[str]): - List of sentences to be used at testing. Defaults to '[]' - - eval_split_max_size (int): - Number maximum of samples to be used for evaluation in proportion split. Defaults to None (Disabled). - - eval_split_size (float): - If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set. - If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%). - - use_speaker_weighted_sampler (bool): - Enable / Disable the batch balancer by speaker. Defaults to ```False```. - - speaker_weighted_sampler_alpha (float): - Number that control the influence of the speaker sampler weights. Defaults to ```1.0```. - - use_language_weighted_sampler (bool): - Enable / Disable the batch balancer by language. Defaults to ```False```. - - language_weighted_sampler_alpha (float): - Number that control the influence of the language sampler weights. Defaults to ```1.0```. - - use_length_weighted_sampler (bool): - Enable / Disable the batch balancer by audio length. If enabled the dataset will be divided - into 10 buckets considering the min and max audio of the dataset. The sampler weights will be - computed forcing to have the same quantity of data for each bucket in each training batch. Defaults to ```False```. - - length_weighted_sampler_alpha (float): - Number that control the influence of the length sampler weights. Defaults to ```1.0```. 
- """ - - audio: BaseAudioConfig = field(default_factory=BaseAudioConfig) - # training params - batch_group_size: int = 0 - loss_masking: bool = None - # dataloading - min_audio_len: int = 1 - max_audio_len: int = float("inf") - min_text_len: int = 1 - max_text_len: int = float("inf") - compute_f0: bool = False - compute_energy: bool = False - compute_linear_spec: bool = False - precompute_num_workers: int = 0 - use_noise_augment: bool = False - start_by_longest: bool = False - shuffle: bool = False - drop_last: bool = False - # dataset - datasets: List[BaseDatasetConfig] = field(default_factory=lambda: [BaseDatasetConfig()]) - # optimizer - optimizer: str = "radam" - optimizer_params: dict = None - # scheduler - lr_scheduler: str = None - lr_scheduler_params: dict = field(default_factory=lambda: {}) - # testing - test_sentences: List[str] = field(default_factory=lambda: []) - # evaluation - eval_split_max_size: int = None - eval_split_size: float = 0.01 - # weighted samplers - use_speaker_weighted_sampler: bool = False - speaker_weighted_sampler_alpha: float = 1.0 - use_language_weighted_sampler: bool = False - language_weighted_sampler_alpha: float = 1.0 - use_length_weighted_sampler: bool = False - length_weighted_sampler_alpha: float = 1.0 diff --git a/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/detection/sfd/sfd_detector.py b/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/detection/sfd/sfd_detector.py deleted file mode 100644 index 8fbce15253251d403754ab4348f93ae85a6ba2fb..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/Wav2Lip/face_detection/detection/sfd/sfd_detector.py +++ /dev/null @@ -1,59 +0,0 @@ -import os -import cv2 -from torch.utils.model_zoo import load_url - -from ..core import FaceDetector - -from .net_s3fd import s3fd -from .bbox import * -from .detect import * - -models_urls = { - 's3fd': 'https://www.adrianbulat.com/downloads/python-fan/s3fd-619a316812.pth', -} - - -class SFDDetector(FaceDetector): - def __init__(self, device, path_to_detector=os.path.join(os.path.dirname(os.path.abspath(__file__)), 's3fd.pth'), verbose=False): - super(SFDDetector, self).__init__(device, verbose) - - # Initialise the face detector - if not os.path.isfile(path_to_detector): - model_weights = load_url(models_urls['s3fd']) - else: - model_weights = torch.load(path_to_detector) - - self.face_detector = s3fd() - self.face_detector.load_state_dict(model_weights) - self.face_detector.to(device) - self.face_detector.eval() - - def detect_from_image(self, tensor_or_path): - image = self.tensor_or_path_to_ndarray(tensor_or_path) - - bboxlist = detect(self.face_detector, image, device=self.device) - keep = nms(bboxlist, 0.3) - bboxlist = bboxlist[keep, :] - bboxlist = [x for x in bboxlist if x[-1] > 0.5] - - return bboxlist - - def detect_from_batch(self, images): - bboxlists = batch_detect(self.face_detector, images, device=self.device) - keeps = [nms(bboxlists[:, i, :], 0.3) for i in range(bboxlists.shape[1])] - bboxlists = [bboxlists[keep, i, :] for i, keep in enumerate(keeps)] - bboxlists = [[x for x in bboxlist if x[-1] > 0.5] for bboxlist in bboxlists] - - return bboxlists - - @property - def reference_scale(self): - return 195 - - @property - def reference_x_shift(self): - return 0 - - @property - def reference_y_shift(self): - return 0 diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_helpers.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_helpers.py deleted file mode 
100644 index ea02f2d13ca9b4edb43b71f622a246fb5337874a..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_helpers.py +++ /dev/null @@ -1,401 +0,0 @@ -# Copyright 2017 The Abseil Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Internal helper functions for Abseil Python flags library.""" - -import collections -import os -import re -import struct -import sys -import textwrap -try: - import fcntl -except ImportError: - fcntl = None -try: - # Importing termios will fail on non-unix platforms. - import termios -except ImportError: - termios = None - - -_DEFAULT_HELP_WIDTH = 80 # Default width of help output. -# Minimal "sane" width of help output. We assume that any value below 40 is -# unreasonable. -_MIN_HELP_WIDTH = 40 - -# Define the allowed error rate in an input string to get suggestions. -# -# We lean towards a high threshold because we tend to be matching a phrase, -# and the simple algorithm used here is geared towards correcting word -# spellings. -# -# For manual testing, consider " --list" which produced a large number -# of spurious suggestions when we used "least_errors > 0.5" instead of -# "least_erros >= 0.5". -_SUGGESTION_ERROR_RATE_THRESHOLD = 0.50 - -# Characters that cannot appear or are highly discouraged in an XML 1.0 -# document. (See http://www.w3.org/TR/REC-xml/#charsets or -# https://en.wikipedia.org/wiki/Valid_characters_in_XML#XML_1.0) -_ILLEGAL_XML_CHARS_REGEX = re.compile( - u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x84\x86-\x9f\ud800-\udfff\ufffe\uffff]') - -# This is a set of module ids for the modules that disclaim key flags. -# This module is explicitly added to this set so that we never consider it to -# define key flag. -disclaim_module_ids = set([id(sys.modules[__name__])]) - - -# Define special flags here so that help may be generated for them. -# NOTE: Please do NOT use SPECIAL_FLAGS from outside flags module. -# Initialized inside flagvalues.py. -SPECIAL_FLAGS = None - - -# This points to the flags module, initialized in flags/__init__.py. -# This should only be used in adopt_module_key_flags to take SPECIAL_FLAGS into -# account. -FLAGS_MODULE = None - - -class _ModuleObjectAndName( - collections.namedtuple('_ModuleObjectAndName', 'module module_name')): - """Module object and name. - - Fields: - - module: object, module object. - - module_name: str, module name. - """ - - -def get_module_object_and_name(globals_dict): - """Returns the module that defines a global environment, and its name. - - Args: - globals_dict: A dictionary that should correspond to an environment - providing the values of the globals. - - Returns: - _ModuleObjectAndName - pair of module object & module name. - Returns (None, None) if the module could not be identified. - """ - name = globals_dict.get('__name__', None) - module = sys.modules.get(name, None) - # Pick a more informative name for the main module. 
- return _ModuleObjectAndName(module, - (sys.argv[0] if name == '__main__' else name)) - - -def get_calling_module_object_and_name(): - """Returns the module that's calling into this module. - - We generally use this function to get the name of the module calling a - DEFINE_foo... function. - - Returns: - The module object that called into this one. - - Raises: - AssertionError: Raised when no calling module could be identified. - """ - for depth in range(1, sys.getrecursionlimit()): - # sys._getframe is the right thing to use here, as it's the best - # way to walk up the call stack. - globals_for_frame = sys._getframe(depth).f_globals # pylint: disable=protected-access - module, module_name = get_module_object_and_name(globals_for_frame) - if id(module) not in disclaim_module_ids and module_name is not None: - return _ModuleObjectAndName(module, module_name) - raise AssertionError('No module was found') - - -def get_calling_module(): - """Returns the name of the module that's calling into this module.""" - return get_calling_module_object_and_name().module_name - - -def create_xml_dom_element(doc, name, value): - """Returns an XML DOM element with name and text value. - - Args: - doc: minidom.Document, the DOM document it should create nodes from. - name: str, the tag of XML element. - value: object, whose string representation will be used - as the value of the XML element. Illegal or highly discouraged xml 1.0 - characters are stripped. - - Returns: - An instance of minidom.Element. - """ - s = str(value) - if isinstance(value, bool): - # Display boolean values as the C++ flag library does: no caps. - s = s.lower() - # Remove illegal xml characters. - s = _ILLEGAL_XML_CHARS_REGEX.sub(u'', s) - - e = doc.createElement(name) - e.appendChild(doc.createTextNode(s)) - return e - - -def get_help_width(): - """Returns the integer width of help lines that is used in TextWrap.""" - if not sys.stdout.isatty() or termios is None or fcntl is None: - return _DEFAULT_HELP_WIDTH - try: - data = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, '1234') - columns = struct.unpack('hh', data)[1] - # Emacs mode returns 0. - # Here we assume that any value below 40 is unreasonable. - if columns >= _MIN_HELP_WIDTH: - return columns - # Returning an int as default is fine, int(int) just return the int. - return int(os.getenv('COLUMNS', _DEFAULT_HELP_WIDTH)) - - except (TypeError, IOError, struct.error): - return _DEFAULT_HELP_WIDTH - - -def get_flag_suggestions(attempt, longopt_list): - """Returns helpful similar matches for an invalid flag.""" - # Don't suggest on very short strings, or if no longopts are specified. - if len(attempt) <= 2 or not longopt_list: - return [] - - option_names = [v.split('=')[0] for v in longopt_list] - - # Find close approximations in flag prefixes. - # This also handles the case where the flag is spelled right but ambiguous. - distances = [(_damerau_levenshtein(attempt, option[0:len(attempt)]), option) - for option in option_names] - # t[0] is distance, and sorting by t[1] allows us to have stable output. - distances.sort() - - least_errors, _ = distances[0] - # Don't suggest excessively bad matches. 
- if least_errors >= _SUGGESTION_ERROR_RATE_THRESHOLD * len(attempt): - return [] - - suggestions = [] - for errors, name in distances: - if errors == least_errors: - suggestions.append(name) - else: - break - return suggestions - - -def _damerau_levenshtein(a, b): - """Returns Damerau-Levenshtein edit distance from a to b.""" - memo = {} - - def distance(x, y): - """Recursively defined string distance with memoization.""" - if (x, y) in memo: - return memo[x, y] - if not x: - d = len(y) - elif not y: - d = len(x) - else: - d = min( - distance(x[1:], y) + 1, # correct an insertion error - distance(x, y[1:]) + 1, # correct a deletion error - distance(x[1:], y[1:]) + (x[0] != y[0])) # correct a wrong character - if len(x) >= 2 and len(y) >= 2 and x[0] == y[1] and x[1] == y[0]: - # Correct a transposition. - t = distance(x[2:], y[2:]) + 1 - if d > t: - d = t - - memo[x, y] = d - return d - return distance(a, b) - - -def text_wrap(text, length=None, indent='', firstline_indent=None): - """Wraps a given text to a maximum line length and returns it. - - It turns lines that only contain whitespace into empty lines, keeps new lines, - and expands tabs using 4 spaces. - - Args: - text: str, text to wrap. - length: int, maximum length of a line, includes indentation. - If this is None then use get_help_width() - indent: str, indent for all but first line. - firstline_indent: str, indent for first line; if None, fall back to indent. - - Returns: - str, the wrapped text. - - Raises: - ValueError: Raised if indent or firstline_indent not shorter than length. - """ - # Get defaults where callee used None - if length is None: - length = get_help_width() - if indent is None: - indent = '' - if firstline_indent is None: - firstline_indent = indent - - if len(indent) >= length: - raise ValueError('Length of indent exceeds length') - if len(firstline_indent) >= length: - raise ValueError('Length of first line indent exceeds length') - - text = text.expandtabs(4) - - result = [] - # Create one wrapper for the first paragraph and one for subsequent - # paragraphs that does not have the initial wrapping. - wrapper = textwrap.TextWrapper( - width=length, initial_indent=firstline_indent, subsequent_indent=indent) - subsequent_wrapper = textwrap.TextWrapper( - width=length, initial_indent=indent, subsequent_indent=indent) - - # textwrap does not have any special treatment for newlines. From the docs: - # "...newlines may appear in the middle of a line and cause strange output. - # For this reason, text should be split into paragraphs (using - # str.splitlines() or similar) which are wrapped separately." - for paragraph in (p.strip() for p in text.splitlines()): - if paragraph: - result.extend(wrapper.wrap(paragraph)) - else: - result.append('') # Keep empty lines. - # Replace initial wrapper with wrapper for subsequent paragraphs. - wrapper = subsequent_wrapper - - return '\n'.join(result) - - -def flag_dict_to_args(flag_map, multi_flags=None): - """Convert a dict of values into process call parameters. - - This method is used to convert a dictionary into a sequence of parameters - for a binary that parses arguments using this module. - - Args: - flag_map: dict, a mapping where the keys are flag names (strings). - values are treated according to their type: - - * If value is ``None``, then only the name is emitted. - * If value is ``True``, then only the name is emitted. - * If value is ``False``, then only the name prepended with 'no' is - emitted. - * If value is a string then ``--name=value`` is emitted. 
- * If value is a collection, this will emit - ``--name=value1,value2,value3``, unless the flag name is in - ``multi_flags``, in which case this will emit - ``--name=value1 --name=value2 --name=value3``. - * Everything else is converted to string an passed as such. - - multi_flags: set, names (strings) of flags that should be treated as - multi-flags. - Yields: - sequence of string suitable for a subprocess execution. - """ - for key, value in flag_map.items(): - if value is None: - yield '--%s' % key - elif isinstance(value, bool): - if value: - yield '--%s' % key - else: - yield '--no%s' % key - elif isinstance(value, (bytes, type(u''))): - # We don't want strings to be handled like python collections. - yield '--%s=%s' % (key, value) - else: - # Now we attempt to deal with collections. - try: - if multi_flags and key in multi_flags: - for item in value: - yield '--%s=%s' % (key, str(item)) - else: - yield '--%s=%s' % (key, ','.join(str(item) for item in value)) - except TypeError: - # Default case. - yield '--%s=%s' % (key, value) - - -def trim_docstring(docstring): - """Removes indentation from triple-quoted strings. - - This is the function specified in PEP 257 to handle docstrings: - https://www.python.org/dev/peps/pep-0257/. - - Args: - docstring: str, a python docstring. - - Returns: - str, docstring with indentation removed. - """ - if not docstring: - return '' - - # If you've got a line longer than this you have other problems... - max_indent = 1 << 29 - - # Convert tabs to spaces (following the normal Python rules) - # and split into a list of lines: - lines = docstring.expandtabs().splitlines() - - # Determine minimum indentation (first line doesn't count): - indent = max_indent - for line in lines[1:]: - stripped = line.lstrip() - if stripped: - indent = min(indent, len(line) - len(stripped)) - # Remove indentation (first line is special): - trimmed = [lines[0].strip()] - if indent < max_indent: - for line in lines[1:]: - trimmed.append(line[indent:].rstrip()) - # Strip off trailing and leading blank lines: - while trimmed and not trimmed[-1]: - trimmed.pop() - while trimmed and not trimmed[0]: - trimmed.pop(0) - # Return a single string: - return '\n'.join(trimmed) - - -def doc_to_help(doc): - """Takes a __doc__ string and reformats it as help.""" - - # Get rid of starting and ending white space. Using lstrip() or even - # strip() could drop more than maximum of first line and right space - # of last line. - doc = doc.strip() - - # Get rid of all empty lines. - whitespace_only_line = re.compile('^[ \t]+$', re.M) - doc = whitespace_only_line.sub('', doc) - - # Cut out common space at line beginnings. - doc = trim_docstring(doc) - - # Just like this module's comment, comments tend to be aligned somehow. - # In other words they all start with the same amount of white space. - # 1) keep double new lines; - # 2) keep ws after new lines if not empty line; - # 3) all other new lines shall be changed to a space; - # Solution: Match new lines between non white space and replace with space. - doc = re.sub(r'(?<=\S)\n(?=\S)', ' ', doc, flags=re.M) - - return doc diff --git a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Pranjal Bhatt.html b/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Pranjal Bhatt.html deleted file mode 100644 index 54bfa5dee87f53aea5794b5f477afdce22bf690e..0000000000000000000000000000000000000000 --- a/spaces/at2507/SM_NLP_RecoSys/Data/Mentor_interviews/Pranjal Bhatt.html +++ /dev/null @@ -1,134 +0,0 @@ - - - - Pranjal Bhatt - - - - -
    -

    Pranjal Bhatt

    - -
    -
    How did you hear about SM?
    • started a mentorship as a mentee back in 2021
    • was a pay-as-you-go mentorship, in order to learn CV
    • canceled after ~ 4 weeks

    Career?
    • Omdena - ML consultancy, lots of varied projects
      • data mining to deep learning model deployment
      • 2 years in December, but planning on switching jobs. Currently interviewing
    Mentorship exp?
    • at my current job I do interviewing, and during onboarding I act as a mentor to new hires
    • e.g., showing new hires how to do SQL

    What do beginners lack, and how can you help?
    • People do not tailor their resume (e.g., you had better include the right keywords!)
      • previous experience is necessary - candidates either lack it or can't sell it
      • GitHub repos are often poorly designed
      • the reviewer is not going to look through each and every line
      • interaction with other people makes it easier to, e.g., explain technical concepts to others
      • how to tell a story
    • 2 visions
      • focuses: look at my personal skills (NLP, DL, etc.) and find mentees who are interested in those niche areas
      • New grads: study sessions, weekly or bi-weekly sessions, run a cohort. Or folks who are shifting careers
    -
    -

    Questions about SM?
    • ISA - does it apply to contract roles?
    • Confirm that Payment Term is 12 months
    • How many active mentorships right now?
    • How many mentee per mentor?
    -
    - -
    - - - \ No newline at end of file diff --git a/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/gym-setup/upload.component.ts b/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/gym-setup/upload.component.ts deleted file mode 100644 index 525550d172ee4a13d633c13010d23b3963dbb7b3..0000000000000000000000000000000000000000 --- a/spaces/augmentedimaginationhackathon/paperstocode/fronty/src/app/gym-setup/upload.component.ts +++ /dev/null @@ -1,52 +0,0 @@ -import { Component, OnInit } from '@angular/core'; -//import {RequestService} from "../services/request-service/request.service"; -import {take} from "rxjs/operators"; -import {ActivatedRoute} from "@angular/router"; -import {FormBuilder, FormGroup, Validators} from "@angular/forms"; - -@Component({ - selector: 'uploader', - templateUrl: './upload.component.html', - styleUrls: ['./upload.component.scss'] -}) -export class UploadComponent implements OnInit { - public readonly numberOfSteps: number = 1; - public stepOne: number = 1; - public currStep: number = 1; - public form: FormGroup; - - constructor(//private requestService: RequestService, - private activatedRoute: ActivatedRoute, - private formBuilder: FormBuilder - ) { - this.form = formBuilder.group({ - - "paper": [ - "", - //"", - []], - }) - } - - public ngOnInit(): void { - //We will use the gym org id to find all gyms which need setup, then set them upbased on their setupstate or w/e - this.activatedRoute.data.subscribe((data) => { - console.log(data) - }) - } - public stepOneContinue() { - console.log(this.form.value) - if(this.stepOne < this.numberOfSteps) { - this.stepOne++; - } else { - this.stepOne = 1 - } - setTimeout(() => { - if(this.currStep < this.numberOfSteps) { - this.currStep++; - } else { - this.currStep = 1 - } - }, 250); - } -} diff --git a/spaces/augmentedimaginationhackathon/paperstocode/retrieval/control.tex b/spaces/augmentedimaginationhackathon/paperstocode/retrieval/control.tex deleted file mode 100644 index 1bbdccf95999dcdf4e1cbefa0531748cbce0feed..0000000000000000000000000000000000000000 --- a/spaces/augmentedimaginationhackathon/paperstocode/retrieval/control.tex +++ /dev/null @@ -1,554 +0,0 @@ -\documentclass{article} -\usepackage{great} - -\usepackage[utf8]{inputenc} -\usepackage[T1]{fontenc} -\usepackage[hidelinks]{hyperref} -\usepackage{url} -\usepackage{booktabs} -\usepackage{amsfonts} -\usepackage{nicefrac} -\usepackage{microtype} -\usepackage{xcolor} -\usepackage{bm} -\usepackage{amsmath} -\usepackage{graphicx} -\usepackage{xcolor} - -\makeatletter -\usepackage{xspace} -\DeclareRobustCommand\onedot{\futurelet\@let@token\@onedot} -\def\@onedot{\ifx\@let@token.\else.\null\fi\xspace} -\def\eg{\emph{e.g}\onedot} -\def\ie{\emph{i.e}\onedot} -\def\etal{\emph{et al}\onedot} -\def\etc{\emph{etc}\onedot} -\makeatother - -\makeatletter -\newcommand*{\centerfloat}{% - \parindent \z@ - \leftskip \z@ \@plus 1fil \@minus \textwidth - \rightskip\leftskip - \parfillskip \z@skip} -\makeatother - -\title{Adding Conditional Control to Text-to-Image Diffusion Models} - -\author{\texttt{Lvmin Zhang and Maneesh Agrawala}\\\texttt{Stanford University}} - -\begin{document} - -\maketitle - -\begin{abstract} -\vspace{10pt} -We present a neural network structure, ControlNet, to control pretrained large diffusion models to support additional input conditions. -The ControlNet learns task-specific conditions in an end-to-end way, and the learning is robust even when the training dataset is small (< 50k). 
-Moreover, training a ControlNet is as fast as fine-tuning a diffusion model, and the model can be trained on personal devices. -Alternatively, if powerful computation clusters are available, the model can scale to large amounts (millions to billions) of data. -We report that large diffusion models like Stable Diffusion can be augmented with ControlNets to enable conditional inputs like edge maps, segmentation maps, keypoints, \etc. -This may enrich the methods to control large diffusion models and further facilitate related applications. - -\vspace{3pt} -{\scriptsize\url{https://github.com/lllyasviel/ControlNet}} -\end{abstract} - -\vspace{40pt} - -\section{Introduction} - -\begin{figure} - \centering - \includegraphics[width=\linewidth]{./imgs/teaser.pdf} - \caption{Control Stable Diffusion with Canny edge map. The Canny edge map is the input, and the source image is not used when we generate the images on the right. The outputs are achieved with a default prompt \emph{``a high-quality, detailed, and professional image''}. This prompt is used in this paper as a default prompt that does not mention anything about the image contents and object names. Most of the figures in this paper are high-resolution images and best viewed when zoomed in.} - \label{fig:teaser} -\end{figure} - -With the presence of large text-to-image models, generating a visually appealing image may require only a short descriptive prompt entered by users. -After typing some text and getting the images, we may naturally come up with several questions: does this prompt-based control satisfy our needs? -For example, in image processing, considering many long-standing tasks with clear problem formulations, can these large models be applied to facilitate these specific tasks? -What kind of framework should we build to handle the wide range of problem conditions and user controls? -In specific tasks, can large models preserve the advantages and capabilities obtained from billions of images? - -To answer these questions, we investigate various image processing applications and have three findings. First, the available data scale in a task-specific domain is not always as large as that in the general image-text domain. The largest dataset size of many specific problems (\eg, object shape/normal, pose understanding, \etc) is often under 100k, \ie, $5\times10^4$ times smaller than LAION-5B. This would require robust neural network training methods to avoid overfitting and to preserve generalization ability when the large models are trained for specific problems. - -Second, when image processing tasks are handled with data-driven solutions, large computation clusters are not always available. This makes fast training methods important for optimizing large models to specific tasks within an acceptable amount of time and memory space (\eg, on personal devices). This would further require the utilization of pretrained weights, as well as fine-tuning strategies or transfer learning. - -Third, various image processing problems have diverse forms of problem definitions, user controls, or image annotations. When addressing these problems, although an image diffusion algorithm can be regulated in a ``procedural'' way, \eg, constraining the denoising process, editing multi-head attention activations, \etc, the behaviors of these hand-crafted rules are fundamentally prescribed by human directives. 
Considering some specific tasks like depth-to-image, pose-to-human, \etc, these problems essentially require the interpretation of raw inputs into object-level or scene-level understandings, making hand-crafted procedural methods less feasible. To achieve learned solutions in many tasks, end-to-end learning is indispensable. - -This paper presents ControlNet, an end-to-end neural network architecture that controls large image diffusion models (like Stable Diffusion) to learn task-specific input conditions. ControlNet clones the weights of a large diffusion model into a ``trainable copy'' and a ``locked copy'': the locked copy preserves the network capability learned from billions of images, while the trainable copy is trained on task-specific datasets to learn the conditional control. The trainable and locked neural network blocks are connected with a unique type of convolution layer called ``zero convolution'', where the convolution weights progressively grow from zeros to optimized parameters in a learned manner. Since the production-ready weights are preserved, the training is robust on datasets of different scales. Since the zero convolution does not add new noise to deep features, the training is as fast as fine-tuning a diffusion model, compared to training new layers from scratch. - -We train several ControlNets with various datasets of different conditions, \eg, Canny edges, Hough lines, user scribbles, human key points, segmentation maps, shape normals, depths, \etc. We also experiment with ControlNets on both small datasets (with fewer than 50k or even 1k samples) and large datasets (millions of samples). We also show that in some tasks like depth-to-image, training ControlNets on a personal computer (one Nvidia RTX 3090TI) can achieve results competitive with commercial models trained on large computation clusters with terabytes of GPU memory and thousands of GPU hours. - -\section{Related Work} - -\subsection{HyperNetwork and Neural Network Structure} - -HyperNetwork originates from a neural language processing method \cite{ha2017hypernetworks} that trains a small recurrent neural network to influence the weights of a larger one. Successful results of HyperNetwork are also reported in image generation using generative adversarial networks \cite{alaluf2021hyperstyle, dinh2022hyperinverter} and other machine learning tasks \cite{shamsian2021personalized}. Inspired by these ideas, \cite{heathen} provided a method to attach a smaller neural network to Stable Diffusion \cite{rombach2021highresolution} so as to change the artistic style of its output images. This approach gained more popularity after \cite{nai} provided the pretrained weights of several HyperNetworks. ControlNet and HyperNetwork have similarities in the way they influence the behaviors of neural networks. - -ControlNet uses a special type of convolution layer called ``zero convolution''. Early neural network studies \cite{726791,Rumelhart1986,LeCun2015} have extensively discussed the initialization of network weights, including the rationality of initializing the weights with Gaussian distributions and the risks incurred by initializing the weights with zeros. More recently, \cite{2102.09672} discussed a method to scale the initial weight of several convolution layers in a diffusion model to improve the training, which shares similarity with the idea of zero convolution (and their code contains a function called ``zero\_module''). 
Manipulating the initial convolution weights is also discussed in ProGAN~\cite{1710.10196} and StyleGAN~\cite{1812.04948}, as well as Noise2Noise~\cite{1803.04189} and \cite{DBLP:journals/corr/abs-2110-12661}. Stability's model cards \cite{sdd} also mention the use of zero weights in neural layers. - -\subsection{Diffusion Probabilistic Model} - -The diffusion probabilistic model was proposed in \cite{DBLP:journals/corr/Sohl-DicksteinW15}. Successful results of image generation were first reported at small scale \cite{DBLP:journals/corr/abs-2107-00630} and then at relatively large scale \cite{DBLP:journals/corr/abs-2105-05233}. This architecture was improved by important training and sampling methods like the Denoising Diffusion Probabilistic Model (DDPM) \cite{DBLP:conf/nips/HoJA20}, the Denoising Diffusion Implicit Model (DDIM) \cite{DBLP:conf/iclr/SongME21}, and score-based diffusion \cite{DBLP:journals/corr/abs-2011-13456}. -Image diffusion methods can directly use pixel colors as training data, and in that case, studies often consider strategies to save computation power when handling high-resolution images \cite{DBLP:conf/iclr/SongME21, DBLP:journals/corr/abs-2104-02600, DBLP:journals/corr/abs-2106-00132}, or directly use pyramid-based or multiple-stage methods~\cite{DBLP:journals/corr/abs-2106-15282, ramesh2022hierarchical}. -These methods essentially use U-net \cite{DBLP:conf/miccai/RonnebergerFB15} as their neural network architecture. -In order to reduce the computation power required for training a diffusion model, based on the idea of the latent image \cite{DBLP:journals/corr/abs-2012-09841}, the Latent Diffusion Model (LDM) \cite{rombach2021highresolution} was proposed and further extended to Stable Diffusion. - -\subsection{Text-to-Image Diffusion} - -Diffusion models can be applied to text-to-image generation tasks to achieve state-of-the-art image generation results. This is often achieved by encoding text inputs into latent vectors using pretrained language models like CLIP \cite{2103.00020}. For instance, Glide \cite{nichol2021glide} is a text-guided diffusion model supporting both image generation and editing. Disco Diffusion is a CLIP-guided implementation of \cite{DBLP:journals/corr/abs-2105-05233} to process text prompts. Stable Diffusion is a large-scale implementation of latent diffusion \cite{rombach2021highresolution} to achieve text-to-image generation. Imagen \cite{saharia2022photorealistic} is a text-to-image structure that does not use latent images and directly diffuses pixels using a pyramid structure. - -\subsection{Personalization, Customization, and Control of Pretrained Diffusion Models} - -Because state-of-the-art image diffusion models are dominated by text-to-image methods, the most straightforward ways to enhance the control over a diffusion model are often text-guided \cite{nichol2021glide,kim2022diffusionclip,avrahami2022blended, 2211.09800, kawar2022imagic,ramesh2022hierarchical,hertz2022prompt}. This type of control can also be achieved by manipulating CLIP features \cite{ramesh2022hierarchical}. The image diffusion process by itself can provide some functionalities to achieve color-level detail variations \cite{meng2021sdedit} (the Stable Diffusion community calls it img2img). Image diffusion algorithms naturally support inpainting as an important way to control the results \cite{ramesh2022hierarchical,avrahami2022blended}. 
Textual Inversion \cite{gal2022image} and DreamBooth \cite{ruiz2022dreambooth} were proposed to customize (or personalize) the contents in the generated results using a small set of images with the same topics or objects. - -\subsection{Image-to-Image Translation} - -We would like to point out that, although ControlNet and image-to-image translation may have several overlapping applications, their motivations are essentially different. Image-to-image translation aims to learn a mapping between images in different domains, while a ControlNet aims to control a diffusion model with task-specific conditions. - -Pix2Pix \cite{isola2017image} presented the concept of image-to-image translation, and early methods were dominated by conditional generative neural networks~\cite{isola2017image,zhu2017toward,wang2018high,park2019semantic,choi2018stargan,zhang2020cross,zhou2021cocosnet}. After transformers and Vision Transformers (ViTs) gained popularity, successful results have been reported using autoregressive methods \cite{ramesh2021zero,DBLP:journals/corr/abs-2012-09841, chen2021pre}. Some studies also show that multi-modal methods can learn a robust generator from various translation tasks \cite{zhang2021m6,kutuzova2021multimodal,huang2021multimodal,qian2019trinity}. - -We discuss the current strongest methods in image-to-image translation. -Taming Transformer \cite{DBLP:journals/corr/abs-2012-09841} is a vision transformer with the capability to both generate images and perform image-to-image translations. -Palette \cite{10.1145/3528233.3530757} is a unified diffusion-based image-to-image translation framework. -PITI \cite{2205.12952} is a diffusion-based image-to-image translation method that utilizes large-scale pretraining as a way to improve the quality of generated results. -In specific fields like sketch-guided diffusion, \cite{voynov2022sketch} is an optimization-based method that manipulates the diffusion process. -These methods are tested in the experiments. - -\section{Method} - -ControlNet is a neural network architecture that can enhance pretrained image diffusion models with task-specific conditions. -We introduce ControlNet's essential structure and the motivation of each part in Section~\ref{he}. -We detail the method to apply ControlNets to image diffusion models using the example of Stable Diffusion in Section~\ref{hei}. -We elaborate the learning objective and general training method in Section~\ref{train}, and then describe several approaches to improve the training in extreme cases such as training on a single laptop or using large-scale computing clusters in Section~\ref{train2}. -Finally, we include the details of several ControlNet implementations with different input conditions in Section~\ref{misc}. - -\subsection{ControlNet} -\label{he} - -ControlNet manipulates the input conditions of neural network blocks so as to further control the overall behavior of an entire neural network. Herein, a ``network block'' refers to a set of neural layers that are put together as a frequently used unit to build neural networks, \eg, a ``resnet'' block, a ``conv-bn-relu'' block, a multi-head attention block, a transformer block, \etc. 
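For concreteness, such a block can be sketched in PyTorch as shown below. This is a minimal illustration of the ``conv-bn-relu'' case only; the kernel size and channel arguments are arbitrary choices of ours, not taken from any particular model:
\begin{verbatim}
import torch.nn as nn

class ConvBNReLU(nn.Module):
    # a frequently used unit: convolution + batch normalization + ReLU
    def __init__(self, c_in, c_out):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
            nn.BatchNorm2d(c_out),
            nn.ReLU(inplace=True))

    def forward(self, x):
        return self.block(x)
\end{verbatim}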
- -Using a 2D feature as an example, given a feature map $\bm{x}\in\mathbb{R}^{h\times w \times c}$ with $\{h, w, c\}$ being height, width, and channel numbers, a neural network block $\mathcal{F}(\cdot;\Theta)$ with a set of parameters $\Theta$ transforms $\bm{x}$ into another feature map $\bm{y}$ with -\begin{equation} - \bm{y}=\mathcal{F}(\bm{x};\Theta) -\end{equation} -and this procedure is visualized in Fig.~\ref{fig:he}-(a). - -We lock all parameters in $\Theta$ and then clone it into a trainable copy $\Theta_\text{c}$. The copied $\Theta_\text{c}$ is trained with an external condition vector $\bm{c}$. In this paper, we call the original and new parameters the ``locked copy'' and the ``trainable copy''. The motivation for making such copies rather than directly training the original weights is to avoid overfitting when the dataset is small and to preserve the production-ready quality of large models learned from billions of images. - -The neural network blocks are connected by a unique type of convolution layer called ``zero convolution'', \ie, a $1\times 1$ convolution layer with both weight and bias initialized with zeros. We denote the zero convolution operation as $\mathcal{Z}(\cdot;\cdot)$ and use two instances of parameters $\{\Theta_\text{z1}, \Theta_\text{z2}\}$ to compose the ControlNet structure with -\begin{equation} - \label{key1} - \bm{y}_\text{c}=\mathcal{F}(\bm{x};\Theta)+\mathcal{Z}(\mathcal{F}(\bm{x}+\mathcal{Z}(\bm{c};\Theta_\text{z1});\Theta_\text{c});\Theta_\text{z2}) -\end{equation} -where $\bm{y}_\text{c}$ becomes the output of this neural network block, as visualized in Fig.~\ref{fig:he}-(b). - -Because both the weight and bias of a zero convolution layer are initialized as zeros, in the first training step, we have -\begin{equation} - \label{key2} - \left\{ - \begin{aligned} - &\mathcal{Z}(\bm{c};\Theta_\text{z1}) = \bm{0} \\ - &\mathcal{F}(\bm{x}+\mathcal{Z}(\bm{c};\Theta_\text{z1});\Theta_\text{c})=\mathcal{F}(\bm{x};\Theta_\text{c}) = \mathcal{F}(\bm{x};\Theta)\\ - &\mathcal{Z}(\mathcal{F}(\bm{x}+\mathcal{Z}(\bm{c};\Theta_\text{z1});\Theta_\text{c});\Theta_\text{z2}) =\mathcal{Z}(\mathcal{F}(\bm{x};\Theta_\text{c});\Theta_\text{z2}) = \bm{0} - \end{aligned} - \right. -\end{equation} -and this can be converted to -\begin{equation} - \label{key3} - \bm{y}_\text{c} = \bm{y} -\end{equation} -and Eq.~(\ref{key1},\ref{key2},\ref{key3}) indicate that, in the first training step, all the inputs and outputs of both the trainable and locked copies of the neural network blocks are consistent with what they would be as if the ControlNet did not exist. In other words, when a ControlNet is applied to some neural network blocks, before any optimization, it will not cause any influence on the deep neural features. The capability, functionality, and result quality of any neural network block are perfectly preserved, and any further optimization will become as fast as fine-tuning (compared to training those layers from scratch). - -\begin{figure} - \centering - \includegraphics[width=0.825\linewidth]{./imgs/he.pdf} - \caption{ControlNet. We show the approach to apply a ControlNet to an arbitrary neural network block. The $x, y$ are deep features in neural networks. The ``+'' refers to feature addition. The ``c'' is an extra condition that we want to add to the neural network. The ``zero convolution'' is a $1\times 1$ convolution layer with both weight and bias initialized as zeros.} - \label{fig:he} -\end{figure}
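For concreteness, the structure of Eq.~(\ref{key1}) can be sketched in PyTorch as shown below. This is a minimal illustration rather than the official implementation: it assumes the block preserves the feature shape and that the condition $\bm{c}$ has already been projected to the same number of channels as $\bm{x}$, and all names are ours:
\begin{verbatim}
import copy
import torch.nn as nn

def zero_conv(channels):
    # 1x1 convolution with weight and bias initialized to zeros
    conv = nn.Conv2d(channels, channels, kernel_size=1)
    nn.init.zeros_(conv.weight)
    nn.init.zeros_(conv.bias)
    return conv

class ControlledBlock(nn.Module):
    def __init__(self, block, channels):
        super().__init__()
        self.trainable = copy.deepcopy(block)  # trainable copy, Theta_c
        self.locked = block                    # locked copy, Theta
        for p in self.locked.parameters():
            p.requires_grad_(False)            # freeze the locked copy
        self.zero1 = zero_conv(channels)       # Z(.; Theta_z1)
        self.zero2 = zero_conv(channels)       # Z(.; Theta_z2)

    def forward(self, x, c):
        # y_c = F(x; Theta) + Z(F(x + Z(c; Theta_z1); Theta_c); Theta_z2)
        return self.locked(x) + self.zero2(self.trainable(x + self.zero1(c)))
\end{verbatim}
At initialization both zero convolutions output zeros, so the module returns exactly the locked block's output, matching Eq.~(\ref{key3}).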
- -We briefly deduce the gradient calculation of a zero convolution layer. Considering a $1\times 1$ convolution layer with weight $\bm{W}$ and bias $\bm{B}$, at any spatial position $p$ and channel-wise index $i$, given an input map $\bm{I}\in\mathbb{R}^{h\times w \times c}$, the forward pass can be written as -\begin{equation} - \mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})_{p,i}=\bm{B}_i + \sum_{j}^c \bm{I}_{p,j} \bm{W}_{i,j} -\end{equation} -and since the zero convolution has $\bm{W}=\bm{0}$ and $\bm{B}=\bm{0}$ (before optimization), for anywhere with $\bm{I}_{p,j}$ being non-zero, the gradients become -\begin{equation} - \left\{ - \begin{aligned} - &\frac{\partial\mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})_{p,i}}{\partial\bm{B}_{i}} = 1 \\ - &\frac{\partial\mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})_{p,i}}{\partial\bm{I}_{p,j}} = \bm{W}_{i,j} = 0 \\ - &\frac{\partial\mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})_{p,i}}{\partial\bm{W}_{i,j}} = \bm{I}_{p,j} \neq \bm{0} \\ - \end{aligned} - \right. -\end{equation} -and we can see that although a zero convolution can cause the gradient on the feature term $\bm{I}$ to become zero, the weight's and bias's gradients are not influenced. As long as the feature $\bm{I}$ is non-zero, the weight $\bm{W}$ will be optimized into a non-zero matrix in the first gradient descent iteration. Notably, in our case, the feature term is input data or condition vectors sampled from datasets, which naturally ensures non-zero $\bm{I}$. For example, considering a classic gradient descent with an overall loss function $\mathcal{L}$ and a learning rate $\beta_{\text{lr}}\neq 0$, if the ``outside'' gradient ${\partial\mathcal{L}}/{\partial\mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})}$ is not zero, we will have -\begin{equation} -\bm{W}^* = \bm{W} - \beta_{\text{lr}} \cdot \frac{\partial\mathcal{L}}{\partial\mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})} \odot \frac{\partial\mathcal{Z}(\bm{I};\{\bm{W},\bm{B}\})}{\partial\bm{W}} \neq \bm{0} -\end{equation} -where $\bm{W}^*$ is the weight after one gradient descent step; $\odot$ is the Hadamard product. After this step, we will have -\begin{equation} -\frac{\partial\mathcal{Z}(\bm{I};\{\bm{W}^*,\bm{B}\})_{p,i}}{\partial\bm{I}_{p,j}} = \bm{W}^*_{i,j} \neq \bm{0} -\end{equation} - -where non-zero gradients are obtained and the neural network begins to learn. In this way, the zero convolutions become a unique type of connection layer that progressively grows from zeros to optimized parameters in a learned way. - -\begin{figure} - \centering - \includegraphics[width=\linewidth]{./imgs/sd.pdf} - \caption{ControlNet in Stable Diffusion. The gray blocks are the structure of Stable Diffusion 1.5 (or SD V2.1, since they use the same U-Net architecture), while the blue blocks are ControlNet.} - \label{fig:hesd} -\end{figure} - -\subsection{ControlNet in Image Diffusion Model} -\label{hei} - -We use Stable Diffusion \cite{rombach2021highresolution} as an example to introduce the method to use ControlNet to control a large diffusion model with task-specific conditions. - -Stable Diffusion is a large text-to-image diffusion model trained on billions of images. The model is essentially a U-net with an encoder, a middle block, and a skip-connected decoder. Both the encoder and decoder have 12 blocks, and the full model has 25 blocks (including the middle block). In those blocks, 8 blocks are down-sampling or up-sampling convolution layers, and 17 blocks are main blocks, each of which contains four resnet layers and two Vision Transformers (ViTs). Each ViT contains several cross-attention and/or self-attention mechanisms. 
The texts are encoded by OpenAI CLIP, and diffusion time steps are encoded by positional encoding. - -Stable Diffusion uses a pre-processing method similar to VQ-GAN~\cite{DBLP:journals/corr/abs-2012-09841} to convert the entire dataset of $512\times 512$ images into smaller $64\times 64$ ``latent images'' for stabilized training. This requires ControlNets to convert image-based conditions to the $64\times 64$ feature space to match the convolution size. We use a tiny network $\mathcal{E}(\cdot)$ of four convolution layers with $4\times 4$ kernels and $2 \times 2$ strides (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full model) to encode image-space conditions $\bm{c}_\text{i}$ into feature maps with -\begin{equation} - \bm{c}_\text{f}=\mathcal{E}(\bm{c}_\text{i}) -\end{equation} -where $\bm{c}_\text{f}$ is the converted feature map. This network converts $512\times 512$ image conditions to $64\times 64$ feature maps. - -As shown in Fig.~\ref{fig:hesd}, we use ControlNet to control each level of the U-net. Note that the way we connect the ControlNet is computationally efficient: since the original weights are locked, no gradient computation on the original encoder is needed for training. This can speed up training and save GPU memory, as half of the gradient computation on the original model can be avoided. Training a Stable Diffusion model with ControlNet requires only about 23\% more GPU memory and 34\% more time in each training iteration (as tested on a single Nvidia A100 PCIE 40G). - -To be specific, we use ControlNet to create the trainable copy of the 12 encoding blocks and 1 middle block of Stable Diffusion. The 12 blocks are in 4 resolutions ($64\times64,32\times32,16\times16,8\times8$) with each having 3 blocks. The outputs are added to the 12 skip-connections and 1 middle block of the U-net. Since SD is a typical U-net structure, this ControlNet architecture is likely to be usable in other diffusion models. - -\subsection{Training} -\label{train} - -Image diffusion models learn to progressively denoise images to generate samples. The denoising can happen in pixel space or in a ``latent'' space encoded from training data. Stable Diffusion uses latent images as the training domain. In this context, the terminology ``image'', ``pixel'', and ``denoising'' all refer to corresponding concepts in the ``perceptual latent space'' \cite{rombach2021highresolution}. - -Given an image $\bm{z}_0$, diffusion algorithms progressively add noise to the image and produce a noisy image $\bm{z}_t$, with $t$ denoting the number of times noise is added. When $t$ is large enough, the image approximates pure noise. Given a set of conditions including the time step $t$, text prompts $\bm{c}_t$, as well as a task-specific condition $\bm{c}_\text{f}$, image diffusion algorithms learn a network $\epsilon_\theta$ to predict the noise added to the noisy image $\bm{z}_t$ with -\begin{equation} - \mathcal{L} = \mathbb{E}_{\bm{z}_0, t, \bm{c}_t, \bm{c}_\text{f}, \epsilon \sim \mathcal{N}(0, 1) }\Big[ \Vert \epsilon - \epsilon_\theta(\bm{z}_{t}, t, \bm{c}_t, \bm{c}_\text{f}) \Vert_{2}^{2}\Big] - \label{eq:loss} -\end{equation} -where $\mathcal{L}$ is the overall learning objective of the entire diffusion model. This learning objective can be directly used in fine-tuning diffusion models.
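A minimal PyTorch sketch of one training step under this objective is given below; the noise-schedule helper and the exact model signature are assumptions standing in for the usual diffusion utilities, not the official code:
\begin{verbatim}
import torch
import torch.nn.functional as F

def diffusion_loss(eps_model, z0, c_t, c_f, schedule):
    # sample a time step and Gaussian noise, form the noisy
    # latent z_t, and regress the added noise (the loss above)
    t = torch.randint(0, schedule.num_steps, (z0.shape[0],),
                      device=z0.device)
    eps = torch.randn_like(z0)
    z_t = schedule.add_noise(z0, eps, t)  # assumed helper
    eps_pred = eps_model(z_t, t, c_t, c_f)
    return F.mse_loss(eps_pred, eps)
\end{verbatim}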
This facilitates ControlNet's capability to recognize semantic content from input condition maps, \eg, Canny edge maps or human scribbles, \etc. This is mainly because when the prompt is not visible to the SD model, the encoder tends to learn more semantics from the input control maps as a replacement for the prompt.
-
-\subsection{Improved Training}
-\label{train2}
-
-We discuss several strategies to improve the training of ControlNets, especially in extreme cases where the computation device is very limited (\eg, a laptop) or very powerful (\eg, a computation cluster with large-scale GPUs available). If any of these strategies are used, we mention it in the experimental settings.
-
-\paragraph{Small-Scale Training}
-
-When the computation device is limited, we find that partially breaking the connection between a ControlNet and the Stable Diffusion can accelerate convergence. By default, we connect the ControlNet to the ``SD Middle Block'' and ``SD Decoder Blocks 1,2,3,4'' as shown in Fig.~\ref{fig:hesd}. We find that disconnecting the links to decoder blocks 1,2,3,4 and only connecting the middle block can improve the training speed by about a factor of 1.6 (tested on an RTX 3070 Ti laptop GPU). When the model shows a reasonable association between results and conditions, the disconnected links can be reconnected in continued training to facilitate accurate control.
-
-\paragraph{Large-Scale Training}
-
-Here, large-scale training refers to the situation where both powerful computation clusters (at least 8 Nvidia A100 80G or equivalent) and a large dataset (at least 1 million training image pairs) are available. This usually applies to tasks where data is easily available, \eg, edge maps detected by Canny. In this case, since the risk of over-fitting is relatively low, we can first train ControlNets for a large enough number of iterations (usually more than 50k steps), and then unlock all weights of the Stable Diffusion and jointly train the entire model as a whole. This leads to a more problem-specific model.
-
-\subsection{Implementation}
-\label{misc}
-
-We present several implementations of ControlNets with different image-based conditions to control large diffusion models in various ways.
-
-\paragraph{Canny Edge} We use the Canny edge detector \cite{4767851} (with random thresholds) to obtain 3M edge-image-caption pairs from the internet. The model is trained for 600 GPU-hours on Nvidia A100 80G. The base model is Stable Diffusion 1.5. (See also Fig.~\ref{fig:edge2img}.)
-
-\paragraph{Canny Edge (Alter)} We rank the image resolutions of the above Canny edge dataset and sample several subsets with 1k, 10k, 50k, and 500k samples. We use the same experimental setting to test the effect of dataset scale. (See also Fig.~\ref{fig:ex2}.)
-
-\paragraph{Hough Line} We use a learning-based deep Hough transform \cite{gu2021realtime} to detect straight lines in Places2 \cite{zhou2017places}, and then use BLIP \cite{li2022blip} to generate captions. We obtain 600k edge-image-caption pairs. We use the above Canny model as a starting checkpoint and train the model for 150 GPU-hours on Nvidia A100 80G. (See also Fig.~\ref{fig:hough}.)
-
-\paragraph{HED Boundary} We use HED boundary detection \cite{7410521} to obtain 3M edge-image-caption pairs from the internet. The model is trained for 300 GPU-hours on Nvidia A100 80G. The base model is Stable Diffusion 1.5. (See also Fig.~\ref{fig:hed}.)
-
-\paragraph{User Sketching} We synthesize human scribbles from images using a combination of HED boundary detection \cite{7410521} and a set of strong data augmentations (random thresholds, randomly masking out a random percentage of scribbles, random morphological transformations, and random non-maximum suppression). We obtain 500k scribble-image-caption pairs from the internet. We use the above Canny model as a starting checkpoint and train the model for 150 GPU-hours on Nvidia A100 80G. Note that we also tried a more ``human-like'' synthesizing method \cite{2211.17256}, but it is much slower than simple HED and we did not notice visible improvements. (See also Fig.~\ref{fig:scribble}.)
-
-\paragraph{Human Pose (Openpifpaf)} We use a learning-based pose estimation method \cite{kreiss2021openpifpaf} to ``find'' humans on the internet using a simple rule: an image containing a human must have at least 30\% of the whole-body keypoints detected. We obtain 80k pose-image-caption pairs. Note that we directly use the visualized pose images with human skeletons as the training condition. The model is trained for 400 GPU-hours on Nvidia RTX 3090 Ti. The base model is Stable Diffusion 2.1. (See also Fig.~\ref{fig:key}.)
-
-\paragraph{Human Pose (Openpose)} We use a learning-based pose estimation method \cite{8765346} to find humans on the internet using the same rule as in the above Openpifpaf setting. We obtain 200k pose-image-caption pairs. Note that we directly use the visualized pose images with human skeletons as the training condition. The model is trained for 300 GPU-hours on Nvidia A100 80G. Other settings are the same as in the above Openpifpaf setting. (See also Fig.~\ref{fig:key2}.)
-
-\paragraph{Semantic Segmentation (COCO)} We use the COCO-Stuff dataset \cite{1612.03716} captioned by BLIP \cite{li2022blip}. We obtain 164K segmentation-image-caption pairs. The model is trained for 400 GPU-hours on Nvidia RTX 3090 Ti. The base model is Stable Diffusion 1.5. (See also Fig.~\ref{fig:coco}.)
-
-\paragraph{Semantic Segmentation (ADE20K)} We use the ADE20K dataset \cite{8100027} captioned by BLIP \cite{li2022blip}. We obtain 164K segmentation-image-caption pairs. The model is trained for 200 GPU-hours on Nvidia A100 80G. The base model is Stable Diffusion 1.5. (See also Fig.~\ref{fig:ade}.)
-
-\paragraph{Depth (large-scale)} We use MiDaS \cite{DBLP:journals/corr/abs-1907-01341} to obtain 3M depth-image-caption pairs from the internet. The model is trained for 500 GPU-hours on Nvidia A100 80G. The base model is Stable Diffusion 1.5. (See also Fig.~\ref{fig:cc3},\ref{fig:cc1},\ref{fig:cc2}.)
-
-\paragraph{Depth (small-scale)} We rank the image resolutions of the above depth dataset to sample a subset of 200k pairs. This set is used in experiments on the minimal dataset size required to train the model. (See also Fig.~\ref{fig:depth}.)
-
-\paragraph{Normal Maps} We use the DIODE dataset \cite{diode_dataset} captioned by BLIP \cite{li2022blip}. We obtain 25,452 normal-image-caption pairs. The model is trained for 100 GPU-hours on Nvidia A100 80G. The base model is Stable Diffusion 1.5. (See also Fig.~\ref{fig:normal}.)
-
-\paragraph{Normal Maps (extended)} We use MiDaS \cite{DBLP:journals/corr/abs-1907-01341} to compute depth maps and then perform normal-from-distance to obtain ``coarse'' normal maps. We use the above Normal model as a starting checkpoint and train the model for 200 GPU-hours on Nvidia A100 80G. (See also Fig.~\ref{fig:cc3},\ref{fig:cc1},\ref{fig:cc2}.)
-
-\paragraph{Cartoon Line Drawing} We use a cartoon line drawing extraction method \cite{Anime2Sketch} to extract line drawings from cartoon illustrations from the internet. Sorting the cartoon images by popularity, we obtain the top 1M lineart-cartoon-caption pairs. The model is trained for 300 GPU-hours on Nvidia A100 80G. The base model is Waifu Diffusion (an interesting community-developed variation of Stable Diffusion \cite{waifu}). (See also Fig.~\ref{fig:anime}.)
-
-\section{Experiment}
-
-\subsection{Experimental Settings}
-
-All results in this paper are achieved with a CFG scale of 9.0. The sampler is DDIM. We use 20 steps by default. We use four types of prompts to test the models:
-
-(1) No prompt: We use the empty string ``'' as the prompt.
-
-(2) Default prompt: Since Stable Diffusion is essentially trained with prompts, the empty string might be an unexpected input for the model, and SD tends to generate random texture maps if no prompt is provided. A better setting is to use meaningless prompts like ``an image'', ``a nice image'', or ``a professional image''. In our setting, we use ``a professional, detailed, high-quality image'' as the default prompt.
-
-(3) Automatic prompt: In order to test the maximized quality of a state-of-the-art fully automatic pipeline, we also use automatic image captioning methods (\eg, BLIP \cite{li2022blip}) to generate prompts from the results obtained in the ``default prompt'' mode. We then use the generated prompts to diffuse again.
-
-(4) User prompt: Users give the prompts.
-
-\subsection{Qualitative Results}
-
-We present qualitative results in Fig.~\ref{fig:edge2img}, \ref{fig:hough}, \ref{fig:scribble}, \ref{fig:hed}, \ref{fig:key}, \ref{fig:key2}, \ref{fig:mj}, \ref{fig:ade}, \ref{fig:coco}, \ref{fig:normal}, \ref{fig:depth}, and \ref{fig:anime}.
-
-\subsection{Ablation Study}
-
-Fig.~\ref{fig:abla} shows a comparison to a model trained without ControlNet. That model is trained with exactly the same method as Stability's Depth-to-Image model (adding a channel to SD and continuing the training).
-
-Fig.~\ref{fig:ex1} shows the training process. We would like to point out a ``sudden convergence phenomenon'', where the model suddenly becomes able to follow the input conditions. This can happen between 5,000 and 10,000 training steps when using a learning rate of 1e-5.
-
-Fig.~\ref{fig:ex2} shows Canny-edge-based ControlNets trained at different dataset scales.
-
-\subsection{Comparison to Previous Methods}
-
-Fig.~\ref{fig:depth} shows the comparison to Stability's Depth-to-Image model.
-
-Fig.~\ref{fig:piti} shows a comparison to PITI \cite{2205.12952}.
-
-Fig.~\ref{fig:scr} shows a comparison to sketch-guided diffusion \cite{voynov2022sketch}.
-
-Fig.~\ref{fig:tam} shows a comparison to Taming Transformers \cite{DBLP:journals/corr/abs-2012-09841}.
-
-\subsection{Comparison of Pre-trained Models}
-
-We show comparisons of different pre-trained models in Fig.~\ref{fig:cc3}, \ref{fig:cc1}, and \ref{fig:cc2}.
-
-\subsection{More Applications}
-
-Fig.~\ref{fig:edge_inpaint} shows that if the diffusion process is masked, the models can be used for pen-based image editing.
-
-Fig.~\ref{fig:mat} shows that when the object is relatively simple, the model can achieve relatively accurate control of the details.
-
-Fig.~\ref{fig:retrive} shows that when ControlNet is only applied to 50\% of the diffusion iterations, users can get results that do not follow the input shapes.
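-
-For reference, the listing below gives a minimal, self-contained PyTorch sketch of the zero-convolution mechanism analyzed in the method section. It is an illustrative reconstruction rather than our released implementation; the module and variable names are chosen for readability only, and the block wiring is a simplification of Fig.~\ref{fig:hesd}.
-
-\begin{verbatim}
-import copy
-import torch
-import torch.nn as nn
-
-def zero_conv(channels: int) -> nn.Conv2d:
-    # A 1x1 convolution initialized to zero: its output, and hence
-    # its additive contribution, is exactly zero before optimization.
-    conv = nn.Conv2d(channels, channels, kernel_size=1)
-    nn.init.zeros_(conv.weight)
-    nn.init.zeros_(conv.bias)
-    return conv
-
-class ControlledBlock(nn.Module):
-    # A locked block plus a trainable copy guarded by zero convolutions.
-    def __init__(self, block: nn.Module, channels: int):
-        super().__init__()
-        self.locked = block
-        for p in self.locked.parameters():
-            p.requires_grad = False      # original weights stay frozen
-        self.trainable_copy = copy.deepcopy(block)
-        self.zero_in = zero_conv(channels)
-        self.zero_out = zero_conv(channels)
-
-    def forward(self, x, cond):
-        # At initialization both zero convs output zero, so this reduces
-        # to the locked block and cannot harm the pretrained model.
-        h = self.trainable_copy(x + self.zero_in(cond))
-        return self.locked(x) + self.zero_out(h)
-
-# The weight gradient of a zero convolution equals the (non-zero) input
-# feature, so the layer moves away from zero in the first update:
-z = zero_conv(4)
-feat = torch.randn(1, 4, 8, 8, requires_grad=True)
-z(feat).sum().backward()
-assert z.weight.grad.abs().sum() > 0   # weights receive gradient
-assert feat.grad.abs().sum() == 0      # no gradient flows to x yet
-\end{verbatim}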
-
-\section{Limitation}
-
-Fig.~\ref{fig:lim1} shows that when the semantic interpretation is wrong, the model may have difficulty generating correct content.
-
-\section*{Appendix}
-
-Fig.~\ref{fig:appe} shows all source images in this paper for edge detection, pose extraction, \etc.
-
-\begin{figure}
-\vspace{-35pt}
-\centerfloat
-\begin{minipage}{1.4\linewidth}
-\includegraphics[width=\linewidth]{./imgs/edge2img.pdf}
-\caption{Controlling Stable Diffusion with Canny edges. The ``automatic prompts'' are generated by BLIP based on the default result images without using user prompts. See also the Appendix for the source images for Canny edge detection.}
-\label{fig:edge2img}
-\end{minipage}
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/hough.pdf}
-    \caption{Controlling Stable Diffusion with Hough lines (M-LSD). The ``automatic prompts'' are generated by BLIP based on the default result images without using user prompts. See also the Appendix for the source images for line detection.}
-    \label{fig:hough}
-    \end{minipage}
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/scribble.pdf}
-    \caption{Controlling Stable Diffusion with human scribbles. The ``automatic prompts'' are generated by BLIP based on the default result images without using user prompts. These scribbles are from \cite{voynov2022sketch}.}
-    \label{fig:scribble}
-    \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/hed.pdf}
-    \caption{Controlling Stable Diffusion with an HED boundary map. The ``automatic prompts'' are generated by BLIP based on the default result images without using user prompts. See also the Appendix for the source images for HED boundary detection.}
-    \label{fig:hed}
-    \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \vspace{-50pt}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/key.pdf}
-    \vspace{-5pt}
-    \caption{Controlling Stable Diffusion with Openpifpaf pose. See also the Appendix for the source images for Openpifpaf pose detection.}
-    \label{fig:key} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \vspace{-5pt}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/key2.pdf}
-    \vspace{-5pt}
-    \caption{Controlling Stable Diffusion with Openpose. See also the Appendix for the source images for Openpose pose detection.}
-    \label{fig:key2} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=\linewidth]{./imgs/mj.pdf}
-    \caption{Controlling Stable Diffusion with human pose to generate different poses for the same person (``Michael Jackson's concert''). Images are not cherry-picked. See also the Appendix for the source images for Openpose pose detection.}
-    \label{fig:mj}
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/ade.pdf}
-    \caption{Controlling Stable Diffusion with an ADE20K \cite{8100027} segmentation map. All results are achieved with the default prompt.
See also the Appendix for the source images for semantic segmentation map extraction.}
-    \label{fig:ade} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.75\linewidth]{./imgs/seg.pdf}
-    \caption{Controlling Stable Diffusion with a COCO-Stuff \cite{1612.03716} segmentation map.}
-    \label{fig:coco}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.75\linewidth]{./imgs/shape.pdf}
-    \caption{Controlling Stable Diffusion with a DIODE \cite{diode_dataset} normal map.}
-    \label{fig:normal}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.86\linewidth]{./imgs/depth.pdf}
-    \caption{Comparison of the Depth-based ControlNet and Stable Diffusion V2 Depth-to-Image. Note that in this experiment, the Depth-based ControlNet is trained at a relatively small scale to test the minimal required computation resources. We also provide stronger models trained at a relatively large scale.}
-    \label{fig:depth}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.8\linewidth]{./imgs/anime.pdf}
-    \caption{Controlling Stable Diffusion (anime weights) with cartoon line drawings. The line drawings are inputs and there are no corresponding ``ground truths''. This model may be used in artistic creation tools.}
-    \label{fig:anime}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.9\linewidth]{./imgs/edge_inpaint.pdf}
-    \caption{Masked diffusion. By diffusing images in masked areas, the Canny-edge model can be used to support pen-based editing of image content. Since all diffusion models naturally support masked diffusion, the other models are also likely to be usable for manipulating images.}
-    \label{fig:edge_inpaint}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.95\linewidth]{./imgs/com_seg.pdf}
-    \caption{Comparison to Pretraining-Image-to-Image (PITI) \cite{2205.12952}. Note that the semantic consistency of the ``wall'', ``paper'', and ``cup'' is difficult to handle in this task.}
-    \label{fig:piti}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.6\linewidth]{./imgs/com_scr.pdf}
-    \caption{Comparison to sketch-guided diffusion \cite{voynov2022sketch}. This input is one of the most challenging cases in their paper.}
-    \label{fig:scr}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.68\linewidth]{./imgs/com_tam.pdf}
-    \caption{Comparison to Taming Transformers \cite{DBLP:journals/corr/abs-2012-09841}. This input is one of the most challenging cases in their paper.}
-    \label{fig:tam}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=\linewidth]{./imgs/abla.pdf}
-    \caption{Ablative study. We compare the ControlNet structure with the standard method that Stable Diffusion uses by default to add conditions to diffusion models.}
-    \label{fig:abla}
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.5\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/ex1.pdf}
-    \caption{The sudden convergence phenomenon. Because we use zero convolutions, the neural network always predicts high-quality images during the entire training. At a certain training step, the model suddenly learns to adapt to the input conditions. We call this the ``sudden convergence phenomenon''.}
-    \label{fig:ex1} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.5\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/ex2.pdf}
-    \caption{Training at different scales.
We show Canny-edge-based ControlNets trained under different experimental settings with various dataset sizes.}
-    \label{fig:ex2} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/cc3.pdf}
-    \caption{Comparison of six detection types and the corresponding results. The scribble map is extracted from the HED map with morphological transforms.}
-    \label{fig:cc3} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/cc1.pdf}
-    \caption{(Continued) Comparison of six detection types and the corresponding results. The scribble map is extracted from the HED map with morphological transforms.}
-    \label{fig:cc1} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centerfloat
-    \begin{minipage}{1.4\linewidth}
-    \includegraphics[width=\linewidth]{./imgs/cc2.pdf}
-    \caption{(Continued) Comparison of six detection types and the corresponding results. The scribble map is extracted from the HED map with morphological transforms.}
-    \label{fig:cc2} \end{minipage}
-
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.85\linewidth]{./imgs/mat.pdf}
-    \caption{Example of a simple object. When the diffusion content is relatively simple, the model can achieve very accurate control and manipulate the content's materials.}
-    \label{fig:mat}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.85\linewidth]{./imgs/retrive.pdf}
-    \caption{Coarse-level control. When users do not want their input shape to be preserved in the images, we can simply replace the last 50\% of the diffusion iterations with standard SD without ControlNet. The resulting effect is similar to image retrieval, except that the images are generated.}
-    \label{fig:retrive}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=0.75\linewidth]{./imgs/lim1.pdf}
-    \caption{Limitation. When the semantics of the input image are mistakenly recognized, the negative effects seem difficult to eliminate, even if a strong prompt is provided.}
-    \label{fig:lim1}
-\end{figure}
-
-\begin{figure}
-    \centering
-    \includegraphics[width=\linewidth]{./imgs/appe.pdf}
-    \caption{Appendix: all original source images for edge detection, semantic segmentation, pose extraction, \etc.
Note that some images may have copyrights.} - \label{fig:appe} -\end{figure} - -\bibliographystyle{abbrvnat} -\bibliography{diff} - -\end{document} diff --git a/spaces/awaawawawa/iurf7irfuyytruyyugb/style.css b/spaces/awaawawawa/iurf7irfuyytruyyugb/style.css deleted file mode 100644 index 5a8ab58374d428be77f5cf2b55260d88a09d0cd7..0000000000000000000000000000000000000000 --- a/spaces/awaawawawa/iurf7irfuyytruyyugb/style.css +++ /dev/null @@ -1,518 +0,0 @@ -.container { - max-width: 100%; -} - -#txt2img_token_counter { - height: 0px; -} - -#img2img_token_counter { - height: 0px; -} - -#sh{ - min-width: 2em; - min-height: 2em; - max-width: 2em; - max-height: 2em; - flex-grow: 0; - padding-left: 0.25em; - padding-right: 0.25em; - margin: 0.1em 0; - opacity: 0%; - cursor: default; -} - -.output-html p {margin: 0 0.5em;} - -.row > *, -.row > .gr-form > * { - min-width: min(120px, 100%); - flex: 1 1 0%; -} - -.performance { - font-size: 0.85em; - color: #444; - display: flex; - justify-content: space-between; - white-space: nowrap; -} - -.performance .time { - margin-right: 0; -} - -.performance .vram { - margin-left: 0; - text-align: right; -} - -#txt2img_generate, #img2img_generate { - min-height: 4.5em; -} - -@media screen and (min-width: 2500px) { - #txt2img_gallery, #img2img_gallery { - min-height: 768px; - } -} - -#txt2img_gallery img, #img2img_gallery img{ - object-fit: scale-down; -} - -.justify-center.overflow-x-scroll { - justify-content: left; -} - -.justify-center.overflow-x-scroll button:first-of-type { - margin-left: auto; -} - -.justify-center.overflow-x-scroll button:last-of-type { - margin-right: auto; -} - -#random_seed, #random_subseed, #reuse_seed, #reuse_subseed, #open_folder{ - min-width: auto; - flex-grow: 0; - padding-left: 0.25em; - padding-right: 0.25em; -} - -#hidden_element{ - display: none; -} - -#seed_row, #subseed_row{ - gap: 0.5rem; -} - -#subseed_show_box{ - min-width: auto; - flex-grow: 0; -} - -#subseed_show_box > div{ - border: 0; - height: 100%; -} - -#subseed_show{ - min-width: auto; - flex-grow: 0; - padding: 0; -} - -#subseed_show label{ - height: 100%; -} - -#roll_col{ - min-width: unset !important; - flex-grow: 0 !important; - padding: 0.4em 0; -} - -#roll, #paste, #style_create, #style_apply{ - min-width: 2em; - min-height: 2em; - max-width: 2em; - max-height: 2em; - flex-grow: 0; - padding-left: 0.25em; - padding-right: 0.25em; - margin: 0.1em 0; -} - -#interrogate_col{ - min-width: 0 !important; - max-width: 8em !important; -} -#interrogate, #deepbooru{ - margin: 0em 0.25em 0.9em 0.25em; - min-width: 8em; - max-width: 8em; -} - -#style_pos_col, #style_neg_col{ - min-width: 8em !important; -} - -#txt2img_style_index, #txt2img_style2_index, #img2img_style_index, #img2img_style2_index{ - margin-top: 1em; -} - -.gr-form{ - background: transparent; -} - -.my-4{ - margin-top: 0; - margin-bottom: 0; -} - -#toprow div{ - border: none; - gap: 0; - background: transparent; -} - -#resize_mode{ - flex: 1.5; -} - -button{ - align-self: stretch !important; -} - -.overflow-hidden, .gr-panel{ - overflow: visible !important; -} - -#x_type, #y_type{ - max-width: 10em; -} - -#txt2img_preview, #img2img_preview, #ti_preview{ - position: absolute; - width: 320px; - left: 0; - right: 0; - margin-left: auto; - margin-right: auto; - margin-top: 34px; - z-index: 100; - border: none; - border-top-left-radius: 0; - border-top-right-radius: 0; -} - -@media screen and (min-width: 768px) { - #txt2img_preview, #img2img_preview, #ti_preview { - position: absolute; - } -} - -@media 
screen and (max-width: 767px) { - #txt2img_preview, #img2img_preview, #ti_preview { - position: relative; - } -} - -#txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0, #ti_preview div.left-0.top-0{ - display: none; -} - -fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ - position: absolute; - top: -0.6em; - line-height: 1.2em; - padding: 0 0.5em; - margin: 0; - - background-color: white; - border-top: 1px solid #eee; - border-left: 1px solid #eee; - border-right: 1px solid #eee; - - z-index: 300; -} - -.dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ - background-color: rgb(31, 41, 55); - border-top: 1px solid rgb(55 65 81); - border-left: 1px solid rgb(55 65 81); - border-right: 1px solid rgb(55 65 81); -} - -#settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{ - position: relative; - border: none; - margin-right: 8em; -} - -.gr-panel div.flex-col div.justify-between label span{ - margin: 0; -} - -#settings .gr-panel div.flex-col div.justify-between div{ - position: relative; - z-index: 200; -} - -input[type="range"]{ - margin: 0.5em 0 -0.3em 0; -} - -#txt2img_sampling label{ - padding-left: 0.6em; - padding-right: 0.6em; -} - -#mask_bug_info { - text-align: center; - display: block; - margin-top: -0.75em; - margin-bottom: -0.75em; -} - -#txt2img_negative_prompt, #img2img_negative_prompt{ -} - -#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{ - position: absolute; - z-index: 1000; - right: 0; - padding-left: 5px; - padding-right: 5px; - display: block; -} - -#txt2img_progress_row, #img2img_progress_row{ - margin-bottom: 10px; - margin-top: -18px; -} - -.progressDiv{ - width: 100%; - height: 20px; - background: #b4c0cc; - border-radius: 8px; -} - -.dark .progressDiv{ - background: #424c5b; -} - -.progressDiv .progress{ - width: 0%; - height: 20px; - background: #0060df; - color: white; - font-weight: bold; - line-height: 20px; - padding: 0 8px 0 0; - text-align: right; - border-radius: 8px; -} - -#lightboxModal{ - display: none; - position: fixed; - z-index: 1001; - padding-top: 100px; - left: 0; - top: 0; - width: 100%; - height: 100%; - overflow: auto; - background-color: rgba(20, 20, 20, 0.95); - user-select: none; - -webkit-user-select: none; -} - -.modalControls { - display: grid; - grid-template-columns: 32px auto 1fr 32px; - grid-template-areas: "zoom tile space close"; - position: absolute; - top: 0; - left: 0; - right: 0; - padding: 16px; - gap: 16px; - background-color: rgba(0,0,0,0.2); -} - -.modalClose { - grid-area: close; -} - -.modalZoom { - grid-area: zoom; -} - -.modalTileImage { - grid-area: tile; -} - -.modalClose, -.modalZoom, -.modalTileImage { - color: white; - font-size: 35px; - font-weight: bold; - cursor: pointer; -} - -.modalClose:hover, -.modalClose:focus, -.modalZoom:hover, -.modalZoom:focus { - color: #999; - text-decoration: none; - cursor: pointer; -} - -#modalImage { - display: block; - margin-left: auto; - margin-right: auto; - margin-top: auto; - width: auto; -} - -.modalImageFullscreen { - object-fit: contain; - height: 90%; -} - -.modalPrev, -.modalNext { - cursor: pointer; - position: absolute; - top: 50%; - width: auto; - padding: 16px; - margin-top: -50px; - color: white; - font-weight: bold; - font-size: 20px; - transition: 0.6s ease; - border-radius: 0 3px 3px 0; - user-select: none; - -webkit-user-select: none; -} - -.modalNext { - right: 0; - border-radius: 3px 0 0 3px; 
-} - -.modalPrev:hover, -.modalNext:hover { - background-color: rgba(0, 0, 0, 0.8); -} - -#imageARPreview{ - position:absolute; - top:0px; - left:0px; - border:2px solid red; - background:rgba(255, 0, 0, 0.3); - z-index: 900; - pointer-events:none; - display:none -} - -#txt2img_interrupt, #img2img_interrupt{ - position: absolute; - width: 50%; - height: 72px; - background: #b4c0cc; - border-radius: 0px; - display: none; -} - -#txt2img_skip, #img2img_skip{ - position: absolute; - width: 50%; - right: 0px; - height: 72px; - background: #b4c0cc; - border-radius: 0px; - display: none; -} - -.red { - color: red; -} - -.gallery-item { - --tw-bg-opacity: 0 !important; -} - -#context-menu{ - z-index:9999; - position:absolute; - display:block; - padding:0px 0; - border:2px solid #a55000; - border-radius:8px; - box-shadow:1px 1px 2px #CE6400; - width: 200px; -} - -.context-menu-items{ - list-style: none; - margin: 0; - padding: 0; -} - -.context-menu-items a{ - display:block; - padding:5px; - cursor:pointer; -} - -.context-menu-items a:hover{ - background: #a55000; -} - -#quicksettings { - gap: 0.4em; -} - -#quicksettings > div{ - border: none; - background: none; - flex: unset; - gap: 0.5em; -} - -#quicksettings > div > div{ - max-width: 32em; - min-width: 24em; - padding: 0; -} - -#refresh_sd_model_checkpoint, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name{ - max-width: 2.5em; - min-width: 2.5em; - height: 2.4em; -} - - -canvas[key="mask"] { - z-index: 12 !important; - filter: invert(); - mix-blend-mode: multiply; - pointer-events: none; -} - - -/* gradio 3.4.1 stuff for editable scrollbar values */ -.gr-box > div > div > input.gr-text-input{ - position: absolute; - right: 0.5em; - top: -0.6em; - z-index: 200; - width: 8em; -} -#quicksettings .gr-box > div > div > input.gr-text-input { - top: -1.12em; -} - -.row.gr-compact{ - overflow: visible; -} - -#img2img_image, #img2img_image > .h-60, #img2img_image > .h-60 > div, #img2img_image > .h-60 > div > img, -img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h-60 > div > img -{ - height: 480px !important; - max-height: 480px !important; - min-height: 480px !important; -} diff --git a/spaces/awacke1/Gamification-Grabble/app.py b/spaces/awacke1/Gamification-Grabble/app.py deleted file mode 100644 index 67d024441dcab77300921c83127c140922ea53a5..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Gamification-Grabble/app.py +++ /dev/null @@ -1,115 +0,0 @@ - -import numpy as np -import streamlit as st - -# Define game constants -LETTER_VALUES = {'A': 1, 'B': 3, 'C': 3, 'D': 2, 'E': 1, 'F': 4, 'G': 2, 'H': 4, 'I': 1, 'J': 8, 'K': 5, 'L': 1, 'M': 3, 'N': 1, 'O': 1, 'P': 3, 'Q': 10, 'R': 1, 'S': 1, 'T': 1, 'U': 1, 'V': 4, 'W': 4, 'X': 8, 'Y': 4, 'Z': 10, ' ': 0} -TILE_DISTRIBUTION = {'A': 9, 'B': 2, 'C': 2, 'D': 4, 'E': 12, 'F': 2, 'G': 3, 'H': 2, 'I': 9, 'J': 1, 'K': 1, 'L': 4, 'M': 2, 'N': 6, 'O': 8, 'P': 2, 'Q': 1, 'R': 6, 'S': 4, 'T': 6, 'U': 4, 'V': 2, 'W': 2, 'X': 1, 'Y': 2, 'Z': 1, ' ': 2} - -def get_tile(): - """Get a random tile from the bag.""" - tiles = [] - for tile, count in TILE_DISTRIBUTION.items(): - tiles.extend([tile] * count) - return np.random.choice(tiles) - -def get_word_score(word, letter_values): - """Calculate the score for a given word.""" - score = 0 - for letter in word: - score += letter_values[letter] - return score - -def display_board(board): - """Display the game board.""" - headers = [''] + [str(i+1) for i in range(len(board))] - data = 
[headers]
-    for i in range(len(board)):
-        # Row label A..O, then one cell per column; 0 marks an empty square.
-        row = [chr(i + 65)]
-        for j in range(len(board[i])):
-            cell = board[i][j]
-            row.append(chr(cell) if cell else '')
-        data.append(row)
-    st.table(data=data)
-
-
-# Streamlit reruns this script on every widget interaction, so the board and
-# rack must live in session state or they would reset on every click.
-if 'board' not in st.session_state:
-    st.session_state.board = np.zeros((15, 15), dtype=int)
-    st.session_state.rack = [get_tile() for _ in range(7)]
-board = st.session_state.board
-rack = st.session_state.rack
-
-# Display the board and rack
-st.write('Scrabble Game')
-display_board(board)
-st.write('Rack:', rack)
-
-# Allow the user to select a tile and place it on the board
-letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' ']
-tile = st.selectbox('Select a tile:', letters)
-
-expander = st.expander('Select a position on the board')
-with expander:
-    row = st.number_input('Row:', min_value=1, max_value=15, value=1)
-    col = st.number_input('Column:', min_value=1, max_value=15, value=1)
-
-if st.button('Place tile'):
-    # Check if the selected tile is in the rack
-    if tile in rack:
-        # Place the tile on the board. The board is an integer array, so we
-        # store the character code rather than the one-letter string itself.
-        row -= 1
-        col -= 1
-        board[row][col] = ord(tile)
-        rack.remove(tile)
-
-        # Naive scoring: read off each horizontal run of tiles and score it.
-        word = ''
-        for i in range(len(board)):
-            for j in range(len(board[i])):
-                if board[i][j] != 0:
-                    word += chr(board[i][j])
-                elif word:
-                    if len(word) > 1:
-                        score = get_word_score(word, LETTER_VALUES)
-                        st.write(word, ':', score, 'points')
-                    word = ''
-
-        # Refill the rack with new tiles
-        while len(rack) < 7 and len(TILE_DISTRIBUTION) > 0:
-            new_tile = get_tile()
-            rack.append(new_tile)
-            TILE_DISTRIBUTION[new_tile] -= 1
-            if TILE_DISTRIBUTION[new_tile] == 0:
-                del TILE_DISTRIBUTION[new_tile]
-
-        # Display the updated board and rack
-        display_board(board)
-        st.write('Rack:', rack)
-    else:
-        st.write('Tile not in rack.')
-
-
-st.markdown("""
-Create a top ten game list for word games that you can write in computer code.
-Sure, here's a list of the top ten word games that can be implemented in computer code:
-
-Scrabble: A classic game where players take turns creating words on a board using letter tiles and scoring points based on the letters used and placement on the board.
-
-Boggle: In this game, players must find as many words as possible by connecting adjacent letters on a grid of letter cubes within a fixed time limit.
-
-Crosswords: A game where players must fill in a grid of white and black squares with words that fit a series of clues.
-
-Hangman: A game where players must guess a hidden word by suggesting letters, with each incorrect guess resulting in the drawing of a part of a stick figure on a noose.
-
-Word jumble: In this game, players are presented with a scrambled word or phrase and must unscramble it to reveal the correct order of letters.
-
-Word search: A puzzle where players must find a list of hidden words within a grid of letters, often arranged in a pattern or theme.
-
-Typing games: These games test players' typing speed and accuracy by presenting them with a series of words or phrases to type within a certain time limit.
-
-Anagrams: A game where players must create as many words as possible using the letters from a given word or phrase.
-
-Word association: A game where players take turns saying a word that is associated with the previous word, creating a chain of related words.
-
-Ghost: A game where players take turns saying letters, attempting to form a word. The catch is that each player must add a letter to the word, but cannot form a complete word themselves or they lose the round.
-""") \ No newline at end of file diff --git a/spaces/bigslime/stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.h b/spaces/bigslime/stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.h deleted file mode 100644 index e53c88ae9dfcea7f9766828168d3ad35a404b699..0000000000000000000000000000000000000000 --- a/spaces/bigslime/stablediffusion-infinity/PyPatchMatch/csrc/pyinterface.h +++ /dev/null @@ -1,38 +0,0 @@ -#include -#include -#include -#include - -extern "C" { - -struct PM_shape_t { - int width, height, channels; -}; - -enum PM_dtype_e { - PM_UINT8, - PM_INT8, - PM_UINT16, - PM_INT16, - PM_INT32, - PM_FLOAT32, - PM_FLOAT64, -}; - -struct PM_mat_t { - void *data_ptr; - PM_shape_t shape; - int dtype; -}; - -void PM_set_random_seed(unsigned int seed); -void PM_set_verbose(int value); - -void PM_free_pymat(PM_mat_t pymat); -PM_mat_t PM_inpaint(PM_mat_t image, PM_mat_t mask, int patch_size); -PM_mat_t PM_inpaint_regularity(PM_mat_t image, PM_mat_t mask, PM_mat_t ijmap, int patch_size, float guide_weight); -PM_mat_t PM_inpaint2(PM_mat_t image, PM_mat_t mask, PM_mat_t global_mask, int patch_size); -PM_mat_t PM_inpaint2_regularity(PM_mat_t image, PM_mat_t mask, PM_mat_t global_mask, PM_mat_t ijmap, int patch_size, float guide_weight); - -} /* extern "C" */ - diff --git a/spaces/bioriAsaeru/text-to-voice/Bommarillu Hindi Dubbed Movie Download HOT.md b/spaces/bioriAsaeru/text-to-voice/Bommarillu Hindi Dubbed Movie Download HOT.md deleted file mode 100644 index 89ee45e262309eed1da16b42fe185ec902379b69..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Bommarillu Hindi Dubbed Movie Download HOT.md +++ /dev/null @@ -1,9 +0,0 @@ -
    -

bommarillu (2006) telugu movie music by dsp starring siddartha, genelia. dum (happy) 2015 full hindi dubbed movie with telugu songs allu arjun. bollywood and hollywood full movies online free bommarillu (2006) dvdrip telugu full movie watch online free.

    -

    bommarillu hindi dubbed movie download


    Download >> https://urloso.com/2uyS7p



    -

bommarillu (2006) hindi dubbed full movie watch online free download 720p hd video movie free download 720p hd tv. watch movies online in hd print quality free download bollywood movies free.

    -


    -

6. mutyalin koottu. Watch online movies in HD print quality with free download; watch full movies online, Bollywood movies, and the latest Hollywood movies in DVD print quality for free. Watching online movies is my hobby, and I watch one or two movies daily, especially Indian movies on their release day. I always watched them on different websites in cam print, but I always used Google search to find the movies, so I decided to make a platform where users can see HD/DVD print quality movies, and I have listed all the latest movies. I also cover different categories of movies: whether you want to see Hollywood movies, Punjabi movies, or Bollywood movies, all these categories are on my website. I also organize movies by actress and actor; for example, if a person wants to see all movies of Amir Khan, they can select the Amir Khan movies list and all of his movies will be displayed. We provide movie lists for all actresses and actors, so you can find any movie and watch it in high print quality. I try my best to understand the needs of users who want to watch a movie, but if you have any suggestion or advice for me, you are always welcome; comment on the video and I will surely reply. I provide full movies to watch online and to download for free, so stay connected with our website to enjoy the latest movies; and if you don't have time to watch now, just download the movie, and when you are free you can watch it in the best print.

    -

    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Dolphin Imaging Software Crack Download [UPDATED].md b/spaces/bioriAsaeru/text-to-voice/Dolphin Imaging Software Crack Download [UPDATED].md deleted file mode 100644 index 4c1926b46e3f4fb77319d6369eb5c6d9f4e4c485..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Dolphin Imaging Software Crack Download [UPDATED].md +++ /dev/null @@ -1,6 +0,0 @@ -

    Dolphin Imaging Software Crack Download


Download Zip: https://urloso.com/2uyQ25



    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Facebook Certificate For Login [VERIFIED].md b/spaces/bioriAsaeru/text-to-voice/Facebook Certificate For Login [VERIFIED].md deleted file mode 100644 index dc440d4fde9e27ed92a7bb4112d2d3bd81b399ad..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Facebook Certificate For Login [VERIFIED].md +++ /dev/null @@ -1,32 +0,0 @@ - -

    Unfortunately, expired TLS certificates impact organizations across all regions, industries and sizes. According to a recent Venafi study of CIOs from the U.S., U.K., France, Germany and Australia, 60% experienced certificate-related outages that impacted critical business applications or services within the last year.

    -

    With the ongoing explosion of sites, properties and services across the web, these certificate-related outages are going to grow in number and impact until operations, PKI and InfoSec teams embrace the mantra of automation.
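One small piece of that automation is simply watching expiry dates before they bite. As a hedged illustration (the hostnames and the 30-day alert threshold below are arbitrary examples, not a recommendation), a short Python script using only the standard library can report how many days remain on a host's certificate:

```python
import socket
import ssl
from datetime import datetime, timezone

def days_until_expiry(hostname: str, port: int = 443) -> int:
    """Return how many days remain before the host's TLS certificate expires."""
    context = ssl.create_default_context()
    with socket.create_connection((hostname, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as tls:
            cert = tls.getpeercert()
    # 'notAfter' looks like 'Jun  1 12:00:00 2025 GMT'
    expires = datetime.strptime(cert["notAfter"], "%b %d %H:%M:%S %Y %Z")
    expires = expires.replace(tzinfo=timezone.utc)
    return (expires - datetime.now(timezone.utc)).days

if __name__ == "__main__":
    for host in ["facebook.com", "example.com"]:
        remaining = days_until_expiry(host)
        status = "OK" if remaining > 30 else "RENEW SOON"
        print(f"{host}: {remaining} days left ({status})")
```

Run on a schedule against an inventory of hostnames, even a sketch like this catches the silent expiries that cause the outages described above.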

    -

    facebook certificate for login


    Download File · https://urloso.com/2uyRts



    -

    I am having this issue too, not only on Facebook but all apps cannot connect to authentication servers. I am on strong wifi, verified by ookla speed test app, but all apps are failing when trying to access account info while facebook app give the above message. Something happened to SSL. I'm on iOS 14.2.

    -

    If you no longer have access to any of the email addresses or phone numbers associated with your account, enter one you have access to now. Then, email security@facebookmail.com to explain your situation.

    -

    Many of our TrustedForm users are often surprised to find out that in addition to using it to reach out to prospects safely, they can also claim TrustedForm for Facebook Lead Ads certificates! By using TrustedForm, you can verify proof of opt-in and document consent for every lead that comes in through Facebook.

    -

    If you are using an existing TrustedForm claim step in LeadConduit, please be aware that TrustedForm for Facebook Lead Ads does not support filtering with GeoIP data, masked certificates, or several other features of a standard web lead TrustedForm certificate. You can exempt TrustedForm for Facebook Lead Ads from this type of filtering by adding a nested rule, exempting the Facebook Lead Ads source.

    -

    Do you want to recover an old Facebook account that you can no longer access? If you've forgotten your login or password, or if you deactivated your account, getting your old Facebook back is easier than you'd think. The steps are a little different if your account was disabled for a violation, but you can still get back in by appealing Facebook's decision. This wikiHow guide will show you simple ways to reopen your old Facebook account.

    -

If you don't secure the connection between your website and your client, a man in the middle can change the page content to try to steal users' credentials. When Bob connects to your website, his browser fetches the page content. If there is a man in the middle between Bob's PC and your server, he can change the page content before sending it to Bob. In this case, when Bob tries to log in to your website with Facebook, he will connect to the malicious party's website, which tries to fool the user and steal his Facebook credentials.
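To see this protection from the client side, note that mainstream HTTP libraries verify certificates by default, so a man in the middle presenting a forged or expired certificate causes the request to fail loudly. A small illustrative Python snippet (using the third-party requests library and the public badssl.com test hosts, which exist specifically for this kind of check) shows the rejection:

```python
import requests

# requests verifies TLS certificates by default; expired.badssl.com
# deliberately serves an expired certificate for testing purposes.
try:
    requests.get("https://expired.badssl.com/", timeout=10)
except requests.exceptions.SSLError as err:
    print("TLS verification failed:", err)
```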

    -

When you log in through Google/Facebook, the URL you are redirected to is of course HTTPS, but after your users are authenticated it is up to your site to generate a session cookie and maintain user sessions.
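A minimal sketch of that handoff, assuming a Flask backend and a stubbed-out verify_oauth_callback() helper (a placeholder, not a real library function; real apps must validate the provider's token, e.g. the ID token signature), might look like:

```python
import secrets

from flask import Flask, make_response, redirect

app = Flask(__name__)

def verify_oauth_callback() -> str:
    # Placeholder: validate the provider's token here, then mint an
    # opaque session identifier for your own site.
    return secrets.token_urlsafe(32)

@app.route("/auth/callback")
def auth_callback():
    session_token = verify_oauth_callback()
    response = make_response(redirect("/dashboard"))
    # The session cookie itself must be protected: HTTPS-only,
    # invisible to JavaScript, and strict about cross-site sending.
    response.set_cookie(
        "session",
        session_token,
        secure=True,      # only sent over HTTPS
        httponly=True,    # not readable from JavaScript
        samesite="Lax",
    )
    return response
```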

    -

    When a website that requires a secure (HTTPS) connection tries to secure communication with your computer, Firefox cross-checks this attempt to make sure that the website certificate and the connection method are actually secure. If Firefox cannot establish a secure connection, it will display a Secure Connection Failed or Did Not Connect: Potential Security Issue error page.

    -

    -

    Firefox uses certificates on secure websites to ensure that your information is being sent to the intended recipient and can't be read by eavesdroppers. An incorrect system date can cause Firefox to detect that the website's security certificate is expired or invalid. Make sure your computer is set to the correct date, time and time zone. For more information, see How to troubleshoot time related errors on secure websites.

    -

    Q: Do I still need my own backend authentication systems with Cognito Identity?
    No. Cognito Identity supports login through Amazon, Facebook, Twitter, and Google, as well as providing support for unauthenticated users. With Cognito Identity you can support federated authentication, profile data sync store and AWS access token distribution without writing any backend code.

    -

    There is a problem with this website's security certificate. The security certificate presented by this website was not issued by a trusted certificate authority.

    Security certificate problems may indicate an attempt to fool you or intercept data you send to the server.
    We recommend that you close this webpage and do not continue to this Web site.

    -

    To resolve this issue, an organization that hosts the secure Web site can purchase a certificate for each Web server from a third-party provider. Or, the organization can install a Microsoft Enterprise certification authority in the Active Directory forest. Then, the organization can use this certification authority to generate a certificate for each Web server.

    Note Users of client computers that do not belong to the organization's Active Directory forest can visit the certification authority Web site to download the certificate.

    -

    In Windows Vista, the same issue occurs with self-signed certificates. However, the option to install certificates is not available unless you run Windows Internet Explorer with administrator rights. To do this, right-click the Internet Explorer icon, and then select Run as Administrator.

    -

    When the client computer connects to a Web server that is running Windows Server 2003, the client computer reuses the certification authority certificate. The client computer does not use another certificate that is signed by the certification authority.

    -

    If you have a website or mobile app that is being developed to communicate with or integrate with Google services, such as a website or mobile app that supports Google logins, you must follow the requirements of Google's Platform Guidelines.

    -

You could also try hitting the Advanced button and then Continue, just to see where it takes you, and check whether it looks like Facebook or a fake site for password phishing or something. After getting into the site, you could check whether the IP address matches what it should be as well. Use Wireshark, or you might be able to find it under the Sources or Network tab in the DevTools page in Chrome (right-click anywhere on the page and select Inspect).

    -

    There is nothing from any of the 7 other machines on the network indicating there is anything new. There are no portals or additional logins to be had. The issue being only facebook (which I can ping) and that I get the same type of error on multiple browsers. That is the weird part. And advanced does not go to the standard "continue on to site unsecure" ability.

    -

    Education is one of the easy keys to being an industry fit. Picking up the domain that suits you the best from the pool of options is a bit confusing. Great Learning offers you a plethora of choices in the fields of your interests. You can walk through the courses, understand what suits your specifications and master them. The programs will help you with the best content. You will gain Degree and PG certificates from recognized universities on successful completion of the registered course.

    -

    React Native and Firebase SDK make the implementation of Google login pretty straightforward. Let's build a simple app that only has a single Google login button. Once the user successfully logs into Google, we are going to display the user info retrieved from their Google account as well as a logout button.

    -

    You can also add Facebook Login to your app if you're interested in providing even more login options to your users. You can check out this guide to Facebook Login in React Native with Firebase if you're looking to learn more on how to set up Facebook sign-in.

    -

    Next, we need to integrate authentication config to the sign-in function. After a successful login, we store the accessToken and idToken to Firebase. Now, we can try to login with Google on our demo React Native app.

    -

    The Board of Accountancy regulates Certified Public Accountants, Public Accountants, and Accounting Practitioners. The board examines applicants and issues certificates and licenses to certified public accountants and accounting practitioners. The board may discipline certificate and license holders, prescribe rules and regulations, investigate complaints, and, in general, regulate the practice of accounting in the state.

    -

    After completing your profile, please fax your tax exemption certificate to (866) 665-9570 Attn: Tax Department

    We will then record your Tax Exemption Certificate so that your orders are not charged state sales tax for all states where you are exempt. Orders placed by individuals cannot be exempted; the order must be for the entity named on the certificate. Orders shipped to states where you are not exempt will be charged applicable sales tax.

    -

    If YES:

      Select the Email tab and enter your login information.

    If NO:
  Select the User ID tab and click Log In. Log in with your User ID (which may or may not be your email address) and password.

    -

    Keycloak is a single sign on solution for web apps and RESTful web services. The goal of Keycloakis to make security simple so that it is easy for application developers to secure the apps and services they have deployedin their organization. Security features that developers normally have to write for themselves are provided out of the boxand are easily tailorable to the individual requirements of your organization. Keycloak provides customizableuser interfaces for login, registration, administration, and account management. You can also use Keycloak as anintegration platform to hook it into existing LDAP and Active Directory servers. You can also delegate authentication to thirdparty identity providers like Facebook and Google.

    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Free Download Alpha and Omega 5 Family Vacation - Watch the Animated Adventure Online.md b/spaces/bioriAsaeru/text-to-voice/Free Download Alpha and Omega 5 Family Vacation - Watch the Animated Adventure Online.md deleted file mode 100644 index 2d2c962e4118d62446372ce7c1612c8ea442846e..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Free Download Alpha and Omega 5 Family Vacation - Watch the Animated Adventure Online.md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    The film opens up with Kate, Humphrey and the pups, who are on their way to Alfred Creek Falls for their first family vacation together. The pups - especially Claudette - are feeling lonely for not bringing the other animals, but Kate reminds them that it's their first vacation to share experiences. Claudette has a flashback of her and Fleet from the third film and ends up embarrassing herself in front of her family. Kate feels uncomfortable from this and Runt starts teasing Claudette for her infatuation with Fleet. Stinky suggests his plan for creating a wolf park to supervise the young wolves, but Humphrey disowns it, much to Stinky's dismay. After this, they continue their trip but eventually come across two female wolves running to hide from human wolf trappers. The wolves inform Kate and Humphrey that the trappers are trapping wolves and wolf pups to relocate them on the other side of the country, and that all the other wolves are hiding in the mountains near the border. The two wolves are captured when they try to escape. Humphrey suggests going across the border to escape the trappers, as Kate senses the Canadian express they used before.

    -

    free download Alpha and Omega 5: Family Vacation


    Download File ⚹⚹⚹ https://urloso.com/2uyOiD



    -

    As they travel on the road, Kate and Humphrey start to worry about Runt's sprained ankle, but the trappers once again catch up to them. Just as they are about to shoot the wolves (who are hiding in a small rock den), Runt and the porcupines scare them away. The next day, the animals reach the same RV from before. They encounter Garn and Debbie, the woman who recently broke up with her boyfriend, Garn. She decides to shelter the animals for the day. The wolf trappers arrive and ask her if she has seen the wolves, but she denies it while the animals hide. When they leave, Humphrey instructs Debbie to drive them to Idaho, where the trappers won't follow them anymore. Debbie also mentions she was planning on driving to Idaho, so she takes them there, but the wolf trappers spot them again. A car chase ensues and eventually Debbie drives in the forest to let the animals free, but the trappers catch up and follow them. Debbie notices the two female wolves from earlier in crates and frees them. Just as they reach Idaho, the wolf trappers start shooting at them, with Runt falling behind. In order to save his family, Humphrey tells Kate once again to lead the pups to safety, while he turns around and attacks the trappers.

    -
    -
    \ No newline at end of file diff --git a/spaces/bioriAsaeru/text-to-voice/Galaxy 11 Match !FREE! Full Version.md b/spaces/bioriAsaeru/text-to-voice/Galaxy 11 Match !FREE! Full Version.md deleted file mode 100644 index 59619ef72ebee305cca278a2145c715053616bdf..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Galaxy 11 Match !FREE! Full Version.md +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    osVersion (OS Version): Create a filter rule based on the Intune device operating system (OS) version. Enter the full string value (using -eq, -ne, -in, -notIn operators), or partial value (using -startswith, -contains, -notcontains operators).

    -

    Even businesses that still rely on legacy Windows apps without mobile or web versions should consider using DeX. You can leverage a virtual desktop app, like VMware, Citrix or Amazon WorkSpaces, to access a full Windows desktop within DeX.

    -

    Galaxy 11 Match Full Version


Download File: https://urloso.com/2uyQhh



    -
    -
    \ No newline at end of file diff --git a/spaces/blmdsydm/faster-whisper-webui/src/download.py b/spaces/blmdsydm/faster-whisper-webui/src/download.py deleted file mode 100644 index 20565153f9e582be73246a1e2a3b7be3f368b322..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/src/download.py +++ /dev/null @@ -1,78 +0,0 @@ -from tempfile import mkdtemp -from typing import List -from yt_dlp import YoutubeDL - -import yt_dlp -from yt_dlp.postprocessor import PostProcessor - -class FilenameCollectorPP(PostProcessor): - def __init__(self): - super(FilenameCollectorPP, self).__init__(None) - self.filenames = [] - - def run(self, information): - self.filenames.append(information["filepath"]) - return [], information - -def download_url(url: str, maxDuration: int = None, destinationDirectory: str = None, playlistItems: str = "1") -> List[str]: - try: - return _perform_download(url, maxDuration=maxDuration, outputTemplate=None, destinationDirectory=destinationDirectory, playlistItems=playlistItems) - except yt_dlp.utils.DownloadError as e: - # In case of an OS error, try again with a different output template - if e.msg and e.msg.find("[Errno 36] File name too long") >= 0: - return _perform_download(url, maxDuration=maxDuration, outputTemplate="%(title).10s %(id)s.%(ext)s") - pass - -def _perform_download(url: str, maxDuration: int = None, outputTemplate: str = None, destinationDirectory: str = None, playlistItems: str = "1"): - # Create a temporary directory to store the downloaded files - if destinationDirectory is None: - destinationDirectory = mkdtemp() - - ydl_opts = { - "format": "bestaudio/best", - 'paths': { - 'home': destinationDirectory - } - } - if (playlistItems): - ydl_opts['playlist_items'] = playlistItems - - # Add output template if specified - if outputTemplate: - ydl_opts['outtmpl'] = outputTemplate - - filename_collector = FilenameCollectorPP() - - with YoutubeDL(ydl_opts) as ydl: - if maxDuration and maxDuration > 0: - info = ydl.extract_info(url, download=False) - entries = "entries" in info and info["entries"] or [info] - - total_duration = 0 - - # Compute total duration - for entry in entries: - total_duration += float(entry["duration"]) - - if total_duration >= maxDuration: - raise ExceededMaximumDuration(videoDuration=total_duration, maxDuration=maxDuration, message="Video is too long") - - ydl.add_post_processor(filename_collector) - ydl.download([url]) - - if len(filename_collector.filenames) <= 0: - raise Exception("Cannot download " + url) - - result = [] - - for filename in filename_collector.filenames: - result.append(filename) - print("Downloaded " + filename) - - return result - -class ExceededMaximumDuration(Exception): - def __init__(self, videoDuration, maxDuration, message): - self.videoDuration = videoDuration - self.maxDuration = maxDuration - super().__init__(message) \ No newline at end of file diff --git a/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/inference.py b/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/inference.py deleted file mode 100644 index ec2e185e92db96c0015903703690c9660c27d33e..0000000000000000000000000000000000000000 --- a/spaces/bookbot/Grad-TTS-Weildan-Playground/Grad-TTS/inference.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -import argparse -import json -import datetime as dt -import numpy as np -from scipy.io.wavfile import write - -import torch - -import params -from model import GradTTS -from text import text_to_sequence, cmudict -from text.symbols import symbols -from utils import intersperse - -import sys -sys.path.append('./hifi-gan/') -from env import AttrDict -from models import Generator as HiFiGAN - - -HIFIGAN_CONFIG = './checkpts/hifigan-config.json' -HIFIGAN_CHECKPT = './checkpts/hifigan.pt' - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('-f', '--file', type=str, required=True, help='path to a file with texts to synthesize') - parser.add_argument('-c', '--checkpoint', type=str, required=True, help='path to a checkpoint of Grad-TTS') - parser.add_argument('-t', '--timesteps', type=int, required=False, default=10, help='number of timesteps of reverse diffusion') - parser.add_argument('-s', '--speaker_id', type=int, required=False, default=None, help='speaker id for multispeaker model') - args = parser.parse_args() - - if not isinstance(args.speaker_id, type(None)): - assert params.n_spks > 1, "Ensure you set right number of speakers in `params.py`." - spk = torch.LongTensor([args.speaker_id]).cuda() - else: - spk = None - - print('Initializing Grad-TTS...') - generator = GradTTS(len(symbols)+1, params.n_spks, params.spk_emb_dim, - params.n_enc_channels, params.filter_channels, - params.filter_channels_dp, params.n_heads, params.n_enc_layers, - params.enc_kernel, params.enc_dropout, params.window_size, - params.n_feats, params.dec_dim, params.beta_min, params.beta_max, params.pe_scale) - generator.load_state_dict(torch.load(args.checkpoint, map_location=lambda loc, storage: loc)) - _ = generator.cuda().eval() - print(f'Number of parameters: {generator.nparams}') - - print('Initializing HiFi-GAN...') - with open(HIFIGAN_CONFIG) as f: - h = AttrDict(json.load(f)) - vocoder = HiFiGAN(h) - vocoder.load_state_dict(torch.load(HIFIGAN_CHECKPT, map_location=lambda loc, storage: loc)['generator']) - _ = vocoder.cuda().eval() - vocoder.remove_weight_norm() - - with open(args.file, 'r', encoding='utf-8') as f: - texts = [line.strip() for line in f.readlines()] - cmu = cmudict.CMUDict('./resources/cmu_dictionary') - - with torch.no_grad(): - for i, text in enumerate(texts): - print(f'Synthesizing {i} text...', end=' ') - x = torch.LongTensor(intersperse(text_to_sequence(text, dictionary=cmu), len(symbols))).cuda()[None] - x_lengths = torch.LongTensor([x.shape[-1]]).cuda() - - t = dt.datetime.now() - y_enc, y_dec, attn = generator.forward(x, x_lengths, n_timesteps=args.timesteps, temperature=1.5, - stoc=False, spk=spk, length_scale=0.91) - t = (dt.datetime.now() - t).total_seconds() - print(f'Grad-TTS RTF: {t * 22050 / (y_dec.shape[-1] * 256)}') - - audio = (vocoder.forward(y_dec).cpu().squeeze().clamp(-1, 1).numpy() * 32768).astype(np.int16) - - write(f'./out/sample_{i}.wav', 22050, audio) - - print('Done. 
Check out `out` folder for samples.') diff --git a/spaces/brianaaas/BeedAiTe/README.md b/spaces/brianaaas/BeedAiTe/README.md deleted file mode 100644 index ed2d2b8acb8c21d18d8838b92055d08da1e64150..0000000000000000000000000000000000000000 --- a/spaces/brianaaas/BeedAiTe/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: BeedAiTe -emoji: ๐Ÿ‘ -colorFrom: indigo -colorTo: blue -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/shanghainese.py b/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/shanghainese.py deleted file mode 100644 index cb29c24a08d2e406e8399cf7bc9fe5cb43cb9c61..0000000000000000000000000000000000000000 --- a/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/shanghainese.py +++ /dev/null @@ -1,64 +0,0 @@ -import re -import cn2an -import opencc - - -converter = opencc.OpenCC('zaonhe') - -# List of (Latin alphabet, ipa) pairs: -_latin_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('A', 'ᴇ'), - ('B', 'bi'), - ('C', 'si'), - ('D', 'di'), - ('E', 'i'), - ('F', 'ᴇf'), - ('G', 'dʑi'), - ('H', 'ᴇtɕʰ'), - ('I', 'ᴀi'), - ('J', 'dʑᴇ'), - ('K', 'kʰᴇ'), - ('L', 'ᴇl'), - ('M', 'ᴇm'), - ('N', 'ᴇn'), - ('O', 'o'), - ('P', 'pʰi'), - ('Q', 'kʰiu'), - ('R', 'ᴀl'), - ('S', 'ᴇs'), - ('T', 'tʰi'), - ('U', 'ɦiu'), - ('V', 'vi'), - ('W', 'dᴀbɤliu'), - ('X', 'ᴇks'), - ('Y', 'uᴀi'), - ('Z', 'zᴇ') -]] - - -def _number_to_shanghainese(num): - num = cn2an.an2cn(num).replace('一十','十').replace('二十', '廿').replace('二', '两') - return re.sub(r'((?:^|[^三四五六七八九])十|廿)两', r'\1二', num) - - -def number_to_shanghainese(text): - return re.sub(r'\d+(?:\.?\d+)?', lambda x: _number_to_shanghainese(x.group()), text) - - -def latin_to_ipa(text): - for regex, replacement in _latin_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def shanghainese_to_ipa(text): - text = number_to_shanghainese(text.upper()) - text = converter.convert(text).replace('-','').replace('$',' ') - text = re.sub(r'[A-Z]', lambda x: latin_to_ipa(x.group())+' ', text) - text = re.sub(r'[、；：]', '，', text) - text = re.sub(r'\s*，\s*', ', ', text) - text = re.sub(r'\s*。\s*', '. ', text) - text = re.sub(r'\s*？\s*', '? ', text) - text = re.sub(r'\s*！\s*', '! ', text) - text = re.sub(r'\s*$', '', text) - return text diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/README.md b/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/README.md deleted file mode 100644 index c2fa6eec7282b2be8549e308bbc7d26dff15cb89..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/token-classification/README.md +++ /dev/null @@ -1,294 +0,0 @@ -## Token classification - -Based on the script [`run_ner.py`](https://github.com/huggingface/transformers/blob/main/examples/legacy/token-classification/run_ner.py). - -The following examples are covered in this section: - -* NER on the GermEval 2014 (German NER) dataset -* Emerging and Rare Entities task: WNUT'17 (English NER) dataset - -Details and results for the fine-tuning provided by @stefan-it. - -### GermEval 2014 (German NER) dataset - -#### Data (Download and pre-processing steps) - -Data can be obtained from the [GermEval 2014](https://sites.google.com/site/germeval2014ner/data) shared task page.
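For orientation, each raw GermEval 2014 file is tab-separated with the token in the second column and the outer-span NER annotation in the third; that is exactly what the `cut -f 2,3` step in the commands that follow keeps. A rough, invented illustration (real files also contain `#` comment lines, which the pipeline drops via `grep -v "^#"`):

```
1	Wolfgang	B-PER	O
2	wohnt	O	O
3	in	O	O
4	Berlin	B-LOC	O
```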
- -Here are the commands for downloading and pre-processing train, dev and test datasets. The original data format has four (tab-separated) columns; in a pre-processing step, only the two relevant columns (token and outer span NER annotation) are extracted: - ```bash -curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \ -| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp -curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \ -| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > dev.txt.tmp -curl -L 'https://drive.google.com/uc?export=download&id=1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH' \ -| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > test.txt.tmp -``` - -The GermEval 2014 dataset contains some strange "control character" tokens like `'\x96', '\u200e', '\x95', '\xad' or '\x80'`. -One problem with these tokens is that `BertTokenizer` returns an empty token for them, resulting in misaligned `InputExample`s. -The `preprocess.py` script located in the `scripts` folder a) filters these tokens and b) splits longer sentences into smaller ones (once the max. subtoken length is reached). - -Let's define some variables that we need for further pre-processing steps and training the model: - ```bash -export MAX_LENGTH=128 -export BERT_MODEL=bert-base-multilingual-cased -``` - -Run the pre-processing script on training, dev and test datasets: - ```bash -python3 scripts/preprocess.py train.txt.tmp $BERT_MODEL $MAX_LENGTH > train.txt -python3 scripts/preprocess.py dev.txt.tmp $BERT_MODEL $MAX_LENGTH > dev.txt -python3 scripts/preprocess.py test.txt.tmp $BERT_MODEL $MAX_LENGTH > test.txt -``` - -The GermEval 2014 dataset has many more labels than the CoNLL-2002/2003 datasets, so its own set of labels must be used: - ```bash -cat train.txt dev.txt test.txt | cut -d " " -f 2 | grep -v "^$"| sort | uniq > labels.txt -``` - -#### Prepare the run - -Additional environment variables must be set: - ```bash -export OUTPUT_DIR=germeval-model -export BATCH_SIZE=32 -export NUM_EPOCHS=3 -export SAVE_STEPS=750 -export SEED=1 -``` - -#### Run the Pytorch version - -To start training, just run: - ```bash -python3 run_ner.py --data_dir ./ \ ---labels ./labels.txt \ ---model_name_or_path $BERT_MODEL \ ---output_dir $OUTPUT_DIR \ ---max_seq_length $MAX_LENGTH \ ---num_train_epochs $NUM_EPOCHS \ ---per_device_train_batch_size $BATCH_SIZE \ ---save_steps $SAVE_STEPS \ ---seed $SEED \ ---do_train \ ---do_eval \ ---do_predict -``` - -If your GPU supports half-precision training, just add the `--fp16` flag. After training, the model will be evaluated on both the development and test datasets. - -#### JSON-based configuration file - -Instead of passing all parameters via command-line arguments, the `run_ner.py` script also supports reading parameters from a json-based configuration file: - ```json -{ - "data_dir": ".", - "labels": "./labels.txt", - "model_name_or_path": "bert-base-multilingual-cased", - "output_dir": "germeval-model", - "max_seq_length": 128, - "num_train_epochs": 3, - "per_device_train_batch_size": 32, - "save_steps": 750, - "seed": 1, - "do_train": true, - "do_eval": true, - "do_predict": true -} -``` - -It must be saved with a `.json` extension and can be used by running `python3 run_ner.py config.json`.
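For context, this JSON dispatch works because the legacy example scripts build their arguments with `transformers.HfArgumentParser`, which can fill the same dataclasses either from `--flag` style arguments or from a single `.json` file. A minimal, self-contained sketch of that pattern, using a stand-in dataclass rather than the full set defined in `run_ner.py`:

```python
import os
import sys
from dataclasses import dataclass, field

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ModelArguments:
    # Stand-in for the larger argument dataclasses defined in run_ner.py.
    model_name_or_path: str = field(default="bert-base-multilingual-cased")


parser = HfArgumentParser((ModelArguments, TrainingArguments))

if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
    # Exactly one .json argument: read every field from the configuration file.
    model_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
    # Otherwise parse the usual command-line flags into the same dataclasses.
    model_args, training_args = parser.parse_args_into_dataclasses()

print(model_args.model_name_or_path, training_args.output_dir)
```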
- -#### Evaluation - -Evaluation on the development dataset outputs the following for our example: - ```bash -10/04/2019 00:42:06 - INFO - __main__ - ***** Eval results ***** -10/04/2019 00:42:06 - INFO - __main__ - f1 = 0.8623348017621146 -10/04/2019 00:42:06 - INFO - __main__ - loss = 0.07183869666975543 -10/04/2019 00:42:06 - INFO - __main__ - precision = 0.8467916366258111 -10/04/2019 00:42:06 - INFO - __main__ - recall = 0.8784592370979806 -``` - -On the test dataset the following results could be achieved: - ```bash -10/04/2019 00:42:42 - INFO - __main__ - ***** Eval results ***** -10/04/2019 00:42:42 - INFO - __main__ - f1 = 0.8614389652384803 -10/04/2019 00:42:42 - INFO - __main__ - loss = 0.07064602487454782 -10/04/2019 00:42:42 - INFO - __main__ - precision = 0.8604651162790697 -10/04/2019 00:42:42 - INFO - __main__ - recall = 0.8624150210424085 -``` - -#### Run the Tensorflow 2 version - -To start training, just run: - ```bash -python3 run_tf_ner.py --data_dir ./ \ ---labels ./labels.txt \ ---model_name_or_path $BERT_MODEL \ ---output_dir $OUTPUT_DIR \ ---max_seq_length $MAX_LENGTH \ ---num_train_epochs $NUM_EPOCHS \ ---per_device_train_batch_size $BATCH_SIZE \ ---save_steps $SAVE_STEPS \ ---seed $SEED \ ---do_train \ ---do_eval \ ---do_predict -``` - -As with the PyTorch version, if your GPU supports half-precision training, just add the `--fp16` flag. After training, the model will be evaluated on both the development and test datasets. - -#### Evaluation - -Evaluation on the development dataset outputs the following for our example: -```bash - precision recall f1-score support - - LOCderiv 0.7619 0.6154 0.6809 52 - PERpart 0.8724 0.8997 0.8858 4057 - OTHpart 0.9360 0.9466 0.9413 711 - ORGpart 0.7015 0.6989 0.7002 269 - LOCpart 0.7668 0.8488 0.8057 496 - LOC 0.8745 0.9191 0.8963 235 - ORGderiv 0.7723 0.8571 0.8125 91 - OTHderiv 0.4800 0.6667 0.5581 18 - OTH 0.5789 0.6875 0.6286 16 - PERderiv 0.5385 0.3889 0.4516 18 - PER 0.5000 0.5000 0.5000 2 - ORG 0.0000 0.0000 0.0000 3 - -micro avg 0.8574 0.8862 0.8715 5968 -macro avg 0.8575 0.8862 0.8713 5968 -``` - -On the test dataset the following results could be achieved: -```bash - precision recall f1-score support - - PERpart 0.8847 0.8944 0.8896 9397 - OTHpart 0.9376 0.9353 0.9365 1639 - ORGpart 0.7307 0.7044 0.7173 697 - LOC 0.9133 0.9394 0.9262 561 - LOCpart 0.8058 0.8157 0.8107 1150 - ORG 0.0000 0.0000 0.0000 8 - OTHderiv 0.5882 0.4762 0.5263 42 - PERderiv 0.6571 0.5227 0.5823 44 - OTH 0.4906 0.6667 0.5652 39 - ORGderiv 0.7016 0.7791 0.7383 172 - LOCderiv 0.8256 0.6514 0.7282 109 - PER 0.0000 0.0000 0.0000 11 - -micro avg 0.8722 0.8774 0.8748 13869 -macro avg 0.8712 0.8774 0.8740 13869 -``` - -### Emerging and Rare Entities task: WNUT'17 (English NER) dataset - -Description of the WNUT'17 task from the [shared task website](http://noisy-text.github.io/2017/index.html): - -> The WNUT'17 shared task focuses on identifying unusual, previously-unseen entities in the context of emerging discussions. -> Named entities form the basis of many modern approaches to other tasks (like event clustering and summarization), but recall on -> them is a real problem in noisy text - even among annotators. This drop tends to be due to novel entities and surface forms. - -Six labels are available in the dataset. An overview can be found on this [page](http://noisy-text.github.io/2017/files/).
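As with GermEval, the pre-processed WNUT'17 files consumed by the scripts use the plain two-column `token label` format, one token per line with a blank line between sentences; the six entity types are person, location, corporation, product, creative-work and group. An invented illustration:

```
I O
love O
Stranger B-creative-work
Things I-creative-work
. O
```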
- -#### Data (Download and pre-processing steps) - -The dataset can be downloaded from the [official GitHub](https://github.com/leondz/emerging_entities_17) repository. - -The following commands show how to prepare the dataset for fine-tuning: - ```bash -mkdir -p data_wnut_17 - -curl -L 'https://github.com/leondz/emerging_entities_17/raw/master/wnut17train.conll'  | tr '\t' ' ' > data_wnut_17/train.txt.tmp -curl -L 'https://github.com/leondz/emerging_entities_17/raw/master/emerging.dev.conll' | tr '\t' ' ' > data_wnut_17/dev.txt.tmp -curl -L 'https://raw.githubusercontent.com/leondz/emerging_entities_17/master/emerging.test.annotated' | tr '\t' ' ' > data_wnut_17/test.txt.tmp -``` - -Let's define some variables that we need for further pre-processing steps: - ```bash -export MAX_LENGTH=128 -export BERT_MODEL=bert-large-cased -``` - -Here we use the English BERT large model for fine-tuning. -The `preprocess.py` script splits longer sentences into smaller ones (once the max. subtoken length is reached): - ```bash -python3 scripts/preprocess.py data_wnut_17/train.txt.tmp $BERT_MODEL $MAX_LENGTH > data_wnut_17/train.txt -python3 scripts/preprocess.py data_wnut_17/dev.txt.tmp $BERT_MODEL $MAX_LENGTH > data_wnut_17/dev.txt -python3 scripts/preprocess.py data_wnut_17/test.txt.tmp $BERT_MODEL $MAX_LENGTH > data_wnut_17/test.txt -``` - -In the last pre-processing step, the `labels.txt` file needs to be generated. This file contains all available labels: - ```bash -cat data_wnut_17/train.txt data_wnut_17/dev.txt data_wnut_17/test.txt | cut -d " " -f 2 | grep -v "^$"| sort | uniq > data_wnut_17/labels.txt -``` - -#### Run the Pytorch version - -Fine-tuning with the PyTorch version can be started using the `run_ner.py` script. In this example we use a JSON-based configuration file. - -This configuration file looks like: - ```json -{ - "data_dir": "./data_wnut_17", - "labels": "./data_wnut_17/labels.txt", - "model_name_or_path": "bert-large-cased", - "output_dir": "wnut-17-model-1", - "max_seq_length": 128, - "num_train_epochs": 3, - "per_device_train_batch_size": 32, - "save_steps": 425, - "seed": 1, - "do_train": true, - "do_eval": true, - "do_predict": true, - "fp16": false -} -``` - -If your GPU supports half-precision training, please set `fp16` to `true`. - -Save this JSON-based configuration under `wnut_17.json`. The fine-tuning can be started with `python3 run_ner.py wnut_17.json`. - -#### Evaluation - -Evaluation on the development dataset outputs the following: - ```bash -05/29/2020 23:33:44 - INFO - __main__ - ***** Eval results ***** -05/29/2020 23:33:44 - INFO - __main__ - eval_loss = 0.26505235286212275 -05/29/2020 23:33:44 - INFO - __main__ - eval_precision = 0.7008264462809918 -05/29/2020 23:33:44 - INFO - __main__ - eval_recall = 0.507177033492823 -05/29/2020 23:33:44 - INFO - __main__ - eval_f1 = 0.5884802220680084 -05/29/2020 23:33:44 - INFO - __main__ - epoch = 3.0 -``` - -On the test dataset the following results could be achieved: - ```bash -05/29/2020 23:33:44 - INFO - transformers.trainer - ***** Running Prediction ***** -05/29/2020 23:34:02 - INFO - __main__ - eval_loss = 0.30948806500973547 -05/29/2020 23:34:02 - INFO - __main__ - eval_precision = 0.5840108401084011 -05/29/2020 23:34:02 - INFO - __main__ - eval_recall = 0.3994439295644115 -05/29/2020 23:34:02 - INFO - __main__ - eval_f1 = 0.47440836543753434 -``` - -WNUT'17 is a very difficult task.
Current state-of-the-art results on this dataset can be found [here](https://nlpprogress.com/english/named_entity_recognition.html). diff --git a/spaces/chendl/compositional_test/transformers/scripts/fsmt/gen-card-allenai-wmt19.py b/spaces/chendl/compositional_test/transformers/scripts/fsmt/gen-card-allenai-wmt19.py deleted file mode 100644 index df0f5851c82eede7c6711db7b6108dee95fc8696..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/scripts/fsmt/gen-card-allenai-wmt19.py +++ /dev/null @@ -1,152 +0,0 @@ -#!/usr/bin/env python -# Copyright 2020 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Usage: -# ./gen-card-allenai-wmt19.py - -import os -from pathlib import Path - -def write_model_card(model_card_dir, src_lang, tgt_lang, model_name): - - texts = { - "en": "Machine learning is great, isn't it?", - "ru": "Машинное обучение - это здорово, не так ли?", - "de": "Maschinelles Lernen ist großartig, nicht wahr?", - } - - # BLEU scores as follows: - # "pair": [fairseq, transformers] - scores = { - "wmt19-de-en-6-6-base": [0, 38.37], - "wmt19-de-en-6-6-big": [0, 39.90], - } - pair = f"{src_lang}-{tgt_lang}" - - readme = f""" ---- - -language: -- {src_lang} -- {tgt_lang} -thumbnail: -tags: -- translation -- wmt19 -- allenai -license: apache-2.0 -datasets: -- wmt19 -metrics: -- bleu ---- - -# FSMT - -## Model description - -This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. - -For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). - -Two models are available: - -* [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big) -* [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base) - - -## Intended uses & limitations - -#### How to use - -```python -from transformers import FSMTForConditionalGeneration, FSMTTokenizer -mname = "allenai/{model_name}" -tokenizer = FSMTTokenizer.from_pretrained(mname) -model = FSMTForConditionalGeneration.from_pretrained(mname) - -input = "{texts[src_lang]}" -input_ids = tokenizer.encode(input, return_tensors="pt") -outputs = model.generate(input_ids) -decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) -print(decoded) # {texts[tgt_lang]} - -``` - -#### Limitations and bias - - -## Training data - -Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
- -## Eval results - -Here are the BLEU scores: - -model | transformers --------|--------- -{model_name} | {scores[model_name][1]} - -The score was calculated using this code: - ```bash -git clone https://github.com/huggingface/transformers -cd transformers -export PAIR={pair} -export DATA_DIR=data/$PAIR -export SAVE_DIR=data/$PAIR -export BS=8 -export NUM_BEAMS=5 -mkdir -p $DATA_DIR -sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source -sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target -echo $PAIR -PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS -``` - -## Data Sources - -- [training, etc.](http://www.statmt.org/wmt19/) -- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) - - -### BibTeX entry and citation info - -``` -@misc{{kasai2020deep, - title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, - author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, - year={{2020}}, - eprint={{2006.10369}}, - archivePrefix={{arXiv}}, - primaryClass={{cs.CL}} -}} -``` - -""" - model_card_dir.mkdir(parents=True, exist_ok=True) - path = os.path.join(model_card_dir, "README.md") - print(f"Generating {path}") - with open(path, "w", encoding="utf-8") as f: - f.write(readme) - -# make sure we are under the root of the project -repo_dir = Path(__file__).resolve().parent.parent.parent -model_cards_dir = repo_dir / "model_cards" - -for model_name in ["wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big"]: - model_card_dir = model_cards_dir / "allenai" / model_name - write_model_card(model_card_dir, src_lang="de", tgt_lang="en", model_name=model_name) diff --git a/spaces/chidojawbreaker/ct-i-rad/app.py b/spaces/chidojawbreaker/ct-i-rad/app.py deleted file mode 100644 index b6a8aecadb6e26ce16aa25231a43054174163981..0000000000000000000000000000000000000000 --- a/spaces/chidojawbreaker/ct-i-rad/app.py +++ /dev/null @@ -1,37 +0,0 @@ -import gradio as gr -import numpy as np -import tensorflow as tf -from tensorflow.keras.preprocessing import image - -model = tf.keras.models.load_model('model.hdf5') - -def predict(Image): - test_image = tf.image.resize(Image,[224,224]) - test_image = image.img_to_array(test_image) - test_image = test_image/255.0 - test_image = np.expand_dims(test_image, axis = 0) - prediction = model.predict(test_image) - result = np.argmax(prediction, axis=1) - if result[0] == 0: - prediction = 'COVID DETECTED' - elif result[0] == 1: - prediction = 'HEALTHY' - elif result[0] == 2: - prediction = 'LUNG CANCER DETECTED' - else: - prediction = 'PNEUMONIA DETECTED' - return prediction - -title = "CT-iRAD" -description = "Welcome to CT-iRAD -- a web-based decision support system for radiologists screening lung diseases -- COVID-19, LUNG CANCER and PNEUMONIA -- in CT images. Please upload the CT scan image for screening below."
-examples = [ - ["covid.jpg"], - ["healthy.jpg"], - ["lung cancer.jpg"], - ["pneumonia.jpg"] -] - -iRAD = gr.Interface(predict,"image","text", -title=title,description=description,examples=examples,theme="peach") - -iRAD.launch() \ No newline at end of file diff --git a/spaces/chronopt-research/ViTExCo/src/models/vit/blocks.py b/spaces/chronopt-research/ViTExCo/src/models/vit/blocks.py deleted file mode 100644 index 305114e2274b360dd4fce3eaa08f13922563fbff..0000000000000000000000000000000000000000 --- a/spaces/chronopt-research/ViTExCo/src/models/vit/blocks.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch.nn as nn -from timm.models.layers import DropPath - - -class FeedForward(nn.Module): - def __init__(self, dim, hidden_dim, dropout, out_dim=None): - super().__init__() - self.fc1 = nn.Linear(dim, hidden_dim) - self.act = nn.GELU() - if out_dim is None: - out_dim = dim - self.fc2 = nn.Linear(hidden_dim, out_dim) - self.drop = nn.Dropout(dropout) - - @property - def unwrapped(self): - return self - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class Attention(nn.Module): - def __init__(self, dim, heads, dropout): - super().__init__() - self.heads = heads - head_dim = dim // heads - self.scale = head_dim**-0.5 - self.attn = None - - self.qkv = nn.Linear(dim, dim * 3) - self.attn_drop = nn.Dropout(dropout) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(dropout) - - @property - def unwrapped(self): - return self - - def forward(self, x, mask=None): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.heads, C // self.heads).permute(2, 0, 3, 1, 4) - q, k, v = ( - qkv[0], - qkv[1], - qkv[2], - ) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - - return x, attn - - -class Block(nn.Module): - def __init__(self, dim, heads, mlp_dim, dropout, drop_path): - super().__init__() - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.attn = Attention(dim, heads, dropout) - self.mlp = FeedForward(dim, mlp_dim, dropout) - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - - def forward(self, x, mask=None, return_attention=False): - y, attn = self.attn(self.norm1(x), mask) - if return_attention: - return attn - x = x + self.drop_path(y) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/dateutil/tz/_common.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/dateutil/tz/_common.py deleted file mode 100644 index e6ac11831522b266114d5b68ee1da298e3aeb14a..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/dateutil/tz/_common.py +++ /dev/null @@ -1,419 +0,0 @@ -from six import PY2 - -from functools import wraps - -from datetime import datetime, timedelta, tzinfo - - -ZERO = timedelta(0) - -__all__ = ['tzname_in_python2', 'enfold'] - - -def tzname_in_python2(namefunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. 
It used to return bytes, but was changed - to unicode strings - """ - if PY2: - @wraps(namefunc) - def adjust_encoding(*args, **kwargs): - name = namefunc(*args, **kwargs) - if name is not None: - name = name.encode() - - return name - - return adjust_encoding - else: - return namefunc - - -# The following is adapted from Alexander Belopolsky's tz library -# https://github.com/abalkin/tz -if hasattr(datetime, 'fold'): - # This is the pre-python 3.6 fold situation - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - return dt.replace(fold=fold) - -else: - class _DatetimeWithFold(datetime): - """ - This is a class designed to provide a PEP 495-compliant interface for - Python versions before 3.6. It is used only for dates in a fold, so - the ``fold`` attribute is fixed at ``1``. - - .. versionadded:: 2.6.0 - """ - __slots__ = () - - def replace(self, *args, **kwargs): - """ - Return a datetime with the same attributes, except for those - attributes given new values by whichever keyword arguments are - specified. Note that tzinfo=None can be specified to create a naive - datetime from an aware datetime with no conversion of date and time - data. - - This is reimplemented in ``_DatetimeWithFold`` because pypy3 will - return a ``datetime.datetime`` even if ``fold`` is unchanged. - """ - argnames = ( - 'year', 'month', 'day', 'hour', 'minute', 'second', - 'microsecond', 'tzinfo' - ) - - for arg, argname in zip(args, argnames): - if argname in kwargs: - raise TypeError('Duplicate argument: {}'.format(argname)) - - kwargs[argname] = arg - - for argname in argnames: - if argname not in kwargs: - kwargs[argname] = getattr(self, argname) - - dt_class = self.__class__ if kwargs.get('fold', 1) else datetime - - return dt_class(**kwargs) - - @property - def fold(self): - return 1 - - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - if getattr(dt, 'fold', 0) == fold: - return dt - - args = dt.timetuple()[:6] - args += (dt.microsecond, dt.tzinfo) - - if fold: - return _DatetimeWithFold(*args) - else: - return datetime(*args) - - -def _validate_fromutc_inputs(f): - """ - The CPython version of ``fromutc`` checks that the input is a ``datetime`` - object and that ``self`` is attached as its ``tzinfo``. 
- """ - @wraps(f) - def fromutc(self, dt): - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - return f(self, dt) - - return fromutc - - -class _tzinfo(tzinfo): - """ - Base class for all ``dateutil`` ``tzinfo`` objects. - """ - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - - dt = dt.replace(tzinfo=self) - - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) - - return same_dt and not same_offset - - def _fold_status(self, dt_utc, dt_wall): - """ - Determine the fold status of a "wall" datetime, given a representation - of the same datetime as a (naive) UTC datetime. This is calculated based - on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all - datetimes, and that this offset is the actual number of hours separating - ``dt_utc`` and ``dt_wall``. - - :param dt_utc: - Representation of the datetime as UTC - - :param dt_wall: - Representation of the datetime as "wall time". This parameter must - either have a `fold` attribute or have a fold-naive - :class:`datetime.tzinfo` attached, otherwise the calculation may - fail. - """ - if self.is_ambiguous(dt_wall): - delta_wall = dt_wall - dt_utc - _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) - else: - _fold = 0 - - return _fold - - def _fold(self, dt): - return getattr(dt, 'fold', 0) - - def _fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - - # Re-implement the algorithm from Python's datetime.py - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # The original datetime.py code assumes that `dst()` defaults to - # zero during ambiguous times. PEP 495 inverts this presumption, so - # for pre-PEP 495 versions of python, we need to tweak the algorithm. - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - - dt += delta - # Set fold=1 so we can default to being in the fold for - # ambiguous dates. - dtdst = enfold(dt, fold=1).dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). 
- - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - dt_wall = self._fromutc(dt) - - # Calculate the fold status given the two datetimes. - _fold = self._fold_status(dt, dt_wall) - - # Set the default fold value for ambiguous dates - return enfold(dt_wall, fold=_fold) - - -class tzrangebase(_tzinfo): - """ - This is an abstract base class for time zones represented by an annual - transition into and out of DST. Child classes should implement the following - methods: - - * ``__init__(self, *args, **kwargs)`` - * ``transitions(self, year)`` - this is expected to return a tuple of - datetimes representing the DST on and off transitions in standard - time. - - A fully initialized ``tzrangebase`` subclass should also provide the - following attributes: - * ``hasdst``: Boolean whether or not the zone uses DST. - * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects - representing the respective UTC offsets. - * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short - abbreviations in DST and STD, respectively. - * ``_hasdst``: Whether or not the zone has DST. - - .. versionadded:: 2.6.0 - """ - def __init__(self): - raise NotImplementedError('tzrangebase is an abstract base class') - - def utcoffset(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_base_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def fromutc(self, dt): - """ Given a datetime in UTC, return local time """ - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # Get transitions - if there are none, fixed offset - transitions = self.transitions(dt.year) - if transitions is None: - return dt + self.utcoffset(dt) - - # Get the transition times in UTC - dston, dstoff = transitions - - dston -= self._std_offset - dstoff -= self._std_offset - - utc_transitions = (dston, dstoff) - dt_utc = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt_utc, utc_transitions) - - if isdst: - dt_wall = dt + self._dst_offset - else: - dt_wall = dt + self._std_offset - - _fold = int(not isdst and self.is_ambiguous(dt_wall)) - - return enfold(dt_wall, fold=_fold) - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. 
versionadded:: 2.6.0 - """ - if not self.hasdst: - return False - - start, end = self.transitions(dt.year) - - dt = dt.replace(tzinfo=None) - return (end <= dt < end + self._dst_base_offset) - - def _isdst(self, dt): - if not self.hasdst: - return False - elif dt is None: - return None - - transitions = self.transitions(dt.year) - - if transitions is None: - return False - - dt = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt, transitions) - - # Handle ambiguous dates - if not isdst and self.is_ambiguous(dt): - return not self._fold(dt) - else: - return isdst - - def _naive_isdst(self, dt, transitions): - dston, dstoff = transitions - - dt = dt.replace(tzinfo=None) - - if dston < dstoff: - isdst = dston <= dt < dstoff - else: - isdst = not dstoff <= dt < dston - - return isdst - - @property - def _dst_base_offset(self): - return self._dst_offset - self._std_offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/color-4b6a4814.js b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/color-4b6a4814.js deleted file mode 100644 index b25e8fddf0db3510fd246ce9020ae61e0da1bcda..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/color-4b6a4814.js +++ /dev/null @@ -1,2 +0,0 @@ -import{ax as o}from"./index-f877dfd5.js";const t=r=>o[r%o.length];export{t as g}; -//# sourceMappingURL=color-4b6a4814.js.map diff --git a/spaces/cihyFjudo/fairness-paper-search/CLEO 4 Mod for GTA San Andreas Teleport to Anywhere on the Map.md b/spaces/cihyFjudo/fairness-paper-search/CLEO 4 Mod for GTA San Andreas Teleport to Anywhere on the Map.md deleted file mode 100644 index cf59f9502e965aedff2e89cec9f0fcd25922312f..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/CLEO 4 Mod for GTA San Andreas Teleport to Anywhere on the Map.md +++ /dev/null @@ -1,6 +0,0 @@ -

    gta san andreas teleport mod cleo 4


    Download 🔗 https://tinurli.com/2uwju3



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/cihyFjudo/fairness-paper-search/Recetario De Cocina Chiapaneca Pdf Download Una Muestra De Amor Por Chiapas Y Su Historia A Travs De Sus Platillos.md b/spaces/cihyFjudo/fairness-paper-search/Recetario De Cocina Chiapaneca Pdf Download Una Muestra De Amor Por Chiapas Y Su Historia A Travs De Sus Platillos.md deleted file mode 100644 index 758f1b308381b3e22ddf7af36e959ae902a6bd61..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Recetario De Cocina Chiapaneca Pdf Download Una Muestra De Amor Por Chiapas Y Su Historia A Travs De Sus Platillos.md +++ /dev/null @@ -1,9 +0,0 @@ - -

Studies carried out by Susan Bennett (1997), Jonathan Burston (1998) and Maurya Wickstrom (1999) suggest that American theatre has become a mass-culture business. This critique is now even more apparent given the current state of Broadway comedy, best exemplified by Mel Brooks's adaptations of his film The Producers (1968), including his Broadway show The Producers (2001) and his movie remake of that show, The Producers (2005). Brooks's original film may have been topical, shocking and full of controversy, but by 2001 those same old gags about Nazis, dumb blondes and homosexuals had become outdated. For this reason, the international acclaim of The Producers (2001) among theatre critics, as well as the critical backlash from many film critics, illustrates these scholars' observation, namely that modern Broadway's mass audience is not necessarily interested in provocative or original comedy but applauds recycled jokes.

    -

    The Producers 2005 Full Movie Download


Download File: https://tinurli.com/2uwiUA



    -

Choose your favorite from thousands of beautiful vertical pictures of The Producers (2005) in the highest quality, and click download to save it to your phone or computer. Now you can set a new wallpaper for your screen saver or lock screen. All The Producers (2005) wallpapers are free and can be downloaded in any popular resolution: 2160x3840, 1440x2560, 1366x768, 1080x1920, 1024x600, 960x544, 800x1280, 800x600, 720x1280, 540x960, 480x854, 480x800, 360x640, 320x480, 320x240, 240x400, etc., both to a computer and to a mobile phone via mob.org. The catalog is constantly updated with new beautiful photos of The Producers (2005) and original pictures.

    -

Attention! All wallpapers of The Producers (2005) on the site were found freely distributed on the Internet or downloaded by our users and are presented for informational purposes only. By downloading free pictures of The Producers (2005) to your phone on our website, you agree to review and remove the screensaver from your phone.

    -

A Data Guide for this study is available as a web page and for download. The India Human Development Survey 2005 (IHDS) is a nationally representative, multi-topic survey of 41,554 households in 1,503 villages and 971 urban neighborhoods across India. Two one-hour interviews in each household covered topics concerning health, education, employment, economic status, marriage, fertility, gender relations, and social capital. Children aged 8-11 completed short reading, writing and arithmetic tests. Additional village, school, and medical facility interviews are also available. The IHDS dataset was produced by the National Council of Applied Economic Research (NCAER), New Delhi, and the University of Maryland.

    ' : '\U0001d4ab', - '\\' : '\U0001d4ac', - '\\' : '\U0000211b', - '\\' : '\U0001d4ae', - '\\' : '\U0001d4af', - '\\' : '\U0001d4b0', - '\\' : '\U0001d4b1', - '\\' : '\U0001d4b2', - '\\' : '\U0001d4b3', - '\\' : '\U0001d4b4', - '\\' : '\U0001d4b5', - '\\' : '\U0001d5ba', - '\\' : '\U0001d5bb', - '\\' : '\U0001d5bc', - '\\' : '\U0001d5bd', - '\\' : '\U0001d5be', - '\\' : '\U0001d5bf', - '\\' : '\U0001d5c0', - '\\' : '\U0001d5c1', - '\\' : '\U0001d5c2', - '\\' : '\U0001d5c3', - '\\' : '\U0001d5c4', - '\\' : '\U0001d5c5', - '\\' : '\U0001d5c6', - '\\' : '\U0001d5c7', - '\\' : '\U0001d5c8', - '\\