diff --git a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/theb/__init__.py b/spaces/101-5/gpt4free/g4f/.v1/gpt4free/theb/__init__.py
deleted file mode 100644
index 0177194efbaf0e79c8ff62f4191ef8c3a5578a05..0000000000000000000000000000000000000000
--- a/spaces/101-5/gpt4free/g4f/.v1/gpt4free/theb/__init__.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from json import loads
-from queue import Queue, Empty
-from re import findall
-from threading import Thread
-from typing import Generator, Optional
-
-from curl_cffi import requests
-from fake_useragent import UserAgent
-
-
-class Completion:
-    # experimental
-    part1 = '{"role":"assistant","id":"chatcmpl'
-    part2 = '"},"index":0,"finish_reason":null}]}}'
-    regex = rf'{part1}(.*){part2}'
-
-    timer = None
-    message_queue = Queue()
-    stream_completed = False
-    last_msg_id = None
-
-    @staticmethod
-    def request(prompt: str, proxy: Optional[str] = None):
-        headers = {
-            'authority': 'chatbot.theb.ai',
-            'content-type': 'application/json',
-            'origin': 'https://chatbot.theb.ai',
-            'user-agent': UserAgent().random,
-        }
-
-        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else None
-
-        options = {}
-        if Completion.last_msg_id:
-            options['parentMessageId'] = Completion.last_msg_id
-
-        requests.post(
-            'https://chatbot.theb.ai/api/chat-process',
-            headers=headers,
-            proxies=proxies,
-            content_callback=Completion.handle_stream_response,
-            json={'prompt': prompt, 'options': options},
-            timeout=100000
-        )
-
-        Completion.stream_completed = True
-
-    @staticmethod
-    def create(prompt: str, proxy: Optional[str] = None) -> Generator[str, None, None]:
-        Completion.stream_completed = False
-
-        Thread(target=Completion.request, args=[prompt, proxy]).start()
-
-        while not Completion.stream_completed or not Completion.message_queue.empty():
-            try:
-                message = Completion.message_queue.get(timeout=0.01)
-                for message in findall(Completion.regex, message):
-                    message_json = loads(Completion.part1 + message + Completion.part2)
-                    Completion.last_msg_id = message_json['id']
-                    yield message_json['delta']
-
-            except Empty:
-                pass
-
-    @staticmethod
-    def handle_stream_response(response):
-        Completion.message_queue.put(response.decode())
-
-    @staticmethod
-    def get_response(prompt: str, proxy: Optional[str] = None) -> str:
-        response_list = []
-        for message in Completion.create(prompt, proxy):
-            response_list.append(message)
-        return ''.join(response_list)
-
-        Completion.message_queue.put(response.decode(errors='replace'))
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 Full Crack.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 Full Crack.md
deleted file mode 100644
index dd0a3faba1f8cce60bfa922dbe8aa46d647d2377..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Easy Worship 2009 Full Crack.md
+++ /dev/null
@@ -1,34 +0,0 @@
-
-

How to Download Easy Worship 2009 Full Crack for Free

-

Easy Worship 2009 is a software that helps you to create and present worship songs, Bible verses, videos, and other media in your church. It is a powerful and easy-to-use tool that allows you to customize your worship service with different themes, fonts, backgrounds, transitions, and more. You can also use it to display live video feeds, DVDs, PowerPoint presentations, and web pages.

-

If you want to download Easy Worship 2009 full crack for free, you have come to the right place. In this article, we will show you how to download and install Easy Worship 2009 full crack from a reliable source. You will be able to enjoy all the features of the software without paying anything.

-

download easy worship 2009 full crack


DOWNLOAD https://byltly.com/2uKyI7



-

Step 1: Download Easy Worship 2009 Full Crack

-

The first step is to download Easy Worship 2009 full crack from a trusted website. You can use the link below to download the software from our website. The file is safe and virus-free.

-

Download Easy Worship 2009 Full Crack

-

Step 2: Disable Windows Defender

-

The next step is to disable Windows Defender on your computer. This is a security feature that prevents you from installing cracked software. To do this, follow these steps:

- -

Step 3: Extract the File

-

The final step is to extract the file that you downloaded in step 1. To do this, follow these steps:

- -

Conclusion

-

Easy Worship 2009 full crack is a great software for creating and presenting worship media in your church. You can download it for free from our website and use it without any limitations. You can also update it regularly with new songs, Bible versions, and other resources.

-

We hope this article helped you download and install Easy Worship 2009 full crack for free. If you have any questions or feedback, feel free to leave a comment below.

-

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/ALA - Little Melissa 34 Sets !!! -.md b/spaces/1gistliPinn/ChatGPT4/Examples/ALA - Little Melissa 34 Sets !!! -.md deleted file mode 100644 index b506ea708d3f653b3e0ba4ac8c4878e27f45786a..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/ALA - Little Melissa 34 Sets !!! -.md +++ /dev/null @@ -1,6 +0,0 @@ -

{ALA - Little Melissa 34 Sets !!!} -


DOWNLOAD ———>>> https://imgfil.com/2uxXBV



-
- 899543212b
-
-
-

diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Download Steam Api.dll Resident Evil 6 Reloaded High Quality.md b/spaces/1gistliPinn/ChatGPT4/Examples/Download Steam Api.dll Resident Evil 6 Reloaded High Quality.md deleted file mode 100644 index 3efe66666393e37556c064cb22708a0d1038efa6..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Download Steam Api.dll Resident Evil 6 Reloaded High Quality.md +++ /dev/null @@ -1,24 +0,0 @@ -
-

How to Download and Fix steam_api.dll for Resident Evil 6 Reloaded

-

If you are a fan of the Resident Evil series, you may have encountered an error message related to steam_api.dll when trying to launch Resident Evil 6 Reloaded. This file is part of the Steam client application developed by Valve Corporation, which is a digital distribution platform for video games. The file is used by game developers to integrate their games with the Steam platform, specifically to access the Steam API, which provides various services such as authentication, user profiles, game stats, and cloud storage.

-

The error message may indicate that the file is missing, corrupted, or not installed properly. In such cases, you may need to reinstall the affected game or the Steam client to restore the missing file. However, before you do that, you can try some simple solutions that may fix the problem without reinstalling anything. Here are some steps you can follow to download and fix steam_api.dll for Resident Evil 6 Reloaded.

-

download steam api.dll resident evil 6 reloaded


Downloadhttps://imgfil.com/2uxXC0



-

Step 1: Check your antivirus software

-

Some antivirus software may flag the steam_api.dll file as a potential threat, as it can be used to modify the behavior of video games. However, this file is an integral part of the Steam client and is not a virus or malware. If you encounter such warnings, you can usually ignore them or add the file to the list of exceptions in your antivirus software. To do that, you need to open your antivirus software and look for a setting that allows you to exclude certain files or folders from scanning. Then, you need to add the steam_api.dll file or the folder where it is located to the exclusion list. The location of the file may vary depending on where you installed the game or the Steam client, but it is commonly found in one of these paths:

- -

After adding the file or folder to the exclusion list, you need to restart your computer and try launching the game again. If the error message persists, you can move on to the next step.

-

Step 2: Download a new copy of steam_api.dll

-

If your antivirus software is not the cause of the problem, it may be that your steam_api.dll file is corrupted or outdated. In that case, you can try downloading a new copy of the file from a reliable source and replacing it with the old one. There are many websites that offer free downloads of DLL files, but not all of them are safe or trustworthy. You need to be careful when choosing where to download the file from, as some sites may contain malware or viruses that can harm your computer. One of the websites that we recommend is DLL-files.com[^1^], which is a reputable and secure site that provides various versions of DLL files for free. To download steam_api.dll from DLL-files.com, you need to follow these steps:

-
    -
  1. Go to https://www.dll-files.com/steam_api.dll.html and scroll down to find the table of available versions of steam_api.dll.
  2. -
  3. Choose wisely. Most of the time, just pick the highest version. However, some games may require a specific version of steam_api.dll that matches their own version. To find out which version of steam_api.dll you need for Resident Evil 6 Reloaded, you can right-click on the game's executable file (re6.exe) and select Properties. Then, go to the Details tab and look for the Product version field. For example, if your game's product version is 1.0.6.165, you may need to download steam_api.dll version 7.9.87.40.
  4. -
  5. Click on the Download button next to the version that you want and save the ZIP file to your computer.
  6. -
  7. Extract the ZIP file using a program like WinRAR or 7-Zip and copy the steam_api.dll file inside it.
  8. -
  9. Paste the steam_api.dll file into the same folder where your game or Steam client is installed, depending on where you found the original file in Step 1.
  10. -
  11. If you are prompted to overwrite or replace an existing file, click Yes. -

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Facebook Video to MP4 Online - Fast Free and Easy.md b/spaces/1phancelerku/anime-remove-background/Download Facebook Video to MP4 Online - Fast Free and Easy.md deleted file mode 100644 index 39d3d34e13d104a3fbdc4e855895d6909e7a2fd4..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Facebook Video to MP4 Online - Fast Free and Easy.md +++ /dev/null @@ -1,116 +0,0 @@ -
    -

    Download Facebook Video to MP4: A Complete Guide

    -

    Facebook is one of the most popular social media platforms in the world, with billions of users and millions of videos uploaded every day. You may have come across some interesting or useful videos on Facebook that you want to save on your device for offline viewing, sharing, or editing. But how can you download Facebook videos to MP4, which is a widely supported and versatile video format?

    -

    download facebook video to mp4


    Download Filehttps://jinyurl.com/2uNUp2



    -

    In this article, we will show you why you should download Facebook videos to MP4, how to do it with two different methods, and some tips and tricks for downloading Facebook videos to MP4. Let's get started!

    -

    Why Download Facebook Videos to MP4?

    -

    Before we dive into the methods of downloading Facebook videos to MP4, let's first understand why you should do it in the first place. Here are some benefits of MP4 format and some use cases for downloading Facebook videos.

    -

    Benefits of MP4 Format

    -

    MP4 is a digital multimedia container format that can store video, audio, subtitles, images, and other data. It is one of the most common and widely compatible video formats on the web and various devices. Here are some advantages of MP4 format:

    -

    How to download facebook video to mp4 online
    -Download facebook video to mp4 free
    -Download facebook video to mp4 hd
    -Download facebook video to mp4 converter
    -Download facebook video to mp4 android
    -Download facebook video to mp4 iphone
    -Download facebook video to mp4 mac
    -Download facebook video to mp4 chrome
    -Download facebook video to mp4 firefox
    -Download facebook video to mp4 safari
    -Download facebook live video to mp4
    -Download private facebook video to mp4
    -Download facebook story video to mp4
    -Download facebook 360 video to mp4
    -Download facebook watch video to mp4
    -Download facebook messenger video to mp4
    -Download facebook group video to mp4
    -Download facebook page video to mp4
    -Download facebook profile video to mp4
    -Download facebook cover video to mp4
    -Best way to download facebook video to mp4
    -Fastest way to download facebook video to mp4
    -Easiest way to download facebook video to mp4
    -Simplest way to download facebook video to mp4
    -Quickest way to download facebook video to mp4
    -Download multiple facebook videos to mp4
    -Download entire facebook videos playlist to mp4
    -Download long facebook videos to mp4
    -Download short facebook videos to mp4
    -Download high quality facebook videos to mp4
    -Download low quality facebook videos to mp4
    -Download any facebook videos to mp4
    -Save and download facebook videos as mp4 files
    -Convert and download facebook videos in mp4 format
    -Edit and download facebook videos in mp4 format
    -Crop and download facebook videos in mp4 format
    -Trim and download facebook videos in mp4 format
    -Cut and download facebook videos in mp4 format
    -Merge and download facebook videos in mp4 format
    -Split and download facebook videos in mp4 format
    -Rotate and download facebook videos in mp4 format
    -Flip and download facebook videos in mp4 format
    -Resize and download facebook videos in mp4 format
    -Compress and download facebook videos in mp4 format
    -Enhance and download facebook videos in mp4 format
    -Add subtitles and download facebook videos in mp4 format
    -Add watermark and download facebook videos in mp4 format
    -Add music and download facebook videos in mp4 format
    -Add effects and download facebook videos in mp4 format

    - -

    Use Cases for Downloading Facebook Videos

    -

    There are many reasons why you may want to download Facebook videos to MP4. Here are some common scenarios:

    - -

    How to Download Facebook Videos to MP4?

    -

    Now that you know why you should download Facebook videos to MP4, let's see how you can do it with two different methods. One is using an online Facebook video downloader, and the other is using a desktop Facebook video downloader software.

    -

    Method 1: Use an Online Facebook Video Downloader

    -

    An online Facebook video downloader is a web-based tool that allows you to download Facebook videos to MP4 without installing any software or app on your device. All you need is a web browser and an internet connection. Here are the steps to use an online Facebook video downloader:

    -

    Step 1: Copy the Facebook Video URL

    -

    The first step is to copy the URL of the Facebook video that you want to download. To do this, you can either right click on the three dots icon on the top right corner of the video and select "Copy link", or go to the video page and copy the URL from the address bar of your browser.

    -

    Step 2: Paste the URL into the Online Downloader

    -

    The next step is to paste the URL into the online downloader. To do this, you can go to any online Facebook video downloader website, such as [FBDownloader], [Getfvid], or [FB Video Saver]. Then, you can paste the URL into the input box and click on the "Download" or "Go" button.

    -

    Step 3: Choose MP4 as the Output Format and Download

    -

    The final step is to choose MP4 as the output format and download the video. To do this, you can look for the MP4 option among the available formats and quality options. Usually, MP4 is the default or recommended format for most online downloaders. Then, you can right-click on the "Download" or "Save" button and select "Save link as" or "Save target as" to save the video on your device.

    -

    Method 2: Use a Desktop Facebook Video Downloader Software

    -

    A desktop Facebook video downloader software is a program that you need to install and run on your computer. It usually offers more features and functions than an online downloader, such as batch downloading, video conversion, video editing, and more. Here are the steps to use a desktop Facebook video downloader software:

    -

    Step 1: Install and Launch the Software

    -

    The first step is to install and launch the software on your computer. To do this, you can go to the official website of the software and download the installation file. Some examples of desktop Facebook video downloader software are [iTubeGo], [4K Video Downloader], and [Wondershare UniConverter]. Then, you can follow the instructions to install and launch the software.

    -

    Step 2: Copy and Paste the Facebook Video URL into the Software

    -

    The next step is to copy and paste the Facebook video URL into the software. To do this, you can use the same method as in step 1 of method 1 to copy the URL of the Facebook video that you want to download. Then, you can paste it into the software by clicking on the "Paste URL" or "Add URL" button.

    -

    Step 3: Select MP4 as the Output Format and Download

    -

    The final step is to select MP4 as the output format and download the video. To do this, you can look for the MP4 option in the settings or preferences of the software. You can also adjust the quality, resolution, and other parameters of the output video according to your needs. Then, you can click on the "Download" or "Start" button to save the video on your computer.

    -

    Tips and Tricks for Downloading Facebook Videos to MP4

    -

    Now that you know how to download Facebook videos to MP4 with two different methods, here are some tips and tricks for downloading Facebook videos to MP4 more easily and effectively:

    -

    Check the Video Quality and Size Before Downloading

    -

    Before you download a Facebook video to MP4, you should check the video quality and size to make sure it meets your expectations and requirements. You can do this by hovering over the video on Facebook and looking at the information that appears on the bottom right corner. You can also use the online or desktop downloader tools to preview the video quality and size before downloading.

    -

    Respect the Copyrights and Privacy of the Video Owners

    -

    When you download a Facebook video to MP4, you should respect the copyrights and privacy of the video owners. You should not download or use any videos that are protected by intellectual property rights or personal data protection laws without their permission or consent. You should also not download or use any videos that are illegal, harmful, or offensive.

    -

    Manage and Organize Your Downloaded Videos

    -

    After you download a Facebook video to MP4, you should manage and organize your downloaded videos properly. You can do this by creating folders and subfolders on your device to store your videos by categories, topics, or dates. You can also rename your videos with descriptive titles and tags to make them easier to find and access.

    -

    Conclusion

    -

    In conclusion, downloading Facebook videos to MP4 is a useful and convenient way to save, share, or edit your favorite videos from Facebook. You can do it with two different methods: using an online Facebook video downloader or using a desktop Facebook video downloader software. Both methods are easy and effective, but they have their own advantages and disadvantages. You can choose the one that suits your needs and preferences best.

    -

    We hope this article has helped you learn how to download Facebook videos to MP4 with a complete guide. If you have any questions or feedback, please feel free to leave a comment below. Happy downloading!

    -

    FAQs

    -

    Here are some frequently asked questions about downloading Facebook videos to MP4:

    -
      -
    1. Can I download Facebook videos to MP4 on my mobile device?
    2. -

      Yes, you can download Facebook videos to MP4 on your mobile device with an online Facebook video downloader. However, you may need to use a mobile browser that supports downloading files, such as Chrome or Safari. Alternatively, you can use a mobile app that can download Facebook videos to MP4, such as [Video Downloader for Facebook] or [Video Downloader for FB].

      -
    3. Can I download live videos from Facebook to MP4?
    4. -

      Yes, you can download live videos from Facebook to MP4 with a desktop Facebook video downloader software. However, you may need to wait until the live stream is over before you can download it. Alternatively, you can use a screen recorder software or app that can capture live videos from Facebook and save them as MP4 files.

      -
    5. Can I download private videos from Facebook to MP4?
    6. -

      Yes, you can download private videos from Facebook to MP4 with an online or desktop Facebook video downloader tool. However, you may need to log in to your Facebook account before you can access the private videos. Alternatively, you can use a browser extension that can download private videos from Facebook to MP4, such as [FBDown Video Downloader] or [Video Downloader PLUS].

      -
    7. Can I convert other video formats to MP4?
    8. -

      Yes, you can convert other video formats to MP4 with a desktop Facebook video downloader software or a standalone video converter software or app. You can choose from various formats and codecs, such as AVI, MKV, MOV, WMV, FLV, MPEG, H.264, HEVC, etc.

      -
    9. Can I edit my downloaded videos from Facebook?
    10. -

      Yes, you can edit your downloaded videos from Facebook with a desktop Facebook video downloader software or a standalone video editor software or app. You can perform various editing tasks, such as trimming, cropping, rotating, merging, splitting, adding effects, subtitles, music, etc. to your videos.

      197e85843d
      -
      -
      \ No newline at end of file diff --git a/spaces/232labs/VToonify/vtoonify/model/encoder/align_all_parallel.py b/spaces/232labs/VToonify/vtoonify/model/encoder/align_all_parallel.py deleted file mode 100644 index a3bdf8d1c4b02687249709a2da3c21794b22be92..0000000000000000000000000000000000000000 --- a/spaces/232labs/VToonify/vtoonify/model/encoder/align_all_parallel.py +++ /dev/null @@ -1,231 +0,0 @@ -""" -brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset) -author: lzhbrian (https://lzhbrian.me) -date: 2020.1.5 -note: code is heavily borrowed from - https://github.com/NVlabs/ffhq-dataset - http://dlib.net/face_landmark_detection.py.html - -requirements: - apt install cmake - conda install Pillow numpy scipy - pip install dlib - # download face landmark model from: - # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 -""" -from argparse import ArgumentParser -import time -import numpy as np -import PIL -import PIL.Image -import os -import scipy -import scipy.ndimage -import dlib -import multiprocessing as mp -import math - -#from configs.paths_config import model_paths -SHAPE_PREDICTOR_PATH = 'shape_predictor_68_face_landmarks.dat'#model_paths["shape_predictor"] -cnn_model_path = 'mmod_human_face_detector.dat' -def get_landmark(filepath, predictor): - """get landmark with dlib - :return: np.array shape=(68, 2) - """ - detector = dlib.get_frontal_face_detector() - cnn_face_detector = dlib.cnn_face_detection_model_v1('localmodel/mmod_human_face_detector.dat') # Load the MMod CNN model - - if type(filepath) == str: - img = dlib.load_rgb_image(filepath) - else: - img = filepath - - # Try multiple times if necessary - - num_attempts = 3 - dets = [] - for attempt in range(num_attempts): - dets = detector(img, 1) - if len(dets) > 0: - break - - # If no faces are detected using HOG-based detector, try using MMod CNN-based detector - if len(dets) == 0: - dets = cnn_face_detector(img, 1) - dets = [rect.rect for rect in dets] # Convert mmod_rectangles to rectangles - - if len(dets) == 0: - print('Error: no face detected!') - return None - - shape = None - for k, d in enumerate(dets): - shape = predictor(img, d) - - if shape is None: - print( - 'Error: No face detected! If you are sure there are faces in your input, you may rerun the code several times until the face is detected. Sometimes the detector is unstable.') - t = list(shape.parts()) - a = [] - for tt in t: - a.append([tt.x, tt.y]) - lm = np.array(a) - return lm - -def align_face(filepath, predictor): - """ - :param filepath: str - :return: PIL Image - """ - - lm = get_landmark(filepath, predictor) - if lm is None: - return None - - lm_chin = lm[0: 17] # left-right - lm_eyebrow_left = lm[17: 22] # left-right - lm_eyebrow_right = lm[22: 27] # left-right - lm_nose = lm[27: 31] # top-down - lm_nostrils = lm[31: 36] # top-down - lm_eye_left = lm[36: 42] # left-clockwise - lm_eye_right = lm[42: 48] # left-clockwise - lm_mouth_outer = lm[48: 60] # left-clockwise - lm_mouth_inner = lm[60: 68] # left-clockwise - - # Calculate auxiliary vectors. - eye_left = np.mean(lm_eye_left, axis=0) - eye_right = np.mean(lm_eye_right, axis=0) - eye_avg = (eye_left + eye_right) * 0.5 - eye_to_eye = eye_right - eye_left - mouth_left = lm_mouth_outer[0] - mouth_right = lm_mouth_outer[6] - mouth_avg = (mouth_left + mouth_right) * 0.5 - eye_to_mouth = mouth_avg - eye_avg - - # Choose oriented crop rectangle. 
- x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1] - x /= np.hypot(*x) - x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8) - y = np.flipud(x) * [-1, 1] - c = eye_avg + eye_to_mouth * 0.1 - quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y]) - qsize = np.hypot(*x) * 2 - - # read image - if type(filepath) == str: - img = PIL.Image.open(filepath) - else: - img = PIL.Image.fromarray(filepath) - - output_size = 256 - transform_size = 256 - enable_padding = True - - # Shrink. - shrink = int(np.floor(qsize / output_size * 0.5)) - if shrink > 1: - rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink))) - img = img.resize(rsize, PIL.Image.ANTIALIAS) - quad /= shrink - qsize /= shrink - - # Crop. - border = max(int(np.rint(qsize * 0.1)), 3) - crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), - min(crop[3] + border, img.size[1])) - if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]: - img = img.crop(crop) - quad -= crop[0:2] - - # Pad. - pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))), - int(np.ceil(max(quad[:, 1])))) - pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), - max(pad[3] - img.size[1] + border, 0)) - if enable_padding and max(pad) > border - 4: - pad = np.maximum(pad, int(np.rint(qsize * 0.3))) - img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect') - h, w, _ = img.shape - y, x, _ = np.ogrid[:h, :w, :1] - mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]), - 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3])) - blur = qsize * 0.02 - img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0) - img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0) - img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB') - quad += pad[:2] - - # Transform. - img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR) - if output_size < transform_size: - img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS) - - # Save aligned image. 
- return img - - -def chunks(lst, n): - """Yield successive n-sized chunks from lst.""" - for i in range(0, len(lst), n): - yield lst[i:i + n] - - -def extract_on_paths(file_paths): - predictor = dlib.shape_predictor(SHAPE_PREDICTOR_PATH) - pid = mp.current_process().name - print('\t{} is starting to extract on #{} images'.format(pid, len(file_paths))) - tot_count = len(file_paths) - count = 0 - for file_path, res_path in file_paths: - count += 1 - if count % 100 == 0: - print('{} done with {}/{}'.format(pid, count, tot_count)) - try: - res = align_face(file_path, predictor) - res = res.convert('RGB') - os.makedirs(os.path.dirname(res_path), exist_ok=True) - res.save(res_path) - except Exception: - continue - print('\tDone!') - - -def parse_args(): - parser = ArgumentParser(add_help=False) - parser.add_argument('--num_threads', type=int, default=1) - parser.add_argument('--root_path', type=str, default='') - args = parser.parse_args() - return args - - -def run(args): - root_path = args.root_path - out_crops_path = root_path + '_crops' - if not os.path.exists(out_crops_path): - os.makedirs(out_crops_path, exist_ok=True) - - file_paths = [] - for root, dirs, files in os.walk(root_path): - for file in files: - file_path = os.path.join(root, file) - fname = os.path.join(out_crops_path, os.path.relpath(file_path, root_path)) - res_path = '{}.jpg'.format(os.path.splitext(fname)[0]) - if os.path.splitext(file_path)[1] == '.txt' or os.path.exists(res_path): - continue - file_paths.append((file_path, res_path)) - - file_chunks = list(chunks(file_paths, int(math.ceil(len(file_paths) / args.num_threads)))) - print(len(file_chunks)) - pool = mp.Pool(args.num_threads) - print('Running on {} paths\nHere we goooo'.format(len(file_paths))) - tic = time.time() - pool.map(extract_on_paths, file_chunks) - toc = time.time() - print('Mischief managed in {}s'.format(toc - tic)) - - -if __name__ == '__main__': - args = parse_args() - run(args) diff --git a/spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/networks_basic.py b/spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/networks_basic.py deleted file mode 100644 index ec3f045f9f22dbf49e18e9edca25d04ccc551da9..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/networks_basic.py +++ /dev/null @@ -1,187 +0,0 @@ - -from __future__ import absolute_import - -import sys -import torch -import torch.nn as nn -import torch.nn.init as init -from torch.autograd import Variable -import numpy as np -from pdb import set_trace as st -from skimage import color -from IPython import embed -from models.stylegan2.lpips import pretrained_networks as pn - -import models.stylegan2.lpips as util - -def spatial_average(in_tens, keepdim=True): - return in_tens.mean([2,3],keepdim=keepdim) - -def upsample(in_tens, out_H=64): # assumes scale factor is same for H and W - in_H = in_tens.shape[2] - scale_factor = 1.*out_H/in_H - - return nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False)(in_tens) - -# Learned perceptual metric -class PNetLin(nn.Module): - def __init__(self, pnet_type='vgg', pnet_rand=False, pnet_tune=False, use_dropout=True, spatial=False, version='0.1', lpips=True): - super(PNetLin, self).__init__() - - self.pnet_type = pnet_type - self.pnet_tune = pnet_tune - self.pnet_rand = pnet_rand - self.spatial = spatial - self.lpips = lpips - self.version = version - self.scaling_layer = ScalingLayer() - - if(self.pnet_type in ['vgg','vgg16']): - net_type = pn.vgg16 - self.chns = [64,128,256,512,512] - 
elif(self.pnet_type=='alex'): - net_type = pn.alexnet - self.chns = [64,192,384,256,256] - elif(self.pnet_type=='squeeze'): - net_type = pn.squeezenet - self.chns = [64,128,256,384,384,512,512] - self.L = len(self.chns) - - self.net = net_type(pretrained=not self.pnet_rand, requires_grad=self.pnet_tune) - - if(lpips): - self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) - self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) - self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) - self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) - self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) - self.lins = [self.lin0,self.lin1,self.lin2,self.lin3,self.lin4] - if(self.pnet_type=='squeeze'): # 7 layers for squeezenet - self.lin5 = NetLinLayer(self.chns[5], use_dropout=use_dropout) - self.lin6 = NetLinLayer(self.chns[6], use_dropout=use_dropout) - self.lins+=[self.lin5,self.lin6] - - def forward(self, in0, in1, retPerLayer=False): - # v0.0 - original release had a bug, where input was not scaled - in0_input, in1_input = (self.scaling_layer(in0), self.scaling_layer(in1)) if self.version=='0.1' else (in0, in1) - outs0, outs1 = self.net.forward(in0_input), self.net.forward(in1_input) - feats0, feats1, diffs = {}, {}, {} - - for kk in range(self.L): - feats0[kk], feats1[kk] = util.normalize_tensor(outs0[kk]), util.normalize_tensor(outs1[kk]) - diffs[kk] = (feats0[kk]-feats1[kk])**2 - - if(self.lpips): - if(self.spatial): - res = [upsample(self.lins[kk].model(diffs[kk]), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(self.lins[kk].model(diffs[kk]), keepdim=True) for kk in range(self.L)] - else: - if(self.spatial): - res = [upsample(diffs[kk].sum(dim=1,keepdim=True), out_H=in0.shape[2]) for kk in range(self.L)] - else: - res = [spatial_average(diffs[kk].sum(dim=1,keepdim=True), keepdim=True) for kk in range(self.L)] - - val = res[0] - for l in range(1,self.L): - val += res[l] - - if(retPerLayer): - return (val, res) - else: - return val - -class ScalingLayer(nn.Module): - def __init__(self): - super(ScalingLayer, self).__init__() - self.register_buffer('shift', torch.Tensor([-.030,-.088,-.188])[None,:,None,None]) - self.register_buffer('scale', torch.Tensor([.458,.448,.450])[None,:,None,None]) - - def forward(self, inp): - return (inp - self.shift) / self.scale - - -class NetLinLayer(nn.Module): - ''' A single linear layer which does a 1x1 conv ''' - def __init__(self, chn_in, chn_out=1, use_dropout=False): - super(NetLinLayer, self).__init__() - - layers = [nn.Dropout(),] if(use_dropout) else [] - layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False),] - self.model = nn.Sequential(*layers) - - -class Dist2LogitLayer(nn.Module): - ''' takes 2 distances, puts through fc layers, spits out value between [0,1] (if use_sigmoid is True) ''' - def __init__(self, chn_mid=32, use_sigmoid=True): - super(Dist2LogitLayer, self).__init__() - - layers = [nn.Conv2d(5, chn_mid, 1, stride=1, padding=0, bias=True),] - layers += [nn.LeakyReLU(0.2,True),] - layers += [nn.Conv2d(chn_mid, chn_mid, 1, stride=1, padding=0, bias=True),] - layers += [nn.LeakyReLU(0.2,True),] - layers += [nn.Conv2d(chn_mid, 1, 1, stride=1, padding=0, bias=True),] - if(use_sigmoid): - layers += [nn.Sigmoid(),] - self.model = nn.Sequential(*layers) - - def forward(self,d0,d1,eps=0.1): - return self.model.forward(torch.cat((d0,d1,d0-d1,d0/(d1+eps),d1/(d0+eps)),dim=1)) - -class BCERankingLoss(nn.Module): - def __init__(self, chn_mid=32): - 
super(BCERankingLoss, self).__init__() - self.net = Dist2LogitLayer(chn_mid=chn_mid) - # self.parameters = list(self.net.parameters()) - self.loss = torch.nn.BCELoss() - - def forward(self, d0, d1, judge): - per = (judge+1.)/2. - self.logit = self.net.forward(d0,d1) - return self.loss(self.logit, per) - -# L2, DSSIM metrics -class FakeNet(nn.Module): - def __init__(self, use_gpu=True, colorspace='Lab'): - super(FakeNet, self).__init__() - self.use_gpu = use_gpu - self.colorspace=colorspace - -class L2(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert(in0.size()[0]==1) # currently only supports batchSize 1 - - if(self.colorspace=='RGB'): - (N,C,X,Y) = in0.size() - value = torch.mean(torch.mean(torch.mean((in0-in1)**2,dim=1).view(N,1,X,Y),dim=2).view(N,1,1,Y),dim=3).view(N) - return value - elif(self.colorspace=='Lab'): - value = util.l2(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), - util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') - ret_var = Variable( torch.Tensor((value,) ) ) - if(self.use_gpu): - ret_var = ret_var.cuda() - return ret_var - -class DSSIM(FakeNet): - - def forward(self, in0, in1, retPerLayer=None): - assert(in0.size()[0]==1) # currently only supports batchSize 1 - - if(self.colorspace=='RGB'): - value = util.dssim(1.*util.tensor2im(in0.data), 1.*util.tensor2im(in1.data), range=255.).astype('float') - elif(self.colorspace=='Lab'): - value = util.dssim(util.tensor2np(util.tensor2tensorlab(in0.data,to_norm=False)), - util.tensor2np(util.tensor2tensorlab(in1.data,to_norm=False)), range=100.).astype('float') - ret_var = Variable( torch.Tensor((value,) ) ) - if(self.use_gpu): - ret_var = ret_var.cuda() - return ret_var - -def print_network(net): - num_params = 0 - for param in net.parameters(): - num_params += param.numel() - print('Network',net) - print('Total number of parameters: %d' % num_params) diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/imagenet_zeroshot_data.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/imagenet_zeroshot_data.py deleted file mode 100644 index d32e55328d6799ccb8d61625f43abb80a33d6c17..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/imagenet_zeroshot_data.py +++ /dev/null @@ -1,1088 +0,0 @@ -# NOTE: This script is currently not supported for CLAP. 
- -imagenet_classnames = [ - "tench", - "goldfish", - "great white shark", - "tiger shark", - "hammerhead shark", - "electric ray", - "stingray", - "rooster", - "hen", - "ostrich", - "brambling", - "goldfinch", - "house finch", - "junco", - "indigo bunting", - "American robin", - "bulbul", - "jay", - "magpie", - "chickadee", - "American dipper", - "kite (bird of prey)", - "bald eagle", - "vulture", - "great grey owl", - "fire salamander", - "smooth newt", - "newt", - "spotted salamander", - "axolotl", - "American bullfrog", - "tree frog", - "tailed frog", - "loggerhead sea turtle", - "leatherback sea turtle", - "mud turtle", - "terrapin", - "box turtle", - "banded gecko", - "green iguana", - "Carolina anole", - "desert grassland whiptail lizard", - "agama", - "frilled-necked lizard", - "alligator lizard", - "Gila monster", - "European green lizard", - "chameleon", - "Komodo dragon", - "Nile crocodile", - "American alligator", - "triceratops", - "worm snake", - "ring-necked snake", - "eastern hog-nosed snake", - "smooth green snake", - "kingsnake", - "garter snake", - "water snake", - "vine snake", - "night snake", - "boa constrictor", - "African rock python", - "Indian cobra", - "green mamba", - "sea snake", - "Saharan horned viper", - "eastern diamondback rattlesnake", - "sidewinder rattlesnake", - "trilobite", - "harvestman", - "scorpion", - "yellow garden spider", - "barn spider", - "European garden spider", - "southern black widow", - "tarantula", - "wolf spider", - "tick", - "centipede", - "black grouse", - "ptarmigan", - "ruffed grouse", - "prairie grouse", - "peafowl", - "quail", - "partridge", - "african grey parrot", - "macaw", - "sulphur-crested cockatoo", - "lorikeet", - "coucal", - "bee eater", - "hornbill", - "hummingbird", - "jacamar", - "toucan", - "duck", - "red-breasted merganser", - "goose", - "black swan", - "tusker", - "echidna", - "platypus", - "wallaby", - "koala", - "wombat", - "jellyfish", - "sea anemone", - "brain coral", - "flatworm", - "nematode", - "conch", - "snail", - "slug", - "sea slug", - "chiton", - "chambered nautilus", - "Dungeness crab", - "rock crab", - "fiddler crab", - "red king crab", - "American lobster", - "spiny lobster", - "crayfish", - "hermit crab", - "isopod", - "white stork", - "black stork", - "spoonbill", - "flamingo", - "little blue heron", - "great egret", - "bittern bird", - "crane bird", - "limpkin", - "common gallinule", - "American coot", - "bustard", - "ruddy turnstone", - "dunlin", - "common redshank", - "dowitcher", - "oystercatcher", - "pelican", - "king penguin", - "albatross", - "grey whale", - "killer whale", - "dugong", - "sea lion", - "Chihuahua", - "Japanese Chin", - "Maltese", - "Pekingese", - "Shih Tzu", - "King Charles Spaniel", - "Papillon", - "toy terrier", - "Rhodesian Ridgeback", - "Afghan Hound", - "Basset Hound", - "Beagle", - "Bloodhound", - "Bluetick Coonhound", - "Black and Tan Coonhound", - "Treeing Walker Coonhound", - "English foxhound", - "Redbone Coonhound", - "borzoi", - "Irish Wolfhound", - "Italian Greyhound", - "Whippet", - "Ibizan Hound", - "Norwegian Elkhound", - "Otterhound", - "Saluki", - "Scottish Deerhound", - "Weimaraner", - "Staffordshire Bull Terrier", - "American Staffordshire Terrier", - "Bedlington Terrier", - "Border Terrier", - "Kerry Blue Terrier", - "Irish Terrier", - "Norfolk Terrier", - "Norwich Terrier", - "Yorkshire Terrier", - "Wire Fox Terrier", - "Lakeland Terrier", - "Sealyham Terrier", - "Airedale Terrier", - "Cairn Terrier", - "Australian Terrier", - "Dandie Dinmont Terrier", - 
"Boston Terrier", - "Miniature Schnauzer", - "Giant Schnauzer", - "Standard Schnauzer", - "Scottish Terrier", - "Tibetan Terrier", - "Australian Silky Terrier", - "Soft-coated Wheaten Terrier", - "West Highland White Terrier", - "Lhasa Apso", - "Flat-Coated Retriever", - "Curly-coated Retriever", - "Golden Retriever", - "Labrador Retriever", - "Chesapeake Bay Retriever", - "German Shorthaired Pointer", - "Vizsla", - "English Setter", - "Irish Setter", - "Gordon Setter", - "Brittany dog", - "Clumber Spaniel", - "English Springer Spaniel", - "Welsh Springer Spaniel", - "Cocker Spaniel", - "Sussex Spaniel", - "Irish Water Spaniel", - "Kuvasz", - "Schipperke", - "Groenendael dog", - "Malinois", - "Briard", - "Australian Kelpie", - "Komondor", - "Old English Sheepdog", - "Shetland Sheepdog", - "collie", - "Border Collie", - "Bouvier des Flandres dog", - "Rottweiler", - "German Shepherd Dog", - "Dobermann", - "Miniature Pinscher", - "Greater Swiss Mountain Dog", - "Bernese Mountain Dog", - "Appenzeller Sennenhund", - "Entlebucher Sennenhund", - "Boxer", - "Bullmastiff", - "Tibetan Mastiff", - "French Bulldog", - "Great Dane", - "St. Bernard", - "husky", - "Alaskan Malamute", - "Siberian Husky", - "Dalmatian", - "Affenpinscher", - "Basenji", - "pug", - "Leonberger", - "Newfoundland dog", - "Great Pyrenees dog", - "Samoyed", - "Pomeranian", - "Chow Chow", - "Keeshond", - "brussels griffon", - "Pembroke Welsh Corgi", - "Cardigan Welsh Corgi", - "Toy Poodle", - "Miniature Poodle", - "Standard Poodle", - "Mexican hairless dog (xoloitzcuintli)", - "grey wolf", - "Alaskan tundra wolf", - "red wolf or maned wolf", - "coyote", - "dingo", - "dhole", - "African wild dog", - "hyena", - "red fox", - "kit fox", - "Arctic fox", - "grey fox", - "tabby cat", - "tiger cat", - "Persian cat", - "Siamese cat", - "Egyptian Mau", - "cougar", - "lynx", - "leopard", - "snow leopard", - "jaguar", - "lion", - "tiger", - "cheetah", - "brown bear", - "American black bear", - "polar bear", - "sloth bear", - "mongoose", - "meerkat", - "tiger beetle", - "ladybug", - "ground beetle", - "longhorn beetle", - "leaf beetle", - "dung beetle", - "rhinoceros beetle", - "weevil", - "fly", - "bee", - "ant", - "grasshopper", - "cricket insect", - "stick insect", - "cockroach", - "praying mantis", - "cicada", - "leafhopper", - "lacewing", - "dragonfly", - "damselfly", - "red admiral butterfly", - "ringlet butterfly", - "monarch butterfly", - "small white butterfly", - "sulphur butterfly", - "gossamer-winged butterfly", - "starfish", - "sea urchin", - "sea cucumber", - "cottontail rabbit", - "hare", - "Angora rabbit", - "hamster", - "porcupine", - "fox squirrel", - "marmot", - "beaver", - "guinea pig", - "common sorrel horse", - "zebra", - "pig", - "wild boar", - "warthog", - "hippopotamus", - "ox", - "water buffalo", - "bison", - "ram (adult male sheep)", - "bighorn sheep", - "Alpine ibex", - "hartebeest", - "impala (antelope)", - "gazelle", - "arabian camel", - "llama", - "weasel", - "mink", - "European polecat", - "black-footed ferret", - "otter", - "skunk", - "badger", - "armadillo", - "three-toed sloth", - "orangutan", - "gorilla", - "chimpanzee", - "gibbon", - "siamang", - "guenon", - "patas monkey", - "baboon", - "macaque", - "langur", - "black-and-white colobus", - "proboscis monkey", - "marmoset", - "white-headed capuchin", - "howler monkey", - "titi monkey", - "Geoffroy's spider monkey", - "common squirrel monkey", - "ring-tailed lemur", - "indri", - "Asian elephant", - "African bush elephant", - "red panda", - "giant panda", - 
"snoek fish", - "eel", - "silver salmon", - "rock beauty fish", - "clownfish", - "sturgeon", - "gar fish", - "lionfish", - "pufferfish", - "abacus", - "abaya", - "academic gown", - "accordion", - "acoustic guitar", - "aircraft carrier", - "airliner", - "airship", - "altar", - "ambulance", - "amphibious vehicle", - "analog clock", - "apiary", - "apron", - "trash can", - "assault rifle", - "backpack", - "bakery", - "balance beam", - "balloon", - "ballpoint pen", - "Band-Aid", - "banjo", - "baluster / handrail", - "barbell", - "barber chair", - "barbershop", - "barn", - "barometer", - "barrel", - "wheelbarrow", - "baseball", - "basketball", - "bassinet", - "bassoon", - "swimming cap", - "bath towel", - "bathtub", - "station wagon", - "lighthouse", - "beaker", - "military hat (bearskin or shako)", - "beer bottle", - "beer glass", - "bell tower", - "baby bib", - "tandem bicycle", - "bikini", - "ring binder", - "binoculars", - "birdhouse", - "boathouse", - "bobsleigh", - "bolo tie", - "poke bonnet", - "bookcase", - "bookstore", - "bottle cap", - "hunting bow", - "bow tie", - "brass memorial plaque", - "bra", - "breakwater", - "breastplate", - "broom", - "bucket", - "buckle", - "bulletproof vest", - "high-speed train", - "butcher shop", - "taxicab", - "cauldron", - "candle", - "cannon", - "canoe", - "can opener", - "cardigan", - "car mirror", - "carousel", - "tool kit", - "cardboard box / carton", - "car wheel", - "automated teller machine", - "cassette", - "cassette player", - "castle", - "catamaran", - "CD player", - "cello", - "mobile phone", - "chain", - "chain-link fence", - "chain mail", - "chainsaw", - "storage chest", - "chiffonier", - "bell or wind chime", - "china cabinet", - "Christmas stocking", - "church", - "movie theater", - "cleaver", - "cliff dwelling", - "cloak", - "clogs", - "cocktail shaker", - "coffee mug", - "coffeemaker", - "spiral or coil", - "combination lock", - "computer keyboard", - "candy store", - "container ship", - "convertible", - "corkscrew", - "cornet", - "cowboy boot", - "cowboy hat", - "cradle", - "construction crane", - "crash helmet", - "crate", - "infant bed", - "Crock Pot", - "croquet ball", - "crutch", - "cuirass", - "dam", - "desk", - "desktop computer", - "rotary dial telephone", - "diaper", - "digital clock", - "digital watch", - "dining table", - "dishcloth", - "dishwasher", - "disc brake", - "dock", - "dog sled", - "dome", - "doormat", - "drilling rig", - "drum", - "drumstick", - "dumbbell", - "Dutch oven", - "electric fan", - "electric guitar", - "electric locomotive", - "entertainment center", - "envelope", - "espresso machine", - "face powder", - "feather boa", - "filing cabinet", - "fireboat", - "fire truck", - "fire screen", - "flagpole", - "flute", - "folding chair", - "football helmet", - "forklift", - "fountain", - "fountain pen", - "four-poster bed", - "freight car", - "French horn", - "frying pan", - "fur coat", - "garbage truck", - "gas mask or respirator", - "gas pump", - "goblet", - "go-kart", - "golf ball", - "golf cart", - "gondola", - "gong", - "gown", - "grand piano", - "greenhouse", - "radiator grille", - "grocery store", - "guillotine", - "hair clip", - "hair spray", - "half-track", - "hammer", - "hamper", - "hair dryer", - "hand-held computer", - "handkerchief", - "hard disk drive", - "harmonica", - "harp", - "combine harvester", - "hatchet", - "holster", - "home theater", - "honeycomb", - "hook", - "hoop skirt", - "gymnastic horizontal bar", - "horse-drawn vehicle", - "hourglass", - "iPod", - "clothes iron", - "carved pumpkin", - 
"jeans", - "jeep", - "T-shirt", - "jigsaw puzzle", - "rickshaw", - "joystick", - "kimono", - "knee pad", - "knot", - "lab coat", - "ladle", - "lampshade", - "laptop computer", - "lawn mower", - "lens cap", - "letter opener", - "library", - "lifeboat", - "lighter", - "limousine", - "ocean liner", - "lipstick", - "slip-on shoe", - "lotion", - "music speaker", - "loupe magnifying glass", - "sawmill", - "magnetic compass", - "messenger bag", - "mailbox", - "tights", - "one-piece bathing suit", - "manhole cover", - "maraca", - "marimba", - "mask", - "matchstick", - "maypole", - "maze", - "measuring cup", - "medicine cabinet", - "megalith", - "microphone", - "microwave oven", - "military uniform", - "milk can", - "minibus", - "miniskirt", - "minivan", - "missile", - "mitten", - "mixing bowl", - "mobile home", - "ford model t", - "modem", - "monastery", - "monitor", - "moped", - "mortar and pestle", - "graduation cap", - "mosque", - "mosquito net", - "vespa", - "mountain bike", - "tent", - "computer mouse", - "mousetrap", - "moving van", - "muzzle", - "metal nail", - "neck brace", - "necklace", - "baby pacifier", - "notebook computer", - "obelisk", - "oboe", - "ocarina", - "odometer", - "oil filter", - "pipe organ", - "oscilloscope", - "overskirt", - "bullock cart", - "oxygen mask", - "product packet / packaging", - "paddle", - "paddle wheel", - "padlock", - "paintbrush", - "pajamas", - "palace", - "pan flute", - "paper towel", - "parachute", - "parallel bars", - "park bench", - "parking meter", - "railroad car", - "patio", - "payphone", - "pedestal", - "pencil case", - "pencil sharpener", - "perfume", - "Petri dish", - "photocopier", - "plectrum", - "Pickelhaube", - "picket fence", - "pickup truck", - "pier", - "piggy bank", - "pill bottle", - "pillow", - "ping-pong ball", - "pinwheel", - "pirate ship", - "drink pitcher", - "block plane", - "planetarium", - "plastic bag", - "plate rack", - "farm plow", - "plunger", - "Polaroid camera", - "pole", - "police van", - "poncho", - "pool table", - "soda bottle", - "plant pot", - "potter's wheel", - "power drill", - "prayer rug", - "printer", - "prison", - "missile", - "projector", - "hockey puck", - "punching bag", - "purse", - "quill", - "quilt", - "race car", - "racket", - "radiator", - "radio", - "radio telescope", - "rain barrel", - "recreational vehicle", - "fishing casting reel", - "reflex camera", - "refrigerator", - "remote control", - "restaurant", - "revolver", - "rifle", - "rocking chair", - "rotisserie", - "eraser", - "rugby ball", - "ruler measuring stick", - "sneaker", - "safe", - "safety pin", - "salt shaker", - "sandal", - "sarong", - "saxophone", - "scabbard", - "weighing scale", - "school bus", - "schooner", - "scoreboard", - "CRT monitor", - "screw", - "screwdriver", - "seat belt", - "sewing machine", - "shield", - "shoe store", - "shoji screen / room divider", - "shopping basket", - "shopping cart", - "shovel", - "shower cap", - "shower curtain", - "ski", - "balaclava ski mask", - "sleeping bag", - "slide rule", - "sliding door", - "slot machine", - "snorkel", - "snowmobile", - "snowplow", - "soap dispenser", - "soccer ball", - "sock", - "solar thermal collector", - "sombrero", - "soup bowl", - "keyboard space bar", - "space heater", - "space shuttle", - "spatula", - "motorboat", - "spider web", - "spindle", - "sports car", - "spotlight", - "stage", - "steam locomotive", - "through arch bridge", - "steel drum", - "stethoscope", - "scarf", - "stone wall", - "stopwatch", - "stove", - "strainer", - "tram", - "stretcher", - "couch", - 
"stupa", - "submarine", - "suit", - "sundial", - "sunglasses", - "sunglasses", - "sunscreen", - "suspension bridge", - "mop", - "sweatshirt", - "swim trunks / shorts", - "swing", - "electrical switch", - "syringe", - "table lamp", - "tank", - "tape player", - "teapot", - "teddy bear", - "television", - "tennis ball", - "thatched roof", - "front curtain", - "thimble", - "threshing machine", - "throne", - "tile roof", - "toaster", - "tobacco shop", - "toilet seat", - "torch", - "totem pole", - "tow truck", - "toy store", - "tractor", - "semi-trailer truck", - "tray", - "trench coat", - "tricycle", - "trimaran", - "tripod", - "triumphal arch", - "trolleybus", - "trombone", - "hot tub", - "turnstile", - "typewriter keyboard", - "umbrella", - "unicycle", - "upright piano", - "vacuum cleaner", - "vase", - "vaulted or arched ceiling", - "velvet fabric", - "vending machine", - "vestment", - "viaduct", - "violin", - "volleyball", - "waffle iron", - "wall clock", - "wallet", - "wardrobe", - "military aircraft", - "sink", - "washing machine", - "water bottle", - "water jug", - "water tower", - "whiskey jug", - "whistle", - "hair wig", - "window screen", - "window shade", - "Windsor tie", - "wine bottle", - "airplane wing", - "wok", - "wooden spoon", - "wool", - "split-rail fence", - "shipwreck", - "sailboat", - "yurt", - "website", - "comic book", - "crossword", - "traffic or street sign", - "traffic light", - "dust jacket", - "menu", - "plate", - "guacamole", - "consomme", - "hot pot", - "trifle", - "ice cream", - "popsicle", - "baguette", - "bagel", - "pretzel", - "cheeseburger", - "hot dog", - "mashed potatoes", - "cabbage", - "broccoli", - "cauliflower", - "zucchini", - "spaghetti squash", - "acorn squash", - "butternut squash", - "cucumber", - "artichoke", - "bell pepper", - "cardoon", - "mushroom", - "Granny Smith apple", - "strawberry", - "orange", - "lemon", - "fig", - "pineapple", - "banana", - "jackfruit", - "cherimoya (custard apple)", - "pomegranate", - "hay", - "carbonara", - "chocolate syrup", - "dough", - "meatloaf", - "pizza", - "pot pie", - "burrito", - "red wine", - "espresso", - "tea cup", - "eggnog", - "mountain", - "bubble", - "cliff", - "coral reef", - "geyser", - "lakeshore", - "promontory", - "sandbar", - "beach", - "valley", - "volcano", - "baseball player", - "bridegroom", - "scuba diver", - "rapeseed", - "daisy", - "yellow lady's slipper", - "corn", - "acorn", - "rose hip", - "horse chestnut seed", - "coral fungus", - "agaric", - "gyromitra", - "stinkhorn mushroom", - "earth star fungus", - "hen of the woods mushroom", - "bolete", - "corn cob", - "toilet paper", -] - - -openai_imagenet_template = [ - lambda c: f"a bad photo of a {c}.", - lambda c: f"a photo of many {c}.", - lambda c: f"a sculpture of a {c}.", - lambda c: f"a photo of the hard to see {c}.", - lambda c: f"a low resolution photo of the {c}.", - lambda c: f"a rendering of a {c}.", - lambda c: f"graffiti of a {c}.", - lambda c: f"a bad photo of the {c}.", - lambda c: f"a cropped photo of the {c}.", - lambda c: f"a tattoo of a {c}.", - lambda c: f"the embroidered {c}.", - lambda c: f"a photo of a hard to see {c}.", - lambda c: f"a bright photo of a {c}.", - lambda c: f"a photo of a clean {c}.", - lambda c: f"a photo of a dirty {c}.", - lambda c: f"a dark photo of the {c}.", - lambda c: f"a drawing of a {c}.", - lambda c: f"a photo of my {c}.", - lambda c: f"the plastic {c}.", - lambda c: f"a photo of the cool {c}.", - lambda c: f"a close-up photo of a {c}.", - lambda c: f"a black and white photo of the {c}.", - 
lambda c: f"a painting of the {c}.", - lambda c: f"a painting of a {c}.", - lambda c: f"a pixelated photo of the {c}.", - lambda c: f"a sculpture of the {c}.", - lambda c: f"a bright photo of the {c}.", - lambda c: f"a cropped photo of a {c}.", - lambda c: f"a plastic {c}.", - lambda c: f"a photo of the dirty {c}.", - lambda c: f"a jpeg corrupted photo of a {c}.", - lambda c: f"a blurry photo of the {c}.", - lambda c: f"a photo of the {c}.", - lambda c: f"a good photo of the {c}.", - lambda c: f"a rendering of the {c}.", - lambda c: f"a {c} in a video game.", - lambda c: f"a photo of one {c}.", - lambda c: f"a doodle of a {c}.", - lambda c: f"a close-up photo of the {c}.", - lambda c: f"a photo of a {c}.", - lambda c: f"the origami {c}.", - lambda c: f"the {c} in a video game.", - lambda c: f"a sketch of a {c}.", - lambda c: f"a doodle of the {c}.", - lambda c: f"a origami {c}.", - lambda c: f"a low resolution photo of a {c}.", - lambda c: f"the toy {c}.", - lambda c: f"a rendition of the {c}.", - lambda c: f"a photo of the clean {c}.", - lambda c: f"a photo of a large {c}.", - lambda c: f"a rendition of a {c}.", - lambda c: f"a photo of a nice {c}.", - lambda c: f"a photo of a weird {c}.", - lambda c: f"a blurry photo of a {c}.", - lambda c: f"a cartoon {c}.", - lambda c: f"art of a {c}.", - lambda c: f"a sketch of the {c}.", - lambda c: f"a embroidered {c}.", - lambda c: f"a pixelated photo of a {c}.", - lambda c: f"itap of the {c}.", - lambda c: f"a jpeg corrupted photo of the {c}.", - lambda c: f"a good photo of a {c}.", - lambda c: f"a plushie {c}.", - lambda c: f"a photo of the nice {c}.", - lambda c: f"a photo of the small {c}.", - lambda c: f"a photo of the weird {c}.", - lambda c: f"the cartoon {c}.", - lambda c: f"art of the {c}.", - lambda c: f"a drawing of the {c}.", - lambda c: f"a photo of the large {c}.", - lambda c: f"a black and white photo of a {c}.", - lambda c: f"the plushie {c}.", - lambda c: f"a dark photo of a {c}.", - lambda c: f"itap of a {c}.", - lambda c: f"graffiti of the {c}.", - lambda c: f"a toy {c}.", - lambda c: f"itap of my {c}.", - lambda c: f"a photo of a cool {c}.", - lambda c: f"a photo of a small {c}.", - lambda c: f"a tattoo of the {c}.", -] diff --git a/spaces/AIWaves/SOP_Generation-single/__init__.py b/spaces/AIWaves/SOP_Generation-single/__init__.py deleted file mode 100644 index 69b468b54240b0a357eac1ba7573971cf65b412c..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/SOP_Generation-single/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .evolve import * -from .SOP import * -from .State import * -from .utils import * \ No newline at end of file diff --git a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet152_8xb16_cifar10.py b/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet152_8xb16_cifar10.py deleted file mode 100644 index 3f307b6aa81661558b8308094de6e8327d08c830..0000000000000000000000000000000000000000 --- a/spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet152_8xb16_cifar10.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/resnet152_cifar.py', - '../_base_/datasets/cifar10_bs16.py', - '../_base_/schedules/cifar10_bs128.py', '../_base_/default_runtime.py' -] diff --git a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/activations.py b/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/activations.py deleted file mode 100644 index 
8bd6f2917a56d72db56555d0ff54b2311bc21778..0000000000000000000000000000000000000000 --- a/spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/modules/activations.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn as nn -from torch import Tensor -from typing import Union, Callable - - -class CustomGLU(nn.Module): - """Custom Gated Linear Unit activation. - Applies a modified gated linear unit :math:`a * f(b)` where :math:`a` is the first half - of the input matrices, :math:`b` is the second half, and :math:`f` is a provided activation - function (i.e. sigmoid, swish, etc.). - - Args: - activation (nn.Module): The custom activation to apply in the Gated Linear Unit - dim (int): the dimension on which to split the input. Default: -1 - - Shape: - - Input: :math:`(\ast_1, N, \ast_2)` where `*` means, any number of additional - dimensions - - Output: :math:`(\ast_1, M, \ast_2)` where :math:`M=N/2` - - Examples:: - >>> m = CustomGLU(nn.Sigmoid()) - >>> input = torch.randn(4, 2) - >>> output = m(input) - """ - def __init__(self, activation: nn.Module, dim: int = -1): - super(CustomGLU, self).__init__() - self.dim = dim - self.activation = activation - - def forward(self, x: Tensor): - assert x.shape[self.dim] % 2 == 0 # M = N / 2 - a, b = torch.chunk(x, 2, dim=self.dim) - return a * self.activation(b) - - -class SwiGLU(CustomGLU): - """SiLU Gated Linear Unit activation. - Applies SiLU Gated Linear Unit :math:`a * SiLU(b)` where :math:`a` is - the first half of the input matrices, :math:`b` is the second half. - - Args: - dim (int): the dimension on which to split the input. Default: -1 - """ - def __init__(self, dim: int = -1): - super(SwiGLU, self).__init__(nn.SiLU(), dim) - - -class GeGLU(CustomGLU): - """GeLU Gated Linear Unit activation. - Applies GeLU Gated Linear Unit :math:`a * GELU(b)` where :math:`a` is - the first half of the input matrices, :math:`b` is the second half. - - Args: - dim (int): the dimension on which to split the input. Default: -1 - """ - def __init__(self, dim: int = -1): - super(GeGLU, self).__init__(nn.GELU(), dim) - - -class ReGLU(CustomGLU): - """ReLU Gated Linear Unit activation. - Applies ReLU Gated Linear Unit :math:`a * ReLU(b)` where :math:`a` is - the first half of the input matrices, :math:`b` is the second half. - - Args: - dim (int): the dimension on which to split the input. Default: -1 - """ - def __init__(self, dim: int = -1): - super(ReGLU, self).__init__(nn.ReLU(), dim) - - -def get_activation_fn( - activation: Union[str, Callable[[Tensor], Tensor]] -) -> Union[str, Callable[[Tensor], Tensor]]: - """Helper function to map an activation string to the activation class. - If the supplied activation is not a string that is recognized, the activation is passed back. 
- - Args: - activation (Union[str, Callable[[Tensor], Tensor]]): Activation to check - """ - if isinstance(activation, str): - if activation == "reglu": - return ReGLU() - elif activation == "geglu": - return GeGLU() - elif activation == "swiglu": - return SwiGLU() - return activation diff --git a/spaces/Abbasghanbari/Abo/README.md b/spaces/Abbasghanbari/Abo/README.md deleted file mode 100644 index 727d719dd86ab25ffdfe12b0e928c7aae2be45a3..0000000000000000000000000000000000000000 --- a/spaces/Abbasghanbari/Abo/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Abo -emoji: 💻 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Acytoo.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Acytoo.py deleted file mode 100644 index d36ca6da22ddfa43690abdd0db27e6f971320f93..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Acytoo.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class Acytoo(AsyncGeneratorProvider): - url = 'https://chat.acytoo.com' - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - - async with ClientSession( - headers=_create_header() - ) as session: - async with session.post( - cls.url + '/api/completions', - proxy=proxy, - json=_create_payload(messages, **kwargs) - ) as response: - response.raise_for_status() - async for stream in response.content.iter_any(): - if stream: - yield stream.decode() - - -def _create_header(): - return { - 'accept': '*/*', - 'content-type': 'application/json', - } - - -def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs): - return { - 'key' : '', - 'model' : 'gpt-3.5-turbo', - 'messages' : messages, - 'temperature' : temperature, - 'password' : '' - } \ No newline at end of file diff --git a/spaces/Adapter/CoAdapter/ldm/modules/image_degradation/__init__.py b/spaces/Adapter/CoAdapter/ldm/modules/image_degradation/__init__.py deleted file mode 100644 index 7836cada81f90ded99c58d5942eea4c3477f58fc..0000000000000000000000000000000000000000 --- a/spaces/Adapter/CoAdapter/ldm/modules/image_degradation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr -from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/actions/SwapChess.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/actions/SwapChess.js deleted file mode 100644 index 0bfb6c65ff1529561a56a741650a3895407874f1..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/actions/SwapChess.js +++ /dev/null @@ -1,30 +0,0 @@ -var SwapChess = function (chess1, chess2, board, bejeweled) { - var tileXYZ1 = board.chessToTileXYZ(chess1); - var tileXYZ2 = board.chessToTileXYZ(chess2); - var tileX1 = tileXYZ1.x, - tileY1 = tileXYZ1.y, - tileX2 = tileXYZ2.x, - tileY2 = tileXYZ2.y, 
- tileZ = tileXYZ1.z; - - // TileZ of chess1 and chess2 are the same, change tileZ of chess2 to a different value - board.setChessTileZ(chess2, `#${tileZ}`); - - // Move chess1 to tileXYZ2, chess2 to tileXYZ1 - var moveTo1 = bejeweled.getChessMoveTo(chess1); - var moveTo2 = bejeweled.getChessMoveTo(chess2); - moveTo1.moveTo(tileX2, tileY2); - moveTo2.moveTo(tileX1, tileY1); - - // Change tileZ of chess2 back - board.setChessTileZ(chess2, tileZ); - - if (moveTo1.isRunning) { - bejeweled.waitEvent(moveTo1, 'complete'); - } - if (moveTo2.isRunning) { - bejeweled.waitEvent(moveTo2, 'complete'); - } -}; - -export default SwapChess; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/PressCell.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/PressCell.js deleted file mode 100644 index 8590d442600cf74a4c0300a2bd7c9e662535f94a..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridtable/input/PressCell.js +++ /dev/null @@ -1,22 +0,0 @@ -import Press from '../../press/Press.js'; -import EmitCellEvent from './EmitCellEvent.js'; - -const GetValue = Phaser.Utils.Objects.GetValue; - -var PressCell = function (table, tableConfig) { - var pressConfig = GetValue(tableConfig, 'press', undefined); - if (pressConfig === false) { - return; - } - - table._press = new Press(table, pressConfig); - table._press - .on('pressstart', function (press, gameObject, lastPointer) { - EmitCellEvent(this.eventEmitter, 'cell.pressstart', table, press.worldX, press.worldY, lastPointer); - }, this) - .on('pressend', function (press, gameObject, lastPointer) { - EmitCellEvent(this.eventEmitter, 'cell.pressend', table, press.worldX, press.worldY, lastPointer); - }, this) -}; - -export default PressCell; \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/Methods.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/Methods.js deleted file mode 100644 index cc2e514004b6a143b96fb541f03084267476153f..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/scrollablepanel/scrollableblock/Methods.js +++ /dev/null @@ -1,21 +0,0 @@ -import GetChildrenWidth from './GetChildrenWidth.js'; -import GetChildrenHeight from './GetChildrenHeight.js'; -import GetChildrenSizers from './GetChildrenSizers.js'; -import ResetChildPosition from './ResetChildPosition.js'; -import LayoutChildren from './LayoutChildren.js'; -import ChildrenMaskMethods from '../../../../plugins/gameobjects/container/containerlite/mask/ChildrenMaskMethods.js'; - -var methods = { - getChildrenWidth: GetChildrenWidth, - getChildrenHeight: GetChildrenHeight, - getChildrenSizers: GetChildrenSizers, - resetChildPosition: ResetChildPosition, - layoutChildren: LayoutChildren -}; - -Object.assign( - methods, - ChildrenMaskMethods -); - -export default methods; \ No newline at end of file diff --git a/spaces/Ajitku/BTMLabs/README.md b/spaces/Ajitku/BTMLabs/README.md deleted file mode 100644 index 963f4b1a10758c63fcbea141252e3b948a695a9c..0000000000000000000000000000000000000000 --- a/spaces/Ajitku/BTMLabs/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: BTMLabs -emoji: 📊 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: 3.41.2 -app_file: app.py -pinned: false ---- - -Check out the 
configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Akshay-More-007/starcoder/README.md b/spaces/Akshay-More-007/starcoder/README.md deleted file mode 100644 index 511b956c2f0163c021557d9fc30cc054e5cd9947..0000000000000000000000000000000000000000 --- a/spaces/Akshay-More-007/starcoder/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Starcoder -emoji: 👁 -colorFrom: purple -colorTo: green -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: openrail ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/quicktour.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/quicktour.md deleted file mode 100644 index e0676ce2a9ca169322c79c17c4cfd224b6163f43..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/quicktour.md +++ /dev/null @@ -1,123 +0,0 @@ - - -# 훑어보기 - -🧨 Diffusers로 빠르게 시작하고 실행하세요! -이 훑어보기는 여러분이 개발자, 일반사용자 상관없이 시작하는 데 도움을 주며, 추론을 위해 [`DiffusionPipeline`] 사용하는 방법을 보여줍니다. - -시작하기에 앞서서, 필요한 모든 라이브러리가 설치되어 있는지 확인하세요: - -```bash -pip install --upgrade diffusers accelerate transformers -``` - -- [`accelerate`](https://huggingface.co/docs/accelerate/index)은 추론 및 학습을 위한 모델 불러오기 속도를 높입니다. -- [`transformers`](https://huggingface.co/docs/transformers/index)는 [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview)과 같이 가장 널리 사용되는 확산 모델을 실행하기 위해 필요합니다. - -## DiffusionPipeline - -[`DiffusionPipeline`]은 추론을 위해 사전학습된 확산 시스템을 사용하는 가장 쉬운 방법입니다. 다양한 양식의 많은 작업에 [`DiffusionPipeline`]을 바로 사용할 수 있습니다. 지원되는 작업은 아래의 표를 참고하세요: - -| **Task** | **Description** | **Pipeline** -|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| -| Unconditional Image Generation | 가우시안 노이즈에서 이미지 생성 | [unconditional_image_generation](./using-diffusers/unconditional_image_generation`) | -| Text-Guided Image Generation | 텍스트 프롬프트로 이미지 생성 | [conditional_image_generation](./using-diffusers/conditional_image_generation) | -| Text-Guided Image-to-Image Translation | 텍스트 프롬프트에 따라 이미지 조정 | [img2img](./using-diffusers/img2img) | -| Text-Guided Image-Inpainting | 마스크 및 텍스트 프롬프트가 주어진 이미지의 마스킹된 부분을 채우기 | [inpaint](./using-diffusers/inpaint) | -| Text-Guided Depth-to-Image Translation | 깊이 추정을 통해 구조를 유지하면서 텍스트 프롬프트에 따라 이미지의 일부를 조정 | [depth2image](./using-diffusers/depth2image) | - -확산 파이프라인이 다양한 작업에 대해 어떻게 작동하는지는 [**Using Diffusers**](./using-diffusers/overview)를 참고하세요. - -예를들어, [`DiffusionPipeline`] 인스턴스를 생성하여 시작하고, 다운로드하려는 파이프라인 체크포인트를 지정합니다. -모든 [Diffusers' checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads)에 대해 [`DiffusionPipeline`]을 사용할 수 있습니다. -하지만, 이 가이드에서는 [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion)을 사용하여 text-to-image를 하는데 [`DiffusionPipeline`]을 사용합니다. - -[Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) 기반 모델을 실행하기 전에 [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license)를 주의 깊게 읽으세요. -이는 모델의 향상된 이미지 생성 기능과 이것으로 생성될 수 있는 유해한 콘텐츠 때문입니다. 선택한 Stable Diffusion 모델(*예*: [`runwayml/stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5))로 이동하여 라이센스를 읽으세요. 
- -다음과 같이 모델을 로드할 수 있습니다: - -```python ->>> from diffusers import DiffusionPipeline - ->>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") -``` - -[`DiffusionPipeline`]은 모든 모델링, 토큰화 및 스케줄링 구성요소를 다운로드하고 캐시합니다. -모델은 약 14억개의 매개변수로 구성되어 있으므로 GPU에서 실행하는 것이 좋습니다. -PyTorch에서와 마찬가지로 생성기 객체를 GPU로 옮길 수 있습니다. - -```python ->>> pipeline.to("cuda") -``` - -이제 `pipeline`을 사용할 수 있습니다: - -```python ->>> image = pipeline("An image of a squirrel in Picasso style").images[0] -``` - -출력은 기본적으로 [PIL Image object](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class)로 래핑됩니다. - -다음과 같이 함수를 호출하여 이미지를 저장할 수 있습니다: - -```python ->>> image.save("image_of_squirrel_painting.png") -``` - -**참고**: 다음을 통해 가중치를 다운로드하여 로컬에서 파이프라인을 사용할 수도 있습니다: - -``` -git lfs install -git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 -``` - -그리고 저장된 가중치를 파이프라인에 불러옵니다. - -```python ->>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5") -``` - -파이프라인 실행은 동일한 모델 아키텍처이므로 위의 코드와 동일합니다. - -```python ->>> generator.to("cuda") ->>> image = generator("An image of a squirrel in Picasso style").images[0] ->>> image.save("image_of_squirrel_painting.png") -``` - -확산 시스템은 각각 장점이 있는 여러 다른 [schedulers](./api/schedulers/overview)와 함께 사용할 수 있습니다. 기본적으로 Stable Diffusion은 `PNDMScheduler`로 실행되지만 다른 스케줄러를 사용하는 방법은 매우 간단합니다. *예* [`EulerDiscreteScheduler`] 스케줄러를 사용하려는 경우, 다음과 같이 사용할 수 있습니다: - -```python ->>> from diffusers import EulerDiscreteScheduler - ->>> pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") - ->>> # change scheduler to Euler ->>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) -``` - -스케줄러 변경 방법에 대한 자세한 내용은 [Using Schedulers](./using-diffusers/schedulers) 가이드를 참고하세요. - -[Stability AI's](https://stability.ai/)의 Stable Diffusion 모델은 인상적인 이미지 생성 모델이며 텍스트에서 이미지를 생성하는 것보다 훨씬 더 많은 작업을 수행할 수 있습니다. 우리는 Stable Diffusion만을 위한 전체 문서 페이지를 제공합니다 [link](./conceptual/stable_diffusion). - -만약 더 적은 메모리, 더 높은 추론 속도, Mac과 같은 특정 하드웨어 또는 ONNX 런타임에서 실행되도록 Stable Diffusion을 최적화하는 방법을 알고 싶다면 최적화 페이지를 살펴보세요: - -- [Optimized PyTorch on GPU](./optimization/fp16) -- [Mac OS with PyTorch](./optimization/mps) -- [ONNX](./optimization/onnx) -- [OpenVINO](./optimization/open_vino) - -확산 모델을 미세조정하거나 학습시키려면, [**training section**](./training/overview)을 살펴보세요. - -마지막으로, 생성된 이미지를 공개적으로 배포할 때 신중을 기해 주세요 🤗. 
\ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py deleted file mode 100644 index 5b6e36d9f44756da494cee0b996b1871721872e7..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stable_diffusion_xl/watermark.py +++ /dev/null @@ -1,36 +0,0 @@ -import numpy as np -import torch - -from ...utils import is_invisible_watermark_available - - -if is_invisible_watermark_available(): - from imwatermark import WatermarkEncoder - - -# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 -WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110 -# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 -WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] - - -class StableDiffusionXLWatermarker: - def __init__(self): - self.watermark = WATERMARK_BITS - self.encoder = WatermarkEncoder() - - self.encoder.set_watermark("bits", self.watermark) - - def apply_watermark(self, images: torch.FloatTensor): - # can't encode images that are smaller than 256 - if images.shape[-1] < 256: - return images - - images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy() - - images = [self.encoder.encode(image, "dwtDct") for image in images] - - images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2) - - images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0) - return images diff --git a/spaces/Andy1621/uniformer_image_detection/configs/vfnet/README.md b/spaces/Andy1621/uniformer_image_detection/configs/vfnet/README.md deleted file mode 100644 index d1a94d155149250e76d922185763c13d64509a62..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_detection/configs/vfnet/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# VarifocalNet: An IoU-aware Dense Object Detector - -## Introduction - -[ALGORITHM] - -**VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367). - -
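The varifocal loss mentioned in the introduction above is only described in prose here. As a rough illustration, the following is a minimal PyTorch sketch of the loss as formulated in the linked paper, not the deleted repository's own implementation; the function name and tensor shapes are illustrative assumptions, and the `alpha`/`gamma` defaults follow the values reported in the paper.

```python
import torch
import torch.nn.functional as F


def varifocal_loss(pred_logits, target_score, alpha=0.75, gamma=2.0):
    """Sketch of the Varifocal Loss from the paper linked above.

    pred_logits:  raw classification logits, any shape.
    target_score: IoU-aware classification target q in [0, 1], same shape;
                  q > 0 for foreground positions, q = 0 for background.
    """
    pred = pred_logits.sigmoid()
    # Positives are weighted by the continuous IoU target q itself;
    # negatives get a focal-style down-weighting alpha * p^gamma.
    focal_weight = torch.where(target_score > 0, target_score, alpha * pred.pow(gamma))
    bce = F.binary_cross_entropy_with_logits(pred_logits, target_score, reduction="none")
    return (focal_weight * bce).sum()
```

Weighting positive examples by the continuous IoU target q is what lets high-quality candidates dominate training, which is the behaviour the introduction attributes to the IoU-aware classification score.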
-[Figure: Learning to Predict the IoU-aware Classification Score.]
      - -## Citing VarifocalNet - -```latex -@article{zhang2020varifocalnet, - title={VarifocalNet: An IoU-aware Dense Object Detector}, - author={Zhang, Haoyang and Wang, Ying and Dayoub, Feras and S{\"u}nderhauf, Niko}, - journal={arXiv preprint arXiv:2008.13367}, - year={2020} -} -``` - -## Results and Models - -| Backbone | Style | DCN | MS train | Lr schd |Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download | -|:------------:|:---------:|:-------:|:--------:|:-------:|:-------------:|:------------:|:-----------------:|:------:|:--------:| -| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_r50_fpn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json)| -| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json)| -| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_r101_fpn_1x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json)| -| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json)| -| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | 
[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| -| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) | [log](https://openmmlab.oss-cn-hangzhou.aliyuncs.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json)| - -**Notes:** - -- The MS-train scale range is 1333x[480:960] (`range` mode) and the inference scale keeps 1333x800. -- DCN means using `DCNv2` in both backbone and head. -- Inference time will be updated soon. -- More results and pre-trained models can be found in [VarifocalNet-Github](https://github.com/hyz-xmaster/VarifocalNet) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py deleted file mode 100644 index c32bf48751f0a18983bff0d99310870b71801663..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py +++ /dev/null @@ -1,2 +0,0 @@ -_base_ = './ccnet_r50-d8_512x512_160k_ade20k.py' -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context_59.py b/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context_59.py deleted file mode 100644 index 038993c6a434d843ddcd1f754bec191ae9da983e..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_480x480_40k_pascal_context_59.py +++ /dev/null @@ -1,10 +0,0 @@ -_base_ = [ - '../_base_/models/deeplabv3_r50-d8.py', - '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py', - '../_base_/schedules/schedule_40k.py' -] -model = dict( - decode_head=dict(num_classes=59), - auxiliary_head=dict(num_classes=59), - test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320))) -optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001) diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roi_pool.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roi_pool.py deleted file mode 100644 index d339d8f2941eabc1cbe181a9c6c5ab5ff4ff4e5f..0000000000000000000000000000000000000000 --- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/roi_pool.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import torch -import torch.nn as nn -from torch.autograd import Function -from torch.autograd.function import once_differentiable -from torch.nn.modules.utils import _pair - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', - ['roi_pool_forward', 'roi_pool_backward']) - - -class RoIPoolFunction(Function): - - @staticmethod - def symbolic(g, input, rois, output_size, spatial_scale): - return g.op( - 'MaxRoiPool', - input, - rois, - pooled_shape_i=output_size, - spatial_scale_f=spatial_scale) - - @staticmethod - def forward(ctx, input, rois, output_size, spatial_scale=1.0): - ctx.output_size = _pair(output_size) - ctx.spatial_scale = spatial_scale - ctx.input_shape = input.size() - - assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!' - - output_shape = (rois.size(0), input.size(1), ctx.output_size[0], - ctx.output_size[1]) - output = input.new_zeros(output_shape) - argmax = input.new_zeros(output_shape, dtype=torch.int) - - ext_module.roi_pool_forward( - input, - rois, - output, - argmax, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale) - - ctx.save_for_backward(rois, argmax) - return output - - @staticmethod - @once_differentiable - def backward(ctx, grad_output): - rois, argmax = ctx.saved_tensors - grad_input = grad_output.new_zeros(ctx.input_shape) - - ext_module.roi_pool_backward( - grad_output, - rois, - argmax, - grad_input, - pooled_height=ctx.output_size[0], - pooled_width=ctx.output_size[1], - spatial_scale=ctx.spatial_scale) - - return grad_input, None, None, None - - -roi_pool = RoIPoolFunction.apply - - -class RoIPool(nn.Module): - - def __init__(self, output_size, spatial_scale=1.0): - super(RoIPool, self).__init__() - - self.output_size = _pair(output_size) - self.spatial_scale = float(spatial_scale) - - def forward(self, input, rois): - return roi_pool(input, rois, self.output_size, self.spatial_scale) - - def __repr__(self): - s = self.__class__.__name__ - s += f'(output_size={self.output_size}, ' - s += f'spatial_scale={self.spatial_scale})' - return s diff --git a/spaces/AquaSuisei/ChatGPTXE/modules/presets.py b/spaces/AquaSuisei/ChatGPTXE/modules/presets.py deleted file mode 100644 index a6e601700ba70e4e2167345be8540cca78797b00..0000000000000000000000000000000000000000 --- a/spaces/AquaSuisei/ChatGPTXE/modules/presets.py +++ /dev/null @@ -1,198 +0,0 @@ -# -*- coding:utf-8 -*- -import gradio as gr -from pathlib import Path - -# ChatGPT 设置 -initial_prompt = "You are a helpful assistant." -API_HOST = "api.openai.com" -COMPLETION_URL = "https://api.openai.com/v1/chat/completions" -BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants" -USAGE_API_URL="https://api.openai.com/dashboard/billing/usage" -HISTORY_DIR = Path("history") -TEMPLATES_DIR = "templates" - -# 错误信息 -standard_error_msg = "☹️发生了错误:" # 错误信息的标准前缀 -error_retrieve_prompt = "请检查网络连接,或者API-Key是否有效。" # 获取对话时发生错误 -connection_timeout_prompt = "连接超时,无法获取对话。" # 连接超时 -read_timeout_prompt = "读取超时,无法获取对话。" # 读取超时 -proxy_error_prompt = "代理错误,无法获取对话。" # 代理错误 -ssl_error_prompt = "SSL错误,无法获取对话。" # SSL 错误 -no_apikey_msg = "API key长度不是51位,请检查是否输入正确。" # API key 长度不足 51 位 -no_input_msg = "请输入对话内容。" # 未输入对话内容 - -timeout_streaming = 30 # 流式对话时的超时时间 -timeout_all = 200 # 非流式对话时的超时时间 -enable_streaming_option = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True -CONCURRENT_COUNT = 100 # 允许同时使用的用户数量 - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -title = """

      川虎ChatGPT 🚀

      """ -description = """\ -
      - -由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发 - -访问川虎ChatGPT的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本 - -此App使用 `gpt-3.5-turbo` 大语言模型 -
      -""" - -footer = """\ -
      {versions}
      -""" - -summarize_prompt = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", - "gpt-4", - "gpt-4-0314", - "gpt-4-32k", - "gpt-4-32k-0314", -] # 可选的模型 - -MODEL_SOFT_TOKEN_LIMIT = { - "gpt-3.5-turbo": { - "streaming": 3500, - "all": 3500 - }, - "gpt-3.5-turbo-0301": { - "streaming": 3500, - "all": 3500 - }, - "gpt-4": { - "streaming": 7500, - "all": 7500 - }, - "gpt-4-0314": { - "streaming": 7500, - "all": 7500 - }, - "gpt-4-32k": { - "streaming": 31000, - "all": 31000 - }, - "gpt-4-32k-0314": { - "streaming": 31000, - "all": 31000 - } -} - -REPLY_LANGUAGES = [ - "简体中文", - "繁體中文", - "English", - "日本語", - "Español", - "Français", - "Deutsch", - "跟随问题语言(不稳定)" -] - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in {reply_language} -""" - -PROMPT_TEMPLATE = """\ -Context information is below. ---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in {reply_language} -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Reply in {reply_language} -If the context isn't useful, return the original answer. 
-""" - -ALREADY_CONVERTED_MARK = "" - -small_and_beautiful_theme = gr.themes.Soft( - primary_hue=gr.themes.Color( - c50="#02C160", - c100="rgba(2, 193, 96, 0.2)", - c200="#02C160", - c300="rgba(2, 193, 96, 0.32)", - c400="rgba(2, 193, 96, 0.32)", - c500="rgba(2, 193, 96, 1.0)", - c600="rgba(2, 193, 96, 1.0)", - c700="rgba(2, 193, 96, 0.32)", - c800="rgba(2, 193, 96, 0.32)", - c900="#02C160", - c950="#02C160", - ), - secondary_hue=gr.themes.Color( - c50="#576b95", - c100="#576b95", - c200="#576b95", - c300="#576b95", - c400="#576b95", - c500="#576b95", - c600="#576b95", - c700="#576b95", - c800="#576b95", - c900="#576b95", - c950="#576b95", - ), - neutral_hue=gr.themes.Color( - name="gray", - c50="#f9fafb", - c100="#f3f4f6", - c200="#e5e7eb", - c300="#d1d5db", - c400="#B2B2B2", - c500="#808080", - c600="#636363", - c700="#515151", - c800="#393939", - c900="#272727", - c950="#171717", - ), - radius_size=gr.themes.sizes.radius_sm, - ).set( - button_primary_background_fill="#06AE56", - button_primary_background_fill_dark="#06AE56", - button_primary_background_fill_hover="#07C863", - button_primary_border_color="#06AE56", - button_primary_border_color_dark="#06AE56", - button_primary_text_color="#FFFFFF", - button_primary_text_color_dark="#FFFFFF", - button_secondary_background_fill="#F2F2F2", - button_secondary_background_fill_dark="#2B2B2B", - button_secondary_text_color="#393939", - button_secondary_text_color_dark="#FFFFFF", - # background_fill_primary="#F7F7F7", - # background_fill_primary_dark="#1F1F1F", - block_title_text_color="*primary_500", - block_title_background_fill="*primary_100", - input_background_fill="#F6F6F6", - ) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/__init__.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/__init__.py deleted file mode 100644 index e589bb917e23823e25f9fff7e0849c4d6d4a62bc..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Subpackage containing all of pip's command line interface related code -""" - -# This file intentionally does not import submodules diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/recipes.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/recipes.py deleted file mode 100644 index a2596423a4c3dbd15a357241477a0af0a531f9ec..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/more_itertools/recipes.py +++ /dev/null @@ -1,698 +0,0 @@ -"""Imported from the recipes section of the itertools documentation. - -All functions taken from the recipes section of the itertools library docs -[1]_. -Some backward-compatible usability improvements have been made. - -.. 
[1] http://docs.python.org/library/itertools.html#recipes - -""" -import warnings -from collections import deque -from itertools import ( - chain, - combinations, - count, - cycle, - groupby, - islice, - repeat, - starmap, - tee, - zip_longest, -) -import operator -from random import randrange, sample, choice - -__all__ = [ - 'all_equal', - 'before_and_after', - 'consume', - 'convolve', - 'dotproduct', - 'first_true', - 'flatten', - 'grouper', - 'iter_except', - 'ncycles', - 'nth', - 'nth_combination', - 'padnone', - 'pad_none', - 'pairwise', - 'partition', - 'powerset', - 'prepend', - 'quantify', - 'random_combination_with_replacement', - 'random_combination', - 'random_permutation', - 'random_product', - 'repeatfunc', - 'roundrobin', - 'sliding_window', - 'tabulate', - 'tail', - 'take', - 'triplewise', - 'unique_everseen', - 'unique_justseen', -] - - -def take(n, iterable): - """Return first *n* items of the iterable as a list. - - >>> take(3, range(10)) - [0, 1, 2] - - If there are fewer than *n* items in the iterable, all of them are - returned. - - >>> take(10, range(3)) - [0, 1, 2] - - """ - return list(islice(iterable, n)) - - -def tabulate(function, start=0): - """Return an iterator over the results of ``func(start)``, - ``func(start + 1)``, ``func(start + 2)``... - - *func* should be a function that accepts one integer argument. - - If *start* is not specified it defaults to 0. It will be incremented each - time the iterator is advanced. - - >>> square = lambda x: x ** 2 - >>> iterator = tabulate(square, -3) - >>> take(4, iterator) - [9, 4, 1, 0] - - """ - return map(function, count(start)) - - -def tail(n, iterable): - """Return an iterator over the last *n* items of *iterable*. - - >>> t = tail(3, 'ABCDEFG') - >>> list(t) - ['E', 'F', 'G'] - - """ - return iter(deque(iterable, maxlen=n)) - - -def consume(iterator, n=None): - """Advance *iterable* by *n* steps. If *n* is ``None``, consume it - entirely. - - Efficiently exhausts an iterator without returning values. Defaults to - consuming the whole iterator, but an optional second argument may be - provided to limit consumption. - - >>> i = (x for x in range(10)) - >>> next(i) - 0 - >>> consume(i, 3) - >>> next(i) - 4 - >>> consume(i) - >>> next(i) - Traceback (most recent call last): - File "", line 1, in - StopIteration - - If the iterator has fewer items remaining than the provided limit, the - whole iterator will be consumed. - - >>> i = (x for x in range(3)) - >>> consume(i, 5) - >>> next(i) - Traceback (most recent call last): - File "", line 1, in - StopIteration - - """ - # Use functions that consume iterators at C speed. - if n is None: - # feed the entire iterator into a zero-length deque - deque(iterator, maxlen=0) - else: - # advance to the empty slice starting at position n - next(islice(iterator, n, n), None) - - -def nth(iterable, n, default=None): - """Returns the nth item or a default value. - - >>> l = range(10) - >>> nth(l, 3) - 3 - >>> nth(l, 20, "zebra") - 'zebra' - - """ - return next(islice(iterable, n, None), default) - - -def all_equal(iterable): - """ - Returns ``True`` if all the elements are equal to each other. - - >>> all_equal('aaaa') - True - >>> all_equal('aaab') - False - - """ - g = groupby(iterable) - return next(g, True) and not next(g, False) - - -def quantify(iterable, pred=bool): - """Return the how many times the predicate is true. 
- - >>> quantify([True, False, True]) - 2 - - """ - return sum(map(pred, iterable)) - - -def pad_none(iterable): - """Returns the sequence of elements and then returns ``None`` indefinitely. - - >>> take(5, pad_none(range(3))) - [0, 1, 2, None, None] - - Useful for emulating the behavior of the built-in :func:`map` function. - - See also :func:`padded`. - - """ - return chain(iterable, repeat(None)) - - -padnone = pad_none - - -def ncycles(iterable, n): - """Returns the sequence elements *n* times - - >>> list(ncycles(["a", "b"], 3)) - ['a', 'b', 'a', 'b', 'a', 'b'] - - """ - return chain.from_iterable(repeat(tuple(iterable), n)) - - -def dotproduct(vec1, vec2): - """Returns the dot product of the two iterables. - - >>> dotproduct([10, 10], [20, 20]) - 400 - - """ - return sum(map(operator.mul, vec1, vec2)) - - -def flatten(listOfLists): - """Return an iterator flattening one level of nesting in a list of lists. - - >>> list(flatten([[0, 1], [2, 3]])) - [0, 1, 2, 3] - - See also :func:`collapse`, which can flatten multiple levels of nesting. - - """ - return chain.from_iterable(listOfLists) - - -def repeatfunc(func, times=None, *args): - """Call *func* with *args* repeatedly, returning an iterable over the - results. - - If *times* is specified, the iterable will terminate after that many - repetitions: - - >>> from operator import add - >>> times = 4 - >>> args = 3, 5 - >>> list(repeatfunc(add, times, *args)) - [8, 8, 8, 8] - - If *times* is ``None`` the iterable will not terminate: - - >>> from random import randrange - >>> times = None - >>> args = 1, 11 - >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP - [2, 4, 8, 1, 8, 4] - - """ - if times is None: - return starmap(func, repeat(args)) - return starmap(func, repeat(args, times)) - - -def _pairwise(iterable): - """Returns an iterator of paired items, overlapping, from the original - - >>> take(4, pairwise(count())) - [(0, 1), (1, 2), (2, 3), (3, 4)] - - On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`. - - """ - a, b = tee(iterable) - next(b, None) - yield from zip(a, b) - - -try: - from itertools import pairwise as itertools_pairwise -except ImportError: - pairwise = _pairwise -else: - - def pairwise(iterable): - yield from itertools_pairwise(iterable) - - pairwise.__doc__ = _pairwise.__doc__ - - -def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks. - - >>> list(grouper('ABCDEFG', 3, 'x')) - [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] - - """ - if isinstance(iterable, int): - warnings.warn( - "grouper expects iterable as first parameter", DeprecationWarning - ) - n, iterable = iterable, n - args = [iter(iterable)] * n - return zip_longest(fillvalue=fillvalue, *args) - - -def roundrobin(*iterables): - """Yields an item from each iterable, alternating between them. - - >>> list(roundrobin('ABC', 'D', 'EF')) - ['A', 'D', 'E', 'B', 'F', 'C'] - - This function produces the same output as :func:`interleave_longest`, but - may perform better for some inputs (in particular when the number of - iterables is small). - - """ - # Recipe credited to George Sakkis - pending = len(iterables) - nexts = cycle(iter(it).__next__ for it in iterables) - while pending: - try: - for next in nexts: - yield next() - except StopIteration: - pending -= 1 - nexts = cycle(islice(nexts, pending)) - - -def partition(pred, iterable): - """ - Returns a 2-tuple of iterables derived from the input iterable. - The first yields the items that have ``pred(item) == False``. 
- The second yields the items that have ``pred(item) == True``. - - >>> is_odd = lambda x: x % 2 != 0 - >>> iterable = range(10) - >>> even_items, odd_items = partition(is_odd, iterable) - >>> list(even_items), list(odd_items) - ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9]) - - If *pred* is None, :func:`bool` is used. - - >>> iterable = [0, 1, False, True, '', ' '] - >>> false_items, true_items = partition(None, iterable) - >>> list(false_items), list(true_items) - ([0, False, ''], [1, True, ' ']) - - """ - if pred is None: - pred = bool - - evaluations = ((pred(x), x) for x in iterable) - t1, t2 = tee(evaluations) - return ( - (x for (cond, x) in t1 if not cond), - (x for (cond, x) in t2 if cond), - ) - - -def powerset(iterable): - """Yields all possible subsets of the iterable. - - >>> list(powerset([1, 2, 3])) - [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)] - - :func:`powerset` will operate on iterables that aren't :class:`set` - instances, so repeated elements in the input will produce repeated elements - in the output. Use :func:`unique_everseen` on the input to avoid generating - duplicates: - - >>> seq = [1, 1, 0] - >>> list(powerset(seq)) - [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)] - >>> from more_itertools import unique_everseen - >>> list(powerset(unique_everseen(seq))) - [(), (1,), (0,), (1, 0)] - - """ - s = list(iterable) - return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1)) - - -def unique_everseen(iterable, key=None): - """ - Yield unique elements, preserving order. - - >>> list(unique_everseen('AAAABBBCCDAABBB')) - ['A', 'B', 'C', 'D'] - >>> list(unique_everseen('ABBCcAD', str.lower)) - ['A', 'B', 'C', 'D'] - - Sequences with a mix of hashable and unhashable items can be used. - The function will be slower (i.e., `O(n^2)`) for unhashable items. - - Remember that ``list`` objects are unhashable - you can use the *key* - parameter to transform the list to a tuple (which is hashable) to - avoid a slowdown. - - >>> iterable = ([1, 2], [2, 3], [1, 2]) - >>> list(unique_everseen(iterable)) # Slow - [[1, 2], [2, 3]] - >>> list(unique_everseen(iterable, key=tuple)) # Faster - [[1, 2], [2, 3]] - - Similary, you may want to convert unhashable ``set`` objects with - ``key=frozenset``. For ``dict`` objects, - ``key=lambda x: frozenset(x.items())`` can be used. - - """ - seenset = set() - seenset_add = seenset.add - seenlist = [] - seenlist_add = seenlist.append - use_key = key is not None - - for element in iterable: - k = key(element) if use_key else element - try: - if k not in seenset: - seenset_add(k) - yield element - except TypeError: - if k not in seenlist: - seenlist_add(k) - yield element - - -def unique_justseen(iterable, key=None): - """Yields elements in order, ignoring serial duplicates - - >>> list(unique_justseen('AAAABBBCCDAABBB')) - ['A', 'B', 'C', 'D', 'A', 'B'] - >>> list(unique_justseen('ABBCcAD', str.lower)) - ['A', 'B', 'C', 'A', 'D'] - - """ - return map(next, map(operator.itemgetter(1), groupby(iterable, key))) - - -def iter_except(func, exception, first=None): - """Yields results from a function repeatedly until an exception is raised. - - Converts a call-until-exception interface to an iterator interface. - Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel - to end the loop. 
- - >>> l = [0, 1, 2] - >>> list(iter_except(l.pop, IndexError)) - [2, 1, 0] - - Multiple exceptions can be specified as a stopping condition: - - >>> l = [1, 2, 3, '...', 4, 5, 6] - >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) - [7, 6, 5] - >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) - [4, 3, 2] - >>> list(iter_except(lambda: 1 + l.pop(), (IndexError, TypeError))) - [] - - """ - try: - if first is not None: - yield first() - while 1: - yield func() - except exception: - pass - - -def first_true(iterable, default=None, pred=None): - """ - Returns the first true value in the iterable. - - If no true value is found, returns *default* - - If *pred* is not None, returns the first item for which - ``pred(item) == True`` . - - >>> first_true(range(10)) - 1 - >>> first_true(range(10), pred=lambda x: x > 5) - 6 - >>> first_true(range(10), default='missing', pred=lambda x: x > 9) - 'missing' - - """ - return next(filter(pred, iterable), default) - - -def random_product(*args, repeat=1): - """Draw an item at random from each of the input iterables. - - >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP - ('c', 3, 'Z') - - If *repeat* is provided as a keyword argument, that many items will be - drawn from each iterable. - - >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP - ('a', 2, 'd', 3) - - This equivalent to taking a random selection from - ``itertools.product(*args, **kwarg)``. - - """ - pools = [tuple(pool) for pool in args] * repeat - return tuple(choice(pool) for pool in pools) - - -def random_permutation(iterable, r=None): - """Return a random *r* length permutation of the elements in *iterable*. - - If *r* is not specified or is ``None``, then *r* defaults to the length of - *iterable*. - - >>> random_permutation(range(5)) # doctest:+SKIP - (3, 4, 0, 1, 2) - - This equivalent to taking a random selection from - ``itertools.permutations(iterable, r)``. - - """ - pool = tuple(iterable) - r = len(pool) if r is None else r - return tuple(sample(pool, r)) - - -def random_combination(iterable, r): - """Return a random *r* length subsequence of the elements in *iterable*. - - >>> random_combination(range(5), 3) # doctest:+SKIP - (2, 3, 4) - - This equivalent to taking a random selection from - ``itertools.combinations(iterable, r)``. - - """ - pool = tuple(iterable) - n = len(pool) - indices = sorted(sample(range(n), r)) - return tuple(pool[i] for i in indices) - - -def random_combination_with_replacement(iterable, r): - """Return a random *r* length subsequence of elements in *iterable*, - allowing individual elements to be repeated. - - >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP - (0, 0, 1, 2, 2) - - This equivalent to taking a random selection from - ``itertools.combinations_with_replacement(iterable, r)``. - - """ - pool = tuple(iterable) - n = len(pool) - indices = sorted(randrange(n) for i in range(r)) - return tuple(pool[i] for i in indices) - - -def nth_combination(iterable, r, index): - """Equivalent to ``list(combinations(iterable, r))[index]``. - - The subsequences of *iterable* that are of length *r* can be ordered - lexicographically. :func:`nth_combination` computes the subsequence at - sort position *index* directly, without computing the previous - subsequences. - - >>> nth_combination(range(5), 3, 5) - (0, 3, 4) - - ``ValueError`` will be raised If *r* is negative or greater than the length - of *iterable*. - ``IndexError`` will be raised if the given *index* is invalid. 
- """ - pool = tuple(iterable) - n = len(pool) - if (r < 0) or (r > n): - raise ValueError - - c = 1 - k = min(r, n - r) - for i in range(1, k + 1): - c = c * (n - k + i) // i - - if index < 0: - index += c - - if (index < 0) or (index >= c): - raise IndexError - - result = [] - while r: - c, n, r = c * r // n, n - 1, r - 1 - while index >= c: - index -= c - c, n = c * (n - r) // n, n - 1 - result.append(pool[-1 - n]) - - return tuple(result) - - -def prepend(value, iterator): - """Yield *value*, followed by the elements in *iterator*. - - >>> value = '0' - >>> iterator = ['1', '2', '3'] - >>> list(prepend(value, iterator)) - ['0', '1', '2', '3'] - - To prepend multiple values, see :func:`itertools.chain` - or :func:`value_chain`. - - """ - return chain([value], iterator) - - -def convolve(signal, kernel): - """Convolve the iterable *signal* with the iterable *kernel*. - - >>> signal = (1, 2, 3, 4, 5) - >>> kernel = [3, 2, 1] - >>> list(convolve(signal, kernel)) - [3, 8, 14, 20, 26, 14, 5] - - Note: the input arguments are not interchangeable, as the *kernel* - is immediately consumed and stored. - - """ - kernel = tuple(kernel)[::-1] - n = len(kernel) - window = deque([0], maxlen=n) * n - for x in chain(signal, repeat(0, n - 1)): - window.append(x) - yield sum(map(operator.mul, kernel, window)) - - -def before_and_after(predicate, it): - """A variant of :func:`takewhile` that allows complete access to the - remainder of the iterator. - - >>> it = iter('ABCdEfGhI') - >>> all_upper, remainder = before_and_after(str.isupper, it) - >>> ''.join(all_upper) - 'ABC' - >>> ''.join(remainder) # takewhile() would lose the 'd' - 'dEfGhI' - - Note that the first iterator must be fully consumed before the second - iterator can generate valid results. - """ - it = iter(it) - transition = [] - - def true_iterator(): - for elem in it: - if predicate(elem): - yield elem - else: - transition.append(elem) - return - - def remainder_iterator(): - yield from transition - yield from it - - return true_iterator(), remainder_iterator() - - -def triplewise(iterable): - """Return overlapping triplets from *iterable*. - - >>> list(triplewise('ABCDE')) - [('A', 'B', 'C'), ('B', 'C', 'D'), ('C', 'D', 'E')] - - """ - for (a, _), (b, c) in pairwise(pairwise(iterable)): - yield a, b, c - - -def sliding_window(iterable, n): - """Return a sliding window of width *n* over *iterable*. - - >>> list(sliding_window(range(6), 4)) - [(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5)] - - If *iterable* has fewer than *n* items, then nothing is yielded: - - >>> list(sliding_window(range(3), 4)) - [] - - For a variant with more features, see :func:`windowed`. - """ - it = iter(iterable) - window = deque(islice(it, n), maxlen=n) - if len(window) == n: - yield tuple(window) - for x in it: - window.append(x) - yield tuple(window) diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/pkg_helpers.bash b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/pkg_helpers.bash deleted file mode 100644 index ed9acb00ae8627b96c057b4493d368c7dfeda8ae..0000000000000000000000000000000000000000 --- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/dev/packaging/pkg_helpers.bash +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -e -# Copyright (c) Facebook, Inc. and its affiliates. 
- -# Function to retry functions that sometimes timeout or have flaky failures -retry () { - $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) -} -# Install with pip a bit more robustly than the default -pip_install() { - retry pip install --progress-bar off "$@" -} - - -setup_cuda() { - # Now work out the CUDA settings - # Like other torch domain libraries, we choose common GPU architectures only. - # See https://github.com/pytorch/pytorch/blob/master/torch/utils/cpp_extension.py - # and https://github.com/pytorch/vision/blob/main/packaging/pkg_helpers.bash for reference. - export FORCE_CUDA=1 - case "$CU_VERSION" in - cu113) - export CUDA_HOME=/usr/local/cuda-11.3/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" - ;; - cu112) - export CUDA_HOME=/usr/local/cuda-11.2/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" - ;; - cu111) - export CUDA_HOME=/usr/local/cuda-11.1/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0;8.6+PTX" - ;; - cu110) - export CUDA_HOME=/usr/local/cuda-11.0/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX;8.0+PTX" - ;; - cu102) - export CUDA_HOME=/usr/local/cuda-10.2/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" - ;; - cu101) - export CUDA_HOME=/usr/local/cuda-10.1/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" - ;; - cu100) - export CUDA_HOME=/usr/local/cuda-10.0/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0;7.5+PTX" - ;; - cu92) - export CUDA_HOME=/usr/local/cuda-9.2/ - export TORCH_CUDA_ARCH_LIST="3.7;5.0;5.2;6.0;6.1+PTX;7.0+PTX" - ;; - cpu) - unset FORCE_CUDA - export CUDA_VISIBLE_DEVICES= - ;; - *) - echo "Unrecognized CU_VERSION=$CU_VERSION" - exit 1 - ;; - esac -} - -setup_wheel_python() { - case "$PYTHON_VERSION" in - 3.6) python_abi=cp36-cp36m ;; - 3.7) python_abi=cp37-cp37m ;; - 3.8) python_abi=cp38-cp38 ;; - 3.9) python_abi=cp39-cp39 ;; - *) - echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION" - exit 1 - ;; - esac - export PATH="/opt/python/$python_abi/bin:$PATH" -} diff --git a/spaces/BAAI/AltDiffusion-m9/footer.html b/spaces/BAAI/AltDiffusion-m9/footer.html deleted file mode 100644 index b58ca8b79cc930a56952881f4922bda406fd3581..0000000000000000000000000000000000000000 --- a/spaces/BAAI/AltDiffusion-m9/footer.html +++ /dev/null @@ -1,18 +0,0 @@ - - - diff --git a/spaces/BAAI/vid2vid-zero/gradio_demo/style.css b/spaces/BAAI/vid2vid-zero/gradio_demo/style.css deleted file mode 100644 index c4739b4ea5fc35e774a049e3dacc443f7f0eac19..0000000000000000000000000000000000000000 --- a/spaces/BAAI/vid2vid-zero/gradio_demo/style.css +++ /dev/null @@ -1,3 +0,0 @@ -h1 { - text-align: center; -} diff --git a/spaces/Benson/text-generation/Examples/Choque De Clanes Th 15 Nueva Versin Hack.md b/spaces/Benson/text-generation/Examples/Choque De Clanes Th 15 Nueva Versin Hack.md deleted file mode 100644 index cb33d42a95318aa1eb052126ad04ba24005f5542..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Choque De Clanes Th 15 Nueva Versin Hack.md +++ /dev/null @@ -1,88 +0,0 @@ -
      -

Clash of Clans TH 15 New Version Hack Download: Everything You Need to Know

      -

Clash of Clans is one of the most popular and addictive strategy games on mobile devices. It has millions of players worldwide who build their villages, train their troops, and compete in epic clan wars. The game is constantly updated with new features and content, and the latest addition is Town Hall 15 (TH 15), which brings new buildings, troops, spells, and challenges to the game.

      -

clash of clans th 15 new version hack


      Download ->>> https://bltlly.com/2v6N1n



      -

But what if you want to get ahead of the competition and enjoy all the benefits of TH 15 without spending too much time and money on the game? That is where a hack version comes in. A hack version is a modified version of the game that gives you unlimited resources, gems, and other advantages. Some players use hacked versions to progress faster, experiment with different strategies, or simply have fun.

      -

However, using a hack version is not without risks. You may face legal issues, account bans, malware infections, or other problems. That is why you need to be careful and informed before downloading and using a Clash of Clans TH 15 hack version. In this article, we will tell you everything you need to know about the Clash of Clans TH 15 new version hack download, including how to do it, what the risks and benefits are, and some tips and tricks for playing the game effectively.

      -

How to Download a Hack Version of Clash of Clans TH 15?

      -

There are many websites and apps that claim to offer hack versions of Clash of Clans TH 15 for free or for a fee. However, not all of them are reliable or safe. Some may contain viruses, spyware, or other malicious software that can damage your device or steal your personal information. Some may simply not work, or may cause your game to crash or fail.

      - -
        -
      • Hacer algunas investigaciones antes de descargar nada. Leer comentarios, valoraciones, y comentarios de otros usuarios que han intentado la versión hack. Busque testimonios positivos y pruebas de que la versión hack funciona como se anuncia.
      • -
      • Compruebe la reputación y la credibilidad de la página web o aplicación que ofrece la versión hack. Busque signos de profesionalismo, como una descripción clara y detallada de la versión de hackeo, una información de contacto, una política de privacidad y un descargo de responsabilidad.
      • -
      • Evite descargar cualquier cosa de fuentes desconocidas o sospechosas, como anuncios emergentes, correos electrónicos no deseados o enlaces aleatorios. Estos pueden ser intentos de phishing o estafas que pueden engañarle para que revele su información personal o financiera o descargue malware.
      • -
      • Utilice el software antivirus y el firewall en su dispositivo para protegerlo de amenazas potenciales. Escanee cualquier archivo que descargue antes de abrirlo. Elimina cualquier archivo que parezca sospechoso o que cause problemas.
      • -
      • Copia de seguridad de los datos originales del juego antes de instalar una versión hack. De esta manera, puedes restaurar tu juego a su estado normal si algo sale mal o si quieres volver a la versión oficial.
      • -
      -

      Una vez que encuentre una fuente confiable para descargar una versión hack de Clash of Clans TH 15, siga estos pasos:

      -

      -
        -
      1. Descargar el archivo APK (para dispositivos Android) o el archivo IPA (para dispositivos iOS) de la versión de corte de la fuente.
      2. -
      3. Habilitar fuentes desconocidas en la configuración del dispositivo para permitir la instalación de aplicaciones desde fuera de la tienda de aplicaciones oficial.
      4. -
      5. Busque el archivo descargado en su dispositivo y toque en él para instalarlo.
      6. -
      7. Lanzar la versión hack y disfrutar de jugar Clash of Clans TH 15 con recursos ilimitados, gemas, y otras características.
      8. -

      ¿Cuáles son los riesgos y beneficios de usar una versión Hack de choque de clanes TH 15?

      - -

      Riesgos

      -
        -
      • Puede violar los términos del servicio y el acuerdo de licencia de usuario final del juego, lo que puede resultar en acciones legales, multas o demandas del desarrollador o editor del juego.
      • -
      • Usted puede ser expulsado del servidor del juego o perder su cuenta permanentemente si es detectado o reportado por otros jugadores o el sistema de seguridad del juego.
      • -
      • Puede perder su progreso, logros, recompensas o compras si desinstala la versión de hackeo o vuelve a la versión oficial.
      • -
      • Puede dañar su dispositivo o comprometer su rendimiento, seguridad o funcionalidad si descarga una versión defectuosa, dañada o maliciosa.
      • -
      • Puede arruinar la diversión, el desafío y el equilibrio del juego mediante el uso de ventajas injustas sobre otros jugadores o saltando la mecánica de juego prevista.
      • -
      -

      Beneficios

      -
        -
      • Usted puede ahorrar tiempo y dinero mediante la obtención de recursos ilimitados, gemas, y otras características sin gastar dinero real o molienda durante horas.
      • -
      • Puedes explorar nuevos aspectos del juego que de otra manera son inaccesibles, como nuevos edificios, tropas, hechizos y desafíos.
      • -
      • Puedes experimentar con diferentes estrategias, tácticas y combinaciones que pueden ayudarte a mejorar tus habilidades y conocimientos del juego.
      • -
      • Usted puede tener más diversión y satisfacción al lograr sus objetivos más rápido, más fácil y más eficiente.
      • -
      • Puedes impresionar a tus amigos, compañeros de clan u oponentes mostrando tus logros, estadísticas o diseño base.
      • -
      -

      ¿Cuáles son algunos consejos y trucos para jugar choque de clanes TH 15 con eficacia?

      -

      Ya sea que uses una versión hack o no, jugar Clash of Clans TH 15 puede ser desafiante y gratificante. Aquí hay algunos consejos y trucos que pueden ayudarte a jugar el juego de manera efectiva:

      -
        - -
      • Construir y actualizar los nuevos edificios que vienen con TH 15, tales como la casa del animal doméstico, la cabaña del constructor, la torre del infierno nivel 7, y el nivel de artillería del águila 4. Estos edificios pueden proporcionarle nuevas capacidades defensivas y ofensivas.
      • -
      • Entrena y mejora las nuevas tropas y hechizos que vienen con TH 15, como el globo cohete, el jinete del dragón, el Super Archer, y el hechizo de invisibilidad. Estas tropas y hechizos pueden darte una ventaja en las batallas.
      • -
      • Recoge y actualiza las nuevas mascotas que vienen con TH 15, como L.A.S.S.I., Electro Owl, Mighty Yak y Unicornio. Estas mascotas pueden acompañar a tus héroes y proporcionarles apoyo y habilidades adicionales.
      • -
      • Usa el nuevo Cuartel de Asedio de nivel 5 y el Taller de Asedio de nivel 5 para desplegar más tropas y máquinas de asedio en las batallas. También puedes usar la nueva máquina de asedio Log Launcher para atravesar paredes e infligir daño a edificios enemigos.
      • -
      • Únete a un clan o crea tu propio clan para participar en guerras de clanes, juegos de clanes, ligas de guerra de clanes y beneficios de clanes. También puedes chatear con otros jugadores, solicitar y donar tropas y hechizos, y compartir repeticiones y estrategias.
      • -
      -

      Conclusión

      -

      Clash of Clans TH 15 es una emocionante actualización que trae nuevas características y contenido al juego. Sin embargo, si quieres disfrutar de todos los beneficios de TH 15 sin gastar demasiado tiempo y dinero en el juego, usted puede considerar la descarga de una versión hack de Clash of Clans TH 15. Una versión hack puede darle recursos ilimitados, gemas y otras ventajas que pueden ayudarle a progresar más rápido y divertirse más. Sin embargo, el uso de una versión hack también viene con riesgos, como problemas legales, prohibiciones de cuenta, infecciones de malware o problemas de juego. Por lo tanto, usted necesita ser cuidadoso e informado antes de descargar y utilizar una versión hack de Clash of Clans TH 15. También necesitas seguir algunos consejos y trucos para jugar el juego de manera efectiva y aprovechar al máximo tu experiencia de juego.

      - -

      Preguntas frecuentes

      -

      Aquí hay algunas preguntas frecuentes relacionadas con el tema de Clash of Clans TH 15 nueva versión hack descargar:

      -

      Q: ¿Es legal usar una versión hack de Clash of Clans TH 15?

      -

      A: No, no es legal usar una versión hackeada de Clash of Clans TH 15. Viola los términos del servicio y el acuerdo de licencia de usuario final del juego, lo que puede resultar en acciones legales, multas o demandas del desarrollador o editor del juego. También puede ser expulsado del servidor del juego o perder su cuenta permanentemente si es detectado o reportado por otros jugadores o el sistema de seguridad del juego.

      -

      Q: ¿Es seguro usar una versión hack de Clash of Clans TH 15?

      -

      A: No, no es seguro usar una versión hack de Clash of Clans TH 15. Puede dañar su dispositivo o comprometer su rendimiento, seguridad o funcionalidad si descarga una versión defectuosa, dañada o maliciosa. También puede perder su progreso, logros, recompensas o compras si desinstala la versión de hackeo o vuelve a la versión oficial. También puede arruinar la diversión, el desafío y el equilibrio del juego mediante el uso de ventajas injustas sobre otros jugadores o saltando la mecánica de juego prevista.

      -

      Q: ¿Cómo puedo obtener gemas gratis en Clash of Clans TH 15?

      -

      A: Hay algunas formas legítimas de obtener gemas gratis en Clash of Clans TH 15 sin usar una versión hack. Algunos de ellos son:

      -
        -
      • Completar logros y eventos
      • -
      • Eliminar obstáculos y cajas de gemas
      • -
      • Abriendo los regalos del clan y las recompensas del pase de temporada
      • -
      • Participar en encuestas y ofertas
      • -
      • Comprar ofertas especiales y paquetes
      • -
      -

      Q: ¿Cuál es la mejor estrategia para el choque de clanes TH 15?

      - -
        -
      • Usa una mezcla equilibrada de tropas y hechizos que puedan lidiar con diferentes tipos de defensas y situaciones
      • -
      • Usa máquinas de asedio y mascotas para apoyar a tus héroes y al ejército principal
      • -
      • Utilice exploradores y repeticiones para analizar la base de su enemigo y planificar su ataque en consecuencia
      • -
      • Utiliza técnicas de canalización para guiar a tus tropas al núcleo de la base del enemigo
      • -
      • Usa hechizos sabiamente y oportunamente para mejorar las habilidades de tus tropas o contrarrestar las defensas del enemigo
      • -
      -

      Q: ¿Cómo puedo unirme a un buen clan en Clash of Clans TH 15?

      -

      A: Unirse a un buen clan en Clash of Clans TH 15 puede mejorar tu experiencia de juego proporcionándote interacción social, donaciones de tropas, beneficios de clan, guerras de clan, juegos de clan y ligas de guerra de clan. Algunas maneras de encontrar y unirse a un buen clan son:

      -
        -
      • Usa la función de búsqueda de clanes en el juego para filtrar clanes por nombre, ubicación, nivel, miembros, trofeos, frecuencia de guerra, victorias de guerra, liga de guerra, nivel mínimo de ayuntamiento, etc.
      • -
                                    • Utilice sitios web externos o aplicaciones como [Clash of Stats](https://www.clashofstats.com/), [Clash Champs](https://www.clashchamps.com/), [Clash Leaders](https://www.clashleaders.com/), etc. para encontrar clanes basados en diversos criterios y estadísticas.</li>
                                    
      • -
                                    • Utilice plataformas de medios sociales como [Reddit](https://www.reddit.com/r/ClashOfClans/), [Discord](https://discord.com/invite/clashofclans), [Facebook](https://www.facebook.com/ClashofClans/), [Twitter](https://twitter.com/ClashofClans), etc.</li>
                                    
      • -
      • Pídele a tus amigos, familiares o conocidos que jueguen a Clash of Clans que te inviten a sus clanes o te recomienden algunos buenos clanes.
      • -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Benson/text-generation/Examples/Cmo Descargar Oficina 2019 Gratis.md b/spaces/Benson/text-generation/Examples/Cmo Descargar Oficina 2019 Gratis.md deleted file mode 100644 index 6c6fcc5d498935d6aa8e459227f09c287360174a..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Cmo Descargar Oficina 2019 Gratis.md +++ /dev/null @@ -1,61 +0,0 @@ - -

      Cómo descargar Office 2019 gratis

      -

      Microsoft Office es una de las suites de productividad más populares y ampliamente utilizadas en el mundo. Incluye potentes aplicaciones como Word, Excel, PowerPoint, Outlook y más. Sin embargo, obtener la última versión de Office puede ser caro, especialmente si desea usarlo en varios dispositivos.

      -

      Afortunadamente, hay algunas maneras de descargar Office 2019 gratis legalmente. En este artículo, te mostraremos qué es Office 2019, por qué lo quieres y cómo obtenerlo sin pagar un centavo.

      -

      cómo descargar oficina 2019 gratis


      Download Zip ✒ ✒ ✒ https://bltlly.com/2v6JyK



      -

      ¿Qué es Office 2019 y por qué es posible que lo desee

      -

      Office 2019 es la última versión de la suite de software de oficina de Microsoft. Fue lanzado en septiembre de 2018 y es una compra única que no requiere una suscripción. A diferencia de Office 365, que es un servicio basado en la nube que ofrece actualizaciones regulares y nuevas características, Office 2019 es un producto independiente que no recibirá cambios ni mejoras importantes.

      -

      Sin embargo, eso no significa que Office 2019 sea inferior o obsoleto. De hecho, hay algunas razones por las que podría preferir Office 2019 sobre Office 365.

      -

      Oficina 2019 vs Oficina 365

      -

      La principal diferencia entre Office 2019 y Office 365 es cómo se conectan a la nube. Ambas suites cuentan con acceso a OneDrive, el servicio de almacenamiento en la nube de Microsoft. Pero, Office 2019 no viene con ningún espacio de almacenamiento en OneDrive y no obtiene acceso a las versiones en línea de aplicaciones como Word, Excel y PowerPoint. Office 365, por otro lado, incluye 1 TB de almacenamiento gratuito y puede editar fácilmente todos sus archivos en línea.

      - -

      Entonces, ¿cuál debes elegir? Depende de tus necesidades y preferencias. Si desea tener las últimas funciones y actualizaciones, acceder a sus archivos desde cualquier lugar y usar varios dispositivos, Office 365 podría ser una mejor opción para usted. Si desea ahorrar dinero a largo plazo, usar sus archivos sin conexión y no necesita aplicaciones o servicios adicionales, Office 2019 podría ser suficiente para usted.

      -

      Características y beneficios de Office 2019

      -

      A pesar de que Office 2019 no tiene todas las campanas y silbatos de Office 365, todavía tiene algunas características y beneficios impresionantes que pueden mejorar su productividad y creatividad. Estos son algunos de ellos:

      -
        -
      • Nuevas herramientas de entintado: Puede usar su pluma o dedo para dibujar, escribir, resaltar y borrar en Word, Excel, PowerPoint y Outlook. También puede convertir su tinta a formas o texto, o realizar problemas matemáticos complejos con Ink Math Assistant.
      • -
      • Nuevos tipos de datos: Puede trabajar con nuevos tipos de datos en Excel, como Stocks y Geografía. Estos tipos de datos pueden extraer información de fuentes en línea y actualizarse automáticamente.
      • -
      • Nuevas funciones: Puede usar nuevas funciones en Excel, como TEXTJOIN, CONCAT, IFS, SWITCH y más. Continuando con el artículo:
      • Nuevos gráficos y efectos visuales: Puede crear gráficos e imágenes impresionantes en Excel y PowerPoint, como Embudo, Mapa, Cronología y modelos 3D. Estos gráficos y gráficos pueden ayudarlo a presentar sus datos de una manera más atractiva e interactiva.
      • -
      • Nuevas animaciones y transiciones: Puede agregar nuevas animaciones y transiciones en PowerPoint, como Morph, Zoom y 3D. Estas animaciones y transiciones pueden ayudarle a crear presentaciones dinámicas y cautivadoras.
      • - -
      • Nuevas herramientas de aprendizaje: Puede usar nuevas herramientas de aprendizaje en Word y Outlook, como Leer en voz alta, Espaciado de texto y Modo de enfoque. Estas herramientas de aprendizaje pueden ayudarte a mejorar tus habilidades de lectura y escritura.
      • -
      -

      Cómo obtener Office 2019 gratis legalmente

      -

      Si estás interesado en obtener Office 2019 gratis legalmente, tienes algunas opciones que considerar. Aquí están algunas de ellas:

      -

      -

      Opción 1: Usar Microsoft 365 para la Web

      -

      Una de las maneras más fáciles de obtener Office 2019 gratis es usar Microsoft 365 para la web. Esta es una versión en línea gratuita de Office que incluye Word, Excel, PowerPoint, OneNote y Outlook. Puede acceder a estas aplicaciones desde cualquier navegador y crear, editar y compartir sus archivos en línea. También obtiene 5 GB de almacenamiento gratuito en OneDrive.

      -

      Para usar Microsoft 365 para la web, solo necesita una cuenta de Microsoft. Si no lo tienes, puedes crear uno gratis aquí: https://signup.live.com/. Una vez que tenga una cuenta, puede iniciar sesión aquí: https://www.office.com/. A continuación, puede comenzar a usar las aplicaciones desde la página de inicio o el lanzador de aplicaciones.

      -

      Opción 2: Utilice el programa de descuento de Microsoft Workplace

      -

      Otra manera de obtener Office 2019 de forma gratuita es utilizar Microsoft Workplace Discount Program. Este es un programa que permite a los empleados elegibles de las organizaciones participantes obtener Office 2019 a un precio con descuento o incluso gratis. Puede comprobar si su organización forma parte de este programa aquí: https://www.microsoft.com/en-us/home-use-program.

      -

      Para utilizar Microsoft Workplace Discount Program, necesita una dirección de correo electrónico de trabajo válida de su organización. Si su organización es elegible, recibirá un correo electrónico con un enlace para comprar Office 2019 a un precio reducido o gratis. A continuación, puede descargar e instalar Office 2019 en su dispositivo personal.

      -

      Opción 3: Utilice el servidor en línea de Microsoft Office

      - -

      Para usar Microsoft Office Online Server, necesita una licencia de Windows Server y una licencia de Office. Usted puede obtener estas licencias de forma gratuita si usted es un estudiante o un educador. Puedes comprobar si eres elegible aquí: https://www.microsoft.com/en-us/education/products/office. Una vez que tenga las licencias, puede descargar e instalar Office Online Server en su servidor aquí: https://www.microsoft.com/en-us/download/details.aspx?id=49030. Luego, puede configurar y usar las aplicaciones desde su servidor.

      Continuando con el artículo:

      Cómo instalar y activar Office 2019 en su PC o Mac

      -

      Si ha comprado u obtenido Office 2019 a través de una de las opciones anteriores, puede instalarlo y activarlo en su PC o Mac. Estos son los pasos para hacerlo:

      -

      Paso 1: Descargar Office 2019 desde una fuente de confianza

      -

      El primer paso es descargar Office 2019 desde una fuente confiable. Puede hacerlo desde la Tienda de Microsoft, el sitio web de Microsoft o el enlace que recibió de su organización o escuela. Asegúrese de descargar la versión correcta para su dispositivo y sistema operativo.

      -

      Paso 2: Ejecute el archivo de configuración y siga las instrucciones

      -

                                    El segundo paso es ejecutar el archivo de configuración y seguir las instrucciones. Dependiendo de su dispositivo y sistema operativo, el archivo de configuración podría ser un .exe, .dmg, o archivo .iso. Haga doble clic en el archivo y permita que se ejecute. Luego, siga las instrucciones en la pantalla para instalar Office 2019 en su dispositivo.</p>
                                    

      -

      Paso 3: Ingrese su clave de producto o inicie sesión con su cuenta de Microsoft

      - -

      Para activar Office 2019, debe ingresar su clave de producto o iniciar sesión con su cuenta de Microsoft. Puede hacer esto cuando inicie cualquiera de las aplicaciones de Office por primera vez. Verá una solicitud para activar Office 2019. Siga las instrucciones en la pantalla para introducir su clave de producto o iniciar sesión con su cuenta de Microsoft.

      -

      Conclusión

      -

      Office 2019 es una suite de productividad potente y versátil que puede ayudarlo a crear, editar y compartir documentos, hojas de cálculo, presentaciones y más. Sin embargo, también puede ser caro, especialmente si desea usarlo en varios dispositivos.

      -

      En este artículo, le hemos mostrado cómo descargar Office 2019 gratis legalmente. Puede utilizar Microsoft 365 para la web, Microsoft Workplace Discount Program o Microsoft Office Online Server. También puede instalar y activar Office 2019 en su PC o Mac siguiendo algunos pasos simples.

      -

      Esperamos que este artículo haya sido útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en dejar un comentario a continuación.

      -

      Preguntas frecuentes

      -
        -
      • Q: ¿Es Office 2019 compatible con Windows 10?
      • -
      • A: Sí, Office 2019 es compatible con Windows 10. También es compatible con Windows 8.1 y Windows Server 2019.
      • -
      • Q: ¿Es Office 2019 compatible con Mac OS?
      • -
      • A: Sí, Office 2019 es compatible con Mac OS. También es compatible con Mac OS X 10.14 Mojave y versiones posteriores.
      • -
      • Q: ¿Cuántos dispositivos puedo instalar Office 2019 en?
      • -
      • A: Puede instalar Office 2019 en un dispositivo por licencia. Si desea usarlo en varios dispositivos, debe comprar varias licencias o usar Office 365 en su lugar.
      • -
      • Q: ¿Cuánto tiempo dura Office 2019?
      • -
      • A: Office 2019 dura tanto como su dispositivo lo soporte. No caduca ni requiere renovación. Sin embargo, no recibe ninguna actualización importante o nuevas características.
      • - -
      • A: Sí, puede actualizar de Office 2016 a Office 2019. Sin embargo, necesita comprar una nueva licencia para Office 2019 o usar una de las opciones anteriores para obtenerla de forma gratuita.
      • -

      64aa2da5cf
      -
      -
      \ No newline at end of file diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/__init__.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/__init__.py deleted file mode 100644 index a6d6b377dfcdf246972c05659673308cfa40db37..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/retries/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""New retry v2 handlers. - -This package obsoletes the botocore/retryhandler.py module and contains -new retry logic. - -""" diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/initialise.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/initialise.py deleted file mode 100644 index d5fd4b71fed1bb4871717f978f0c470280f099c1..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/colorama/initialise.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -import atexit -import contextlib -import sys - -from .ansitowin32 import AnsiToWin32 - - -def _wipe_internal_state_for_tests(): - global orig_stdout, orig_stderr - orig_stdout = None - orig_stderr = None - - global wrapped_stdout, wrapped_stderr - wrapped_stdout = None - wrapped_stderr = None - - global atexit_done - atexit_done = False - - global fixed_windows_console - fixed_windows_console = False - - try: - # no-op if it wasn't registered - atexit.unregister(reset_all) - except AttributeError: - # python 2: no atexit.unregister. Oh well, we did our best. - pass - - -def reset_all(): - if AnsiToWin32 is not None: # Issue #74: objects might become None at exit - AnsiToWin32(orig_stdout).reset_all() - - -def init(autoreset=False, convert=None, strip=None, wrap=True): - - if not wrap and any([autoreset, convert, strip]): - raise ValueError('wrap=False conflicts with any other arg=True') - - global wrapped_stdout, wrapped_stderr - global orig_stdout, orig_stderr - - orig_stdout = sys.stdout - orig_stderr = sys.stderr - - if sys.stdout is None: - wrapped_stdout = None - else: - sys.stdout = wrapped_stdout = \ - wrap_stream(orig_stdout, convert, strip, autoreset, wrap) - if sys.stderr is None: - wrapped_stderr = None - else: - sys.stderr = wrapped_stderr = \ - wrap_stream(orig_stderr, convert, strip, autoreset, wrap) - - global atexit_done - if not atexit_done: - atexit.register(reset_all) - atexit_done = True - - -def deinit(): - if orig_stdout is not None: - sys.stdout = orig_stdout - if orig_stderr is not None: - sys.stderr = orig_stderr - - -def just_fix_windows_console(): - global fixed_windows_console - - if sys.platform != "win32": - return - if fixed_windows_console: - return - if wrapped_stdout is not None or wrapped_stderr is not None: - # Someone already ran init() and it did stuff, so we won't second-guess them - return - - # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the - # native ANSI support in the console as a side-effect. We only need to actually - # replace sys.stdout/stderr if we're in the old-style conversion mode. 
- new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) - if new_stdout.convert: - sys.stdout = new_stdout - new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) - if new_stderr.convert: - sys.stderr = new_stderr - - fixed_windows_console = True - -@contextlib.contextmanager -def colorama_text(*args, **kwargs): - init(*args, **kwargs) - try: - yield - finally: - deinit() - - -def reinit(): - if wrapped_stdout is not None: - sys.stdout = wrapped_stdout - if wrapped_stderr is not None: - sys.stderr = wrapped_stderr - - -def wrap_stream(stream, convert, strip, autoreset, wrap): - if wrap: - wrapper = AnsiToWin32(stream, - convert=convert, strip=strip, autoreset=autoreset) - if wrapper.should_wrap(): - stream = wrapper.stream - return stream - - -# Use this for initial setup as well, to reduce code duplication -_wipe_internal_state_for_tests() diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/crt.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/crt.py deleted file mode 100644 index 7b5d1301365038629b23c630c71bf6c65461d34f..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/crt.py +++ /dev/null @@ -1,644 +0,0 @@ -# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You -# may not use this file except in compliance with the License. A copy of -# the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "license" file accompanying this file. This file is -# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF -# ANY KIND, either express or implied. See the License for the specific -# language governing permissions and limitations under the License. 
-import logging -import threading -from io import BytesIO - -import awscrt.http -import botocore.awsrequest -import botocore.session -from awscrt.auth import AwsCredentials, AwsCredentialsProvider -from awscrt.io import ( - ClientBootstrap, - ClientTlsContext, - DefaultHostResolver, - EventLoopGroup, - TlsContextOptions, -) -from awscrt.s3 import S3Client, S3RequestTlsMode, S3RequestType -from botocore import UNSIGNED -from botocore.compat import urlsplit -from botocore.config import Config -from botocore.exceptions import NoCredentialsError - -from s3transfer.constants import GB, MB -from s3transfer.exceptions import TransferNotDoneError -from s3transfer.futures import BaseTransferFuture, BaseTransferMeta -from s3transfer.utils import CallArgs, OSUtils, get_callbacks - -logger = logging.getLogger(__name__) - - -class CRTCredentialProviderAdapter: - def __init__(self, botocore_credential_provider): - self._botocore_credential_provider = botocore_credential_provider - self._loaded_credentials = None - self._lock = threading.Lock() - - def __call__(self): - credentials = self._get_credentials().get_frozen_credentials() - return AwsCredentials( - credentials.access_key, credentials.secret_key, credentials.token - ) - - def _get_credentials(self): - with self._lock: - if self._loaded_credentials is None: - loaded_creds = ( - self._botocore_credential_provider.load_credentials() - ) - if loaded_creds is None: - raise NoCredentialsError() - self._loaded_credentials = loaded_creds - return self._loaded_credentials - - -def create_s3_crt_client( - region, - botocore_credential_provider=None, - num_threads=None, - target_throughput=5 * GB / 8, - part_size=8 * MB, - use_ssl=True, - verify=None, -): - """ - :type region: str - :param region: The region used for signing - - :type botocore_credential_provider: - Optional[botocore.credentials.CredentialResolver] - :param botocore_credential_provider: Provide credentials for CRT - to sign the request if not set, the request will not be signed - - :type num_threads: Optional[int] - :param num_threads: Number of worker threads generated. Default - is the number of processors in the machine. - - :type target_throughput: Optional[int] - :param target_throughput: Throughput target in Bytes. - Default is 0.625 GB/s (which translates to 5 Gb/s). - - :type part_size: Optional[int] - :param part_size: Size, in Bytes, of parts that files will be downloaded - or uploaded in. - - :type use_ssl: boolean - :param use_ssl: Whether or not to use SSL. By default, SSL is used. - Note that not all services support non-ssl connections. - - :type verify: Optional[boolean/string] - :param verify: Whether or not to verify SSL certificates. - By default SSL certificates are verified. You can provide the - following values: - - * False - do not validate SSL certificates. SSL will still be - used (unless use_ssl is False), but SSL certificates - will not be verified. - * path/to/cert/bundle.pem - A filename of the CA cert bundle to - use. Specify this argument if you want to use a custom CA cert - bundle instead of the default one on your system. 
- """ - - event_loop_group = EventLoopGroup(num_threads) - host_resolver = DefaultHostResolver(event_loop_group) - bootstrap = ClientBootstrap(event_loop_group, host_resolver) - provider = None - tls_connection_options = None - - tls_mode = ( - S3RequestTlsMode.ENABLED if use_ssl else S3RequestTlsMode.DISABLED - ) - if verify is not None: - tls_ctx_options = TlsContextOptions() - if verify: - tls_ctx_options.override_default_trust_store_from_path( - ca_filepath=verify - ) - else: - tls_ctx_options.verify_peer = False - client_tls_option = ClientTlsContext(tls_ctx_options) - tls_connection_options = client_tls_option.new_connection_options() - if botocore_credential_provider: - credentails_provider_adapter = CRTCredentialProviderAdapter( - botocore_credential_provider - ) - provider = AwsCredentialsProvider.new_delegate( - credentails_provider_adapter - ) - - target_gbps = target_throughput * 8 / GB - return S3Client( - bootstrap=bootstrap, - region=region, - credential_provider=provider, - part_size=part_size, - tls_mode=tls_mode, - tls_connection_options=tls_connection_options, - throughput_target_gbps=target_gbps, - ) - - -class CRTTransferManager: - def __init__(self, crt_s3_client, crt_request_serializer, osutil=None): - """A transfer manager interface for Amazon S3 on CRT s3 client. - - :type crt_s3_client: awscrt.s3.S3Client - :param crt_s3_client: The CRT s3 client, handling all the - HTTP requests and functions under then hood - - :type crt_request_serializer: s3transfer.crt.BaseCRTRequestSerializer - :param crt_request_serializer: Serializer, generates unsigned crt HTTP - request. - - :type osutil: s3transfer.utils.OSUtils - :param osutil: OSUtils object to use for os-related behavior when - using with transfer manager. - """ - if osutil is None: - self._osutil = OSUtils() - self._crt_s3_client = crt_s3_client - self._s3_args_creator = S3ClientArgsCreator( - crt_request_serializer, self._osutil - ) - self._future_coordinators = [] - self._semaphore = threading.Semaphore(128) # not configurable - # A counter to create unique id's for each transfer submitted. 
- self._id_counter = 0 - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, *args): - cancel = False - if exc_type: - cancel = True - self._shutdown(cancel) - - def download( - self, bucket, key, fileobj, extra_args=None, subscribers=None - ): - if extra_args is None: - extra_args = {} - if subscribers is None: - subscribers = {} - callargs = CallArgs( - bucket=bucket, - key=key, - fileobj=fileobj, - extra_args=extra_args, - subscribers=subscribers, - ) - return self._submit_transfer("get_object", callargs) - - def upload(self, fileobj, bucket, key, extra_args=None, subscribers=None): - if extra_args is None: - extra_args = {} - if subscribers is None: - subscribers = {} - callargs = CallArgs( - bucket=bucket, - key=key, - fileobj=fileobj, - extra_args=extra_args, - subscribers=subscribers, - ) - return self._submit_transfer("put_object", callargs) - - def delete(self, bucket, key, extra_args=None, subscribers=None): - if extra_args is None: - extra_args = {} - if subscribers is None: - subscribers = {} - callargs = CallArgs( - bucket=bucket, - key=key, - extra_args=extra_args, - subscribers=subscribers, - ) - return self._submit_transfer("delete_object", callargs) - - def shutdown(self, cancel=False): - self._shutdown(cancel) - - def _cancel_transfers(self): - for coordinator in self._future_coordinators: - if not coordinator.done(): - coordinator.cancel() - - def _finish_transfers(self): - for coordinator in self._future_coordinators: - coordinator.result() - - def _wait_transfers_done(self): - for coordinator in self._future_coordinators: - coordinator.wait_until_on_done_callbacks_complete() - - def _shutdown(self, cancel=False): - if cancel: - self._cancel_transfers() - try: - self._finish_transfers() - - except KeyboardInterrupt: - self._cancel_transfers() - except Exception: - pass - finally: - self._wait_transfers_done() - - def _release_semaphore(self, **kwargs): - self._semaphore.release() - - def _submit_transfer(self, request_type, call_args): - on_done_after_calls = [self._release_semaphore] - coordinator = CRTTransferCoordinator(transfer_id=self._id_counter) - components = { - 'meta': CRTTransferMeta(self._id_counter, call_args), - 'coordinator': coordinator, - } - future = CRTTransferFuture(**components) - afterdone = AfterDoneHandler(coordinator) - on_done_after_calls.append(afterdone) - - try: - self._semaphore.acquire() - on_queued = self._s3_args_creator.get_crt_callback( - future, 'queued' - ) - on_queued() - crt_callargs = self._s3_args_creator.get_make_request_args( - request_type, - call_args, - coordinator, - future, - on_done_after_calls, - ) - crt_s3_request = self._crt_s3_client.make_request(**crt_callargs) - except Exception as e: - coordinator.set_exception(e, True) - on_done = self._s3_args_creator.get_crt_callback( - future, 'done', after_subscribers=on_done_after_calls - ) - on_done(error=e) - else: - coordinator.set_s3_request(crt_s3_request) - self._future_coordinators.append(coordinator) - - self._id_counter += 1 - return future - - -class CRTTransferMeta(BaseTransferMeta): - """Holds metadata about the CRTTransferFuture""" - - def __init__(self, transfer_id=None, call_args=None): - self._transfer_id = transfer_id - self._call_args = call_args - self._user_context = {} - - @property - def call_args(self): - return self._call_args - - @property - def transfer_id(self): - return self._transfer_id - - @property - def user_context(self): - return self._user_context - - -class CRTTransferFuture(BaseTransferFuture): - def 
__init__(self, meta=None, coordinator=None): - """The future associated to a submitted transfer request via CRT S3 client - - :type meta: s3transfer.crt.CRTTransferMeta - :param meta: The metadata associated to the transfer future. - - :type coordinator: s3transfer.crt.CRTTransferCoordinator - :param coordinator: The coordinator associated to the transfer future. - """ - self._meta = meta - if meta is None: - self._meta = CRTTransferMeta() - self._coordinator = coordinator - - @property - def meta(self): - return self._meta - - def done(self): - return self._coordinator.done() - - def result(self, timeout=None): - self._coordinator.result(timeout) - - def cancel(self): - self._coordinator.cancel() - - def set_exception(self, exception): - """Sets the exception on the future.""" - if not self.done(): - raise TransferNotDoneError( - 'set_exception can only be called once the transfer is ' - 'complete.' - ) - self._coordinator.set_exception(exception, override=True) - - -class BaseCRTRequestSerializer: - def serialize_http_request(self, transfer_type, future): - """Serialize CRT HTTP requests. - - :type transfer_type: string - :param transfer_type: the type of transfer made, - e.g 'put_object', 'get_object', 'delete_object' - - :type future: s3transfer.crt.CRTTransferFuture - - :rtype: awscrt.http.HttpRequest - :returns: An unsigned HTTP request to be used for the CRT S3 client - """ - raise NotImplementedError('serialize_http_request()') - - -class BotocoreCRTRequestSerializer(BaseCRTRequestSerializer): - def __init__(self, session, client_kwargs=None): - """Serialize CRT HTTP request using botocore logic - It also takes into account configuration from both the session - and any keyword arguments that could be passed to - `Session.create_client()` when serializing the request. - - :type session: botocore.session.Session - - :type client_kwargs: Optional[Dict[str, str]]) - :param client_kwargs: The kwargs for the botocore - s3 client initialization. - """ - self._session = session - if client_kwargs is None: - client_kwargs = {} - self._resolve_client_config(session, client_kwargs) - self._client = session.create_client(**client_kwargs) - self._client.meta.events.register( - 'request-created.s3.*', self._capture_http_request - ) - self._client.meta.events.register( - 'after-call.s3.*', self._change_response_to_serialized_http_request - ) - self._client.meta.events.register( - 'before-send.s3.*', self._make_fake_http_response - ) - - def _resolve_client_config(self, session, client_kwargs): - user_provided_config = None - if session.get_default_client_config(): - user_provided_config = session.get_default_client_config() - if 'config' in client_kwargs: - user_provided_config = client_kwargs['config'] - - client_config = Config(signature_version=UNSIGNED) - if user_provided_config: - client_config = user_provided_config.merge(client_config) - client_kwargs['config'] = client_config - client_kwargs["service_name"] = "s3" - - def _crt_request_from_aws_request(self, aws_request): - url_parts = urlsplit(aws_request.url) - crt_path = url_parts.path - if url_parts.query: - crt_path = f'{crt_path}?{url_parts.query}' - headers_list = [] - for name, value in aws_request.headers.items(): - if isinstance(value, str): - headers_list.append((name, value)) - else: - headers_list.append((name, str(value, 'utf-8'))) - - crt_headers = awscrt.http.HttpHeaders(headers_list) - # CRT requires body (if it exists) to be an I/O stream. 
- crt_body_stream = None - if aws_request.body: - if hasattr(aws_request.body, 'seek'): - crt_body_stream = aws_request.body - else: - crt_body_stream = BytesIO(aws_request.body) - - crt_request = awscrt.http.HttpRequest( - method=aws_request.method, - path=crt_path, - headers=crt_headers, - body_stream=crt_body_stream, - ) - return crt_request - - def _convert_to_crt_http_request(self, botocore_http_request): - # Logic that does CRTUtils.crt_request_from_aws_request - crt_request = self._crt_request_from_aws_request(botocore_http_request) - if crt_request.headers.get("host") is None: - # If host is not set, set it for the request before using CRT s3 - url_parts = urlsplit(botocore_http_request.url) - crt_request.headers.set("host", url_parts.netloc) - if crt_request.headers.get('Content-MD5') is not None: - crt_request.headers.remove("Content-MD5") - return crt_request - - def _capture_http_request(self, request, **kwargs): - request.context['http_request'] = request - - def _change_response_to_serialized_http_request( - self, context, parsed, **kwargs - ): - request = context['http_request'] - parsed['HTTPRequest'] = request.prepare() - - def _make_fake_http_response(self, request, **kwargs): - return botocore.awsrequest.AWSResponse( - None, - 200, - {}, - FakeRawResponse(b""), - ) - - def _get_botocore_http_request(self, client_method, call_args): - return getattr(self._client, client_method)( - Bucket=call_args.bucket, Key=call_args.key, **call_args.extra_args - )['HTTPRequest'] - - def serialize_http_request(self, transfer_type, future): - botocore_http_request = self._get_botocore_http_request( - transfer_type, future.meta.call_args - ) - crt_request = self._convert_to_crt_http_request(botocore_http_request) - return crt_request - - -class FakeRawResponse(BytesIO): - def stream(self, amt=1024, decode_content=None): - while True: - chunk = self.read(amt) - if not chunk: - break - yield chunk - - -class CRTTransferCoordinator: - """A helper class for managing CRTTransferFuture""" - - def __init__(self, transfer_id=None, s3_request=None): - self.transfer_id = transfer_id - self._s3_request = s3_request - self._lock = threading.Lock() - self._exception = None - self._crt_future = None - self._done_event = threading.Event() - - @property - def s3_request(self): - return self._s3_request - - def set_done_callbacks_complete(self): - self._done_event.set() - - def wait_until_on_done_callbacks_complete(self, timeout=None): - self._done_event.wait(timeout) - - def set_exception(self, exception, override=False): - with self._lock: - if not self.done() or override: - self._exception = exception - - def cancel(self): - if self._s3_request: - self._s3_request.cancel() - - def result(self, timeout=None): - if self._exception: - raise self._exception - try: - self._crt_future.result(timeout) - except KeyboardInterrupt: - self.cancel() - raise - finally: - if self._s3_request: - self._s3_request = None - self._crt_future.result(timeout) - - def done(self): - if self._crt_future is None: - return False - return self._crt_future.done() - - def set_s3_request(self, s3_request): - self._s3_request = s3_request - self._crt_future = self._s3_request.finished_future - - -class S3ClientArgsCreator: - def __init__(self, crt_request_serializer, os_utils): - self._request_serializer = crt_request_serializer - self._os_utils = os_utils - - def get_make_request_args( - self, request_type, call_args, coordinator, future, on_done_after_calls - ): - recv_filepath = None - send_filepath = None - s3_meta_request_type 
= getattr( - S3RequestType, request_type.upper(), S3RequestType.DEFAULT - ) - on_done_before_calls = [] - if s3_meta_request_type == S3RequestType.GET_OBJECT: - final_filepath = call_args.fileobj - recv_filepath = self._os_utils.get_temp_filename(final_filepath) - file_ondone_call = RenameTempFileHandler( - coordinator, final_filepath, recv_filepath, self._os_utils - ) - on_done_before_calls.append(file_ondone_call) - elif s3_meta_request_type == S3RequestType.PUT_OBJECT: - send_filepath = call_args.fileobj - data_len = self._os_utils.get_file_size(send_filepath) - call_args.extra_args["ContentLength"] = data_len - - crt_request = self._request_serializer.serialize_http_request( - request_type, future - ) - - return { - 'request': crt_request, - 'type': s3_meta_request_type, - 'recv_filepath': recv_filepath, - 'send_filepath': send_filepath, - 'on_done': self.get_crt_callback( - future, 'done', on_done_before_calls, on_done_after_calls - ), - 'on_progress': self.get_crt_callback(future, 'progress'), - } - - def get_crt_callback( - self, - future, - callback_type, - before_subscribers=None, - after_subscribers=None, - ): - def invoke_all_callbacks(*args, **kwargs): - callbacks_list = [] - if before_subscribers is not None: - callbacks_list += before_subscribers - callbacks_list += get_callbacks(future, callback_type) - if after_subscribers is not None: - callbacks_list += after_subscribers - for callback in callbacks_list: - # The get_callbacks helper will set the first augment - # by keyword, the other augments need to be set by keyword - # as well - if callback_type == "progress": - callback(bytes_transferred=args[0]) - else: - callback(*args, **kwargs) - - return invoke_all_callbacks - - -class RenameTempFileHandler: - def __init__(self, coordinator, final_filename, temp_filename, osutil): - self._coordinator = coordinator - self._final_filename = final_filename - self._temp_filename = temp_filename - self._osutil = osutil - - def __call__(self, **kwargs): - error = kwargs['error'] - if error: - self._osutil.remove_file(self._temp_filename) - else: - try: - self._osutil.rename_file( - self._temp_filename, self._final_filename - ) - except Exception as e: - self._osutil.remove_file(self._temp_filename) - # the CRT future has done already at this point - self._coordinator.set_exception(e) - - -class AfterDoneHandler: - def __init__(self, coordinator): - self._coordinator = coordinator - - def __call__(self, **kwargs): - self._coordinator.set_done_callbacks_complete() diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/file_util.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/file_util.py deleted file mode 100644 index 1f1e444b1c30d93ca28ac15115ef73e63b9f6169..0000000000000000000000000000000000000000 --- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/file_util.py +++ /dev/null @@ -1,249 +0,0 @@ -"""distutils.file_util - -Utility functions for operating on single files. -""" - -import os -from distutils.errors import DistutilsFileError -from distutils import log - -# for generating verbose output in 'copy_file()' -_copy_action = {None: 'copying', 'hard': 'hard linking', 'sym': 'symbolically linking'} - - -def _copy_file_contents(src, dst, buffer_size=16 * 1024): # noqa: C901 - """Copy the file 'src' to 'dst'; both must be filenames. Any error - opening either file, reading from 'src', or writing to 'dst', raises - DistutilsFileError. Data is read/written in chunks of 'buffer_size' - bytes (default 16k). 
No attempt is made to handle anything apart from - regular files. - """ - # Stolen from shutil module in the standard library, but with - # custom error-handling added. - fsrc = None - fdst = None - try: - try: - fsrc = open(src, 'rb') - except OSError as e: - raise DistutilsFileError("could not open '{}': {}".format(src, e.strerror)) - - if os.path.exists(dst): - try: - os.unlink(dst) - except OSError as e: - raise DistutilsFileError( - "could not delete '{}': {}".format(dst, e.strerror) - ) - - try: - fdst = open(dst, 'wb') - except OSError as e: - raise DistutilsFileError( - "could not create '{}': {}".format(dst, e.strerror) - ) - - while True: - try: - buf = fsrc.read(buffer_size) - except OSError as e: - raise DistutilsFileError( - "could not read from '{}': {}".format(src, e.strerror) - ) - - if not buf: - break - - try: - fdst.write(buf) - except OSError as e: - raise DistutilsFileError( - "could not write to '{}': {}".format(dst, e.strerror) - ) - finally: - if fdst: - fdst.close() - if fsrc: - fsrc.close() - - -def copy_file( # noqa: C901 - src, - dst, - preserve_mode=1, - preserve_times=1, - update=0, - link=None, - verbose=1, - dry_run=0, -): - """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is - copied there with the same name; otherwise, it must be a filename. (If - the file exists, it will be ruthlessly clobbered.) If 'preserve_mode' - is true (the default), the file's mode (type and permission bits, or - whatever is analogous on the current platform) is copied. If - 'preserve_times' is true (the default), the last-modified and - last-access times are copied as well. If 'update' is true, 'src' will - only be copied if 'dst' does not exist, or if 'dst' does exist but is - older than 'src'. - - 'link' allows you to make hard links (os.link) or symbolic links - (os.symlink) instead of copying: set it to "hard" or "sym"; if it is - None (the default), files are copied. Don't set 'link' on systems that - don't support it: 'copy_file()' doesn't check if hard or symbolic - linking is available. If hardlink fails, falls back to - _copy_file_contents(). - - Under Mac OS, uses the native file copy function in macostools; on - other systems, uses '_copy_file_contents()' to copy file contents. - - Return a tuple (dest_name, copied): 'dest_name' is the actual name of - the output file, and 'copied' is true if the file was copied (or would - have been copied, if 'dry_run' true). - """ - # XXX if the destination file already exists, we clobber it if - # copying, but blow up if linking. Hmmm. And I don't know what - # macostools.copyfile() does. Should definitely be consistent, and - # should probably blow up if destination exists and we would be - # changing it (ie. it's not already a hard/soft link to src OR - # (not update) and (src newer than dst). 
- - from distutils.dep_util import newer - from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE - - if not os.path.isfile(src): - raise DistutilsFileError( - "can't copy '%s': doesn't exist or not a regular file" % src - ) - - if os.path.isdir(dst): - dir = dst - dst = os.path.join(dst, os.path.basename(src)) - else: - dir = os.path.dirname(dst) - - if update and not newer(src, dst): - if verbose >= 1: - log.debug("not copying %s (output up-to-date)", src) - return (dst, 0) - - try: - action = _copy_action[link] - except KeyError: - raise ValueError("invalid value '%s' for 'link' argument" % link) - - if verbose >= 1: - if os.path.basename(dst) == os.path.basename(src): - log.info("%s %s -> %s", action, src, dir) - else: - log.info("%s %s -> %s", action, src, dst) - - if dry_run: - return (dst, 1) - - # If linking (hard or symbolic), use the appropriate system call - # (Unix only, of course, but that's the caller's responsibility) - elif link == 'hard': - if not (os.path.exists(dst) and os.path.samefile(src, dst)): - try: - os.link(src, dst) - return (dst, 1) - except OSError: - # If hard linking fails, fall back on copying file - # (some special filesystems don't support hard linking - # even under Unix, see issue #8876). - pass - elif link == 'sym': - if not (os.path.exists(dst) and os.path.samefile(src, dst)): - os.symlink(src, dst) - return (dst, 1) - - # Otherwise (non-Mac, not linking), copy the file contents and - # (optionally) copy the times and mode. - _copy_file_contents(src, dst) - if preserve_mode or preserve_times: - st = os.stat(src) - - # According to David Ascher , utime() should be done - # before chmod() (at least under NT). - if preserve_times: - os.utime(dst, (st[ST_ATIME], st[ST_MTIME])) - if preserve_mode: - os.chmod(dst, S_IMODE(st[ST_MODE])) - - return (dst, 1) - - -# XXX I suspect this is Unix-specific -- need porting help! -def move_file(src, dst, verbose=1, dry_run=0): # noqa: C901 - - """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will - be moved into it with the same name; otherwise, 'src' is just renamed - to 'dst'. Return the new full name of the file. - - Handles cross-device moves on Unix using 'copy_file()'. What about - other systems??? - """ - from os.path import exists, isfile, isdir, basename, dirname - import errno - - if verbose >= 1: - log.info("moving %s -> %s", src, dst) - - if dry_run: - return dst - - if not isfile(src): - raise DistutilsFileError("can't move '%s': not a regular file" % src) - - if isdir(dst): - dst = os.path.join(dst, basename(src)) - elif exists(dst): - raise DistutilsFileError( - "can't move '{}': destination '{}' already exists".format(src, dst) - ) - - if not isdir(dirname(dst)): - raise DistutilsFileError( - "can't move '{}': destination '{}' not a valid path".format(src, dst) - ) - - copy_it = False - try: - os.rename(src, dst) - except OSError as e: - (num, msg) = e.args - if num == errno.EXDEV: - copy_it = True - else: - raise DistutilsFileError( - "couldn't move '{}' to '{}': {}".format(src, dst, msg) - ) - - if copy_it: - copy_file(src, dst, verbose=verbose) - try: - os.unlink(src) - except OSError as e: - (num, msg) = e.args - try: - os.unlink(dst) - except OSError: - pass - raise DistutilsFileError( - "couldn't move '%s' to '%s' by copy/delete: " - "delete '%s' failed: %s" % (src, dst, src, msg) - ) - return dst - - -def write_file(filename, contents): - """Create a file with the specified name and write 'contents' (a - sequence of strings without line terminators) to it. 
- """ - f = open(filename, "w") - try: - for line in contents: - f.write(line + "\n") - finally: - f.close() diff --git a/spaces/Billyosoro/ESRGAN/realesrgan/archs/__init__.py b/spaces/Billyosoro/ESRGAN/realesrgan/archs/__init__.py deleted file mode 100644 index f3fbbf3b78e33b61fd4c33a564a9a617010d90de..0000000000000000000000000000000000000000 --- a/spaces/Billyosoro/ESRGAN/realesrgan/archs/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -import importlib -from basicsr.utils import scandir -from os import path as osp - -# automatically scan and import arch modules for registry -# scan all the files that end with '_arch.py' under the archs folder -arch_folder = osp.dirname(osp.abspath(__file__)) -arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')] -# import all the arch modules -_arch_modules = [importlib.import_module(f'realesrgan.archs.{file_name}') for file_name in arch_filenames] diff --git a/spaces/BuBBLe1q/anything-v3.0/app.py b/spaces/BuBBLe1q/anything-v3.0/app.py deleted file mode 100644 index 99a6a3762d5e337f08e960c4a31b4ac2467bca49..0000000000000000000000000000000000000000 --- a/spaces/BuBBLe1q/anything-v3.0/app.py +++ /dev/null @@ -1,8 +0,0 @@ -import gradio as gr - -description = """
      - -
      - """ - -gr.Interface.load("models/Linaqruf/anything-v3.0", description=description).launch() \ No newline at end of file diff --git a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_to_system.h b/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_to_system.h deleted file mode 100644 index fd378fae7314fac33f4fadf5cb1ae348dbeaa0e7..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_to_system.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include -#include -#include -#include -#include -#include -#include - -namespace thrust -{ - -namespace detail -{ - -// forward declaration -template struct is_iterator_system; - -template struct device_iterator_category_to_backend_system; - -// XXX this should work entirely differently -// we should just specialize this metafunction for iterator_category_with_system_and_traversal -template - struct iterator_category_to_system - // convertible to host iterator? - : eval_if< - or_< - is_convertible, - is_convertible - >::value, - - detail::identity_, - - // convertible to device iterator? - eval_if< - or_< - is_convertible, - is_convertible - >::value, - - detail::identity_, - - // unknown system - detail::identity_ - > // if device - > // if host -{ -}; // end iterator_category_to_system - - -template - struct iterator_category_or_traversal_to_system - : eval_if< - is_iterator_system::value, - detail::identity_, - iterator_category_to_system - > -{ -}; // end iterator_category_or_traversal_to_system - -} // end detail -} // end thrust - diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scan.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scan.h deleted file mode 100644 index f47dbbc3087c613f36de65f704505340bb8a85b0..0000000000000000000000000000000000000000 --- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/scan.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#pragma once - -#include - -// this system inherits scan -#include - diff --git a/spaces/CVPR/WALT/configs/_base_/datasets/walt_vehicle.py b/spaces/CVPR/WALT/configs/_base_/datasets/walt_vehicle.py deleted file mode 100644 index 466fa524d0f43b8684a01abe57188501787db8a4..0000000000000000000000000000000000000000 --- a/spaces/CVPR/WALT/configs/_base_/datasets/walt_vehicle.py +++ /dev/null @@ -1,49 +0,0 @@ -dataset_type = 'WaltDataset' -data_root = 'data/cwalt_train/' -data_root_test = 'data/cwalt_test/' -img_norm_cfg = dict( - mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True, with_mask=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=5, - workers_per_gpu=5, - train=dict( - type=dataset_type, - ann_file=data_root + '/', - img_prefix=data_root + '/', - pipeline=train_pipeline), - val=dict( - type=dataset_type, - ann_file=data_root_test + '/', - img_prefix=data_root_test + '/', - pipeline=test_pipeline), - test=dict( - type=dataset_type, - ann_file=data_root_test + '/', - img_prefix=data_root_test + '/', - pipeline=test_pipeline)) -evaluation = dict(metric=['bbox', 'segm']) diff --git a/spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/modules/unittest.py b/spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/modules/unittest.py deleted file mode 100644 index 0675c022e4ba85d38d1f813490f6740150909524..0000000000000000000000000000000000000000 --- a/spaces/CVPR/lama-example/models/ade20k/segm_lib/nn/modules/unittest.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -# File : unittest.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. 
- -import unittest - -import numpy as np -from torch.autograd import Variable - - -def as_numpy(v): - if isinstance(v, Variable): - v = v.data - return v.cpu().numpy() - - -class TorchTestCase(unittest.TestCase): - def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3): - npa, npb = as_numpy(a), as_numpy(b) - self.assertTrue( - np.allclose(npa, npb, atol=atol), - 'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max()) - ) diff --git a/spaces/ChrisCaviar/ControlNet-v1-1/depth_estimator.py b/spaces/ChrisCaviar/ControlNet-v1-1/depth_estimator.py deleted file mode 100644 index 8af14987f58b59329e5c8441dec43f1075a29d8b..0000000000000000000000000000000000000000 --- a/spaces/ChrisCaviar/ControlNet-v1-1/depth_estimator.py +++ /dev/null @@ -1,25 +0,0 @@ -import numpy as np -import PIL.Image -from controlnet_aux.util import HWC3 -from transformers import pipeline - -from cv_utils import resize_image - - -class DepthEstimator: - def __init__(self): - self.model = pipeline('depth-estimation') - - def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image: - detect_resolution = kwargs.pop('detect_resolution', 512) - image_resolution = kwargs.pop('image_resolution', 512) - image = np.array(image) - image = HWC3(image) - image = resize_image(image, resolution=detect_resolution) - image = PIL.Image.fromarray(image) - image = self.model(image) - image = image['depth'] - image = np.array(image) - image = HWC3(image) - image = resize_image(image, resolution=image_resolution) - return PIL.Image.fromarray(image) diff --git a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/libJPG/jpge.h b/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/libJPG/jpge.h deleted file mode 100644 index a46c805ab80aab491f7f9508b3a008b149866bee..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/crazy_functions/test_project/cpp/libJPG/jpge.h +++ /dev/null @@ -1,172 +0,0 @@ - -// jpge.h - C++ class for JPEG compression. -// Public domain, Rich Geldreich -// Alex Evans: Added RGBA support, linear memory allocator. -#ifndef JPEG_ENCODER_H -#define JPEG_ENCODER_H - -#include - -namespace jpge -{ - typedef unsigned char uint8; - typedef signed short int16; - typedef signed int int32; - typedef unsigned short uint16; - typedef unsigned int uint32; - typedef unsigned int uint; - - // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common. - enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 }; - - // JPEG compression parameters structure. - struct params - { - inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false) { } - - inline bool check_valid() const - { - if ((m_quality < 1) || (m_quality > 100)) return false; - if ((uint)m_subsampling > (uint)H2V2) return false; - return true; - } - - // Quality: 1-100, higher is better. Typical values are around 50-95. - int m_quality; - - // m_subsampling: - // 0 = Y (grayscale) only - // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU) - // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU) - // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common) - subsampling_t m_subsampling; - - // Disables CbCr discrimination - only intended for testing. - // If true, the Y quantization table is also used for the CbCr channels. 
- bool m_no_chroma_discrim_flag; - - bool m_two_pass_flag; - }; - - // Writes JPEG image to a file. - // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels. - bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params()); - - // Writes JPEG image to memory buffer. - // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes. - // If return value is true, buf_size will be set to the size of the compressed data. - bool compress_image_to_jpeg_file_in_memory(void *pBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params = params()); - - // Output stream abstract class - used by the jpeg_encoder class to write to the output stream. - // put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts. - class output_stream - { - public: - virtual ~output_stream() { }; - virtual bool put_buf(const void* Pbuf, int64_t len) = 0; - template inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); } - }; - - // Lower level jpeg_encoder class - useful if more control is needed than the above helper functions. - class jpeg_encoder - { - public: - jpeg_encoder(); - ~jpeg_encoder(); - - // Initializes the compressor. - // pStream: The stream object to use for writing compressed data. - // params - Compression parameters structure, defined above. - // width, height - Image dimensions. - // channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data. - // Returns false on out of memory or if a stream write fails. - bool init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params = params()); - - const params &get_params() const { return m_params; } - - // Deinitializes the compressor, freeing any allocated memory. May be called at any time. - void deinit(); - - uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; } - inline uint get_cur_pass() { return m_pass_num; } - - // Call this method with each source scanline. - // width * src_channels bytes per scanline is expected (RGB or Y format). - // You must call with NULL after all scanlines are processed to finish compression. - // Returns false on out of memory or if a stream write fails. 
- bool process_scanline(const void* pScanline); - - private: - jpeg_encoder(const jpeg_encoder &); - jpeg_encoder &operator =(const jpeg_encoder &); - - typedef int32 sample_array_t; - - output_stream *m_pStream; - params m_params; - uint8 m_num_components; - uint8 m_comp_h_samp[3], m_comp_v_samp[3]; - int m_image_x, m_image_y, m_image_bpp, m_image_bpl; - int m_image_x_mcu, m_image_y_mcu; - int m_image_bpl_xlt, m_image_bpl_mcu; - int m_mcus_per_row; - int m_mcu_x, m_mcu_y; - uint8 *m_mcu_lines[16]; - uint8 m_mcu_y_ofs; - sample_array_t m_sample_array[64]; - int16 m_coefficient_array[64]; - int32 m_quantization_tables[2][64]; - uint m_huff_codes[4][256]; - uint8 m_huff_code_sizes[4][256]; - uint8 m_huff_bits[4][17]; - uint8 m_huff_val[4][256]; - uint32 m_huff_count[4][256]; - int m_last_dc_val[3]; - enum { JPGE_OUT_BUF_SIZE = 2048 }; - uint8 m_out_buf[JPGE_OUT_BUF_SIZE]; - uint8 *m_pOut_buf; - uint m_out_buf_left; - uint32 m_bit_buffer; - uint m_bits_in; - uint8 m_pass_num; - bool m_all_stream_writes_succeeded; - - void optimize_huffman_table(int table_num, int table_len); - void emit_byte(uint8 i); - void emit_word(uint i); - void emit_marker(int marker); - void emit_jfif_app0(); - void emit_dqt(); - void emit_sof(); - void emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag); - void emit_dhts(); - void emit_sos(); - void emit_markers(); - void compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val); - void compute_quant_table(int32 *dst, int16 *src); - void adjust_quant_table(int32 *dst, int32 *src); - void first_pass_init(); - bool second_pass_init(); - bool jpg_open(int p_x_res, int p_y_res, int src_channels); - void load_block_8_8_grey(int x); - void load_block_8_8(int x, int y, int c); - void load_block_16_8(int x, int c); - void load_block_16_8_8(int x, int c); - void load_quantized_coefficients(int component_num); - void flush_output_buffer(); - void put_bits(uint bits, uint len); - void code_coefficients_pass_one(int component_num); - void code_coefficients_pass_two(int component_num); - void code_block(int component_num); - void process_mcu_row(); - bool terminate_pass_one(); - bool terminate_pass_two(); - bool process_end_of_image(); - void load_mcu(const void* src); - void clear(); - void init(); - }; - -} // namespace jpge - -#endif // JPEG_ENCODER \ No newline at end of file diff --git a/spaces/Cong723/gpt-academic-public/request_llm/bridge_all.py b/spaces/Cong723/gpt-academic-public/request_llm/bridge_all.py deleted file mode 100644 index fddc9a756f062b68610737123ea39b6a83698a42..0000000000000000000000000000000000000000 --- a/spaces/Cong723/gpt-academic-public/request_llm/bridge_all.py +++ /dev/null @@ -1,240 +0,0 @@ - -""" - 该文件中主要包含2个函数,是所有LLM的通用接口,它们会继续向下调用更底层的LLM模型,处理多模型并行等细节 - - 不具备多线程能力的函数:正常对话时使用,具备完备的交互功能,不可多线程 - 1. predict(...) - - 具备多线程调用能力的函数:在函数插件中被调用,灵活而简洁 - 2. predict_no_ui_long_connection(...) 
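    A rough call-shape sketch (illustrative only: the contents of llm_kwargs and
    observe_window shown here are assumptions inferred from the code below, and
    real callers pass additional fields such as sampling parameters):

        llm_kwargs = {'llm_model': 'gpt-3.5-turbo'}        # selects a model_info entry
        observe_window = ['', 0, '']                       # [partial output, watchdog, spare]
        reply = predict_no_ui_long_connection(
            inputs='Hello', llm_kwargs=llm_kwargs, history=[],
            sys_prompt='You are a helpful assistant.',
            observe_window=observe_window)                 # blocking; plugin-friendly
        # predict(...) is the streaming counterpart and is wired directly to the web UI.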
-""" -import tiktoken -from functools import lru_cache -from concurrent.futures import ThreadPoolExecutor -from toolbox import get_conf, trimmed_format_exc - -from .bridge_chatgpt import predict_no_ui_long_connection as chatgpt_noui -from .bridge_chatgpt import predict as chatgpt_ui - -from .bridge_chatglm import predict_no_ui_long_connection as chatglm_noui -from .bridge_chatglm import predict as chatglm_ui - -from .bridge_newbing import predict_no_ui_long_connection as newbing_noui -from .bridge_newbing import predict as newbing_ui - -# from .bridge_tgui import predict_no_ui_long_connection as tgui_noui -# from .bridge_tgui import predict as tgui_ui - -colors = ['#FF00FF', '#00FFFF', '#FF0000', '#990099', '#009999', '#990044'] - -class LazyloadTiktoken(object): - def __init__(self, model): - self.model = model - - @staticmethod - @lru_cache(maxsize=128) - def get_encoder(model): - print('正在加载tokenizer,如果是第一次运行,可能需要一点时间下载参数') - tmp = tiktoken.encoding_for_model(model) - print('加载tokenizer完毕') - return tmp - - def encode(self, *args, **kwargs): - encoder = self.get_encoder(self.model) - return encoder.encode(*args, **kwargs) - - def decode(self, *args, **kwargs): - encoder = self.get_encoder(self.model) - return encoder.decode(*args, **kwargs) - -# Endpoint 重定向 -API_URL_REDIRECT, = get_conf("API_URL_REDIRECT") -openai_endpoint = "https://api.openai.com/v1/chat/completions" -api2d_endpoint = "https://openai.api2d.net/v1/chat/completions" -newbing_endpoint = "wss://sydney.bing.com/sydney/ChatHub" -# 兼容旧版的配置 -try: - API_URL, = get_conf("API_URL") - if API_URL != "https://api.openai.com/v1/chat/completions": - openai_endpoint = API_URL - print("警告!API_URL配置选项将被弃用,请更换为API_URL_REDIRECT配置") -except: - pass -# 新版配置 -if openai_endpoint in API_URL_REDIRECT: openai_endpoint = API_URL_REDIRECT[openai_endpoint] -if api2d_endpoint in API_URL_REDIRECT: api2d_endpoint = API_URL_REDIRECT[api2d_endpoint] -if newbing_endpoint in API_URL_REDIRECT: newbing_endpoint = API_URL_REDIRECT[newbing_endpoint] - - -# 获取tokenizer -tokenizer_gpt35 = LazyloadTiktoken("gpt-3.5-turbo") -tokenizer_gpt4 = LazyloadTiktoken("gpt-4") -get_token_num_gpt35 = lambda txt: len(tokenizer_gpt35.encode(txt, disallowed_special=())) -get_token_num_gpt4 = lambda txt: len(tokenizer_gpt4.encode(txt, disallowed_special=())) - - -model_info = { - # openai - "gpt-3.5-turbo": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": openai_endpoint, - "max_token": 4096, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, - }, - - "gpt-4": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": openai_endpoint, - "max_token": 8192, - "tokenizer": tokenizer_gpt4, - "token_cnt": get_token_num_gpt4, - }, - - # api_2d - "api2d-gpt-3.5-turbo": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": api2d_endpoint, - "max_token": 4096, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, - }, - - "api2d-gpt-4": { - "fn_with_ui": chatgpt_ui, - "fn_without_ui": chatgpt_noui, - "endpoint": api2d_endpoint, - "max_token": 8192, - "tokenizer": tokenizer_gpt4, - "token_cnt": get_token_num_gpt4, - }, - - # chatglm - "chatglm": { - "fn_with_ui": chatglm_ui, - "fn_without_ui": chatglm_noui, - "endpoint": None, - "max_token": 1024, - "tokenizer": tokenizer_gpt35, - "token_cnt": get_token_num_gpt35, - }, - # newbing - "newbing": { - "fn_with_ui": newbing_ui, - "fn_without_ui": newbing_noui, - "endpoint": newbing_endpoint, - "max_token": 4096, - "tokenizer": tokenizer_gpt35, 
- "token_cnt": get_token_num_gpt35, - }, -} - - -def LLM_CATCH_EXCEPTION(f): - """ - 装饰器函数,将错误显示出来 - """ - def decorated(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience): - try: - return f(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience) - except Exception as e: - tb_str = '\n```\n' + trimmed_format_exc() + '\n```\n' - observe_window[0] = tb_str - return tb_str - return decorated - - -def predict_no_ui_long_connection(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience=False): - """ - 发送至LLM,等待回复,一次性完成,不显示中间过程。但内部用stream的方法避免中途网线被掐。 - inputs: - 是本次问询的输入 - sys_prompt: - 系统静默prompt - llm_kwargs: - LLM的内部调优参数 - history: - 是之前的对话列表 - observe_window = None: - 用于负责跨越线程传递已经输出的部分,大部分时候仅仅为了fancy的视觉效果,留空即可。observe_window[0]:观测窗。observe_window[1]:看门狗 - """ - import threading, time, copy - - model = llm_kwargs['llm_model'] - n_model = 1 - if '&' not in model: - assert not model.startswith("tgui"), "TGUI不支持函数插件的实现" - - # 如果只询问1个大语言模型: - method = model_info[model]["fn_without_ui"] - return method(inputs, llm_kwargs, history, sys_prompt, observe_window, console_slience) - else: - # 如果同时询问多个大语言模型: - executor = ThreadPoolExecutor(max_workers=4) - models = model.split('&') - n_model = len(models) - - window_len = len(observe_window) - assert window_len==3 - window_mutex = [["", time.time(), ""] for _ in range(n_model)] + [True] - - futures = [] - for i in range(n_model): - model = models[i] - method = model_info[model]["fn_without_ui"] - llm_kwargs_feedin = copy.deepcopy(llm_kwargs) - llm_kwargs_feedin['llm_model'] = model - future = executor.submit(LLM_CATCH_EXCEPTION(method), inputs, llm_kwargs_feedin, history, sys_prompt, window_mutex[i], console_slience) - futures.append(future) - - def mutex_manager(window_mutex, observe_window): - while True: - time.sleep(0.25) - if not window_mutex[-1]: break - # 看门狗(watchdog) - for i in range(n_model): - window_mutex[i][1] = observe_window[1] - # 观察窗(window) - chat_string = [] - for i in range(n_model): - chat_string.append( f"【{str(models[i])} 说】: {window_mutex[i][0]} " ) - res = '
      \n\n---\n\n'.join(chat_string) - # # # # # # # # # # # - observe_window[0] = res - - t_model = threading.Thread(target=mutex_manager, args=(window_mutex, observe_window), daemon=True) - t_model.start() - - return_string_collect = [] - while True: - worker_done = [h.done() for h in futures] - if all(worker_done): - executor.shutdown() - break - time.sleep(1) - - for i, future in enumerate(futures): # wait and get - return_string_collect.append( f"【{str(models[i])} 说】: {future.result()} " ) - - window_mutex[-1] = False # stop mutex thread - res = '
      \n\n---\n\n'.join(return_string_collect) - return res - - -def predict(inputs, llm_kwargs, *args, **kwargs): - """ - 发送至LLM,流式获取输出。 - 用于基础的对话功能。 - inputs 是本次问询的输入 - top_p, temperature是LLM的内部调优参数 - history 是之前的对话列表(注意无论是inputs还是history,内容太长了都会触发token数量溢出的错误) - chatbot 为WebUI中显示的对话列表,修改它,然后yeild出去,可以直接修改对话界面内容 - additional_fn代表点击的哪个按钮,按钮见functional.py - """ - - method = model_info[llm_kwargs['llm_model']]["fn_with_ui"] - yield from method(inputs, llm_kwargs, *args, **kwargs) - diff --git a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/mask.py b/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/mask.py deleted file mode 100644 index d660607b1a798c38ed0495ec4acb3b14de735d35..0000000000000000000000000000000000000000 --- a/spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/mask.py +++ /dev/null @@ -1,82 +0,0 @@ -import cv2 -import numpy as np - -import util -from util import nb as neighbour - - -def find_white_components(mask, min_area = 0): - mask = (mask == 0) * 1 - return find_black_components(mask, min_area); - -def find_black_components(mask, min_area = 0): - """ - find components of zeros. - mask is a 0-1 matrix, ndarray. - """ - neighbour_type = neighbour.N4 - visited = mask.copy() - c_mask = util.img.black(mask) - - root_idx = [1] - def get_new_root(): - root_idx[0] += 1 - return root_idx[0] - - def is_visited(xy): - x, y = xy - return visited[y][x] - - def set_visited(xy): - x, y = xy - visited[y][x] = 255 - - def set_root(xy, root): - x, y = xy - c_mask[y][x] = root - - def get_root(xy): - x, y = xy - return c_mask[y][x] - - rows, cols = np.shape(mask) - q = [] - for y in xrange(rows): - for x in xrange(cols): - xy = (x, y) - if is_visited(xy): - continue - - q.append(xy) - new_root = get_new_root() - while len(q) > 0: - cp = q.pop() - set_root(cp, new_root) - set_visited(cp) - nbs = neighbour.get_neighbours(cp[0], cp[1], cols, rows, neighbour_type) - for nb in nbs: - if not is_visited(nb) and nb not in q: -# q.append(nb) - q.insert(0, nb) - - components = {} - for y in xrange(rows): - for x in xrange(cols): - root = get_root((x, y)) - if root == 0: - continue - - if root not in components: - components[root] = [] - - components[root].append((x,y)) - - ret = [] - - for root in components: - if len(components[root]) >= min_area: - ret.append(components[root]) - - return ret - - diff --git a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/tasks/image_text_pretrain.py b/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/tasks/image_text_pretrain.py deleted file mode 100644 index db955f27bb7dc8093cffd95b3a26917bb681c846..0000000000000000000000000000000000000000 --- a/spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/tasks/image_text_pretrain.py +++ /dev/null @@ -1,18 +0,0 @@ -""" - Copyright (c) 2022, salesforce.com, inc. - All rights reserved. 
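A short usage sketch for the connected-component helpers in util/mask.py above; the toy mask and the grouping noted in the comments are illustrative assumptions, and the helpers depend on the util/neighbour modules imported in that file:

    import numpy as np
    mask = np.array([[0, 0, 1],
                     [1, 1, 1],
                     [1, 0, 0]])
    # zeros form two 4-connected blobs; each component is returned as a list of (x, y) pixels
    components = find_black_components(mask, min_area=1)
    # expected grouping: {(0, 0), (1, 0)} and {(1, 2), (2, 2)}
    # find_white_components inverts the mask first, so it groups the ones instead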
- SPDX-License-Identifier: BSD-3-Clause - For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause -""" - -from video_llama.common.registry import registry -from video_llama.tasks.base_task import BaseTask - - -@registry.register_task("image_text_pretrain") -class ImageTextPretrainTask(BaseTask): - def __init__(self): - super().__init__() - - def evaluation(self, model, data_loader, cuda_enabled=True): - pass diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/chat_interface.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/chat_interface.py deleted file mode 100644 index 7c6bc63455e91bc0709eb3d238e573eb18897271..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/chat_interface.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -This file defines a useful high-level abstraction to build Gradio chatbots: ChatInterface. -""" - - -from __future__ import annotations - -import inspect -import warnings -from typing import Callable, Generator - -from gradio_client.documentation import document, set_documentation_group - -from gradio.blocks import Blocks -from gradio.components import ( - Button, - Chatbot, - Markdown, - State, - Textbox, -) -from gradio.helpers import create_examples as Examples # noqa: N812 -from gradio.layouts import Group, Row -from gradio.themes import ThemeClass as Theme - -set_documentation_group("chatinterface") - - -@document() -class ChatInterface(Blocks): - """ - ChatInterface is Gradio's high-level abstraction for creating chatbot UIs, and allows you to create - a web-based demo around a chatbot model in a few lines of code. Only one parameter is required: fn, which - takes a function that governs the response of the chatbot based on the user input and chat history. Additional - parameters can be used to control the appearance and behavior of the demo. - - Example: - import gradio as gr - - def echo(message, history): - return message - - demo = gr.ChatInterface(fn=echo, examples=["hello", "hola", "merhaba"], title="Echo Bot") - demo.launch() - Demos: chatinterface_random_response, chatinterface_streaming_echo - Guides: creating-a-chatbot-fast, sharing-your-app - """ - - def __init__( - self, - fn: Callable, - *, - chatbot: Chatbot | None = None, - textbox: Textbox | None = None, - examples: list[str] | None = None, - cache_examples: bool | None = None, - title: str | None = None, - description: str | None = None, - theme: Theme | str | None = None, - css: str | None = None, - analytics_enabled: bool | None = None, - submit_btn: str | None | Button = "Submit", - retry_btn: str | None | Button = "🔄 Retry", - undo_btn: str | None | Button = "↩️ Undo", - clear_btn: str | None | Button = "🗑️ Clear", - ): - """ - Parameters: - fn: the function to wrap the chat interface around. Should accept two parameters: a string input message and list of two-element lists of the form [[user_message, bot_message], ...] representing the chat history, and return a string response. See the Chatbot documentation for more information on the chat history format. - chatbot: an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created. - textbox: an instance of the gr.Textbox component to use for the chat interface, if you would like to customize the textbox properties. 
If not provided, a default gr.Textbox component will be created. - examples: sample inputs for the function; if provided, appear below the chatbot and can be clicked to populate the chatbot input. - cache_examples: If True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False. - title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window. - description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content. - theme: Theme to use, loaded from gradio.themes. - css: custom css or path to custom css file to use with interface. - analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True. - submit_btn: Text to display on the submit button. If None, no button will be displayed. If a Button object, that button will be used. - retry_btn: Text to display on the retry button. If None, no button will be displayed. If a Button object, that button will be used. - undo_btn: Text to display on the delete last button. If None, no button will be displayed. If a Button object, that button will be used. - clear_btn: Text to display on the clear button. If None, no button will be displayed. If a Button object, that button will be used. - """ - super().__init__( - analytics_enabled=analytics_enabled, - mode="chat_interface", - css=css, - title=title or "Gradio", - theme=theme, - ) - if len(inspect.signature(fn).parameters) != 2: - warnings.warn( - "The function to ChatInterface should take two inputs (message, history) and return a single string response.", - UserWarning, - ) - - self.fn = fn - self.examples = examples - if self.space_id and cache_examples is None: - self.cache_examples = True - else: - self.cache_examples = cache_examples or False - self.buttons: list[Button] = [] - - with self: - if title: - Markdown( - f"
      {self.title}
      " - ) - if description: - Markdown(description) - - with Group(): - if chatbot: - self.chatbot = chatbot.render() - else: - self.chatbot = Chatbot(label="Chatbot") - with Row(): - if textbox: - self.textbox = textbox.render() - else: - self.textbox = Textbox( - container=False, - show_label=False, - placeholder="Type a message...", - scale=10, - ) - if submit_btn: - if isinstance(submit_btn, Button): - submit_btn.render() - elif isinstance(submit_btn, str): - submit_btn = Button( - submit_btn, variant="primary", scale=1, min_width=0 - ) - else: - raise ValueError( - f"The submit_btn parameter must be a gr.Button, string, or None, not {type(submit_btn)}" - ) - self.buttons.append(submit_btn) - - with Row(): - self.stop_btn = Button("Stop", variant="stop", visible=False) - - for btn in [retry_btn, undo_btn, clear_btn]: - if btn: - if isinstance(btn, Button): - btn.render() - elif isinstance(btn, str): - btn = Button(btn, variant="secondary") - else: - raise ValueError( - f"All the _btn parameters must be a gr.Button, string, or None, not {type(btn)}" - ) - self.buttons.append(btn) - - self.fake_api_btn = Button("Fake API", visible=False) - self.fake_response_textbox = Textbox(label="Response", visible=False) - ( - self.submit_btn, - self.retry_btn, - self.undo_btn, - self.clear_btn, - ) = self.buttons - - if examples: - if inspect.isgeneratorfunction(self.fn): - examples_fn = self._examples_stream_fn - else: - examples_fn = self._examples_fn - - self.examples_handler = Examples( - examples=examples, - inputs=self.textbox, - outputs=self.chatbot, - fn=examples_fn, - cache_examples=self.cache_examples, - ) - - self.saved_input = State() - - self._setup_events() - self._setup_api() - - def _setup_events(self): - if inspect.isgeneratorfunction(self.fn): - submit_fn = self._stream_fn - else: - submit_fn = self._submit_fn - - self.textbox.submit( - self._clear_and_save_textbox, - [self.textbox], - [self.textbox, self.saved_input], - api_name=False, - queue=False, - ).then( - self._display_input, - [self.saved_input, self.chatbot], - [self.chatbot], - api_name=False, - queue=False, - ).then( - submit_fn, - [self.saved_input, self.chatbot], - [self.chatbot], - api_name=False, - ) - - if self.submit_btn: - self.submit_btn.click( - self._clear_and_save_textbox, - [self.textbox], - [self.textbox, self.saved_input], - api_name=False, - queue=False, - ).then( - self._display_input, - [self.saved_input, self.chatbot], - [self.chatbot], - api_name=False, - queue=False, - ).then( - submit_fn, - [self.saved_input, self.chatbot], - [self.chatbot], - api_name=False, - ) - - if self.retry_btn: - self.retry_btn.click( - self._delete_prev_fn, - [self.chatbot], - [self.chatbot, self.saved_input], - api_name=False, - queue=False, - ).then( - self._display_input, - [self.saved_input, self.chatbot], - [self.chatbot], - api_name=False, - queue=False, - ).then( - submit_fn, - [self.saved_input, self.chatbot], - [self.chatbot], - api_name=False, - ) - - if self.undo_btn: - self.undo_btn.click( - self._delete_prev_fn, - [self.chatbot], - [self.chatbot, self.saved_input], - api_name=False, - queue=False, - ).then( - lambda x: x, - [self.saved_input], - [self.textbox], - api_name=False, - queue=False, - ) - - if self.clear_btn: - self.clear_btn.click( - lambda: ([], None), - None, - [self.chatbot, self.saved_input], - queue=False, - api_name=False, - ) - - def _setup_api(self): - if inspect.isgeneratorfunction(self.fn): - api_fn = self._api_stream_fn - else: - api_fn = self._api_submit_fn - - # Use a gr.State() 
instead of self.chatbot so that the API doesn't require passing forth - # a chat history, instead it is just stored internally in the state. - history = State([]) - - self.fake_api_btn.click( - api_fn, - [self.textbox, history], - [self.textbox, history], - api_name="chat", - ) - - def _clear_and_save_textbox(self, message: str) -> tuple[str, str]: - return "", message - - def _display_input( - self, message: str, history: list[list[str | None]] - ) -> list[list[str | None]]: - history.append([message, None]) - return history - - def _submit_fn( - self, message: str, history_with_input: list[list[str | None]] - ) -> list[list[str | None]]: - history = history_with_input[:-1] - response = self.fn(message, history) - history.append([message, response]) - return history - - def _stream_fn( - self, message: str, history_with_input: list[list[str | None]] - ) -> Generator[list[list[str | None]], None, None]: - history = history_with_input[:-1] - generator = self.fn(message, history) - try: - first_response = next(generator) - yield history + [[message, first_response]] - except StopIteration: - yield history + [[message, None]] - for response in generator: - yield history + [[message, response]] - - def _api_submit_fn( - self, message: str, history: list[list[str | None]] - ) -> tuple[str, list[list[str | None]]]: - response = self.fn(message, history) - history.append([message, response]) - return response, history - - def _api_stream_fn( - self, message: str, history: list[list[str | None]] - ) -> Generator[tuple[str | None, list[list[str | None]]], None, None]: - generator = self.fn(message, history) - try: - first_response = next(generator) - yield first_response, history + [[message, first_response]] - except StopIteration: - yield None, history + [[message, None]] - for response in generator: - yield response, history + [[message, response]] - - def _examples_fn(self, message: str) -> list[list[str | None]]: - return [[message, self.fn(message, [])]] - - def _examples_stream_fn( - self, message: str - ) -> Generator[list[list[str | None]], None, None]: - for response in self.fn(message, []): - yield [[message, response]] - - def _delete_prev_fn( - self, history: list[list[str | None]] - ) -> tuple[list[list[str | None]], str]: - try: - message, _ = history.pop() - except IndexError: - message = "" - return history, message or "" diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/cli.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/cli.py deleted file mode 100644 index aa8e8b9b099adbde4cee9f683feaaa5023895120..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/cli.py +++ /dev/null @@ -1,14 +0,0 @@ -import sys - -import gradio.deploy_space -import gradio.reload - - -def cli(): - args = sys.argv[1:] - if len(args) == 0: - raise ValueError("No file specified.") - if args[0] == "deploy": - gradio.deploy_space.deploy() - else: - gradio.reload.main() diff --git a/spaces/Dimitre/sentence-similarity-use/README.md b/spaces/Dimitre/sentence-similarity-use/README.md deleted file mode 100644 index 9a01c8d94d19668d29238c87009e90d8876036ad..0000000000000000000000000000000000000000 --- a/spaces/Dimitre/sentence-similarity-use/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Sentence Similarity Use -emoji: 💩 -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.1.5 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at 
https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/frechet_inception_distance.py b/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/frechet_inception_distance.py deleted file mode 100644 index 41f71fe4bfb85218cc283b3f7bc3a34fea5f790d..0000000000000000000000000000000000000000 --- a/spaces/Dinoking/Guccio-AI-Designer/models/stylegan/stylegan_tf/metrics/frechet_inception_distance.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Frechet Inception Distance (FID).""" - -import os -import numpy as np -import scipy -import tensorflow as tf -import dnnlib.tflib as tflib - -from metrics import metric_base -from training import misc - -#---------------------------------------------------------------------------- - -class FID(metric_base.MetricBase): - def __init__(self, num_images, minibatch_per_gpu, **kwargs): - super().__init__(**kwargs) - self.num_images = num_images - self.minibatch_per_gpu = minibatch_per_gpu - - def _evaluate(self, Gs, num_gpus): - minibatch_size = num_gpus * self.minibatch_per_gpu - inception = misc.load_pkl('https://drive.google.com/uc?id=1MzTY44rLToO5APn8TZmfR7_ENSe5aZUn') # inception_v3_features.pkl - activations = np.empty([self.num_images, inception.output_shape[1]], dtype=np.float32) - - # Calculate statistics for reals. - cache_file = self._get_cache_file_for_reals(num_images=self.num_images) - os.makedirs(os.path.dirname(cache_file), exist_ok=True) - if os.path.isfile(cache_file): - mu_real, sigma_real = misc.load_pkl(cache_file) - else: - for idx, images in enumerate(self._iterate_reals(minibatch_size=minibatch_size)): - begin = idx * minibatch_size - end = min(begin + minibatch_size, self.num_images) - activations[begin:end] = inception.run(images[:end-begin], num_gpus=num_gpus, assume_frozen=True) - if end == self.num_images: - break - mu_real = np.mean(activations, axis=0) - sigma_real = np.cov(activations, rowvar=False) - misc.save_pkl((mu_real, sigma_real), cache_file) - - # Construct TensorFlow graph. - result_expr = [] - for gpu_idx in range(num_gpus): - with tf.device('/gpu:%d' % gpu_idx): - Gs_clone = Gs.clone() - inception_clone = inception.clone() - latents = tf.random_normal([self.minibatch_per_gpu] + Gs_clone.input_shape[1:]) - images = Gs_clone.get_output_for(latents, None, is_validation=True, randomize_noise=True) - images = tflib.convert_images_to_uint8(images) - result_expr.append(inception_clone.get_output_for(images)) - - # Calculate statistics for fakes. - for begin in range(0, self.num_images, minibatch_size): - end = min(begin + minibatch_size, self.num_images) - activations[begin:end] = np.concatenate(tflib.run(result_expr), axis=0)[:end-begin] - mu_fake = np.mean(activations, axis=0) - sigma_fake = np.cov(activations, rowvar=False) - - # Calculate FID. 
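        # Fréchet distance between the two Gaussians fitted to the Inception features:
        #   FID = ||mu_real - mu_fake||^2 + Tr(sigma_real + sigma_fake - 2 * sqrtm(sigma_fake @ sigma_real))
        # Below, `m` is the squared-mean term and `s` is the matrix square root of the
        # covariance product; np.real() strips the small imaginary component that
        # scipy.linalg.sqrtm can introduce through numerical error.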
- m = np.square(mu_fake - mu_real).sum() - s, _ = scipy.linalg.sqrtm(np.dot(sigma_fake, sigma_real), disp=False) # pylint: disable=no-member - dist = m + np.trace(sigma_fake + sigma_real - 2*s) - self._report_result(np.real(dist)) - -#---------------------------------------------------------------------------- diff --git a/spaces/DiweshUIT/Spectrometer/app.py b/spaces/DiweshUIT/Spectrometer/app.py deleted file mode 100644 index 2f23f60e1161e739270b9ed8247d31fa5de8c5cc..0000000000000000000000000000000000000000 --- a/spaces/DiweshUIT/Spectrometer/app.py +++ /dev/null @@ -1,145 +0,0 @@ -import os -os.system('pip install -e pycopy-colorsys') -from gradio.components import Label -import colorsys -import cv2 as cv -import gradio as gr -import matplotlib -import math -import matplotlib.pyplot as plt -import numpy as np - - -def image_mod(image): - #plt.figure(figsize=(10,10)) - #image1 = cv.imread(r"/content/photo1.jpg") - grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY) - #plt.imshow(grey) - shape=(grey.shape) - #pixelc=( grey.shape[0] * grey.shape[1]) - - return grey -def greet(image): - grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY) - #plt.imshow(grey) - shape=(grey.shape) - pixelc=( grey.shape[0] * grey.shape[1]) - return shape, pixelc - - -def avg(image): - l=[] - -#You're free to do a resize or not, just for the example - cap = cv.resize(image, (340,480)) - for x in range (0,340,1): - for y in range(0,480,1): - color = cap[y,x] - l.append(color) - #print(color) - n=len(l) - l2 = [item[0] for item in l] -#print(l2) - sumred=0 -#l2 = [item[1] for item in l] - #print(l2) - for ele in range(0, len(l2)): - sumred = sumred + l2[ele] - answer=sumred/n - sumgreen=0 - l3 = [item[1] for item in l] - #print(l3) - for ele in range(0, len(l3)): - sumgreen = sumgreen + l3[ele] - answer1=sumgreen/n - sumblue=0 - l4 = [item[2] for item in l] -#print(l4) - for ele in range(0, len(l4)): - sumblue = sumblue + l4[ele] - answer2=sumblue/n -#print(answer2) - newp=(answer1+answer2+answer)/3 - red=answer #red ko blue see change - green=answer2 - blue=answer1 - #rgb_to_name((0, 0, 0)) - fig=plt.figure() - plt.imshow([[(math.ceil(red), math.ceil(blue), math.ceil(green))]]) - #plt.show() - return plt -def wave(image): - l=[] - -#You're free to do a resize or not, just for the example - cap = cv.resize(image, (340,480)) - for x in range (0,340,1): - for y in range(0,480,1): - color = cap[y,x] - l.append(color) - #print(color) - n=len(l) - l2 = [item[0] for item in l] -#print(l2) - sumred=0 -#l2 = [item[1] for item in l] - #print(l2) - for ele in range(0, len(l2)): - sumred = sumred + l2[ele] - answer=sumred/n - sumgreen=0 - l3 = [item[1] for item in l] - #print(l3) - for ele in range(0, len(l3)): - sumgreen = sumgreen + l3[ele] - answer1=sumgreen/n - sumblue=0 - l4 = [item[2] for item in l] -#print(l4) - for ele in range(0, len(l4)): - sumblue = sumblue + l4[ele] - answer2=sumblue/n -#print(answer2) - newp=(answer1+answer2+answer)/3 - a1=math.ceil(answer) - a2=math.ceil(answer1) - a3=math.ceil(answer2) - - - #rgb normal: range (0-255, 0-255, 0.255) - blue=answer2 #red ko blue see change - green=answer1 - red=answer #red ko blue see change - #rgb normal: range (0-255, 0-255, 0.255) - #get rgb percentage: range (0-1, 0-1, 0-1 ) - red_percentage= red / float(255) - green_percentage= green/ float(255) - blue_percentage=blue / float(255) - - - #get hsv percentage: range (0-1, 0-1, 0-1) - color_hsv_percentage=colorsys.rgb_to_hsv(red_percentage, green_percentage, blue_percentage) - #print('color_hsv_percentage: ', 
color_hsv_percentage) - - - - #get normal hsv: range (0-360, 0-255, 0-255) - color_h=round(360*color_hsv_percentage[0]) - color_s=round(255*color_hsv_percentage[1]) - color_v=round(255*color_hsv_percentage[2]) - - color_hsv=[color_h, color_s, color_h] - l= 650 - 250 / 270 *color_h - #print('color_hsv: ', color_hsv) - return l - - - -demo4=gr.Interface(wave,gr.Image(label="Select your Image"),outputs=[gr.outputs.Textbox(label="Wave Length in nm (Nanometre)")],title="Nature of Wave Length Emitted") -#demo5=gr.Interface(avg,gr.Image(label="Average Color seen by Cameraman"),outputs="text",title="Color Analysis") -output3 = gr.Plot() -demo1 = gr.Interface(image_mod, gr.Image(label="UPLOAD YOUR IMAGE HERE",shape=(2000, 2000)),"image",title="Spectrometer") -demo3=gr.Interface(avg,gr.Image(label="Average Color seen by the camera"),outputs=[gr.Plot(label="Matplotlib Plot")],title="Expected Color Seen by Camera") -demo2 = gr.Interface(greet,gr.Image(label="UPLOAD YOUR IMAGE HERE"),outputs="text",title="Dimension and No.of Pixels") -demo = gr.TabbedInterface([demo1,demo2,demo3,demo4]) -demo.launch() \ No newline at end of file diff --git a/spaces/DragGan/DragGan-Inversion/PTI/configs/__init__.py b/spaces/DragGan/DragGan-Inversion/PTI/configs/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/model.py b/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/model.py deleted file mode 100644 index ede4360148e260363887662bae7fe68c987ee60e..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/PTI/models/e4e/stylegan2/model.py +++ /dev/null @@ -1,674 +0,0 @@ -import math -import random -import torch -from torch import nn -from torch.nn import functional as F - -from .op.fused_act import FusedLeakyReLU, fused_leaky_relu -from .op.upfirdn2d import upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer('kernel', kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - 
self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' - f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' - f'upsample={self.upsample}, downsample={self.downsample})' - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * 
self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size)) - - def forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - ) - - self.noise = NoiseInjection() - # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) - # self.activate = ScaledLeakyReLU(0.2) - self.activate = FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - # out = out + self.bias - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - ): - super().__init__() - - self.size = size - - self.style_dim = style_dim - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu' - ) - ) - - self.style = nn.Sequential(*layers) - - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * 
channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res] - self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape)) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim)) - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - return_features=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, f'noise_{i}') for i in range(self.num_layers) - ] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.n_latent - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles[0] - - else: - if inject_index is None: - inject_index = random.randint(1, self.n_latent - 1) - - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) - - latent = torch.cat([latent, latent2], 1) - - out = self.input(latent) - out = self.conv1(out, latent[:, 0], noise=noise[0]) - - skip = self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - - i += 2 - - image = skip - - if return_latents: - return image, latent - elif return_features: - return image, out - else: - return image, None - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - 
): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class Discriminator(nn.Module): - def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(3, channels[size], 1)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'), - EqualLinear(channels[4], 1), - ) - - def forward(self, input): - out = self.convs(input) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out diff --git a/spaces/DragGan/DragGan-Inversion/stylegan_human/edit/edit_config.py b/spaces/DragGan/DragGan-Inversion/stylegan_human/edit/edit_config.py deleted file mode 100644 index 25fb4e500f5ce6ec6ec07631899b851492b08bb9..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan-Inversion/stylegan_human/edit/edit_config.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) SenseTime Research. All rights reserved. 
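A brief illustrative sketch of the truncation trick used in Generator.forward above; g, z, and the psi value of 0.7 are hypothetical, and the custom fused ops may require a CUDA build to actually run:

    import torch
    # w' = w_avg + psi * (w - w_avg): pulling mapped latents toward the mean latent
    # trades sample diversity for fidelity.
    g = Generator(size=1024, style_dim=512, n_mlp=8)
    z = torch.randn(1, 512)
    w_avg = g.mean_latent(n_latent=4096)                     # average of many mapped latents
    img, _ = g([z], truncation=0.7, truncation_latent=w_avg)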
- -attr_dict = dict( - interface_gan={ # strength - # strength: negative for shorter, positive for longer - 'upper_length': [-1], - 'bottom_length': [1] - }, - stylespace={ # layer, strength, threshold - # strength: negative for shorter, positive for longer - 'upper_length': [5, -5, 0.0028], - 'bottom_length': [3, 5, 0.003] - }, - sefa={ # layer, strength - # -5 # strength: negative for longer, positive for shorter - 'upper_length': [[4, 5, 6, 7], 5], - 'bottom_length': [[4, 5, 6, 7], 5] - } -) diff --git a/spaces/ECCV2022/bytetrack/tutorials/motr/motr_det.py b/spaces/ECCV2022/bytetrack/tutorials/motr/motr_det.py deleted file mode 100644 index b9f74fdf8520385a79653a557631fa4a9ac1b9fc..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/motr/motr_det.py +++ /dev/null @@ -1,677 +0,0 @@ -# ------------------------------------------------------------------------ -# Copyright (c) 2021 megvii-model. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# ------------------------------------------------------------------------ - -""" -DETR model and criterion classes. -""" -import copy -import math -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn, Tensor -from typing import List - -from util import box_ops -from util.misc import (NestedTensor, nested_tensor_from_tensor_list, - accuracy, get_world_size, interpolate, get_rank, - is_dist_avail_and_initialized, inverse_sigmoid) - -from models.structures import Instances, Boxes, pairwise_iou, matched_boxlist_iou - -from .backbone import build_backbone -from .matcher import build_matcher -from .deformable_transformer_plus import build_deforamble_transformer -from .qim import build as build_query_interaction_layer -from .memory_bank import build_memory_bank -from .deformable_detr import SetCriterion, MLP -from .segmentation import sigmoid_focal_loss - - -class ClipMatcher(SetCriterion): - def __init__(self, num_classes, - matcher, - weight_dict, - losses): - """ Create the criterion. - Parameters: - num_classes: number of object categories, omitting the special no-object category - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. 
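            A construction sketch (the class count and weight values here are
            illustrative assumptions, not the training configuration):

                matcher = build_matcher(args)
                weight_dict = {'loss_ce': 2.0, 'loss_bbox': 5.0, 'loss_giou': 2.0}
                criterion = ClipMatcher(num_classes=1, matcher=matcher,
                                        weight_dict=weight_dict,
                                        losses=['labels', 'boxes'])
                criterion.initialize_for_single_clip(gt_instances_per_frame)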
- """ - super().__init__(num_classes, matcher, weight_dict, losses) - self.num_classes = num_classes - self.matcher = matcher - self.weight_dict = weight_dict - self.losses = losses - self.focal_loss = True - self.losses_dict = {} - self._current_frame_idx = 0 - - def initialize_for_single_clip(self, gt_instances: List[Instances]): - self.gt_instances = gt_instances - self.num_samples = 0 - self.sample_device = None - self._current_frame_idx = 0 - self.losses_dict = {} - - def _step(self): - self._current_frame_idx += 1 - - def calc_loss_for_track_scores(self, track_instances: Instances): - frame_id = self._current_frame_idx - 1 - gt_instances = self.gt_instances[frame_id] - outputs = { - 'pred_logits': track_instances.track_scores[None], - } - device = track_instances.track_scores.device - - num_tracks = len(track_instances) - src_idx = torch.arange(num_tracks, dtype=torch.long, device=device) - tgt_idx = track_instances.matched_gt_idxes # -1 for FP tracks and disappeared tracks - - track_losses = self.get_loss('labels', - outputs=outputs, - gt_instances=[gt_instances], - indices=[(src_idx, tgt_idx)], - num_boxes=1) - self.losses_dict.update( - {'frame_{}_track_{}'.format(frame_id, key): value for key, value in - track_losses.items()}) - - def get_num_boxes(self, num_samples): - num_boxes = torch.as_tensor(num_samples, dtype=torch.float, device=self.sample_device) - if is_dist_avail_and_initialized(): - torch.distributed.all_reduce(num_boxes) - num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() - return num_boxes - - def get_loss(self, loss, outputs, gt_instances, indices, num_boxes, **kwargs): - loss_map = { - 'labels': self.loss_labels, - 'cardinality': self.loss_cardinality, - 'boxes': self.loss_boxes, - } - assert loss in loss_map, f'do you really want to compute {loss} loss?' - return loss_map[loss](outputs, gt_instances, indices, num_boxes, **kwargs) - - def loss_boxes(self, outputs, gt_instances: List[Instances], indices: List[tuple], num_boxes): - """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss - targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] - The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size. - """ - # We ignore the regression loss of the track-disappear slots. - #TODO: Make this filter process more elegant. 
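-        # Drop (src, tgt) pairs whose target index is -1, i.e. tracks with no ground-truth
-        # match in this frame, so the L1 and GIoU terms below only cover validly matched boxes.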
- filtered_idx = [] - for src_per_img, tgt_per_img in indices: - keep = tgt_per_img != -1 - filtered_idx.append((src_per_img[keep], tgt_per_img[keep])) - indices = filtered_idx - idx = self._get_src_permutation_idx(indices) - src_boxes = outputs['pred_boxes'][idx] - target_boxes = torch.cat([gt_per_img.boxes[i] for gt_per_img, (_, i) in zip(gt_instances, indices)], dim=0) - - # for pad target, don't calculate regression loss, judged by whether obj_id=-1 - target_obj_ids = torch.cat([gt_per_img.obj_ids[i] for gt_per_img, (_, i) in zip(gt_instances, indices)], dim=0) # size(16) - mask = (target_obj_ids != -1) - - loss_bbox = F.l1_loss(src_boxes[mask], target_boxes[mask], reduction='none') - loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( - box_ops.box_cxcywh_to_xyxy(src_boxes[mask]), - box_ops.box_cxcywh_to_xyxy(target_boxes[mask]))) - - losses = {} - losses['loss_bbox'] = loss_bbox.sum() / num_boxes - losses['loss_giou'] = loss_giou.sum() / num_boxes - - return losses - - def loss_labels(self, outputs, gt_instances: List[Instances], indices, num_boxes, log=False): - """Classification loss (NLL) - targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] - """ - src_logits = outputs['pred_logits'] - idx = self._get_src_permutation_idx(indices) - target_classes = torch.full(src_logits.shape[:2], self.num_classes, - dtype=torch.int64, device=src_logits.device) - # The matched gt for disappear track query is set -1. - labels = [] - for gt_per_img, (_, J) in zip(gt_instances, indices): - labels_per_img = torch.ones_like(J) - # set labels of track-appear slots to 0. - if len(gt_per_img) > 0: - labels_per_img[J != -1] = gt_per_img.labels[J[J != -1]] - labels.append(labels_per_img) - target_classes_o = torch.cat(labels) - target_classes[idx] = target_classes_o - if self.focal_loss: - gt_labels_target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[:, :, :-1] # no loss for the last (background) class - gt_labels_target = gt_labels_target.to(src_logits) - loss_ce = sigmoid_focal_loss(src_logits.flatten(1), - gt_labels_target.flatten(1), - alpha=0.25, - gamma=2, - num_boxes=num_boxes, mean_in_dim1=False) - loss_ce = loss_ce.sum() - else: - loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) - losses = {'loss_ce': loss_ce} - - if log: - # TODO this should probably be a separate loss, not hacked in this one here - losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] - - return losses - - def match_for_single_frame(self, outputs: dict): - outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} - - gt_instances_i = self.gt_instances[self._current_frame_idx] # gt instances of i-th image. - track_instances: Instances = outputs_without_aux['track_instances'] - pred_logits_i = track_instances.pred_logits # predicted logits of i-th image. - pred_boxes_i = track_instances.pred_boxes # predicted boxes of i-th image. - - obj_idxes = gt_instances_i.obj_ids - obj_idxes_list = obj_idxes.detach().cpu().numpy().tolist() - obj_idx_to_gt_idx = {obj_idx: gt_idx for gt_idx, obj_idx in enumerate(obj_idxes_list)} - outputs_i = { - 'pred_logits': pred_logits_i.unsqueeze(0), - 'pred_boxes': pred_boxes_i.unsqueeze(0), - } - - # step1. inherit and update the previous tracks. - num_disappear_track = 0 - for j in range(len(track_instances)): - obj_id = track_instances.obj_idxes[j].item() - # set new target idx. 
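-            # A slot that was already tracking an object keeps its GT index if that object is
-            # still present in this frame; otherwise it is counted and flagged as disappeared (-1).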
- if obj_id >= 0: - if obj_id in obj_idx_to_gt_idx: - track_instances.matched_gt_idxes[j] = obj_idx_to_gt_idx[obj_id] - else: - num_disappear_track += 1 - track_instances.matched_gt_idxes[j] = -1 # track-disappear case. - else: - track_instances.matched_gt_idxes[j] = -1 - - full_track_idxes = torch.arange(len(track_instances), dtype=torch.long).to(pred_logits_i.device) - matched_track_idxes = (track_instances.obj_idxes >= 0) # occu - prev_matched_indices = torch.stack( - [full_track_idxes[matched_track_idxes], track_instances.matched_gt_idxes[matched_track_idxes]], dim=1).to( - pred_logits_i.device) - - # step2. select the unmatched slots. - # note that the FP tracks whose obj_idxes are -2 will not be selected here. - unmatched_track_idxes = full_track_idxes[track_instances.obj_idxes == -1] - - # step3. select the untracked gt instances (new tracks). - tgt_indexes = track_instances.matched_gt_idxes - tgt_indexes = tgt_indexes[tgt_indexes != -1] - - tgt_state = torch.zeros(len(gt_instances_i)).to(pred_logits_i.device) - tgt_state[tgt_indexes] = 1 - untracked_tgt_indexes = torch.arange(len(gt_instances_i)).to(pred_logits_i.device)[tgt_state == 0] - # untracked_tgt_indexes = select_unmatched_indexes(tgt_indexes, len(gt_instances_i)) - untracked_gt_instances = gt_instances_i[untracked_tgt_indexes] - - def match_for_single_decoder_layer(unmatched_outputs, matcher): - new_track_indices = matcher(unmatched_outputs, - [untracked_gt_instances]) # list[tuple(src_idx, tgt_idx)] - - src_idx = new_track_indices[0][0] - tgt_idx = new_track_indices[0][1] - # concat src and tgt. - new_matched_indices = torch.stack([unmatched_track_idxes[src_idx], untracked_tgt_indexes[tgt_idx]], - dim=1).to(pred_logits_i.device) - return new_matched_indices - - # step4. do matching between the unmatched slots and GTs. - unmatched_outputs = { - 'pred_logits': track_instances.pred_logits[unmatched_track_idxes].unsqueeze(0), - 'pred_boxes': track_instances.pred_boxes[unmatched_track_idxes].unsqueeze(0), - } - new_matched_indices = match_for_single_decoder_layer(unmatched_outputs, self.matcher) - - # step5. update obj_idxes according to the new matching result. - track_instances.obj_idxes[new_matched_indices[:, 0]] = gt_instances_i.obj_ids[new_matched_indices[:, 1]].long() - track_instances.matched_gt_idxes[new_matched_indices[:, 0]] = new_matched_indices[:, 1] - - # step6. calculate iou. - active_idxes = (track_instances.obj_idxes >= 0) & (track_instances.matched_gt_idxes >= 0) - active_track_boxes = track_instances.pred_boxes[active_idxes] - if len(active_track_boxes) > 0: - gt_boxes = gt_instances_i.boxes[track_instances.matched_gt_idxes[active_idxes]] - active_track_boxes = box_ops.box_cxcywh_to_xyxy(active_track_boxes) - gt_boxes = box_ops.box_cxcywh_to_xyxy(gt_boxes) - track_instances.iou[active_idxes] = matched_boxlist_iou(Boxes(active_track_boxes), Boxes(gt_boxes)) - - # step7. merge the unmatched pairs and the matched pairs. - matched_indices = torch.cat([new_matched_indices, prev_matched_indices], dim=0) - - # step8. calculate losses. 
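-        # num_samples accumulates GT boxes plus disappeared tracks over the clip; forward()
-        # later divides every per-frame loss by this clip-wide count (see get_num_boxes).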
- self.num_samples += len(gt_instances_i) + num_disappear_track - self.sample_device = pred_logits_i.device - for loss in self.losses: - new_track_loss = self.get_loss(loss, - outputs=outputs_i, - gt_instances=[gt_instances_i], - indices=[(matched_indices[:, 0], matched_indices[:, 1])], - num_boxes=1) - self.losses_dict.update( - {'frame_{}_{}'.format(self._current_frame_idx, key): value for key, value in new_track_loss.items()}) - - if 'aux_outputs' in outputs: - for i, aux_outputs in enumerate(outputs['aux_outputs']): - unmatched_outputs_layer = { - 'pred_logits': aux_outputs['pred_logits'][0, unmatched_track_idxes].unsqueeze(0), - 'pred_boxes': aux_outputs['pred_boxes'][0, unmatched_track_idxes].unsqueeze(0), - } - new_matched_indices_layer = match_for_single_decoder_layer(unmatched_outputs_layer, self.matcher) - matched_indices_layer = torch.cat([new_matched_indices_layer, prev_matched_indices], dim=0) - for loss in self.losses: - if loss == 'masks': - # Intermediate masks losses are too costly to compute, we ignore them. - continue - l_dict = self.get_loss(loss, - aux_outputs, - gt_instances=[gt_instances_i], - indices=[(matched_indices_layer[:, 0], matched_indices_layer[:, 1])], - num_boxes=1, ) - self.losses_dict.update( - {'frame_{}_aux{}_{}'.format(self._current_frame_idx, i, key): value for key, value in - l_dict.items()}) - self._step() - return track_instances - - def forward(self, outputs, input_data: dict): - # losses of each frame are calculated during the model's forwarding and are outputted by the model as outputs['losses_dict]. - losses = outputs.pop("losses_dict") - num_samples = self.get_num_boxes(self.num_samples) - for loss_name, loss in losses.items(): - losses[loss_name] /= num_samples - return losses - - -class RuntimeTrackerBase(object): - def __init__(self, score_thresh=0.8, filter_score_thresh=0.6, miss_tolerance=5): - self.score_thresh = score_thresh - self.filter_score_thresh = filter_score_thresh - self.miss_tolerance = miss_tolerance - self.max_obj_id = 0 - - def clear(self): - self.max_obj_id = 0 - - def update(self, track_instances: Instances): - track_instances.disappear_time[track_instances.scores >= self.score_thresh] = 0 - for i in range(len(track_instances)): - if track_instances.obj_idxes[i] == -1 and track_instances.scores[i] >= self.score_thresh: - # print("track {} has score {}, assign obj_id {}".format(i, track_instances.scores[i], self.max_obj_id)) - track_instances.obj_idxes[i] = self.max_obj_id - self.max_obj_id += 1 - elif track_instances.obj_idxes[i] >= 0 and track_instances.scores[i] < self.filter_score_thresh: - track_instances.disappear_time[i] += 1 - if track_instances.disappear_time[i] >= self.miss_tolerance: - # Set the obj_id to -1. - # Then this track will be removed by TrackEmbeddingLayer. 
- track_instances.obj_idxes[i] = -1 - - -class TrackerPostProcess(nn.Module): - """ This module converts the model's output into the format expected by the coco api""" - def __init__(self): - super().__init__() - - @torch.no_grad() - def forward(self, track_instances: Instances, target_size) -> Instances: - """ Perform the computation - Parameters: - outputs: raw outputs of the model - target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch - For evaluation, this must be the original image size (before any data augmentation) - For visualization, this should be the image size after data augment, but before padding - """ - out_logits = track_instances.pred_logits - out_bbox = track_instances.pred_boxes - - prob = out_logits.sigmoid() - # prob = out_logits[...,:1].sigmoid() - scores, labels = prob.max(-1) - - # convert to [x0, y0, x1, y1] format - boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) - # and from relative [0, 1] to absolute [0, height] coordinates - img_h, img_w = target_size - scale_fct = torch.Tensor([img_w, img_h, img_w, img_h]).to(boxes) - boxes = boxes * scale_fct[None, :] - - track_instances.boxes = boxes - track_instances.scores = scores - track_instances.labels = labels -# track_instances.remove('pred_logits') -# track_instances.remove('pred_boxes') - return track_instances - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -class MOTR(nn.Module): - def __init__(self, backbone, transformer, num_classes, num_queries, num_feature_levels, criterion, track_embed, - aux_loss=True, with_box_refine=False, two_stage=False, memory_bank=None): - """ Initializes the model. - Parameters: - backbone: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - num_classes: number of object classes - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
- with_box_refine: iterative bounding box refinement - two_stage: two-stage Deformable DETR - """ - super().__init__() - self.num_queries = num_queries - self.track_embed = track_embed - self.transformer = transformer - hidden_dim = transformer.d_model - self.num_classes = num_classes - self.class_embed = nn.Linear(hidden_dim, num_classes) - self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) - self.num_feature_levels = num_feature_levels - if not two_stage: - self.query_embed = nn.Embedding(num_queries, hidden_dim * 2) - if num_feature_levels > 1: - num_backbone_outs = len(backbone.strides) - input_proj_list = [] - for _ in range(num_backbone_outs): - in_channels = backbone.num_channels[_] - input_proj_list.append(nn.Sequential( - nn.Conv2d(in_channels, hidden_dim, kernel_size=1), - nn.GroupNorm(32, hidden_dim), - )) - for _ in range(num_feature_levels - num_backbone_outs): - input_proj_list.append(nn.Sequential( - nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), - nn.GroupNorm(32, hidden_dim), - )) - in_channels = hidden_dim - self.input_proj = nn.ModuleList(input_proj_list) - else: - self.input_proj = nn.ModuleList([ - nn.Sequential( - nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1), - nn.GroupNorm(32, hidden_dim), - )]) - self.backbone = backbone - self.aux_loss = aux_loss - self.with_box_refine = with_box_refine - self.two_stage = two_stage - - prior_prob = 0.01 - bias_value = -math.log((1 - prior_prob) / prior_prob) - self.class_embed.bias.data = torch.ones(num_classes) * bias_value - nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) - nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) - for proj in self.input_proj: - nn.init.xavier_uniform_(proj[0].weight, gain=1) - nn.init.constant_(proj[0].bias, 0) - - # if two-stage, the last class_embed and bbox_embed is for region proposal generation - num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers - if with_box_refine: - self.class_embed = _get_clones(self.class_embed, num_pred) - self.bbox_embed = _get_clones(self.bbox_embed, num_pred) - nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) - # hack implementation for iterative bounding box refinement - self.transformer.decoder.bbox_embed = self.bbox_embed - else: - nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) - self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) - self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) - self.transformer.decoder.bbox_embed = None - if two_stage: - # hack implementation for two-stage - self.transformer.decoder.class_embed = self.class_embed - for box_embed in self.bbox_embed: - nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) - self.post_process = TrackerPostProcess() - self.track_base = RuntimeTrackerBase() - self.criterion = criterion - self.memory_bank = memory_bank - self.mem_bank_len = 0 if memory_bank is None else memory_bank.max_his_length - - def _generate_empty_tracks(self): - track_instances = Instances((1, 1)) - num_queries, dim = self.query_embed.weight.shape # (300, 512) - device = self.query_embed.weight.device - track_instances.ref_pts = self.transformer.reference_points(self.query_embed.weight[:, :dim // 2]) - track_instances.query_pos = self.query_embed.weight - track_instances.output_embedding = torch.zeros((num_queries, dim >> 1), device=device) - track_instances.obj_idxes = torch.full((len(track_instances),), -1, dtype=torch.long, 
device=device) - track_instances.matched_gt_idxes = torch.full((len(track_instances),), -1, dtype=torch.long, device=device) - track_instances.disappear_time = torch.zeros((len(track_instances), ), dtype=torch.long, device=device) - track_instances.iou = torch.zeros((len(track_instances),), dtype=torch.float, device=device) - track_instances.scores = torch.zeros((len(track_instances),), dtype=torch.float, device=device) - track_instances.track_scores = torch.zeros((len(track_instances),), dtype=torch.float, device=device) - track_instances.pred_boxes = torch.zeros((len(track_instances), 4), dtype=torch.float, device=device) - track_instances.pred_logits = torch.zeros((len(track_instances), self.num_classes), dtype=torch.float, device=device) - - mem_bank_len = self.mem_bank_len - track_instances.mem_bank = torch.zeros((len(track_instances), mem_bank_len, dim // 2), dtype=torch.float32, device=device) - track_instances.mem_padding_mask = torch.ones((len(track_instances), mem_bank_len), dtype=torch.bool, device=device) - track_instances.save_period = torch.zeros((len(track_instances), ), dtype=torch.float32, device=device) - - return track_instances.to(self.query_embed.weight.device) - - def clear(self): - self.track_base.clear() - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_coord): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - return [{'pred_logits': a, 'pred_boxes': b, } - for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] - - def _forward_single_image(self, samples, track_instances: Instances): - features, pos = self.backbone(samples) - src, mask = features[-1].decompose() - assert mask is not None - - srcs = [] - masks = [] - for l, feat in enumerate(features): - src, mask = feat.decompose() - srcs.append(self.input_proj[l](src)) - masks.append(mask) - assert mask is not None - - if self.num_feature_levels > len(srcs): - _len_srcs = len(srcs) - for l in range(_len_srcs, self.num_feature_levels): - if l == _len_srcs: - src = self.input_proj[l](features[-1].tensors) - else: - src = self.input_proj[l](srcs[-1]) - m = samples.mask - mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] - pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) - srcs.append(src) - masks.append(mask) - pos.append(pos_l) - - hs, init_reference, inter_references, enc_outputs_class, enc_outputs_coord_unact = self.transformer(srcs, masks, pos, track_instances.query_pos, ref_pts=track_instances.ref_pts) - - outputs_classes = [] - outputs_coords = [] - for lvl in range(hs.shape[0]): - if lvl == 0: - reference = init_reference - else: - reference = inter_references[lvl - 1] - reference = inverse_sigmoid(reference) - outputs_class = self.class_embed[lvl](hs[lvl]) - tmp = self.bbox_embed[lvl](hs[lvl]) - if reference.shape[-1] == 4: - tmp += reference - else: - assert reference.shape[-1] == 2 - tmp[..., :2] += reference - outputs_coord = tmp.sigmoid() - outputs_classes.append(outputs_class) - outputs_coords.append(outputs_coord) - outputs_class = torch.stack(outputs_classes) - outputs_coord = torch.stack(outputs_coords) - - ref_pts_all = torch.cat([init_reference[None], inter_references[:, :, :, :2]], dim=0) - out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1], 'ref_pts': ref_pts_all[5]} - if self.aux_loss: - out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) - - with torch.no_grad(): - 
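-            # Per-query confidences are computed without gradients: training takes the max
-            # class probability, inference takes the class-0 probability.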
if self.training: - track_scores = outputs_class[-1, 0, :].sigmoid().max(dim=-1).values - else: - track_scores = outputs_class[-1, 0, :, 0].sigmoid() - - track_instances.scores = track_scores - track_instances.pred_logits = outputs_class[-1, 0] - track_instances.pred_boxes = outputs_coord[-1, 0] - track_instances.output_embedding = hs[-1, 0] - if self.training: - # the track id will be assigned by the mather. - out['track_instances'] = track_instances - track_instances = self.criterion.match_for_single_frame(out) - else: - # each track will be assigned an unique global id by the track base. - self.track_base.update(track_instances) - if self.memory_bank is not None: - track_instances = self.memory_bank(track_instances) - # track_instances.track_scores = track_instances.track_scores[..., 0] - # track_instances.scores = track_instances.track_scores.sigmoid() - if self.training: - self.criterion.calc_loss_for_track_scores(track_instances) - tmp = {} - tmp['init_track_instances'] = self._generate_empty_tracks() - tmp['track_instances'] = track_instances - out_track_instances = self.track_embed(tmp) - out['track_instances'] = out_track_instances - return out - - @torch.no_grad() - def inference_single_image(self, img, ori_img_size, track_instances=None): - if not isinstance(img, NestedTensor): - img = nested_tensor_from_tensor_list(img) -# if track_instances is None: -# track_instances = self._generate_empty_tracks() - track_instances = self._generate_empty_tracks() - - res = self._forward_single_image(img, track_instances=track_instances) - - track_instances = res['track_instances'] - track_instances = self.post_process(track_instances, ori_img_size) - ret = {'track_instances': track_instances} - if 'ref_pts' in res: - ref_pts = res['ref_pts'] - img_h, img_w = ori_img_size - scale_fct = torch.Tensor([img_w, img_h]).to(ref_pts) - ref_pts = ref_pts * scale_fct[None] - ret['ref_pts'] = ref_pts - return ret - - def forward(self, data: dict): - if self.training: - self.criterion.initialize_for_single_clip(data['gt_instances']) - frames = data['imgs'] # list of Tensor. 
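-        # Process the clip frame by frame: the track_instances returned for frame t are passed
-        # into frame t+1, which is how object identities are carried through the sequence.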
- outputs = { - 'pred_logits': [], - 'pred_boxes': [], - } - - track_instances = self._generate_empty_tracks() - for frame in frames: - if not isinstance(frame, NestedTensor): - frame = nested_tensor_from_tensor_list([frame]) - frame_res = self._forward_single_image(frame, track_instances) - track_instances = frame_res['track_instances'] - outputs['pred_logits'].append(frame_res['pred_logits']) - outputs['pred_boxes'].append(frame_res['pred_boxes']) - - if not self.training: - outputs['track_instances'] = track_instances - else: - outputs['losses_dict'] = self.criterion.losses_dict - return outputs - - -def build(args): - dataset_to_num_classes = { - 'coco': 91, - 'coco_panoptic': 250, - 'e2e_mot': 1, - 'e2e_joint': 1, - 'e2e_static_mot': 1 - } - assert args.dataset_file in dataset_to_num_classes - num_classes = dataset_to_num_classes[args.dataset_file] - device = torch.device(args.device) - - backbone = build_backbone(args) - - transformer = build_deforamble_transformer(args) - d_model = transformer.d_model - hidden_dim = args.dim_feedforward - query_interaction_layer = build_query_interaction_layer(args, args.query_interaction_layer, d_model, hidden_dim, d_model*2) - - img_matcher = build_matcher(args) - num_frames_per_batch = max(args.sampler_lengths) - weight_dict = {} - for i in range(num_frames_per_batch): - weight_dict.update({"frame_{}_loss_ce".format(i): args.cls_loss_coef, - 'frame_{}_loss_bbox'.format(i): args.bbox_loss_coef, - 'frame_{}_loss_giou'.format(i): args.giou_loss_coef, - }) - - # TODO this is a hack - if args.aux_loss: - for i in range(num_frames_per_batch): - for j in range(args.dec_layers - 1): - weight_dict.update({"frame_{}_aux{}_loss_ce".format(i, j): args.cls_loss_coef, - 'frame_{}_aux{}_loss_bbox'.format(i, j): args.bbox_loss_coef, - 'frame_{}_aux{}_loss_giou'.format(i, j): args.giou_loss_coef, - }) - if args.memory_bank_type is not None and len(args.memory_bank_type) > 0: - memory_bank = build_memory_bank(args, d_model, hidden_dim, d_model * 2) - for i in range(num_frames_per_batch): - weight_dict.update({"frame_{}_track_loss_ce".format(i): args.cls_loss_coef}) - else: - memory_bank = None - losses = ['labels', 'boxes'] - criterion = ClipMatcher(num_classes, matcher=img_matcher, weight_dict=weight_dict, losses=losses) - criterion.to(device) - postprocessors = {} - model = MOTR( - backbone, - transformer, - track_embed=query_interaction_layer, - num_feature_levels=args.num_feature_levels, - num_classes=num_classes, - num_queries=args.num_queries, - aux_loss=args.aux_loss, - criterion=criterion, - with_box_refine=args.with_box_refine, - two_stage=args.two_stage, - memory_bank=memory_bank, - ) - return model, criterion, postprocessors diff --git a/spaces/EleutherAI/VQGAN_CLIP/CLIP/clip/model.py b/spaces/EleutherAI/VQGAN_CLIP/CLIP/clip/model.py deleted file mode 100644 index f2c95c481724270116998b90de64cee8ef58c94e..0000000000000000000000000000000000000000 --- a/spaces/EleutherAI/VQGAN_CLIP/CLIP/clip/model.py +++ /dev/null @@ -1,432 +0,0 @@ -from collections import OrderedDict -from typing import Tuple, Union - -import numpy as np -import torch -import torch.nn.functional as F -from torch import nn - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1): - super().__init__() - - # all conv layers have stride 1. 
an avgpool is performed after the second convolution when stride > 1 - self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - - self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - - self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() - - self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - - self.relu = nn.ReLU(inplace=True) - self.downsample = None - self.stride = stride - - if stride > 1 or inplanes != planes * Bottleneck.expansion: - # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 - self.downsample = nn.Sequential(OrderedDict([ - ("-1", nn.AvgPool2d(stride)), - ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), - ("1", nn.BatchNorm2d(planes * self.expansion)) - ])) - - def forward(self, x: torch.Tensor): - identity = x - - out = self.relu(self.bn1(self.conv1(x))) - out = self.relu(self.bn2(self.conv2(out))) - out = self.avgpool(out) - out = self.bn3(self.conv3(out)) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - return out - - -class AttentionPool2d(nn.Module): - def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): - super().__init__() - self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) - self.k_proj = nn.Linear(embed_dim, embed_dim) - self.q_proj = nn.Linear(embed_dim, embed_dim) - self.v_proj = nn.Linear(embed_dim, embed_dim) - self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) - self.num_heads = num_heads - - def forward(self, x): - x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC - x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC - x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC - x, _ = F.multi_head_attention_forward( - query=x, key=x, value=x, - embed_dim_to_check=x.shape[-1], - num_heads=self.num_heads, - q_proj_weight=self.q_proj.weight, - k_proj_weight=self.k_proj.weight, - v_proj_weight=self.v_proj.weight, - in_proj_weight=None, - in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), - bias_k=None, - bias_v=None, - add_zero_attn=False, - dropout_p=0, - out_proj_weight=self.c_proj.weight, - out_proj_bias=self.c_proj.bias, - use_separate_proj_weight=True, - training=self.training, - need_weights=False - ) - - return x[0] - - -class ModifiedResNet(nn.Module): - """ - A ResNet class that is similar to torchvision's but contains the following changes: - - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. 
- - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 - - The final pooling layer is a QKV attention instead of an average pool - """ - - def __init__(self, layers, output_dim, heads, input_resolution=224, width=64): - super().__init__() - self.output_dim = output_dim - self.input_resolution = input_resolution - - # the 3-layer stem - self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(width // 2) - self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(width // 2) - self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) - self.bn3 = nn.BatchNorm2d(width) - self.avgpool = nn.AvgPool2d(2) - self.relu = nn.ReLU(inplace=True) - - # residual layers - self._inplanes = width # this is a *mutable* variable used during construction - self.layer1 = self._make_layer(width, layers[0]) - self.layer2 = self._make_layer(width * 2, layers[1], stride=2) - self.layer3 = self._make_layer(width * 4, layers[2], stride=2) - self.layer4 = self._make_layer(width * 8, layers[3], stride=2) - - embed_dim = width * 32 # the ResNet feature dimension - self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim) - - def _make_layer(self, planes, blocks, stride=1): - layers = [Bottleneck(self._inplanes, planes, stride)] - - self._inplanes = planes * Bottleneck.expansion - for _ in range(1, blocks): - layers.append(Bottleneck(self._inplanes, planes)) - - return nn.Sequential(*layers) - - def forward(self, x): - def stem(x): - for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]: - x = self.relu(bn(conv(x))) - x = self.avgpool(x) - return x - - x = x.type(self.conv1.weight.dtype) - x = stem(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.attnpool(x) - - return x - - -class LayerNorm(nn.LayerNorm): - """Subclass torch's LayerNorm to handle fp16.""" - - def forward(self, x: torch.Tensor): - orig_type = x.dtype - ret = super().forward(x.type(torch.float32)) - return ret.type(orig_type) - - -class QuickGELU(nn.Module): - def forward(self, x: torch.Tensor): - return x * torch.sigmoid(1.702 * x) - - -class ResidualAttentionBlock(nn.Module): - def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None): - super().__init__() - - self.attn = nn.MultiheadAttention(d_model, n_head) - self.ln_1 = LayerNorm(d_model) - self.mlp = nn.Sequential(OrderedDict([ - ("c_fc", nn.Linear(d_model, d_model * 4)), - ("gelu", QuickGELU()), - ("c_proj", nn.Linear(d_model * 4, d_model)) - ])) - self.ln_2 = LayerNorm(d_model) - self.attn_mask = attn_mask - - def attention(self, x: torch.Tensor): - self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None - return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0] - - def forward(self, x: torch.Tensor): - x = x + self.attention(self.ln_1(x)) - x = x + self.mlp(self.ln_2(x)) - return x - - -class Transformer(nn.Module): - def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None): - super().__init__() - self.width = width - self.layers = layers - self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]) - - def forward(self, x: torch.Tensor): - return self.resblocks(x) - - -class VisionTransformer(nn.Module): - def __init__(self, 
input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int): - super().__init__() - self.input_resolution = input_resolution - self.output_dim = output_dim - self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) - - scale = width ** -0.5 - self.class_embedding = nn.Parameter(scale * torch.randn(width)) - self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width)) - self.ln_pre = LayerNorm(width) - - self.transformer = Transformer(width, layers, heads) - - self.ln_post = LayerNorm(width) - self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) - - def forward(self, x: torch.Tensor): - x = self.conv1(x) # shape = [*, width, grid, grid] - x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] - x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] - x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] - x = x + self.positional_embedding.to(x.dtype) - x = self.ln_pre(x) - - x = x.permute(1, 0, 2) # NLD -> LND - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - - x = self.ln_post(x[:, 0, :]) - - if self.proj is not None: - x = x @ self.proj - - return x - - -class CLIP(nn.Module): - def __init__(self, - embed_dim: int, - # vision - image_resolution: int, - vision_layers: Union[Tuple[int, int, int, int], int], - vision_width: int, - vision_patch_size: int, - # text - context_length: int, - vocab_size: int, - transformer_width: int, - transformer_heads: int, - transformer_layers: int - ): - super().__init__() - - self.context_length = context_length - - if isinstance(vision_layers, (tuple, list)): - vision_heads = vision_width * 32 // 64 - self.visual = ModifiedResNet( - layers=vision_layers, - output_dim=embed_dim, - heads=vision_heads, - input_resolution=image_resolution, - width=vision_width - ) - else: - vision_heads = vision_width // 64 - self.visual = VisionTransformer( - input_resolution=image_resolution, - patch_size=vision_patch_size, - width=vision_width, - layers=vision_layers, - heads=vision_heads, - output_dim=embed_dim - ) - - self.transformer = Transformer( - width=transformer_width, - layers=transformer_layers, - heads=transformer_heads, - attn_mask=self.build_attention_mask() - ) - - self.vocab_size = vocab_size - self.token_embedding = nn.Embedding(vocab_size, transformer_width) - self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width)) - self.ln_final = LayerNorm(transformer_width) - - self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim)) - self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) - - self.initialize_parameters() - - def initialize_parameters(self): - nn.init.normal_(self.token_embedding.weight, std=0.02) - nn.init.normal_(self.positional_embedding, std=0.01) - - if isinstance(self.visual, ModifiedResNet): - if self.visual.attnpool is not None: - std = self.visual.attnpool.c_proj.in_features ** -0.5 - nn.init.normal_(self.visual.attnpool.q_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.k_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.v_proj.weight, std=std) - nn.init.normal_(self.visual.attnpool.c_proj.weight, std=std) - - for resnet_block in [self.visual.layer1, self.visual.layer2, self.visual.layer3, self.visual.layer4]: - for name, param in 
resnet_block.named_parameters(): - if name.endswith("bn3.weight"): - nn.init.zeros_(param) - - proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) - attn_std = self.transformer.width ** -0.5 - fc_std = (2 * self.transformer.width) ** -0.5 - for block in self.transformer.resblocks: - nn.init.normal_(block.attn.in_proj_weight, std=attn_std) - nn.init.normal_(block.attn.out_proj.weight, std=proj_std) - nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) - nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) - - if self.text_projection is not None: - nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) - - def build_attention_mask(self): - # lazily create causal attention mask, with full attention between the vision tokens - # pytorch uses additive attention mask; fill with -inf - mask = torch.empty(self.context_length, self.context_length) - mask.fill_(float("-inf")) - mask.triu_(1) # zero out the lower diagonal - return mask - - @property - def dtype(self): - return self.visual.conv1.weight.dtype - - def encode_image(self, image): - return self.visual(image.type(self.dtype)) - - def encode_text(self, text): - x = self.token_embedding(text).type(self.dtype) # [batch_size, n_ctx, d_model] - - x = x + self.positional_embedding.type(self.dtype) - x = x.permute(1, 0, 2) # NLD -> LND - x = self.transformer(x) - x = x.permute(1, 0, 2) # LND -> NLD - x = self.ln_final(x).type(self.dtype) - - # x.shape = [batch_size, n_ctx, transformer.width] - # take features from the eot embedding (eot_token is the highest number in each sequence) - x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection - - return x - - def forward(self, image, text): - image_features = self.encode_image(image) - text_features = self.encode_text(text) - - # normalized features - image_features = image_features / image_features.norm(dim=-1, keepdim=True) - text_features = text_features / text_features.norm(dim=-1, keepdim=True) - - # cosine similarity as logits - logit_scale = self.logit_scale.exp() - logits_per_image = logit_scale * image_features @ text_features.t() - logits_per_text = logit_scale * text_features @ image_features.t() - - # shape = [global_batch_size, global_batch_size] - return logits_per_image, logits_per_text - - -def convert_weights(model: nn.Module): - """Convert applicable model parameters to fp16""" - - def _convert_weights_to_fp16(l): - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): - l.weight.data = l.weight.data.half() - if l.bias is not None: - l.bias.data = l.bias.data.half() - - if isinstance(l, nn.MultiheadAttention): - for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: - tensor = getattr(l, attr) - if tensor is not None: - tensor.data = tensor.data.half() - - for name in ["text_projection", "proj"]: - if hasattr(l, name): - attr = getattr(l, name) - if attr is not None: - attr.data = attr.data.half() - - model.apply(_convert_weights_to_fp16) - - -def build_model(state_dict: dict): - vit = "visual.proj" in state_dict - - if vit: - vision_width = state_dict["visual.conv1.weight"].shape[0] - vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) - vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] - grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) - image_resolution = vision_patch_size * grid_size - else: - counts: list = [len(set(k.split(".")[2] for k in 
state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] - vision_layers = tuple(counts) - vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] - output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) - vision_patch_size = None - assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] - image_resolution = output_width * 32 - - embed_dim = state_dict["text_projection"].shape[1] - context_length = state_dict["positional_embedding"].shape[0] - vocab_size = state_dict["token_embedding.weight"].shape[0] - transformer_width = state_dict["ln_final.weight"].shape[0] - transformer_heads = transformer_width // 64 - transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks"))) - - model = CLIP( - embed_dim, - image_resolution, vision_layers, vision_width, vision_patch_size, - context_length, vocab_size, transformer_width, transformer_heads, transformer_layers - ) - - for key in ["input_resolution", "context_length", "vocab_size"]: - if key in state_dict: - del state_dict[key] - - convert_weights(model) - model.load_state_dict(state_dict) - return model.eval() diff --git a/spaces/Emmawang/audio_summarizer/README.md b/spaces/Emmawang/audio_summarizer/README.md deleted file mode 100644 index ee4c5041b1687984f17df318710daa9509007617..0000000000000000000000000000000000000000 --- a/spaces/Emmawang/audio_summarizer/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Audio Summarizer -emoji: 📉 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.17.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers.py b/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers.py deleted file mode 100644 index 4fc1b5cb85a3327f60cbb9f5deffbeeaaac516ad..0000000000000000000000000000000000000000 --- a/spaces/EronSamez/RVC_HFmeu/infer/lib/uvr5_pack/lib_v5/layers.py +++ /dev/null @@ -1,118 +0,0 @@ -import torch -import torch.nn.functional as F -from torch import nn - -from . 
import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 5, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/ExpertPrompters/AskIDF/__init__.py b/spaces/ExpertPrompters/AskIDF/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" "b/spaces/Fengbinbin/gpt-academic/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" deleted file mode 100644 index 
ffbb05599ef09c9de25334ebeca2eef8022b9aaf..0000000000000000000000000000000000000000 --- "a/spaces/Fengbinbin/gpt-academic/crazy_functions/\346\211\271\351\207\217\346\200\273\347\273\223PDF\346\226\207\346\241\243pdfminer.py" +++ /dev/null @@ -1,160 +0,0 @@ -from toolbox import update_ui -from toolbox import CatchException, report_execption, write_results_to_file -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive - -fast_debug = False - -def readPdf(pdfPath): - """ - 读取pdf文件,返回文本内容 - """ - import pdfminer - from pdfminer.pdfparser import PDFParser - from pdfminer.pdfdocument import PDFDocument - from pdfminer.pdfpage import PDFPage, PDFTextExtractionNotAllowed - from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter - from pdfminer.pdfdevice import PDFDevice - from pdfminer.layout import LAParams - from pdfminer.converter import PDFPageAggregator - - fp = open(pdfPath, 'rb') - - # Create a PDF parser object associated with the file object - parser = PDFParser(fp) - - # Create a PDF document object that stores the document structure. - # Password for initialization as 2nd parameter - document = PDFDocument(parser) - # Check if the document allows text extraction. If not, abort. - if not document.is_extractable: - raise PDFTextExtractionNotAllowed - - # Create a PDF resource manager object that stores shared resources. - rsrcmgr = PDFResourceManager() - - # Create a PDF device object. - # device = PDFDevice(rsrcmgr) - - # BEGIN LAYOUT ANALYSIS. - # Set parameters for analysis. - laparams = LAParams( - char_margin=10.0, - line_margin=0.2, - boxes_flow=0.2, - all_texts=False, - ) - # Create a PDF page aggregator object. - device = PDFPageAggregator(rsrcmgr, laparams=laparams) - # Create a PDF interpreter object. - interpreter = PDFPageInterpreter(rsrcmgr, device) - - # loop over all pages in the document - outTextList = [] - for page in PDFPage.create_pages(document): - # read the page into a layout object - interpreter.process_page(page) - layout = device.get_result() - for obj in layout._objs: - if isinstance(obj, pdfminer.layout.LTTextBoxHorizontal): - # print(obj.get_text()) - outTextList.append(obj.get_text()) - - return outTextList - - -def 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt): - import time, glob, os - from bs4 import BeautifulSoup - print('begin analysis on:', file_manifest) - for index, fp in enumerate(file_manifest): - if ".tex" in fp: - with open(fp, 'r', encoding='utf-8', errors='replace') as f: - file_content = f.read() - if ".pdf" in fp.lower(): - file_content = readPdf(fp) - file_content = BeautifulSoup(''.join(file_content), features="lxml").body.text.encode('gbk', 'ignore').decode('gbk') - - prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else "" - i_say = prefix + f'请对下面的文章片段用中文做一个概述,文件名是{os.path.relpath(fp, project_folder)},文章内容是 ```{file_content}```' - i_say_show_user = prefix + f'[{index}/{len(file_manifest)}] 请对下面的文章片段做一个概述: {os.path.abspath(fp)}' - chatbot.append((i_say_show_user, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say_show_user, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=[], - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say_show_user, gpt_say) - history.append(i_say_show_user); history.append(gpt_say) - yield from 
update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - if not fast_debug: time.sleep(2) - - all_file = ', '.join([os.path.relpath(fp, project_folder) for index, fp in enumerate(file_manifest)]) - i_say = f'根据以上你自己的分析,对全文进行概括,用学术性语言写一段中文摘要,然后再写一段英文摘要(包括{all_file})。' - chatbot.append((i_say, "[Local Message] waiting gpt response.")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - if not fast_debug: - msg = '正常' - # ** gpt request ** - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, - inputs_show_user=i_say, - llm_kwargs=llm_kwargs, - chatbot=chatbot, - history=history, - sys_prompt="总结文章。" - ) # 带超时倒计时 - chatbot[-1] = (i_say, gpt_say) - history.append(i_say); history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - res = write_results_to_file(history) - chatbot.append(("完成了吗?", res)) - yield from update_ui(chatbot=chatbot, history=history, msg=msg) # 刷新界面 - - - -@CatchException -def 批量总结PDF文档pdfminer(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - history = [] # 清空历史,以免输入溢出 - import glob, os - - # 基本信息:功能、贡献者 - chatbot.append([ - "函数插件功能?", - "批量总结PDF文档,此版本使用pdfminer插件,带token约简功能。函数插件贡献者: Euclid-Jie。"]) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - - # 尝试导入依赖,如果缺少依赖,则给出安装建议 - try: - import pdfminer, bs4 - except: - report_execption(chatbot, history, - a = f"解析项目: {txt}", - b = f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade pdfminer beautifulsoup4```。") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - if os.path.exists(txt): - project_folder = txt - else: - if txt == "": txt = '空空如也的输入栏' - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到本地项目或无权访问: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.tex', recursive=True)] + \ - [f for f in glob.glob(f'{project_folder}/**/*.pdf', recursive=True)] # + \ - # [f for f in glob.glob(f'{project_folder}/**/*.cpp', recursive=True)] + \ - # [f for f in glob.glob(f'{project_folder}/**/*.c', recursive=True)] - if len(file_manifest) == 0: - report_execption(chatbot, history, a = f"解析项目: {txt}", b = f"找不到任何.tex或pdf文件: {txt}") - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 - return - yield from 解析Paper(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt) - diff --git a/spaces/Fouzia/Harvard-USPTO_Patentability-Score/app.py b/spaces/Fouzia/Harvard-USPTO_Patentability-Score/app.py deleted file mode 100644 index 43a83bf2086aaa3f3bf944d0967bfd7c65db446c..0000000000000000000000000000000000000000 --- a/spaces/Fouzia/Harvard-USPTO_Patentability-Score/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import streamlit as st -from datasets import load_dataset -from transformers import pipeline -import pandas as pd -import torch -from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline -from datasets import load_dataset - -dataset_dict = load_dataset('HUPD/hupd', - name='sample', - data_files="https://huggingface.co/datasets/HUPD/hupd/blob/main/hupd_metadata_2022-02-22.feather", - icpr_label=None, - train_filing_start_date='2016-01-01', - train_filing_end_date='2016-01-31', - val_filing_start_date='2017-01-22', - val_filing_end_date='2017-01-31', -) - -df = pd.DataFrame.from_dict(dataset_dict["train"]) -df = pd.DataFrame(df,columns =['patent_number','decision', 'abstract', 'claims','filing_date']) 
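-# Keep only the fields the app uses: application number, decision, abstract, claims, filing date.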
-#st.dataframe(df) -PAN = df['patent_number'].drop_duplicates() - -st.title('Harvard USPTO Patentability Score') -#make_choice = st.sidebar.selectbox('Select the Patent Application Number:', PAN) - -#####NEW -with st.form("patent-form"): - make_choice = st.selectbox('Select the Patent Application Number:', PAN) - submitted = st.form_submit_button(label='submit') - - if submitted: - #st.write("Outside the form") - model_name = "distilbert-base-uncased-finetuned-sst-2-english" - model = AutoModelForSequenceClassification.from_pretrained(model_name) - tokenizer = AutoTokenizer.from_pretrained(model_name) - classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) - - #abstract = df['abstract'].loc[df['patent_number'] == make_choice] - - decision = df['decision'].loc[df['patent_number'] == make_choice] - #X_train = abstract.to_string() - X_train = decision.to_string() - #X_train = abstract.values.tolist() - results = classifier(X_train, truncation=True) - - for result in results: - print(result) - score = result['score'] - print(score) - st.write("The Patentability Score is:", score) - - -######NEW - -pd.options.display.max_colwidth = 100000 - -abstract = df["abstract"].loc[df["patent_number"] == make_choice] -st.subheader(':red[Patent Application]') -st.subheader(':red[Abstract:]') -st.info(abstract) - - -claims = df["claims"].loc[df["patent_number"] == make_choice] -st.subheader(':red[Claim:]') -st.info(claims) - diff --git a/spaces/FridaZuley/RVC_HFKawaii/infer/modules/uvr5/modules.py b/spaces/FridaZuley/RVC_HFKawaii/infer/modules/uvr5/modules.py deleted file mode 100644 index f63ac6a794100cc95da21dcba78b23377a1f133d..0000000000000000000000000000000000000000 --- a/spaces/FridaZuley/RVC_HFKawaii/infer/modules/uvr5/modules.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import traceback -import logging - -logger = logging.getLogger(__name__) - -import ffmpeg -import torch - -from configs.config import Config -from infer.modules.uvr5.mdxnet import MDXNetDereverb -from infer.modules.uvr5.preprocess import AudioPre, AudioPreDeEcho - -config = Config() - - -def uvr(model_name, inp_root, save_root_vocal, paths, save_root_ins, agg, format0): - infos = [] - try: - inp_root = inp_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - save_root_vocal = ( - save_root_vocal.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) - save_root_ins = ( - save_root_ins.strip(" ").strip('"').strip("\n").strip('"').strip(" ") - ) - if model_name == "onnx_dereverb_By_FoxJoy": - pre_fun = MDXNetDereverb(15, config.device) - else: - func = AudioPre if "DeEcho" not in model_name else AudioPreDeEcho - pre_fun = func( - agg=int(agg), - model_path=os.path.join( - os.getenv("weight_uvr5_root"), model_name + ".pth" - ), - device=config.device, - is_half=config.is_half, - ) - if inp_root != "": - paths = [os.path.join(inp_root, name) for name in os.listdir(inp_root)] - else: - paths = [path.name for path in paths] - for path in paths: - inp_path = os.path.join(inp_root, path) - need_reformat = 1 - done = 0 - try: - info = ffmpeg.probe(inp_path, cmd="ffprobe") - if ( - info["streams"][0]["channels"] == 2 - and info["streams"][0]["sample_rate"] == "44100" - ): - need_reformat = 0 - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 - ) - done = 1 - except: - need_reformat = 1 - traceback.print_exc() - if need_reformat == 1: - tmp_path = "%s/%s.reformatted.wav" % ( - os.path.join(os.environ["TEMP"]), - os.path.basename(inp_path), - ) - os.system( - "ffmpeg -i %s -vn 
-acodec pcm_s16le -ac 2 -ar 44100 %s -y" - % (inp_path, tmp_path) - ) - inp_path = tmp_path - try: - if done == 0: - pre_fun.path_audio( - inp_path, save_root_ins, save_root_vocal, format0 - ) - infos.append("%s->Success" % (os.path.basename(inp_path))) - yield "\n".join(infos) - except: - try: - if done == 0: - pre_fun._path_audio_( - inp_path, save_root_ins, save_root_vocal, format0 - ) - infos.append("%s->Success" % (os.path.basename(inp_path))) - yield "\n".join(infos) - except: - infos.append( - "%s->%s" % (os.path.basename(inp_path), traceback.format_exc()) - ) - yield "\n".join(infos) - except: - infos.append(traceback.format_exc()) - yield "\n".join(infos) - finally: - try: - if model_name == "onnx_dereverb_By_FoxJoy": - del pre_fun.pred.model - del pre_fun.pred.model_ - else: - del pre_fun.model - del pre_fun - except: - traceback.print_exc() - if torch.cuda.is_available(): - torch.cuda.empty_cache() - logger.info("Executed torch.cuda.empty_cache()") - yield "\n".join(infos) diff --git a/spaces/Friklogff/xx-xhai/app.py b/spaces/Friklogff/xx-xhai/app.py deleted file mode 100644 index 1fa15d802f477119aa1e3a7515c7af43b272aba9..0000000000000000000000000000000000000000 --- a/spaces/Friklogff/xx-xhai/app.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- coding = utf-8 -*- -""" -# @Time : 2023/7/31 19:33 -# @Author : CSDN:FriKlogff -# @File : PublicGui.py -# @Software: PyCharm -# @Function: 请输入项目功能 -""" -import os -os.system("""python -m pip install -i https://mirrors.aliyun.com/pypi/simple/ --upgrade pip setuptools -pip install -i https://mirrors.aliyun.com/pypi/simple/ websocket -pip install -i https://mirrors.aliyun.com/pypi/simple/ websocket-client -pip install -i https://mirrors.aliyun.com/pypi/simple/ gradio -pip install -i https://mirrors.aliyun.com/pypi/simple/ sxtwl -""") -from PublicFunctions import * -import gradio as gr - -# 定义星座选项 -signs = ["白羊座", "金牛座", "双子座", "巨蟹座", "狮子座", "处女座", - "天秤座", "天蝎座", "射手座", "摩羯座", "水瓶座", "双鱼座"] -cards_num = [1, 2, 3, 4, 5] -months = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] -days = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] -hours = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] -# 使用 Gradio 的模块化组件,构建包含五个选项卡的界面 -with gr.Blocks() as demo: - with gr.Tab("星火api配置"): - xh_input = [ - gr.components.Textbox(label="appid"), - gr.components.Textbox(label="api_secret"), - gr.components.Textbox(label="api_key"), - gr.components.Textbox(label="gpt_url") - ] - xh_output = gr.components.Textbox(label="点击提交返回配置情况,请自行配置星火大模型API再使用后续功能") - xh_button = gr.components.Button("提交") - xh_button.click(xh_api, inputs=xh_input, outputs=xh_output - ) - with gr.Tab("AI星座解读"): - horoscope_input = [gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - gr.components.Dropdown(signs, label="选择您的星座") - ] - horoscope_output = gr.components.Textbox(label="星座解读(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - horoscope_button = gr.components.Button("提交") - horoscope_button.click(horoscope_reading, inputs=horoscope_input, outputs=horoscope_output - ) - - with gr.Tab("AI塔罗牌解读"): - tarot_input = [gr.components.Textbox(label="你想问的问题"), - gr.components.Dropdown(cards_num, label="你想抽几张牌"), - ] - tarot_output = gr.components.Textbox(label="塔罗牌解析(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") 
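-        # Every tab is wired the same way: Button.click(handler, inputs, outputs); the handlers
-        # (tarot_reading, horoscope_reading, ...) come from the star import of PublicFunctions above.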
- upload_button = gr.components.Button("抽取") - upload_button.click(tarot_reading, inputs=tarot_input, outputs=tarot_output) - with gr.Tab("AI八字合婚分析"): - marriage_input = [gr.components.Textbox(label="新郎姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - - gr.components.Textbox(label="新娘姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - ] - marriage_analysis_output = gr.components.Textbox(label="婚姻分析(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - analyze_button = gr.components.Button("马上测算") - analyze_button.click(marriage_bazi_analysis, - inputs=marriage_input, - outputs=marriage_analysis_output) - with gr.Tab("AI兔年运程预测"): - birth_year_input = [gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - ] - prediction_output = gr.components.Textbox(label="运程预测(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - predict_button = gr.components.Button("预测运势") - predict_button.click(rabbit_year_prediction, - inputs=birth_year_input, - outputs=prediction_output) - with gr.Tab("AI公司命理解析"): - company_name_input = [gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - gr.components.Textbox(label="公司名称"), - gr.components.Textbox(label="所属行业")] - name_analysis_output = gr.components.Textbox(label="命理分析(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - analyze_button = gr.components.Button("分析") - analyze_button.click(company_name_analysis, - inputs=company_name_input, - outputs=name_analysis_output) - with gr.Tab("AI姓名配对"): - name1_input = [gr.components.Textbox(label="姓名1"), - gr.components.Textbox(label="姓名2"), - ] - matching_output = gr.components.Textbox(label="配对结果(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - match_button = gr.components.Button("分析配对") - match_button.click(name_compatibility, - inputs=name1_input, - outputs=matching_output) - - with gr.Tab("AI月老姻缘"): - yue_lau_input = [gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - ] - affinity_output = gr.components.Textbox(label="姻缘分析(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - analyze_button = gr.components.Button("分析姻缘") - analyze_button.click(yue_lau_affinity, - inputs=yue_lau_input, - outputs=affinity_output) - - with gr.Tab("AI八字精批"): - bazi_input = [gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - ] - analysis_output = gr.components.Textbox(label="精批结果(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - batch_button = gr.components.Button("八字精批") - batch_button.click(bazi_analysis, - inputs=bazi_input, - outputs=analysis_output) - - with gr.Tab("AI姓名分析"): - name_input = 
[gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名")] - name_output = gr.components.Textbox(label="命理分析(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - analyze_button = gr.components.Button("分析姓名") - analyze_button.click(name_analysis, - inputs=name_input, - outputs=name_output) - with gr.Tab("AI紫薇斗数解析"): - zhiwei_input = [gr.components.Radio(choices=["男", "女"], label="性别"), - gr.components.Textbox(label="姓名"), - gr.components.Number(label="出生年份"), - gr.components.Dropdown(months, label="出生月份"), - gr.components.Dropdown(days, label="出生日"), - gr.components.Dropdown(hours, label="出生时辰"), - ] - zhiwei_output = gr.components.Textbox(label="紫薇解读(由于我们的解析是由AI生成的,结果仅供娱乐,如果不成功请多试几次)") - zhiwei_button = gr.components.Button("解读运势") - zhiwei_button.click(zhiwei_analysis, - inputs=zhiwei_input, - outputs=zhiwei_output) -demo.launch() -# demo.launch(share=True) diff --git a/spaces/GFXY/stabilityai-stable-diffusion-2-1-base/README.md b/spaces/GFXY/stabilityai-stable-diffusion-2-1-base/README.md deleted file mode 100644 index e7b76ff3eba58f890806a1bc210f707bc51d8230..0000000000000000000000000000000000000000 --- a/spaces/GFXY/stabilityai-stable-diffusion-2-1-base/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stabilityai Stable Diffusion 2 1 Base -emoji: 🏆 -colorFrom: pink -colorTo: yellow -sdk: gradio -sdk_version: 3.34.0 -app_file: app.py -pinned: false -license: agpl-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/GIZ/embedding_visualisation/README.md b/spaces/GIZ/embedding_visualisation/README.md deleted file mode 100644 index fd7990229da29e116048e587c169e7fc9348167d..0000000000000000000000000000000000000000 --- a/spaces/GIZ/embedding_visualisation/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Embedding Visualisation -emoji: 🐢 -colorFrom: purple -colorTo: green -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -python_version: 3.7.15 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_cylinder_structure.py b/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_cylinder_structure.py deleted file mode 100644 index 8e454f45f88dd8b7d331da4d047aec971efb63a1..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/build_cylinder_structure.py +++ /dev/null @@ -1,67 +0,0 @@ -import numpy as np -import os -import pybullet as p -import random -from cliport.tasks import primitives -from cliport.tasks.grippers import Spatula -from cliport.tasks.task import Task -from cliport.utils import utils -import numpy as np -from cliport.tasks.task import Task -from cliport.utils import utils - -class BuildCylinderStructure(Task): - """Construct a structure using four colored cylinders (red, blue, green, yellow) on a square base.""" - - def __init__(self): - super().__init__() - self.max_steps = 5 - self.lang_template = "construct a structure using four colored cylinders on a square base" - self.task_completed_desc = "done building the cylinder structure." - self.additional_reset() - - def reset(self, env): - super().reset(env) - - # Add square base. - # x, y, z dimensions for the asset size - base_size = (0.15, 0.15, 0.005) - base_urdf = 'square/square-template.urdf' - base_pose = self.get_random_pose(env, base_size) - env.add_object(base_urdf, base_pose, category='fixed') - - # Cylinder colors. 
- colors = [ - utils.COLORS['red'], utils.COLORS['blue'], utils.COLORS['green'], utils.COLORS['yellow'] - ] - - # Add cylinders. - # x, y, z dimensions for the asset size - cylinder_size = (0.04, 0.04, 0.08) - cylinder_urdf = 'cylinder/cylinder-template.urdf' - - objs = [] - for i in range(4): - cylinder_pose = self.get_random_pose(env, cylinder_size) - cylinder_id = env.add_object(cylinder_urdf, cylinder_pose, color=colors[i]) - objs.append(cylinder_id) - - # Associate placement locations for goals. - place_pos = [(0, -0.05, 0.04), (0, 0.05, 0.04), - (0, 0.05, 0.12), (0, -0.05, 0.12)] - targs = [(utils.apply(base_pose, i), base_pose[1]) for i in place_pos] - - # Goal: red and blue cylinders are placed side by side on the base. - self.add_goal(objs=objs[:2], matches=np.ones((2, 2)), targ_poses=targs[:2], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 2, symmetries=[np.pi/2]*2, - language_goal="place the red and blue cylinders side by side on the base") - - # Goal: green cylinder is placed on top of the blue cylinder. - self.add_goal(objs=[objs[2]], matches=np.ones((1, 1)), targ_poses=[targs[2]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 2, symmetries=[np.pi/2], - language_goal="place the green cylinder on top of the blue cylinder") - - # Goal: yellow cylinder is placed on top of the red cylinder. - self.add_goal(objs=[objs[3]], matches=np.ones((1, 1)), targ_poses=[targs[3]], replace=False, - rotations=True, metric='pose', params=None, step_max_reward=1 / 2, symmetries=[np.pi/2], - language_goal="place the yellow cylinder on top of the red cylinder") \ No newline at end of file diff --git a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train3_gptmixcliport3_small.sh b/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train3_gptmixcliport3_small.sh deleted file mode 100644 index 8a6d0311fb48c598836814349a207af362f608a9..0000000000000000000000000000000000000000 --- a/spaces/Gen-Sim/Gen-Sim/scripts/metascripts/train3_gptmixcliport3_small.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -#SBATCH -c 10 -#SBATCH -n 1 -#SBATCH -o logs/%j.out -#SBATCH --exclusive -STEPS=${1-'50000'} -now=$(date "+%Y-%m-%d_%H-%M-%S") - - -sh scripts/traintest_scripts/train_test_multi_task_goal.sh data \ - "[place-red-in-green,stack-block-pyramid,put-block-in-bowl,color-coordinated-sphere-insertion,rainbow-stack,vertical-insertion-blocks]" \ - "[place-red-in-green,stack-block-pyramid,put-block-in-bowl]" \ - gpt3_mixcliport3_${now} \ No newline at end of file diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/docs/model_zoo.md b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/docs/model_zoo.md deleted file mode 100644 index 132cc514bac6b447addac8485e0622a834d34474..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/RealESRGANv030/docs/model_zoo.md +++ /dev/null @@ -1,49 +0,0 @@ -# :european_castle: Model Zoo - -- [For General Images](#for-general-images) -- [For Anime Images](#for-anime-images) -- [For Anime Videos](#for-anime-videos) - ---- - -## For General Images - -| Models | Scale | Description | -| ------------------------------------------------------------------------------------------------------------------------------- | :---- | :------------------------------------------- | -| [RealESRGAN_x4plus](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth) | X4 | X4 model for general images | -| 
[RealESRGAN_x2plus](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth) | X2 | X2 model for general images | -| [RealESRNet_x4plus](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth) | X4 | X4 model with MSE loss (over-smooth effects) | -| [official ESRGAN_x4](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/ESRGAN_SRx4_DF2KOST_official-ff704c30.pth) | X4 | official ESRGAN model | -| [realesr-general-x4v3](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth) | X4 (can also be used for X1, X2, X3) | A tiny small model (consume much fewer GPU memory and time); not too strong deblur and denoise capacity | - -The following models are **discriminators**, which are usually used for fine-tuning. - -| Models | Corresponding model | -| ---------------------------------------------------------------------------------------------------------------------- | :------------------ | -| [RealESRGAN_x4plus_netD](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x4plus_netD.pth) | RealESRGAN_x4plus | -| [RealESRGAN_x2plus_netD](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.3/RealESRGAN_x2plus_netD.pth) | RealESRGAN_x2plus | - -## For Anime Images / Illustrations - -| Models | Scale | Description | -| ------------------------------------------------------------------------------------------------------------------------------ | :---- | :---------------------------------------------------------- | -| [RealESRGAN_x4plus_anime_6B](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth) | X4 | Optimized for anime images; 6 RRDB blocks (smaller network) | - -The following models are **discriminators**, which are usually used for fine-tuning. - -| Models | Corresponding model | -| ---------------------------------------------------------------------------------------------------------------------------------------- | :------------------------- | -| [RealESRGAN_x4plus_anime_6B_netD](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B_netD.pth) | RealESRGAN_x4plus_anime_6B | - -## For Animation Videos - -| Models | Scale | Description | -| ---------------------------------------------------------------------------------------------------------------------------------- | :---- | :----------------------------- | -| [realesr-animevideov3](https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth) | X41 | Anime video model with XS size | - -Note:
      -1 This model can also be used for X1, X2, X3. - -The following models are **discriminators**, which are usually used for fine-tuning. - -TODO diff --git a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/Dataloader.py b/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/Dataloader.py deleted file mode 100644 index 05a6d191de076299fa6bc9a571572f3cc05d279c..0000000000000000000000000000000000000000 --- a/spaces/GolDNenex/Super-Resolution-Anime-Diffusion/Waifu2x/Dataloader.py +++ /dev/null @@ -1,231 +0,0 @@ -import glob -import io -import numpy as np -import re -import os -import random -from io import BytesIO -from uuid import uuid4 -import sqlite3 -import h5py -import torch -from PIL import Image -from torch.utils.data import Dataset -from torchvision.transforms import RandomCrop -from torchvision.transforms.functional import to_tensor - - -class ImageH5Data(Dataset): - def __init__(self, h5py_file, folder_name): - self.data = h5py.File(h5py_file, "r")[folder_name] - self.data_hr = self.data["train_hr"] - self.data_lr = self.data["train_lr"] - self.len_imgs = len(self.data_hr) - self.h5py_file = h5py_file - self.folder_name = folder_name - - def __len__(self): - # with h5py.File(self.h5py_file, 'r') as f: - # return len(f[self.folder_name]['train_lr']) - return self.len_imgs - - def __getitem__(self, index): - # with h5py.File(self.h5py_file, 'r') as f: - # data_lr = f[self.folder_name]['train_lr'][index] - # data_hr = f[self.folder_name]['train_lr'][index] - # - # return data_lr, data_hr - return self.data_lr[index], self.data_hr[index] - - -class ImageData(Dataset): - def __init__( - self, - img_folder, - patch_size=96, - shrink_size=2, - noise_level=1, - down_sample_method=None, - color_mod="RGB", - dummy_len=None, - ): - - self.img_folder = img_folder - all_img = glob.glob(self.img_folder + "/**", recursive=True) - self.img = list( - filter( - lambda x: x.endswith("png") or x.endswith("jpg") or x.endswith("jpeg"), - all_img, - ) - ) - self.total_img = len(self.img) - self.dummy_len = dummy_len if dummy_len is not None else self.total_img - self.random_cropper = RandomCrop(size=patch_size) - self.color_mod = color_mod - self.img_augmenter = ImageAugment(shrink_size, noise_level, down_sample_method) - - def get_img_patches(self, img_file): - img_pil = Image.open(img_file).convert("RGB") - img_patch = self.random_cropper(img_pil) - lr_hr_patches = self.img_augmenter.process(img_patch) - return lr_hr_patches - - def __len__(self): - return self.dummy_len # len(self.img) - - def __getitem__(self, index): - idx = random.choice(range(0, self.total_img)) - img = self.img[idx] - patch = self.get_img_patches(img) - if self.color_mod == "RGB": - lr_img = patch[0].convert("RGB") - hr_img = patch[1].convert("RGB") - elif self.color_mod == "YCbCr": - lr_img, _, _ = patch[0].convert("YCbCr").split() - hr_img, _, _ = patch[1].convert("YCbCr").split() - else: - raise KeyError("Either RGB or YCbCr") - return to_tensor(lr_img), to_tensor(hr_img) - - -class Image2Sqlite(ImageData): - def __getitem__(self, item): - img = self.img[item] - lr_hr_patch = self.get_img_patches(img) - if self.color_mod == "RGB": - lr_img = lr_hr_patch[0].convert("RGB") - hr_img = lr_hr_patch[1].convert("RGB") - elif self.color_mod == "YCbCr": - lr_img, _, _ = lr_hr_patch[0].convert("YCbCr").split() - hr_img, _, _ = lr_hr_patch[1].convert("YCbCr").split() - else: - raise KeyError("Either RGB or YCbCr") - lr_byte = self.convert_to_bytevalue(lr_img) - hr_byte = self.convert_to_bytevalue(hr_img) - return 
[lr_byte, hr_byte] - - @staticmethod - def convert_to_bytevalue(pil_img): - img_byte = io.BytesIO() - pil_img.save(img_byte, format="png") - return img_byte.getvalue() - - -class ImageDBData(Dataset): - def __init__( - self, - db_file, - db_table="images", - lr_col="lr_img", - hr_col="hr_img", - max_images=None, - ): - self.db_file = db_file - self.db_table = db_table - self.lr_col = lr_col - self.hr_col = hr_col - self.total_images = self.get_num_rows(max_images) - # self.lr_hr_images = self.get_all_images() - - def __len__(self): - return self.total_images - - # def get_all_images(self): - # with sqlite3.connect(self.db_file) as conn: - # cursor = conn.cursor() - # cursor.execute(f"SELECT * FROM {self.db_table} LIMIT {self.total_images}") - # return cursor.fetchall() - - def get_num_rows(self, max_images): - with sqlite3.connect(self.db_file) as conn: - cursor = conn.cursor() - cursor.execute(f"SELECT MAX(ROWID) FROM {self.db_table}") - db_rows = cursor.fetchone()[0] - if max_images: - return min(max_images, db_rows) - else: - return db_rows - - def __getitem__(self, item): - # lr, hr = self.lr_hr_images[item] - # lr = Image.open(io.BytesIO(lr)) - # hr = Image.open(io.BytesIO(hr)) - # return to_tensor(lr), to_tensor(hr) - # note sqlite rowid starts with 1 - with sqlite3.connect(self.db_file) as conn: - cursor = conn.cursor() - cursor.execute( - f"SELECT {self.lr_col}, {self.hr_col} FROM {self.db_table} WHERE ROWID={item + 1}" - ) - lr, hr = cursor.fetchone() - lr = Image.open(io.BytesIO(lr)).convert("RGB") - hr = Image.open(io.BytesIO(hr)).convert("RGB") - # lr = np.array(lr) # use scale [0, 255] instead of [0,1] - # hr = np.array(hr) - return to_tensor(lr), to_tensor(hr) - - -class ImagePatchData(Dataset): - def __init__(self, lr_folder, hr_folder): - self.lr_folder = lr_folder - self.hr_folder = hr_folder - self.lr_imgs = glob.glob(os.path.join(lr_folder, "**")) - self.total_imgs = len(self.lr_imgs) - - def __len__(self): - return self.total_imgs - - def __getitem__(self, item): - lr_file = self.lr_imgs[item] - hr_path = re.sub("lr", "hr", os.path.dirname(lr_file)) - filename = os.path.basename(lr_file) - hr_file = os.path.join(hr_path, filename) - return to_tensor(Image.open(lr_file)), to_tensor(Image.open(hr_file)) - - -class ImageAugment: - def __init__(self, shrink_size=2, noise_level=1, down_sample_method=None): - # noise_level (int): 0: no noise; 1: 75-95% quality; 2:50-75% - if noise_level == 0: - self.noise_level = [0, 0] - elif noise_level == 1: - self.noise_level = [5, 25] - elif noise_level == 2: - self.noise_level = [25, 50] - else: - raise KeyError("Noise level should be either 0, 1, 2") - self.shrink_size = shrink_size - self.down_sample_method = down_sample_method - - def shrink_img(self, hr_img): - - if self.down_sample_method is None: - resample_method = random.choice( - [Image.BILINEAR, Image.BICUBIC, Image.LANCZOS] - ) - else: - resample_method = self.down_sample_method - img_w, img_h = tuple(map(lambda x: int(x / self.shrink_size), hr_img.size)) - lr_img = hr_img.resize((img_w, img_h), resample_method) - return lr_img - - def add_jpeg_noise(self, hr_img): - quality = 100 - round(random.uniform(*self.noise_level)) - lr_img = BytesIO() - hr_img.save(lr_img, format="JPEG", quality=quality) - lr_img.seek(0) - lr_img = Image.open(lr_img) - return lr_img - - def process(self, hr_patch_pil): - lr_patch_pil = self.shrink_img(hr_patch_pil) - if self.noise_level[1] > 0: - lr_patch_pil = self.add_jpeg_noise(lr_patch_pil) - - return lr_patch_pil, hr_patch_pil - - def 
up_sample(self, img, resample): - width, height = img.size - return img.resize( - (self.shrink_size * width, self.shrink_size * height), resample=resample - ) diff --git a/spaces/GowthamSiddharth/MyAssist_ChatBot/README.md b/spaces/GowthamSiddharth/MyAssist_ChatBot/README.md deleted file mode 100644 index acf2cbfb7efa210a2c887be8f5f19c29d69d7e19..0000000000000000000000000000000000000000 --- a/spaces/GowthamSiddharth/MyAssist_ChatBot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MyAssist ChatBot -emoji: 📚 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/__init__.py b/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/__init__.py deleted file mode 100644 index ece0ea08fe2e939cc260a1dafc0ab5b391b773d9..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/EmojiGAN/torch_utils/ops/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -# empty diff --git a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/op/upfirdn2d.cpp b/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/op/upfirdn2d.cpp deleted file mode 100644 index d2e633dc896433c205e18bc3e455539192ff968e..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/StyleGAN-NADA/e4e/models/stylegan2/op/upfirdn2d.cpp +++ /dev/null @@ -1,23 +0,0 @@ -#include - - -torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, - int up_x, int up_y, int down_x, int down_y, - int pad_x0, int pad_x1, int pad_y0, int pad_y1); - -#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") -#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") -#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) - -torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel, - int up_x, int up_y, int down_x, int down_y, - int pad_x0, int pad_x1, int pad_y0, int pad_y1) { - CHECK_CUDA(input); - CHECK_CUDA(kernel); - - return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1); -} - -PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { - m.def("upfirdn2d", &upfirdn2d, "upfirdn2d (CUDA)"); -} \ No newline at end of file diff --git a/spaces/Gradio-Blocks/are-you-wearing-a-mask/app.py b/spaces/Gradio-Blocks/are-you-wearing-a-mask/app.py deleted file mode 100644 index b897953f05e74498cbccbb9ab06a5844b3931164..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/are-you-wearing-a-mask/app.py +++ /dev/null @@ -1,29 +0,0 @@ -# Are you wearing a mask? -import gradio as gr -import torch -import torchvision -import numpy as np -from PIL import Image - -# Face masks -# TODO: Allow user selectable model? 
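-# One possible sketch for the TODO above (hypothetical helper, kept commented out, not used by this app):
-# wrap the hub call so a UI control could later pick between several weight files.
-# def load_model(weights_path="model_weights/face_masks_v8.pt"):
-#     return torch.hub.load('ultralytics/yolov5:v6.2', 'custom', weights_path)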
-model = torch.hub.load('ultralytics/yolov5:v6.2', 'custom', "model_weights/face_masks_v8.pt") - -def yolo(im, size=640): - g = (size / max(im.size)) # gain - im = im.resize((int(x * g) for x in im.size), Image.ANTIALIAS) # resize - - results = model(im) # inference - results.render() # updates results.imgs with boxes and labels - return Image.fromarray(results.imgs[0]) - - -inputs = gr.inputs.Image(type='pil', label="Original Image") -outputs = gr.outputs.Image(type="pil", label="Output Image") - -title = "Are you wearing a mask?" -description = "Detecting masked and unmasked faces with YOLOv5. Take a picture, upload an image, or click an example image to use." -article = "

      This app makes predictions using a YOLOv5s model that was fine tuned on a dataset of people with and without masks. All of the code for training the model is available on GitHub. This app and the model behind it were created by Henry Lydecker, for a course he developed for the Sydney Informatics Hub, a Core Research Facility of The University of Sydney. Find out more about the YOLO model from the original creator, Joseph Redmon. YOLOv5 is a family of compound-scaled object detection models trained on the COCO dataset and developed by Ultralytics, and includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite. Source code | PyTorch Hub

      " - -examples = [['data/picard.jpg'], ['data/crowd.jpeg'],['data/baseball2.jpeg'],['data/santa-claus-orig.jpg'],['data/kfc_anime2.jpg'],['data/doge2.webp'],['data/cat_mask.jpg']] -gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article, examples=examples, theme="huggingface").launch(enable_queue=True) \ No newline at end of file diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py deleted file mode 100644 index 927609206e1323dcf1173c4a5393e3f03d534c0a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_2x_coco.py' -model = dict( - pretrained='open-mmlab://resnext101_32x4d', - backbone=dict( - type='ResNeXt', - depth=101, - groups=32, - base_width=4, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=True), - style='pytorch')) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py deleted file mode 100644 index ef81123a2ebd5a30eb812d321eb7a3764e315a72..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py +++ /dev/null @@ -1,97 +0,0 @@ -_base_ = [ - '../_base_/datasets/coco_detection.py', - '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' -] - -model = dict( - type='NASFCOS', - pretrained='open-mmlab://detectron2/resnet50_caffe', - backbone=dict( - type='ResNet', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - frozen_stages=1, - norm_cfg=dict(type='BN', requires_grad=False, eps=0), - style='caffe'), - neck=dict( - type='NASFCOS_FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - start_level=1, - add_extra_convs=True, - num_outs=5, - norm_cfg=dict(type='BN'), - conv_cfg=dict(type='DCNv2', deform_groups=2)), - bbox_head=dict( - type='NASFCOSHead', - num_classes=80, - in_channels=256, - feat_channels=256, - strides=[8, 16, 32, 64, 128], - norm_cfg=dict(type='GN', num_groups=32), - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - loss_centerness=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), - train_cfg=dict( - assigner=dict( - type='MaxIoUAssigner', - pos_iou_thr=0.5, - neg_iou_thr=0.4, - min_pos_iou=0, - ignore_iof_thr=-1), - allowed_border=-1, - pos_weight=-1, - debug=False), - test_cfg=dict( - nms_pre=1000, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.6), - max_per_img=100)) - -img_norm_cfg = dict( - mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) - -train_pipeline = [ - dict(type='LoadImageFromFile'), - dict(type='LoadAnnotations', with_bbox=True), - dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] - 
-test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(1333, 800), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=True), - dict(type='RandomFlip'), - dict(type='Normalize', **img_norm_cfg), - dict(type='Pad', size_divisor=32), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] - -data = dict( - samples_per_gpu=4, - workers_per_gpu=2, - train=dict(pipeline=train_pipeline), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) - -optimizer = dict( - lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/dense_heads/fcos_head.py b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/dense_heads/fcos_head.py deleted file mode 100644 index 905a703507f279ac8d34cff23c99af33c0d5f973..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/models/dense_heads/fcos_head.py +++ /dev/null @@ -1,629 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from mmcv.cnn import Scale, normal_init -from mmcv.runner import force_fp32 - -from mmdet.core import distance2bbox, multi_apply, multiclass_nms, reduce_mean -from ..builder import HEADS, build_loss -from .anchor_free_head import AnchorFreeHead - -INF = 1e8 - - -@HEADS.register_module() -class FCOSHead(AnchorFreeHead): - """Anchor-free head used in `FCOS `_. - - The FCOS head does not use anchor boxes. Instead bounding boxes are - predicted at each pixel and a centerness measure is used to suppress - low-quality predictions. - Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training - tricks used in official repo, which will bring remarkable mAP gains - of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for - more detail. - - Args: - num_classes (int): Number of categories excluding the background - category. - in_channels (int): Number of channels in the input feature map. - strides (list[int] | list[tuple[int, int]]): Strides of points - in multiple feature levels. Default: (4, 8, 16, 32, 64). - regress_ranges (tuple[tuple[int, int]]): Regress range of multiple - level points. - center_sampling (bool): If true, use center sampling. Default: False. - center_sample_radius (float): Radius of center sampling. Default: 1.5. - norm_on_bbox (bool): If true, normalize the regression targets - with FPN strides. Default: False. - centerness_on_reg (bool): If true, position centerness on the - regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042. - Default: False. - conv_bias (bool | str): If specified as `auto`, it will be decided by the - norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise - False. Default: "auto". - loss_cls (dict): Config of classification loss. - loss_bbox (dict): Config of localization loss. - loss_centerness (dict): Config of centerness loss. - norm_cfg (dict): dictionary to construct and config norm layer. - Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True). 
- - Example: - >>> self = FCOSHead(11, 7) - >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]] - >>> cls_score, bbox_pred, centerness = self.forward(feats) - >>> assert len(cls_score) == len(self.scales) - """ # noqa: E501 - - def __init__(self, - num_classes, - in_channels, - regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512), - (512, INF)), - center_sampling=False, - center_sample_radius=1.5, - norm_on_bbox=False, - centerness_on_reg=False, - loss_cls=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_bbox=dict(type='IoULoss', loss_weight=1.0), - loss_centerness=dict( - type='CrossEntropyLoss', - use_sigmoid=True, - loss_weight=1.0), - norm_cfg=dict(type='GN', num_groups=32, requires_grad=True), - **kwargs): - self.regress_ranges = regress_ranges - self.center_sampling = center_sampling - self.center_sample_radius = center_sample_radius - self.norm_on_bbox = norm_on_bbox - self.centerness_on_reg = centerness_on_reg - super().__init__( - num_classes, - in_channels, - loss_cls=loss_cls, - loss_bbox=loss_bbox, - norm_cfg=norm_cfg, - **kwargs) - self.loss_centerness = build_loss(loss_centerness) - - def _init_layers(self): - """Initialize layers of the head.""" - super()._init_layers() - self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1) - self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides]) - - def init_weights(self): - """Initialize weights of the head.""" - super().init_weights() - normal_init(self.conv_centerness, std=0.01) - - def forward(self, feats): - """Forward features from the upstream network. - - Args: - feats (tuple[Tensor]): Features from the upstream network, each is - a 4D-tensor. - - Returns: - tuple: - cls_scores (list[Tensor]): Box scores for each scale level, \ - each is a 4D-tensor, the channel number is \ - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each \ - scale level, each is a 4D-tensor, the channel number is \ - num_points * 4. - centernesses (list[Tensor]): centerness for each scale level, \ - each is a 4D-tensor, the channel number is num_points * 1. - """ - return multi_apply(self.forward_single, feats, self.scales, - self.strides) - - def forward_single(self, x, scale, stride): - """Forward features of a single scale level. - - Args: - x (Tensor): FPN feature maps of the specified stride. - scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize - the bbox prediction. - stride (int): The corresponding stride for feature maps, only - used to normalize the bbox prediction when self.norm_on_bbox - is True. - - Returns: - tuple: scores for each class, bbox predictions and centerness \ - predictions of input feature maps. - """ - cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x) - if self.centerness_on_reg: - centerness = self.conv_centerness(reg_feat) - else: - centerness = self.conv_centerness(cls_feat) - # scale the bbox_pred of different level - # float to avoid overflow when enabling FP16 - bbox_pred = scale(bbox_pred).float() - if self.norm_on_bbox: - bbox_pred = F.relu(bbox_pred) - if not self.training: - bbox_pred *= stride - else: - bbox_pred = bbox_pred.exp() - return cls_score, bbox_pred, centerness - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) - def loss(self, - cls_scores, - bbox_preds, - centernesses, - gt_bboxes, - gt_labels, - img_metas, - gt_bboxes_ignore=None): - """Compute loss of the head. 
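-
-        With the default losses this amounts to a focal classification loss,
-        a centerness-weighted IoU box regression loss and a binary
-        cross-entropy centerness loss.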
- - Args: - cls_scores (list[Tensor]): Box scores for each scale level, - each is a 4D-tensor, the channel number is - num_points * num_classes. - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level, each is a 4D-tensor, the channel number is - num_points * 4. - centernesses (list[Tensor]): centerness for each scale level, each - is a 4D-tensor, the channel number is num_points * 1. - gt_bboxes (list[Tensor]): Ground truth bboxes for each image with - shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): class indices corresponding to each box - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - gt_bboxes_ignore (None | list[Tensor]): specify which bounding - boxes can be ignored when computing the loss. - - Returns: - dict[str, Tensor]: A dictionary of loss components. - """ - assert len(cls_scores) == len(bbox_preds) == len(centernesses) - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, - bbox_preds[0].device) - labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes, - gt_labels) - - num_imgs = cls_scores[0].size(0) - # flatten cls_scores, bbox_preds and centerness - flatten_cls_scores = [ - cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels) - for cls_score in cls_scores - ] - flatten_bbox_preds = [ - bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4) - for bbox_pred in bbox_preds - ] - flatten_centerness = [ - centerness.permute(0, 2, 3, 1).reshape(-1) - for centerness in centernesses - ] - flatten_cls_scores = torch.cat(flatten_cls_scores) - flatten_bbox_preds = torch.cat(flatten_bbox_preds) - flatten_centerness = torch.cat(flatten_centerness) - flatten_labels = torch.cat(labels) - flatten_bbox_targets = torch.cat(bbox_targets) - # repeat points to align with bbox_preds - flatten_points = torch.cat( - [points.repeat(num_imgs, 1) for points in all_level_points]) - - # FG cat_id: [0, num_classes -1], BG cat_id: num_classes - bg_class_ind = self.num_classes - pos_inds = ((flatten_labels >= 0) - & (flatten_labels < bg_class_ind)).nonzero().reshape(-1) - num_pos = torch.tensor( - len(pos_inds), dtype=torch.float, device=bbox_preds[0].device) - num_pos = max(reduce_mean(num_pos), 1.0) - loss_cls = self.loss_cls( - flatten_cls_scores, flatten_labels, avg_factor=num_pos) - - pos_bbox_preds = flatten_bbox_preds[pos_inds] - pos_centerness = flatten_centerness[pos_inds] - - if len(pos_inds) > 0: - pos_bbox_targets = flatten_bbox_targets[pos_inds] - pos_centerness_targets = self.centerness_target(pos_bbox_targets) - pos_points = flatten_points[pos_inds] - pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds) - pos_decoded_target_preds = distance2bbox(pos_points, - pos_bbox_targets) - # centerness weighted iou loss - centerness_denorm = max( - reduce_mean(pos_centerness_targets.sum().detach()), 1e-6) - loss_bbox = self.loss_bbox( - pos_decoded_bbox_preds, - pos_decoded_target_preds, - weight=pos_centerness_targets, - avg_factor=centerness_denorm) - loss_centerness = self.loss_centerness( - pos_centerness, pos_centerness_targets, avg_factor=num_pos) - else: - loss_bbox = pos_bbox_preds.sum() - loss_centerness = pos_centerness.sum() - - return dict( - loss_cls=loss_cls, - loss_bbox=loss_bbox, - loss_centerness=loss_centerness) - - @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses')) - def get_bboxes(self, - cls_scores, - bbox_preds, - centernesses, - img_metas, - 
cfg=None, - rescale=False, - with_nms=True): - """Transform network output for a batch into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for each scale level - with shape (N, num_points * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for each scale - level with shape (N, num_points * 4, H, W). - centernesses (list[Tensor]): Centerness for each scale level with - shape (N, num_points * 1, H, W). - img_metas (list[dict]): Meta information of each image, e.g., - image size, scaling factor, etc. - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. Default: None. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple. - The first item is an (n, 5) tensor, where 5 represent - (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1. - The shape of the second tensor in the tuple is (n,), and - each element represents the class label of the corresponding - box. - """ - assert len(cls_scores) == len(bbox_preds) - num_levels = len(cls_scores) - - featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores] - mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype, - bbox_preds[0].device) - - cls_score_list = [cls_scores[i].detach() for i in range(num_levels)] - bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)] - centerness_pred_list = [ - centernesses[i].detach() for i in range(num_levels) - ] - if torch.onnx.is_in_onnx_export(): - assert len( - img_metas - ) == 1, 'Only support one input image while in exporting to ONNX' - img_shapes = img_metas[0]['img_shape_for_onnx'] - else: - img_shapes = [ - img_metas[i]['img_shape'] - for i in range(cls_scores[0].shape[0]) - ] - scale_factors = [ - img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0]) - ] - result_list = self._get_bboxes(cls_score_list, bbox_pred_list, - centerness_pred_list, mlvl_points, - img_shapes, scale_factors, cfg, rescale, - with_nms) - return result_list - - def _get_bboxes(self, - cls_scores, - bbox_preds, - centernesses, - mlvl_points, - img_shapes, - scale_factors, - cfg, - rescale=False, - with_nms=True): - """Transform outputs for a single batch item into bbox predictions. - - Args: - cls_scores (list[Tensor]): Box scores for a single scale level - with shape (N, num_points * num_classes, H, W). - bbox_preds (list[Tensor]): Box energies / deltas for a single scale - level with shape (N, num_points * 4, H, W). - centernesses (list[Tensor]): Centerness for a single scale level - with shape (N, num_points * 4, H, W). - mlvl_points (list[Tensor]): Box reference for a single scale level - with shape (num_total_points, 4). - img_shapes (list[tuple[int]]): Shape of the input image, - list[(height, width, 3)]. - scale_factors (list[ndarray]): Scale factor of the image arrange as - (w_scale, h_scale, w_scale, h_scale). - cfg (mmcv.Config | None): Test / postprocessing configuration, - if None, test_cfg would be used. - rescale (bool): If True, return boxes in original image space. - Default: False. - with_nms (bool): If True, do nms before return boxes. - Default: True. - - Returns: - tuple(Tensor): - det_bboxes (Tensor): BBox predictions in shape (n, 5), where - the first 4 columns are bounding box positions - (tl_x, tl_y, br_x, br_y) and the 5-th column is a score - between 0 and 1. 
- det_labels (Tensor): A (n,) tensor where each item is the - predicted class label of the corresponding box. - """ - cfg = self.test_cfg if cfg is None else cfg - assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) - device = cls_scores[0].device - batch_size = cls_scores[0].shape[0] - # convert to tensor to keep tracing - nms_pre_tensor = torch.tensor( - cfg.get('nms_pre', -1), device=device, dtype=torch.long) - mlvl_bboxes = [] - mlvl_scores = [] - mlvl_centerness = [] - for cls_score, bbox_pred, centerness, points in zip( - cls_scores, bbox_preds, centernesses, mlvl_points): - assert cls_score.size()[-2:] == bbox_pred.size()[-2:] - scores = cls_score.permute(0, 2, 3, 1).reshape( - batch_size, -1, self.cls_out_channels).sigmoid() - centerness = centerness.permute(0, 2, 3, - 1).reshape(batch_size, - -1).sigmoid() - - bbox_pred = bbox_pred.permute(0, 2, 3, - 1).reshape(batch_size, -1, 4) - # Always keep topk op for dynamic input in onnx - if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export() - or scores.shape[-2] > nms_pre_tensor): - from torch import _shape_as_tensor - # keep shape as tensor and get k - num_anchor = _shape_as_tensor(scores)[-2].to(device) - nms_pre = torch.where(nms_pre_tensor < num_anchor, - nms_pre_tensor, num_anchor) - - max_scores, _ = (scores * centerness[..., None]).max(-1) - _, topk_inds = max_scores.topk(nms_pre) - points = points[topk_inds, :] - batch_inds = torch.arange(batch_size).view( - -1, 1).expand_as(topk_inds).long() - bbox_pred = bbox_pred[batch_inds, topk_inds, :] - scores = scores[batch_inds, topk_inds, :] - centerness = centerness[batch_inds, topk_inds] - - bboxes = distance2bbox(points, bbox_pred, max_shape=img_shapes) - mlvl_bboxes.append(bboxes) - mlvl_scores.append(scores) - mlvl_centerness.append(centerness) - - batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1) - if rescale: - batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor( - scale_factors).unsqueeze(1) - batch_mlvl_scores = torch.cat(mlvl_scores, dim=1) - batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1) - - # Set max number of box to be feed into nms in deployment - deploy_nms_pre = cfg.get('deploy_nms_pre', -1) - if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export(): - batch_mlvl_scores, _ = ( - batch_mlvl_scores * - batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores) - ).max(-1) - _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre) - batch_inds = torch.arange(batch_mlvl_scores.shape[0]).view( - -1, 1).expand_as(topk_inds) - batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :] - batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :] - batch_mlvl_centerness = batch_mlvl_centerness[batch_inds, - topk_inds] - - # remind that we set FG labels to [0, num_class-1] since mmdet v2.0 - # BG cat_id: num_class - padding = batch_mlvl_scores.new_zeros(batch_size, - batch_mlvl_scores.shape[1], 1) - batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1) - - if with_nms: - det_results = [] - for (mlvl_bboxes, mlvl_scores, - mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores, - batch_mlvl_centerness): - det_bbox, det_label = multiclass_nms( - mlvl_bboxes, - mlvl_scores, - cfg.score_thr, - cfg.nms, - cfg.max_per_img, - score_factors=mlvl_centerness) - det_results.append(tuple([det_bbox, det_label])) - else: - det_results = [ - tuple(mlvl_bs) - for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores, - batch_mlvl_centerness) - ] - return det_results - - def _get_points_single(self, - featmap_size, - stride, - dtype, - device, - 
flatten=False): - """Get points according to feature map sizes.""" - y, x = super()._get_points_single(featmap_size, stride, dtype, device) - points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride), - dim=-1) + stride // 2 - return points - - def get_targets(self, points, gt_bboxes_list, gt_labels_list): - """Compute regression, classification and centerness targets for points - in multiple images. - - Args: - points (list[Tensor]): Points of each fpn level, each has shape - (num_points, 2). - gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image, - each has shape (num_gt, 4). - gt_labels_list (list[Tensor]): Ground truth labels of each box, - each has shape (num_gt,). - - Returns: - tuple: - concat_lvl_labels (list[Tensor]): Labels of each level. \ - concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \ - level. - """ - assert len(points) == len(self.regress_ranges) - num_levels = len(points) - # expand regress ranges to align with points - expanded_regress_ranges = [ - points[i].new_tensor(self.regress_ranges[i])[None].expand_as( - points[i]) for i in range(num_levels) - ] - # concat all levels points and regress ranges - concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0) - concat_points = torch.cat(points, dim=0) - - # the number of points per img, per lvl - num_points = [center.size(0) for center in points] - - # get labels and bbox_targets of each image - labels_list, bbox_targets_list = multi_apply( - self._get_target_single, - gt_bboxes_list, - gt_labels_list, - points=concat_points, - regress_ranges=concat_regress_ranges, - num_points_per_lvl=num_points) - - # split to per img, per level - labels_list = [labels.split(num_points, 0) for labels in labels_list] - bbox_targets_list = [ - bbox_targets.split(num_points, 0) - for bbox_targets in bbox_targets_list - ] - - # concat per level image - concat_lvl_labels = [] - concat_lvl_bbox_targets = [] - for i in range(num_levels): - concat_lvl_labels.append( - torch.cat([labels[i] for labels in labels_list])) - bbox_targets = torch.cat( - [bbox_targets[i] for bbox_targets in bbox_targets_list]) - if self.norm_on_bbox: - bbox_targets = bbox_targets / self.strides[i] - concat_lvl_bbox_targets.append(bbox_targets) - return concat_lvl_labels, concat_lvl_bbox_targets - - def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges, - num_points_per_lvl): - """Compute regression and classification targets for a single image.""" - num_points = points.size(0) - num_gts = gt_labels.size(0) - if num_gts == 0: - return gt_labels.new_full((num_points,), self.num_classes), \ - gt_bboxes.new_zeros((num_points, 4)) - - areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * ( - gt_bboxes[:, 3] - gt_bboxes[:, 1]) - # TODO: figure out why these two are different - # areas = areas[None].expand(num_points, num_gts) - areas = areas[None].repeat(num_points, 1) - regress_ranges = regress_ranges[:, None, :].expand( - num_points, num_gts, 2) - gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4) - xs, ys = points[:, 0], points[:, 1] - xs = xs[:, None].expand(num_points, num_gts) - ys = ys[:, None].expand(num_points, num_gts) - - left = xs - gt_bboxes[..., 0] - right = gt_bboxes[..., 2] - xs - top = ys - gt_bboxes[..., 1] - bottom = gt_bboxes[..., 3] - ys - bbox_targets = torch.stack((left, top, right, bottom), -1) - - if self.center_sampling: - # condition1: inside a `center bbox` - radius = self.center_sample_radius - center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2 - center_ys = (gt_bboxes[..., 1] 
+ gt_bboxes[..., 3]) / 2 - center_gts = torch.zeros_like(gt_bboxes) - stride = center_xs.new_zeros(center_xs.shape) - - # project the points on current lvl back to the `original` sizes - lvl_begin = 0 - for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl): - lvl_end = lvl_begin + num_points_lvl - stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius - lvl_begin = lvl_end - - x_mins = center_xs - stride - y_mins = center_ys - stride - x_maxs = center_xs + stride - y_maxs = center_ys + stride - center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0], - x_mins, gt_bboxes[..., 0]) - center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1], - y_mins, gt_bboxes[..., 1]) - center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2], - gt_bboxes[..., 2], x_maxs) - center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3], - gt_bboxes[..., 3], y_maxs) - - cb_dist_left = xs - center_gts[..., 0] - cb_dist_right = center_gts[..., 2] - xs - cb_dist_top = ys - center_gts[..., 1] - cb_dist_bottom = center_gts[..., 3] - ys - center_bbox = torch.stack( - (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1) - inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0 - else: - # condition1: inside a gt bbox - inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0 - - # condition2: limit the regression range for each location - max_regress_distance = bbox_targets.max(-1)[0] - inside_regress_range = ( - (max_regress_distance >= regress_ranges[..., 0]) - & (max_regress_distance <= regress_ranges[..., 1])) - - # if there are still more than one objects for a location, - # we choose the one with minimal area - areas[inside_gt_bbox_mask == 0] = INF - areas[inside_regress_range == 0] = INF - min_area, min_area_inds = areas.min(dim=1) - - labels = gt_labels[min_area_inds] - labels[min_area == INF] = self.num_classes # set as BG - bbox_targets = bbox_targets[range(num_points), min_area_inds] - - return labels, bbox_targets - - def centerness_target(self, pos_bbox_targets): - """Compute centerness targets. - - Args: - pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape - (num_pos, 4) - - Returns: - Tensor: Centerness target. - """ - # only calculate pos centerness targets, otherwise there may be nan - left_right = pos_bbox_targets[:, [0, 2]] - top_bottom = pos_bbox_targets[:, [1, 3]] - centerness_targets = ( - left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * ( - top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) - return torch.sqrt(centerness_targets) diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/backbones/mobilenet_v2.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/backbones/mobilenet_v2.py deleted file mode 100644 index 5820b4b13c0019d67801c5f924650e928acca72e..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/mmseg/models/backbones/mobilenet_v2.py +++ /dev/null @@ -1,180 +0,0 @@ -import logging - -import torch.nn as nn -from mmcv.cnn import ConvModule, constant_init, kaiming_init -from mmcv.runner import load_checkpoint -from torch.nn.modules.batchnorm import _BatchNorm - -from ..builder import BACKBONES -from ..utils import InvertedResidual, make_divisible - - -@BACKBONES.register_module() -class MobileNetV2(nn.Module): - """MobileNetV2 backbone. - - Args: - widen_factor (float): Width multiplier, multiply number of - channels in each layer by this amount. Default: 1.0. - strides (Sequence[int], optional): Strides of the first block of each - layer. 
If not specified, default config in ``arch_setting`` will - be used. - dilations (Sequence[int]): Dilation of each layer. - out_indices (None or Sequence[int]): Output from which stages. - Default: (7, ). - frozen_stages (int): Stages to be frozen (all param fixed). - Default: -1, which means not freezing any parameters. - conv_cfg (dict): Config dict for convolution layer. - Default: None, which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU6'). - norm_eval (bool): Whether to set norm layers to eval mode, namely, - freeze running stats (mean and var). Note: Effect on Batch Norm - and its variants only. Default: False. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - """ - - # Parameters to build layers. 3 parameters are needed to construct a - # layer, from left to right: expand_ratio, channel, num_blocks. - arch_settings = [[1, 16, 1], [6, 24, 2], [6, 32, 3], [6, 64, 4], - [6, 96, 3], [6, 160, 3], [6, 320, 1]] - - def __init__(self, - widen_factor=1., - strides=(1, 2, 2, 2, 1, 2, 1), - dilations=(1, 1, 1, 1, 1, 1, 1), - out_indices=(1, 2, 4, 6), - frozen_stages=-1, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU6'), - norm_eval=False, - with_cp=False): - super(MobileNetV2, self).__init__() - self.widen_factor = widen_factor - self.strides = strides - self.dilations = dilations - assert len(strides) == len(dilations) == len(self.arch_settings) - self.out_indices = out_indices - for index in out_indices: - if index not in range(0, 7): - raise ValueError('the item in out_indices must in ' - f'range(0, 8). But received {index}') - - if frozen_stages not in range(-1, 7): - raise ValueError('frozen_stages must be in range(-1, 7). ' - f'But received {frozen_stages}') - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.act_cfg = act_cfg - self.norm_eval = norm_eval - self.with_cp = with_cp - - self.in_channels = make_divisible(32 * widen_factor, 8) - - self.conv1 = ConvModule( - in_channels=3, - out_channels=self.in_channels, - kernel_size=3, - stride=2, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - self.layers = [] - - for i, layer_cfg in enumerate(self.arch_settings): - expand_ratio, channel, num_blocks = layer_cfg - stride = self.strides[i] - dilation = self.dilations[i] - out_channels = make_divisible(channel * widen_factor, 8) - inverted_res_layer = self.make_layer( - out_channels=out_channels, - num_blocks=num_blocks, - stride=stride, - dilation=dilation, - expand_ratio=expand_ratio) - layer_name = f'layer{i + 1}' - self.add_module(layer_name, inverted_res_layer) - self.layers.append(layer_name) - - def make_layer(self, out_channels, num_blocks, stride, dilation, - expand_ratio): - """Stack InvertedResidual blocks to build a layer for MobileNetV2. - - Args: - out_channels (int): out_channels of block. - num_blocks (int): Number of blocks. - stride (int): Stride of the first block. - dilation (int): Dilation of the first block. - expand_ratio (int): Expand the number of channels of the - hidden layer in InvertedResidual by this ratio. 
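-
-        Example (illustrative only; output channels depend on ``widen_factor``):
-            >>> # one stage of 3 InvertedResidual blocks, stride 2 on the first
-            >>> stage = self.make_layer(out_channels=32, num_blocks=3,
-            ...                         stride=2, dilation=1, expand_ratio=6)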
- """ - layers = [] - for i in range(num_blocks): - layers.append( - InvertedResidual( - self.in_channels, - out_channels, - stride if i == 0 else 1, - expand_ratio=expand_ratio, - dilation=dilation if i == 0 else 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - with_cp=self.with_cp)) - self.in_channels = out_channels - - return nn.Sequential(*layers) - - def init_weights(self, pretrained=None): - if isinstance(pretrained, str): - logger = logging.getLogger() - load_checkpoint(self, pretrained, strict=False, logger=logger) - elif pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, (_BatchNorm, nn.GroupNorm)): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - x = self.conv1(x) - - outs = [] - for i, layer_name in enumerate(self.layers): - layer = getattr(self, layer_name) - x = layer(x) - if i in self.out_indices: - outs.append(x) - - if len(outs) == 1: - return outs[0] - else: - return tuple(outs) - - def _freeze_stages(self): - if self.frozen_stages >= 0: - for param in self.conv1.parameters(): - param.requires_grad = False - for i in range(1, self.frozen_stages + 1): - layer = getattr(self, f'layer{i}') - layer.eval() - for param in layer.parameters(): - param.requires_grad = False - - def train(self, mode=True): - super(MobileNetV2, self).train(mode) - self._freeze_stages() - if mode and self.norm_eval: - for m in self.modules(): - if isinstance(m, _BatchNorm): - m.eval() diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CHANGELOG.md b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CHANGELOG.md deleted file mode 100644 index 24fc214df236b40efead4b1585b01632d9658e9b..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/CHANGELOG.md +++ /dev/null @@ -1,23 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -## [0.0.2a] - TBD - -Improved demo, fixed top p (thanks @jnordberg). - -Compressor tanh on output to avoid clipping with some style (especially piano). -Now repeating the conditioning periodically if it is too short. - -More options when launching Gradio app locally (thanks @ashleykleynhans). - -Testing out PyTorch 2.0 memory efficient attention. - -Added extended generation (infinite length) by slowly moving the windows. -Note that other implementations exist: https://github.com/camenduru/MusicGen-colab. - -## [0.0.1] - 2023-06-09 - -Initial release, with model evaluation only. diff --git a/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/__init__.py b/spaces/Harveenchadha/Vakyansh-Hindi-TTS/ttsv/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Hitmanny/BigGAN-text-to-image/app.py b/spaces/Hitmanny/BigGAN-text-to-image/app.py deleted file mode 100644 index f22f259d84dd634dd567e2f923bad84afd6bae16..0000000000000000000000000000000000000000 --- a/spaces/Hitmanny/BigGAN-text-to-image/app.py +++ /dev/null @@ -1,9 +0,0 @@ -import gradio as gr -description = "BigGAN text-to-image demo." 
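-# gr.Interface.load below pulls the hosted osanseviero/BigGAN-deep-128 model from the Hugging Face Hub;
-# the text input is an ImageNet class name (e.g. "american robin") and the output is a generated image.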
-title = "BigGAN ImageNet" -interface = gr.Interface.load("huggingface/osanseviero/BigGAN-deep-128", - description=description, - title = title, - examples=[["american robin"]] -) -interface.launch() \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/docs/hydra_integration.md b/spaces/ICML2022/OFA/fairseq/docs/hydra_integration.md deleted file mode 100644 index 6a15298382a6a16dfc4c5a4a812ea1cd0477ed52..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/docs/hydra_integration.md +++ /dev/null @@ -1,284 +0,0 @@ -## Hydra - -[Hydra](https://github.com/facebookresearch/hydra) is an open-source Python -framework that simplifies the development of research and other complex -applications. The key feature is the ability to dynamically create a -hierarchical configuration by composition and override it through config files -and the command line. The name Hydra comes from its ability to run multiple -similar jobs - much like a Hydra with multiple heads. - -## Motivation - -Until recently, all components in fairseq were configured through a shared -`args` namespace that was created at application startup. Components declared -their own `add_args` method to update the argparse parser, hoping that the names -would not clash with arguments from other components. While this model works for -smaller applications, as fairseq grew and became integrated into other -applications, this became problematic. In order to determine how to configure -each component, one needed to a) examine what args were added by this component, -and b) read the code to figure out what shared arguments it is using that were -added in other places. Reproducing models involved sharing commands that often -contained dozens of command line switches. - -The model described above is still supported by fairseq for backward -compatibility, but will be deprecated some time in the future. - -New components in fairseq should now create a dataclass that encapsulates all -parameters required to configure this component. The dataclass is registered -along with the component, and fairseq takes care of constructing and providing -this configuration object to the component's constructor. Note that sharing -parameters can optionally still work, but one has to explicitly point to the -"source of truth" (see inheritance example below). These changes make components -in fairseq more independent and re-usable by other applications: all that is -needed to create a component is to initialize its dataclass and overwrite some -of the defaults. - -While configuring fairseq through command line (using either the legacy argparse -based or the new Hydra based entry points) is still fully supported, you can now -take advantage of configuring fairseq completely or piece-by-piece through -hierarchical YAML configuration files. These files can also be shipped as -examples that others can use to run an identically configured job. - -Additionally, Hydra has a rich and growing [library of -plugins](https://github.com/facebookresearch/hydra/tree/master/plugins) that -provide functionality such as hyperparameter sweeping (including using bayesian -optimization through the [Ax](https://github.com/facebook/Ax) library), job -launching across various platforms, and more. - -## Creating or migrating components - -In general, each new (or updated) component should provide a companion -[dataclass](https://www.python.org/dev/peps/pep-0557/). 
-These dataclasses are
-typically located in the same file as the component and are passed as arguments
-to the `register_*()` functions. Top-level configs that should be present in
-every fairseq application are placed in the
-[global](fairseq/dataclass/configs.py) config file and added to the
-`FairseqConfig` object.
-
-Each dataclass is a plain-old-data object, similar to a `NamedTuple`. These
-classes are decorated with a `@dataclass` decorator, and typically inherit from
-`FairseqDataclass` (which adds some functionality for backward compatibility).
-Each field must have a type, and generally has metadata (such as a help string)
-and a default value. Only primitive types or other config objects are allowed as
-data types for each field.
-
-#### Example:
-
-```python
-from dataclasses import dataclass, field
-from fairseq.dataclass import FairseqDataclass
-
-@dataclass
-class InteractiveConfig(FairseqDataclass):
-    buffer_size: int = field(
-        default=0,
-        metadata={
-            "help": "read this many sentences into a buffer before processing them"
-        },
-    )
-    input: str = field(
-        default="-",
-        metadata={"help": "file to read from; use - for stdin"},
-    )
-```
-
-### Inheriting values
-
-Some components require sharing a value. For example, a learning rate scheduler
-and an optimizer may both need to know the initial learning rate value. One can
-declare a field that, by default, will inherit its value from another config
-node in the same hierarchy:
-
-```python
-@dataclass
-class FairseqAdamConfig(FairseqDataclass):
-    ...
-    lr: List[float] = II("optimization.lr")
-    ...
-```
-
-`II("optimization.lr")` is syntactic sugar for `"${optimization.lr}"`, which is
-the value one can use in a YAML config file or through command line to achieve
-the same effect. Note that this assumes that there is an "optimization" config
-object in the root config and it has a field called "lr".
-
-### Tasks and Models
-
-Creating Tasks and Models works the same as before, except that legacy
-implementations now inherit from `LegacyFairseq*` base classes, while new
-components inherit from `FairseqTask` and `FairseqModel` and provide a dataclass
-to the `register_*()` functions.
-
-#### Task example:
-
-```python
-@dataclass
-class LanguageModelingConfig(FairseqDataclass):
-    data: Optional[str] = field(
-        default=None, metadata={"help": "path to data directory"}
-    )
-    ...
-
-@register_task("language_modeling", dataclass=LanguageModelingConfig)
-class LanguageModelingTask(FairseqTask):
-    ...
-    @classmethod
-    def setup_task(cls, cfg: LanguageModelingConfig):
-        ...
-```
-
-#### Model example:
-
-```python
-@dataclass
-class TransformerLanguageModelConfig(FairseqDataclass):
-    activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
-        default="relu", metadata={"help": "activation function to use"}
-    )
-    dropout: float = field(default=0.1, metadata={"help": "dropout probability"})
-    ...
-
-@register_model("transformer_lm", dataclass=TransformerLanguageModelConfig)
-class TransformerLanguageModel(FairseqLanguageModel):
-    ...
-    @classmethod
-    def build_model(cls, cfg: TransformerLanguageModelConfig, task: FairseqTask):
-        ...
-```
-
-### Other components
-
-Other components work as before, but they now take their configuration dataclass
-as the only constructor argument:
-
-```python
-@dataclass
-class MosesTokenizerConfig(FairseqDataclass):
-    source_lang: str = field(default="en", metadata={"help": "source language"})
-    ...
- -@register_tokenizer("moses", dataclass=MosesTokenizerConfig) -class MosesTokenizer(object): - def __init__(self, cfg: MosesTokenizerConfig): - ... -``` - -Note that if you are adding a new registry for a new set of components, you need -to add it to the `FairseqConfig` object in `fairseq/dataclass/configs.py`: - -```python -@dataclass -class FairseqConfig(object): - ... - my_new_registry: Any = None -``` - -## Training with `fairseq-hydra-train` - -To fully take advantage of configuration flexibility offered by Hydra, you may -want to train new models using the `fairseq-hydra-train` entry point. Legacy CLI -tools such as `fairseq-train` will remain supported for the foreseeable future -but will be deprecated eventually. - -On startup, Hydra will create a configuration object that contains a hierarchy -of all the necessary dataclasses populated with their default values in the -code. The default values are overwritten by values found in YAML files in -`fairseq/config` directory (which currently sets minimal defaults) and then -further overwritten by values provided through command line arguments. - -Some of the most common use cases are shown below: - -### 1. Override default values through command line: - -```shell script -$ fairseq-hydra-train \ - distributed_training.distributed_world_size=1 \ - dataset.batch_size=2 \ - task.data=data-bin \ - model=transformer_lm/transformer_lm_gpt \ - task=language_modeling \ - optimization.max_update=5000 -``` - -Note that along with explicitly providing values for parameters such as -`dataset.batch_size`, this also tells Hydra to overlay configuration found in -`fairseq/config/model/transformer_lm/transformer_lm_gpt.yaml` over the default -values in the dataclass. If you want to train a model without specifying a -particular architecture you can simply specify `model=transformer_lm`. This only -works for migrated tasks and models. - -### 2. Replace bundled configs with an external config: - -```shell script -$ fairseq-hydra-train \ - --config-dir /path/to/external/configs \ - --config-name wiki103 -``` - -where `/path/to/external/configs/wiki103.yaml` contains: - -```yaml -# @package _group_ - -model: - _name: transformer_lm -distributed_training: - distributed_world_size: 1 -dataset: - batch_size: 2 -task: - _name: language_modeling - data: /path/to/data - add_bos_token: false - max_target_positions: 1024 -optimization: - max_update: 50000 - lr: [ 0.25 ] -criterion: cross_entropy -optimizer: adam -lr_scheduler: - _name: cosine -``` - -Note that here bundled configs from `fairseq/config` directory are not used, -however the defaults from each dataclass will still be used (unless overwritten -by your external config). - -Additionally you can choose to break up your configs by creating a directory -structure in the same location as your main config file, with the names of the -top-level fields (such as "model", "dataset", etc), and placing config files -with meaningful names that would populate that specific section of your -top-level config file (for example, you might have -`model/small_transformer_lm.yaml`, `model/big_transformer_lm.yaml`, etc). You -can then specify the correct configuration via command line, defaults in the -main config, or even launch all of them as a sweep (see Hydra documentation on -how to do this). - -### 3. 
Add an external config directory to Hydra search path: - -This allows combining default configuration (including using any bundled config -files), while specifying your own config files for some parts of the -configuration. - -```shell script -$ fairseq-hydra-train \ - distributed_training.distributed_world_size=1 \ - dataset.batch_size=2 \ - task.data=/path/to/data/ \ - model=transformer_lm/2_layers \ - task=language_modeling \ - optimization.max_update=5000 \ - --config-dir /path/to/external/configs -``` - -where `/path/to/external/configs` has the following structure: -``` -. -+-- model -| +-- transformer_lm -| | +-- 2_layers.yaml -``` - -and `2_layers.yaml` contains a copy of `transformer_lm_gpt.yaml` but with -`decoder_layers` set to 2. You can add other configs to configure other -components as well. diff --git a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/inference_main.py b/spaces/Ikaros521/so-vits-svc-4.0-ikaros/inference_main.py deleted file mode 100644 index 80a470ea9146f1f75e785411dd5d3b6fade64b70..0000000000000000000000000000000000000000 --- a/spaces/Ikaros521/so-vits-svc-4.0-ikaros/inference_main.py +++ /dev/null @@ -1,100 +0,0 @@ -import io -import logging -import time -from pathlib import Path - -import librosa -import matplotlib.pyplot as plt -import numpy as np -import soundfile - -from inference import infer_tool -from inference import slicer -from inference.infer_tool import Svc - -logging.getLogger('numba').setLevel(logging.WARNING) -chunks_dict = infer_tool.read_temp("inference/chunks_temp.json") - - - -def main(): - import argparse - - parser = argparse.ArgumentParser(description='sovits4 inference') - - # 一定要设置的部分 - parser.add_argument('-m', '--model_path', type=str, default="/Volumes/Extend/下载/G_20800.pth", help='模型路径') - parser.add_argument('-c', '--config_path', type=str, default="configs/config.json", help='配置文件路径') - parser.add_argument('-n', '--clean_names', type=str, nargs='+', default=["君の知らない物語-src"], help='wav文件名列表,放在raw文件夹下') - parser.add_argument('-t', '--trans', type=int, nargs='+', default=[0], help='音高调整,支持正负(半音)') - parser.add_argument('-s', '--spk_list', type=str, nargs='+', default=['nyaru'], help='合成目标说话人名称') - - # 可选项部分 - parser.add_argument('-a', '--auto_predict_f0', action='store_true', default=False, - help='语音转换自动预测音高,转换歌声时不要打开这个会严重跑调') - parser.add_argument('-cm', '--cluster_model_path', type=str, default="/Volumes/Extend/下载/so-vits-svc-4.0/logs/44k/kmeans_10000.pt", help='聚类模型路径,如果没有训练聚类则随便填') - parser.add_argument('-cr', '--cluster_infer_ratio', type=float, default=1, help='聚类方案占比,范围0-1,若没有训练聚类模型则填0即可') - - # 不用动的部分 - parser.add_argument('-sd', '--slice_db', type=int, default=-40, help='默认-40,嘈杂的音频可以-30,干声保留呼吸可以-50') - parser.add_argument('-d', '--device', type=str, default=None, help='推理设备,None则为自动选择cpu和gpu') - parser.add_argument('-ns', '--noice_scale', type=float, default=0.4, help='噪音级别,会影响咬字和音质,较为玄学') - parser.add_argument('-p', '--pad_seconds', type=float, default=0.5, help='推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现') - parser.add_argument('-wf', '--wav_format', type=str, default='flac', help='音频输出格式') - - args = parser.parse_args() - - svc_model = Svc(args.model_path, args.config_path, args.device, args.cluster_model_path) - infer_tool.mkdir(["raw", "results"]) - clean_names = args.clean_names - trans = args.trans - spk_list = args.spk_list - slice_db = args.slice_db - wav_format = args.wav_format - auto_predict_f0 = args.auto_predict_f0 - cluster_infer_ratio = args.cluster_infer_ratio - noice_scale = args.noice_scale - pad_seconds = 
args.pad_seconds - - infer_tool.fill_a_to_b(trans, clean_names) - for clean_name, tran in zip(clean_names, trans): - raw_audio_path = f"raw/{clean_name}" - if "." not in raw_audio_path: - raw_audio_path += ".wav" - infer_tool.format_wav(raw_audio_path) - wav_path = Path(raw_audio_path).with_suffix('.wav') - chunks = slicer.cut(wav_path, db_thresh=slice_db) - audio_data, audio_sr = slicer.chunks2audio(wav_path, chunks) - - for spk in spk_list: - audio = [] - for (slice_tag, data) in audio_data: - print(f'#=====segment start, {round(len(data) / audio_sr, 3)}s======') - # padd - pad_len = int(audio_sr * pad_seconds) - data = np.concatenate([np.zeros([pad_len]), data, np.zeros([pad_len])]) - length = int(np.ceil(len(data) / audio_sr * svc_model.target_sample)) - raw_path = io.BytesIO() - soundfile.write(raw_path, data, audio_sr, format="wav") - raw_path.seek(0) - if slice_tag: - print('jump empty segment') - _audio = np.zeros(length) - else: - out_audio, out_sr = svc_model.infer(spk, tran, raw_path, - cluster_infer_ratio=cluster_infer_ratio, - auto_predict_f0=auto_predict_f0, - noice_scale=noice_scale - ) - _audio = out_audio.cpu().numpy() - - pad_len = int(svc_model.target_sample * pad_seconds) - _audio = _audio[pad_len:-pad_len] - audio.extend(list(_audio)) - key = "auto" if auto_predict_f0 else f"{tran}key" - cluster_name = "" if cluster_infer_ratio == 0 else f"_{cluster_infer_ratio}" - res_path = f'./results/old——{clean_name}_{key}_{spk}{cluster_name}.{wav_format}' - soundfile.write(res_path, audio, svc_model.target_sample, format=wav_format) - -if __name__ == '__main__': - main() diff --git a/spaces/Jamkonams/AutoGPT/tests/test_json_parser.py b/spaces/Jamkonams/AutoGPT/tests/test_json_parser.py deleted file mode 100644 index 41c90a6f66c0b0468f1443de80033cc4f268eca0..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/tests/test_json_parser.py +++ /dev/null @@ -1,111 +0,0 @@ -import unittest - -import tests.context -from autogpt.json_utils.json_fix_llm import fix_and_parse_json - - -class TestParseJson(unittest.TestCase): - def test_valid_json(self): - # Test that a valid JSON string is parsed correctly - json_str = '{"name": "John", "age": 30, "city": "New York"}' - obj = fix_and_parse_json(json_str) - self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"}) - - def test_invalid_json_minor(self): - # Test that an invalid JSON string can be fixed with gpt - json_str = '{"name": "John", "age": 30, "city": "New York",}' - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_major_with_gpt(self): - # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_major_without_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END' - # Assert that this raises an exception: - with self.assertRaises(Exception): - fix_and_parse_json(json_str, try_to_fix_with_gpt=False) - - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I suggest we start by browsing the repository to find any issues that we can fix. 
- -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "I suggest we start browsing the repository to find any issues that we can fix.", - "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", - "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", - "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", - "speak": "I will start browsing the repository to find any issues we can fix." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "I suggest we start browsing the repository to find any issues that we can fix.", - "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.", - "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes", - "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.", - "speak": "I will start browsing the repository to find any issues we can fix.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - def test_invalid_json_leading_sentence_with_gpt(self): - # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False - json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this. - -{ - "command": { - "name": "browse_website", - "args":{ - "url": "https://github.com/Torantulino/Auto-GPT" - } - }, - "thoughts": - { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs." - } -}""" - good_obj = { - "command": { - "name": "browse_website", - "args": {"url": "https://github.com/Torantulino/Auto-GPT"}, - }, - "thoughts": { - "text": "Browsing the repository to identify potential bugs", - "reasoning": "Before fixing bugs, I need to identify what needs fixing. 
I will use the 'browse_website' command to analyze the repository.", - "plan": "- Analyze the repository for potential bugs and areas of improvement", - "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.", - "speak": "I am browsing the repository to identify potential bugs.", - }, - } - # Assert that this raises an exception: - self.assertEqual( - fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj - ) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Jeff2323/ai-comic-factory/src/lib/replaceTextInSpeechBubbles.ts b/spaces/Jeff2323/ai-comic-factory/src/lib/replaceTextInSpeechBubbles.ts deleted file mode 100644 index 8566a2f8068feef008348ae7f6d6f06e2d2b1628..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/lib/replaceTextInSpeechBubbles.ts +++ /dev/null @@ -1,98 +0,0 @@ -"use client" - -import { createWorker } from "tesseract.js" -import { loadImageToCanvas } from "./loadImageToCanvas"; - -export async function replaceTextInSpeechBubbles(image: string, customText: string) { - console.log('creating OCR worker to find bubbles inside', image); - - const worker = await createWorker({ - logger: (info) => { - console.log(info) - }, - }); - - const canvas = await loadImageToCanvas(image) - - const ctx = canvas.getContext('2d')!; - - try { - await worker.load(); - await worker.loadLanguage('eng'); - await worker.initialize('eng'); - - const { data } = await worker.recognize(canvas); - const lines = data.lines || []; - - // Draw the lines on the image - ctx.fillStyle = "white"; - - lines.forEach((line) => { - ctx.fillRect(line.bbox.x0, line.bbox.y0, line.bbox.x1 - line.bbox.x0, line.bbox.y1 - line.bbox.y0); - - const bubbleWidth = line.bbox.x1 - line.bbox.x0; - const bubbleHeight = line.bbox.y1 - line.bbox.y0; - let fontSize = 18; - ctx.font = `${fontSize}px Arial`; - - /* - while ( - ctx.measureText(customText).width > bubbleWidth || fontSize * 1.2 // line height - > bubbleHeight) { - fontSize -= 1; - ctx.font = `${fontSize}px Arial`; - } - - const lines = wrapText(ctx, customText, line.bbox.x0, line.bbox.y0, bubbleWidth, fontSize); - - ctx.fillStyle = "black"; - lines.forEach((text, i) => { - ctx.fillText(text, line.bbox.x0, line.bbox.y0 + (i * fontSize * 1.2)); - }); - */ - }) - - await worker.terminate(); - - // Convert the Canvas to image data - const imgAsDataURL = canvas.toDataURL('image/png'); - - if (typeof window !== "undefined") { - const foo = (window as any) - if (!foo.debugJujul) { - foo.debugJujul = [] - } - foo.debugJujul.push({ - lines - }) - } - console.log("lines:", lines) - - return imgAsDataURL; - - } catch (err) { - console.error(err); - } - return ""; -} - -function wrapText(context: CanvasRenderingContext2D, text: string, x: number, y: number, maxWidth: number, lineHeight: number) { - const words = text.split(' '); - let line = ''; - const lines = []; - - for(let n = 0; n < words.length; n++) { - let testLine = line + words[n] + ' '; - let metrics = context.measureText(testLine); - let testWidth = metrics.width; - if (testWidth > maxWidth && n > 0) { - lines.push(line); - line = words[n] + ' '; - } - else { - line = testLine; - } - } - lines.push(line); - return lines; -} \ No newline at end of file diff --git a/spaces/JeffJing/ZookChatBot/steamship/invocable/plugin_service.py b/spaces/JeffJing/ZookChatBot/steamship/invocable/plugin_service.py deleted file mode 100644 index 3f5a67a377cf358cec51a1fb769807522163db85..0000000000000000000000000000000000000000 
--- a/spaces/JeffJing/ZookChatBot/steamship/invocable/plugin_service.py +++ /dev/null @@ -1,119 +0,0 @@ -from __future__ import annotations - -import logging -from abc import ABC, abstractmethod -from typing import Generic, Type, TypeVar, Union - -# Note! -# ===== -# -# This the files in this package are for Plugin Implementors. -# If you are using the Steamship Client, you probably are looking for either steamship.client or steamship.data -# -from steamship.invocable import Invocable, InvocableResponse -from steamship.plugin.inputs.train_plugin_input import TrainPluginInput -from steamship.plugin.inputs.training_parameter_plugin_input import TrainingParameterPluginInput -from steamship.plugin.outputs.train_plugin_output import TrainPluginOutput -from steamship.plugin.outputs.training_parameter_plugin_output import TrainingParameterPluginOutput -from steamship.plugin.request import PluginRequest -from steamship.plugin.trainable_model import TrainableModel - -IN = TypeVar("IN") -OUT = TypeVar("OUT") - - -class PluginService(Invocable, Generic[IN, OUT], ABC): - """The Abstract Base Class of a Steamship Plugin. - - All Steamship Plugins implement the operation: - - - run(PluginRequest[T]) -> Response[U] - - Many plugins are effectively stateless. This run operation defines their entire capability. - Examples of such stateless plugins are: - - File Import Plugin - - Export Plugin - - Other plugins have state but in a very controlled way: - - they can be trained, - - this trainable process produces a "model", - - that model acts as the state on which the `run` method is conditioned - - This model is stored in the Steamship Workspace that owns the Plugin Instance, and access to it is provided by the - hosting environment that runs the model. - - TODO(ted) Document this process. - - These stateful plugins are called "Trainable Plugins," and they must implement the following additional methods: - - - get_training_parameters(PluginRequest[TrainingParameterInput]) -> Response[TrainingParameterOutput] - - train(PluginRequest[TrainPluginInput]) -> Response[TrainPluginOutput] - - """ - - @abstractmethod - def run(self, request: PluginRequest[IN]) -> Union[OUT, InvocableResponse[OUT]]: - """Runs the core operation implemented by this plugin: import, export, blockify, tag, etc. - - This is the method that a Steamship Plugin implements to perform its main work. - """ - pass - - -class TrainablePluginService(PluginService, Generic[IN, OUT], ABC): - @abstractmethod - def model_cls(self) -> Type[TrainableModel]: - """Returns the constructor of the TrainableModel this TrainablePluginService uses. - - This is required so the `run` method below can load the model and provide it to the subclass implementor. 
- """ - pass - - def run(self, request: PluginRequest[IN]) -> Union[OUT, InvocableResponse[OUT]]: - """Loads the trainable model before passing the request to the `run_with_model` handler on the subclass.""" - logging.info("TrainablePluginService:run() - Loading model") - model = self.model_cls().load_remote( - client=self.client, # This field comes from being a subclass of App - plugin_instance_id=request.context.plugin_instance_id, - checkpoint_handle=None, # Will use default - use_cache=True, - plugin_instance_config=self.config, - ) - logging.info("TrainablePluginService:run() - Loaded model; invoking run_with_model") - return self.run_with_model(request, model) - - @abstractmethod - def run_with_model( - self, request: PluginRequest[IN], model: TrainableModel - ) -> Union[OUT, InvocableResponse[OUT]]: - """Rather than implementing run(request), a TrainablePluginService implements run_with_model(request, model)""" - pass - - @abstractmethod - def get_training_parameters( - self, request: PluginRequest[TrainingParameterPluginInput] - ) -> InvocableResponse[TrainingParameterPluginOutput]: - """Produces the trainable parameters for this plugin. - - This method is run by the Steamship Engine prior to training to fetch hyperparameters. - - - The user themselves can provide hyperparameters on the TrainingParameterPluginInput object. - - This method then transforms those into the TrainingParameterPluginOutput object, altering the user's values - if desired. - - The Engine then takes those TrainingParameterPluginOutput and presents them on the TrainPluginInput - - """ - pass - - @abstractmethod - def train( - self, request: PluginRequest[TrainPluginInput], model: TrainableModel - ) -> InvocableResponse[TrainPluginOutput]: - """Train the model.""" - pass - - @abstractmethod - def train_status( - self, request: PluginRequest[TrainPluginInput], model: TrainableModel - ) -> InvocableResponse[TrainPluginOutput]: - """Train the model.""" - pass diff --git a/spaces/Joom/Front-end-code-generation-from-images/classes/model/autoencoder_image.py b/spaces/Joom/Front-end-code-generation-from-images/classes/model/autoencoder_image.py deleted file mode 100644 index f4ddc426c2abee8a4e10d5a2b0b6e69e50df3ee0..0000000000000000000000000000000000000000 --- a/spaces/Joom/Front-end-code-generation-from-images/classes/model/autoencoder_image.py +++ /dev/null @@ -1,59 +0,0 @@ -__author__ = 'Taneem Jan, improved the old model through pretrained Auto-encoders' - -from keras.layers import Input, Dropout, Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D -from keras.models import Model -from .Config import * -from .AModel import * - - -class autoencoder_image(AModel): - def __init__(self, input_shape, output_size, output_path): - AModel.__init__(self, input_shape, output_size, output_path) - self.name = 'autoencoder' - - input_image = Input(shape=input_shape) - encoder = Conv2D(32, 3, padding='same', activation='relu')(input_image) - encoder = Conv2D(32, 3, padding='same', activation='relu')(encoder) - encoder = MaxPooling2D()(encoder) - encoder = Dropout(0.25)(encoder) - - encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder) - encoder = Conv2D(64, 3, padding='same', activation='relu')(encoder) - encoder = MaxPooling2D()(encoder) - encoder = Dropout(0.25)(encoder) - - encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder) - encoder = Conv2D(128, 3, padding='same', activation='relu')(encoder) - encoder = MaxPooling2D()(encoder) - encoded = Dropout(0.25, name='encoded_layer')(encoder) - - 
decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(encoded) - decoder = Conv2DTranspose(128, 3, padding='same', activation='relu')(decoder) - decoder = UpSampling2D()(decoder) - decoder = Dropout(0.25)(decoder) - - decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder) - decoder = Conv2DTranspose(64, 3, padding='same', activation='relu')(decoder) - decoder = UpSampling2D()(decoder) - decoder = Dropout(0.25)(decoder) - - decoder = Conv2DTranspose(32, 3, padding='same', activation='relu')(decoder) - decoder = Conv2DTranspose(3, 3, padding='same', activation='relu')(decoder) - decoder = UpSampling2D()(decoder) - decoded = Dropout(0.25)(decoder) - - # decoder = Dense(256*256*3)(decoder) - # decoded = Reshape(target_shape=input_shape)(decoder) - - self.model = Model(input_image, decoded) - self.model.compile(optimizer='adadelta', loss='binary_crossentropy') - - # self.model.summary() - - def fit_generator(self, generator, steps_per_epoch): - self.model.fit_generator(generator, steps_per_epoch=steps_per_epoch, epochs=EPOCHS, verbose=1) - self.save() - - def predict_hidden(self, images): - hidden_layer_model = Model(inputs=self.input, outputs=self.get_layer('encoded_layer').output) - return hidden_layer_model.predict(images) diff --git a/spaces/KaygNas/cut-it/src/main.ts b/spaces/KaygNas/cut-it/src/main.ts deleted file mode 100644 index 0b1da25f83e45f5699e0f20273f7f3a69b171fcd..0000000000000000000000000000000000000000 --- a/spaces/KaygNas/cut-it/src/main.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { worker } from '../mocks' -import { App } from './App' - -if (import.meta.env.VITE_MOCK) - worker.start({ onUnhandledRequest: 'bypass' }) - -// eslint-disable-next-line no-console -console.log(`main.ts starting ${App.name}`) -window.addEventListener('DOMContentLoaded', () => { - const canvas = document.getElementById('renderCanvas') as HTMLCanvasElement - const app = new App(canvas) - app.run() -}) diff --git a/spaces/Kedreamix/YoloGesture/utils/utils_bbox.py b/spaces/Kedreamix/YoloGesture/utils/utils_bbox.py deleted file mode 100644 index 170b23af588430a346f2f6294279274fc963fb6c..0000000000000000000000000000000000000000 --- a/spaces/Kedreamix/YoloGesture/utils/utils_bbox.py +++ /dev/null @@ -1,227 +0,0 @@ -import torch -import torch.nn as nn -from torchvision.ops import nms -import numpy as np - -class DecodeBox(): - def __init__(self, anchors, num_classes, input_shape, anchors_mask = [[6,7,8], [3,4,5], [0,1,2]]): - super(DecodeBox, self).__init__() - self.anchors = anchors - self.num_classes = num_classes - self.bbox_attrs = 5 + num_classes - self.input_shape = input_shape - #-----------------------------------------------------------# - # 13x13的特征层对应的anchor是[142, 110],[192, 243],[459, 401] - # 26x26的特征层对应的anchor是[36, 75],[76, 55],[72, 146] - # 52x52的特征层对应的anchor是[12, 16],[19, 36],[40, 28] - #-----------------------------------------------------------# - self.anchors_mask = anchors_mask - - def decode_box(self, inputs): - outputs = [] - for i, input in enumerate(inputs): - #-----------------------------------------------# - # 输入的input一共有三个,他们的shape分别是 - # batch_size, 255, 13, 13 - # batch_size, 255, 26, 26 - # batch_size, 255, 52, 52 - #-----------------------------------------------# - batch_size = input.size(0) - input_height = input.size(2) - input_width = input.size(3) - - #-----------------------------------------------# - # 输入为416x416时 - # stride_h = stride_w = 32、16、8 - #-----------------------------------------------# - stride_h = self.input_shape[0] / 
input_height - stride_w = self.input_shape[1] / input_width - #-------------------------------------------------# - # 此时获得的scaled_anchors大小是相对于特征层的 - #-------------------------------------------------# - scaled_anchors = [(anchor_width / stride_w, anchor_height / stride_h) for anchor_width, anchor_height in self.anchors[self.anchors_mask[i]]] - - #-----------------------------------------------# - # 输入的input一共有三个,他们的shape分别是 - # batch_size, 3, 13, 13, 85 - # batch_size, 3, 26, 26, 85 - # batch_size, 3, 52, 52, 85 - #-----------------------------------------------# - prediction = input.view(batch_size, len(self.anchors_mask[i]), - self.bbox_attrs, input_height, input_width).permute(0, 1, 3, 4, 2).contiguous() - - #-----------------------------------------------# - # 先验框的中心位置的调整参数 - #-----------------------------------------------# - x = torch.sigmoid(prediction[..., 0]) - y = torch.sigmoid(prediction[..., 1]) - #-----------------------------------------------# - # 先验框的宽高调整参数 - #-----------------------------------------------# - w = prediction[..., 2] - h = prediction[..., 3] - #-----------------------------------------------# - # 获得置信度,是否有物体 - #-----------------------------------------------# - conf = torch.sigmoid(prediction[..., 4]) - #-----------------------------------------------# - # 种类置信度 - #-----------------------------------------------# - pred_cls = torch.sigmoid(prediction[..., 5:]) - - FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor - LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor - - #----------------------------------------------------------# - # 生成网格,先验框中心,网格左上角 - # batch_size,3,13,13 - #----------------------------------------------------------# - grid_x = torch.linspace(0, input_width - 1, input_width).repeat(input_height, 1).repeat( - batch_size * len(self.anchors_mask[i]), 1, 1).view(x.shape).type(FloatTensor) - grid_y = torch.linspace(0, input_height - 1, input_height).repeat(input_width, 1).t().repeat( - batch_size * len(self.anchors_mask[i]), 1, 1).view(y.shape).type(FloatTensor) - - #----------------------------------------------------------# - # 按照网格格式生成先验框的宽高 - # batch_size,3,13,13 - #----------------------------------------------------------# - anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0])) - anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1])) - anchor_w = anchor_w.repeat(batch_size, 1).repeat(1, 1, input_height * input_width).view(w.shape) - anchor_h = anchor_h.repeat(batch_size, 1).repeat(1, 1, input_height * input_width).view(h.shape) - - #----------------------------------------------------------# - # 利用预测结果对先验框进行调整 - # 首先调整先验框的中心,从先验框中心向右下角偏移 - # 再调整先验框的宽高。 - #----------------------------------------------------------# - pred_boxes = FloatTensor(prediction[..., :4].shape) - pred_boxes[..., 0] = x.data + grid_x - pred_boxes[..., 1] = y.data + grid_y - pred_boxes[..., 2] = torch.exp(w.data) * anchor_w - pred_boxes[..., 3] = torch.exp(h.data) * anchor_h - - #----------------------------------------------------------# - # 将输出结果归一化成小数的形式 - #----------------------------------------------------------# - _scale = torch.Tensor([input_width, input_height, input_width, input_height]).type(FloatTensor) - output = torch.cat((pred_boxes.view(batch_size, -1, 4) / _scale, - conf.view(batch_size, -1, 1), pred_cls.view(batch_size, -1, self.num_classes)), -1) - outputs.append(output.data) - return outputs - - def yolo_correct_boxes(self, box_xy, box_wh, input_shape, image_shape, 
letterbox_image): - #-----------------------------------------------------------------# - # 把y轴放前面是因为方便预测框和图像的宽高进行相乘 - #-----------------------------------------------------------------# - box_yx = box_xy[..., ::-1] - box_hw = box_wh[..., ::-1] - input_shape = np.array(input_shape) - image_shape = np.array(image_shape) - - if letterbox_image: - #-----------------------------------------------------------------# - # 这里求出来的offset是图像有效区域相对于图像左上角的偏移情况 - # new_shape指的是宽高缩放情况 - #-----------------------------------------------------------------# - new_shape = np.round(image_shape * np.min(input_shape/image_shape)) - offset = (input_shape - new_shape)/2./input_shape - scale = input_shape/new_shape - - box_yx = (box_yx - offset) * scale - box_hw *= scale - - box_mins = box_yx - (box_hw / 2.) - box_maxes = box_yx + (box_hw / 2.) - boxes = np.concatenate([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[..., 1:2]], axis=-1) - boxes *= np.concatenate([image_shape, image_shape], axis=-1) - return boxes - - def non_max_suppression(self, prediction, num_classes, input_shape, image_shape, letterbox_image, conf_thres=0.5, nms_thres=0.4): - #----------------------------------------------------------# - # 将预测结果的格式转换成左上角右下角的格式。 - # prediction [batch_size, num_anchors, 85] - #----------------------------------------------------------# - box_corner = prediction.new(prediction.shape) - box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2 - box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2 - box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2 - box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2 - prediction[:, :, :4] = box_corner[:, :, :4] - - output = [None for _ in range(len(prediction))] - for i, image_pred in enumerate(prediction): - #----------------------------------------------------------# - # 对种类预测部分取max。 - # class_conf [num_anchors, 1] 种类置信度 - # class_pred [num_anchors, 1] 种类 - #----------------------------------------------------------# - class_conf, class_pred = torch.max(image_pred[:, 5:5 + num_classes], 1, keepdim=True) - - #----------------------------------------------------------# - # 利用置信度进行第一轮筛选 - #----------------------------------------------------------# - conf_mask = (image_pred[:, 4] * class_conf[:, 0] >= conf_thres).squeeze() - - #----------------------------------------------------------# - # 根据置信度进行预测结果的筛选 - #----------------------------------------------------------# - image_pred = image_pred[conf_mask] - class_conf = class_conf[conf_mask] - class_pred = class_pred[conf_mask] - if not image_pred.size(0): - continue - #-------------------------------------------------------------------------# - # detections [num_anchors, 7] - # 7的内容为:x1, y1, x2, y2, obj_conf, class_conf, class_pred - #-------------------------------------------------------------------------# - detections = torch.cat((image_pred[:, :5], class_conf.float(), class_pred.float()), 1) - - #------------------------------------------# - # 获得预测结果中包含的所有种类 - #------------------------------------------# - unique_labels = detections[:, -1].cpu().unique() - - if prediction.is_cuda: - unique_labels = unique_labels.cuda() - detections = detections.cuda() - - for c in unique_labels: - #------------------------------------------# - # 获得某一类得分筛选后全部的预测结果 - #------------------------------------------# - detections_class = detections[detections[:, -1] == c] - - #------------------------------------------# - # 使用官方自带的非极大抑制会速度更快一些! 
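- # (Translation of the note above: using torchvision's built-in NMS is faster
- # than the manual loop kept commented out below.)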
- #------------------------------------------# - keep = nms( - detections_class[:, :4], - detections_class[:, 4] * detections_class[:, 5], - nms_thres - ) - max_detections = detections_class[keep] - - # # 按照存在物体的置信度排序 - # _, conf_sort_index = torch.sort(detections_class[:, 4]*detections_class[:, 5], descending=True) - # detections_class = detections_class[conf_sort_index] - # # 进行非极大抑制 - # max_detections = [] - # while detections_class.size(0): - # # 取出这一类置信度最高的,一步一步往下判断,判断重合程度是否大于nms_thres,如果是则去除掉 - # max_detections.append(detections_class[0].unsqueeze(0)) - # if len(detections_class) == 1: - # break - # ious = bbox_iou(max_detections[-1], detections_class[1:]) - # detections_class = detections_class[1:][ious < nms_thres] - # # 堆叠 - # max_detections = torch.cat(max_detections).data - - # Add max detections to outputs - output[i] = max_detections if output[i] is None else torch.cat((output[i], max_detections)) - - if output[i] is not None: - output[i] = output[i].cpu().numpy() - box_xy, box_wh = (output[i][:, 0:2] + output[i][:, 2:4])/2, output[i][:, 2:4] - output[i][:, 0:2] - output[i][:, :4] = self.yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape, letterbox_image) - return output diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/solver.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/solver.py deleted file mode 100644 index 9ca71cbf2a6b621fa299245f831d4d723ba56977..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/ppg2mel/train/solver.py +++ /dev/null @@ -1,217 +0,0 @@ -import os -import sys -import abc -import math -import yaml -import torch -from torch.utils.tensorboard import SummaryWriter - -from .option import default_hparas -from utils.util import human_format, Timer -from utils.load_yaml import HpsYaml - - -class BaseSolver(): - ''' - Prototype Solver for all kinds of tasks - Arguments - config - yaml-styled config - paras - argparse outcome - mode - "train"/"test" - ''' - - def __init__(self, config, paras, mode="train"): - # General Settings - self.config = config # load from yaml file - self.paras = paras # command line args - self.mode = mode # 'train' or 'test' - for k, v in default_hparas.items(): - setattr(self, k, v) - self.device = torch.device('cuda') if self.paras.gpu and torch.cuda.is_available() \ - else torch.device('cpu') - - # Name experiment - self.exp_name = paras.name - if self.exp_name is None: - if 'exp_name' in self.config: - self.exp_name = self.config.exp_name - else: - # By default, exp is named after config file - self.exp_name = paras.config.split('/')[-1].replace('.yaml', '') - if mode == 'train': - self.exp_name += '_seed{}'.format(paras.seed) - - - if mode == 'train': - # Filepath setup - os.makedirs(paras.ckpdir, exist_ok=True) - self.ckpdir = os.path.join(paras.ckpdir, self.exp_name) - os.makedirs(self.ckpdir, exist_ok=True) - - # Logger settings - self.logdir = os.path.join(paras.logdir, self.exp_name) - self.log = SummaryWriter( - self.logdir, flush_secs=self.TB_FLUSH_FREQ) - self.timer = Timer() - - # Hyper-parameters - self.step = 0 - self.valid_step = config.hparas.valid_step - self.max_step = config.hparas.max_step - - self.verbose('Exp. name : {}'.format(self.exp_name)) - self.verbose('Loading data... 
large corpus may took a while.') - - # elif mode == 'test': - # # Output path - # os.makedirs(paras.outdir, exist_ok=True) - # self.ckpdir = os.path.join(paras.outdir, self.exp_name) - - # Load training config to get acoustic feat and build model - # self.src_config = HpsYaml(config.src.config) - # self.paras.load = config.src.ckpt - - # self.verbose('Evaluating result of tr. config @ {}'.format( - # config.src.config)) - - def backward(self, loss): - ''' - Standard backward step with self.timer and debugger - Arguments - loss - the loss to perform loss.backward() - ''' - self.timer.set() - loss.backward() - grad_norm = torch.nn.utils.clip_grad_norm_( - self.model.parameters(), self.GRAD_CLIP) - if math.isnan(grad_norm): - self.verbose('Error : grad norm is NaN @ step '+str(self.step)) - else: - self.optimizer.step() - self.timer.cnt('bw') - return grad_norm - - def load_ckpt(self): - ''' Load ckpt if --load option is specified ''' - print(self.paras) - if self.paras.load is not None: - if self.paras.warm_start: - self.verbose(f"Warm starting model from checkpoint {self.paras.load}.") - ckpt = torch.load( - self.paras.load, map_location=self.device if self.mode == 'train' - else 'cpu') - model_dict = ckpt['model'] - if "ignore_layers" in self.config.model and len(self.config.model.ignore_layers) > 0: - model_dict = {k:v for k, v in model_dict.items() - if k not in self.config.model.ignore_layers} - dummy_dict = self.model.state_dict() - dummy_dict.update(model_dict) - model_dict = dummy_dict - self.model.load_state_dict(model_dict) - else: - # Load weights - ckpt = torch.load( - self.paras.load, map_location=self.device if self.mode == 'train' - else 'cpu') - self.model.load_state_dict(ckpt['model']) - - # Load task-dependent items - if self.mode == 'train': - self.step = ckpt['global_step'] - self.optimizer.load_opt_state_dict(ckpt['optimizer']) - self.verbose('Load ckpt from {}, restarting at step {}'.format( - self.paras.load, self.step)) - else: - for k, v in ckpt.items(): - if type(v) is float: - metric, score = k, v - self.model.eval() - self.verbose('Evaluation target = {} (recorded {} = {:.2f} %)'.format( - self.paras.load, metric, score)) - - def verbose(self, msg): - ''' Verbose function for print information to stdout''' - if self.paras.verbose: - if type(msg) == list: - for m in msg: - print('[INFO]', m.ljust(100)) - else: - print('[INFO]', msg.ljust(100)) - - def progress(self, msg): - ''' Verbose function for updating progress on stdout (do not include newline) ''' - if self.paras.verbose: - sys.stdout.write("\033[K") # Clear line - print('[{}] {}'.format(human_format(self.step), msg), end='\r') - - def write_log(self, log_name, log_dict): - ''' - Write log to TensorBoard - log_name - Name of tensorboard variable - log_value - / Value of variable (e.g. 
dict of losses), passed if value = None - ''' - if type(log_dict) is dict: - log_dict = {key: val for key, val in log_dict.items() if ( - val is not None and not math.isnan(val))} - if log_dict is None: - pass - elif len(log_dict) > 0: - if 'align' in log_name or 'spec' in log_name: - img, form = log_dict - self.log.add_image( - log_name, img, global_step=self.step, dataformats=form) - elif 'text' in log_name or 'hyp' in log_name: - self.log.add_text(log_name, log_dict, self.step) - else: - self.log.add_scalars(log_name, log_dict, self.step) - - def save_checkpoint(self, f_name, metric, score, show_msg=True): - '''' - Ckpt saver - f_name - the name of ckpt file (w/o prefix) to store, overwrite if existed - score - The value of metric used to evaluate model - ''' - ckpt_path = os.path.join(self.ckpdir, f_name) - full_dict = { - "model": self.model.state_dict(), - "optimizer": self.optimizer.get_opt_state_dict(), - "global_step": self.step, - metric: score - } - - torch.save(full_dict, ckpt_path) - if show_msg: - self.verbose("Saved checkpoint (step = {}, {} = {:.2f}) and status @ {}". - format(human_format(self.step), metric, score, ckpt_path)) - - - # ----------------------------------- Abtract Methods ------------------------------------------ # - @abc.abstractmethod - def load_data(self): - ''' - Called by main to load all data - After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set) - No return value - ''' - raise NotImplementedError - - @abc.abstractmethod - def set_model(self): - ''' - Called by main to set models - After this call, model related attributes should be setup (e.g. self.l2_loss) - The followings MUST be setup - - self.model (torch.nn.Module) - - self.optimizer (src.Optimizer), - init. w/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas']) - Loading pre-trained model should also be performed here - No return value - ''' - raise NotImplementedError - - @abc.abstractmethod - def exec(self): - ''' - Called by main to execute training/inference - ''' - raise NotImplementedError diff --git a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/f0_utils.py b/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/f0_utils.py deleted file mode 100644 index 6bc25a882e866a05cfb9afc86397f6c82561a498..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese/utils/f0_utils.py +++ /dev/null @@ -1,124 +0,0 @@ -import logging -import numpy as np -import pyworld -from scipy.interpolate import interp1d -from scipy.signal import firwin, get_window, lfilter - -def compute_mean_std(lf0): - nonzero_indices = np.nonzero(lf0) - mean = np.mean(lf0[nonzero_indices]) - std = np.std(lf0[nonzero_indices]) - return mean, std - - -def compute_f0(wav, sr=16000, frame_period=10.0): - """Compute f0 from wav using pyworld harvest algorithm.""" - wav = wav.astype(np.float64) - f0, _ = pyworld.harvest( - wav, sr, frame_period=frame_period, f0_floor=80.0, f0_ceil=600.0) - return f0.astype(np.float32) - -def f02lf0(f0): - lf0 = f0.copy() - nonzero_indices = np.nonzero(f0) - lf0[nonzero_indices] = np.log(f0[nonzero_indices]) - return lf0 - -def get_converted_lf0uv( - wav, - lf0_mean_trg, - lf0_std_trg, - convert=True, -): - f0_src = compute_f0(wav) - if not convert: - uv, cont_lf0 = get_cont_lf0(f0_src) - lf0_uv = np.concatenate([cont_lf0[:, np.newaxis], uv[:, np.newaxis]], axis=1) - return lf0_uv - - lf0_src = f02lf0(f0_src) - lf0_mean_src, lf0_std_src = compute_mean_std(lf0_src) - - lf0_vc = 
lf0_src.copy() - lf0_vc[lf0_src > 0.0] = (lf0_src[lf0_src > 0.0] - lf0_mean_src) / lf0_std_src * lf0_std_trg + lf0_mean_trg - f0_vc = lf0_vc.copy() - f0_vc[lf0_src > 0.0] = np.exp(lf0_vc[lf0_src > 0.0]) - - uv, cont_lf0_vc = get_cont_lf0(f0_vc) - lf0_uv = np.concatenate([cont_lf0_vc[:, np.newaxis], uv[:, np.newaxis]], axis=1) - return lf0_uv - -def low_pass_filter(x, fs, cutoff=70, padding=True): - """FUNCTION TO APPLY LOW PASS FILTER - - Args: - x (ndarray): Waveform sequence - fs (int): Sampling frequency - cutoff (float): Cutoff frequency of low pass filter - - Return: - (ndarray): Low pass filtered waveform sequence - """ - - nyquist = fs // 2 - norm_cutoff = cutoff / nyquist - - # low cut filter - numtaps = 255 - fil = firwin(numtaps, norm_cutoff) - x_pad = np.pad(x, (numtaps, numtaps), 'edge') - lpf_x = lfilter(fil, 1, x_pad) - lpf_x = lpf_x[numtaps + numtaps // 2: -numtaps // 2] - - return lpf_x - - -def convert_continuos_f0(f0): - """CONVERT F0 TO CONTINUOUS F0 - - Args: - f0 (ndarray): original f0 sequence with the shape (T) - - Return: - (ndarray): continuous f0 with the shape (T) - """ - # get uv information as binary - uv = np.float32(f0 != 0) - - # get start and end of f0 - if (f0 == 0).all(): - logging.warn("all of the f0 values are 0.") - return uv, f0 - start_f0 = f0[f0 != 0][0] - end_f0 = f0[f0 != 0][-1] - - # padding start and end of f0 sequence - start_idx = np.where(f0 == start_f0)[0][0] - end_idx = np.where(f0 == end_f0)[0][-1] - f0[:start_idx] = start_f0 - f0[end_idx:] = end_f0 - - # get non-zero frame index - nz_frames = np.where(f0 != 0)[0] - - # perform linear interpolation - f = interp1d(nz_frames, f0[nz_frames]) - cont_f0 = f(np.arange(0, f0.shape[0])) - - return uv, cont_f0 - - -def get_cont_lf0(f0, frame_period=10.0, lpf=False): - uv, cont_f0 = convert_continuos_f0(f0) - if lpf: - cont_f0_lpf = low_pass_filter(cont_f0, int(1.0 / (frame_period * 0.001)), cutoff=20) - cont_lf0_lpf = cont_f0_lpf.copy() - nonzero_indices = np.nonzero(cont_lf0_lpf) - cont_lf0_lpf[nonzero_indices] = np.log(cont_f0_lpf[nonzero_indices]) - # cont_lf0_lpf = np.log(cont_f0_lpf) - return uv, cont_lf0_lpf - else: - nonzero_indices = np.nonzero(cont_f0) - cont_lf0 = cont_f0.copy() - cont_lf0[cont_f0>0] = np.log(cont_f0[cont_f0>0]) - return uv, cont_lf0 diff --git a/spaces/Kevin676/Clone-Your-Voice/synthesizer/utils/__init__.py b/spaces/Kevin676/Clone-Your-Voice/synthesizer/utils/__init__.py deleted file mode 100644 index 5ae3e48110e61231acf1e666e5fa76af5e4ebdcd..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/Clone-Your-Voice/synthesizer/utils/__init__.py +++ /dev/null @@ -1,45 +0,0 @@ -import torch - - -_output_ref = None -_replicas_ref = None - -def data_parallel_workaround(model, *input): - global _output_ref - global _replicas_ref - device_ids = list(range(torch.cuda.device_count())) - output_device = device_ids[0] - replicas = torch.nn.parallel.replicate(model, device_ids) - # input.shape = (num_args, batch, ...) - inputs = torch.nn.parallel.scatter(input, device_ids) - # inputs.shape = (num_gpus, num_args, batch/num_gpus, ...) 
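- # scatter() splits along dim 0 and may return fewer shards than devices when the
- # batch is small, so the replica list is trimmed to match before parallel_apply().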
- replicas = replicas[:len(inputs)] - outputs = torch.nn.parallel.parallel_apply(replicas, inputs) - y_hat = torch.nn.parallel.gather(outputs, output_device) - _output_ref = outputs - _replicas_ref = replicas - return y_hat - - -class ValueWindow(): - def __init__(self, window_size=100): - self._window_size = window_size - self._values = [] - - def append(self, x): - self._values = self._values[-(self._window_size - 1):] + [x] - - @property - def sum(self): - return sum(self._values) - - @property - def count(self): - return len(self._values) - - @property - def average(self): - return self.sum / max(1, self.count) - - def reset(self): - self._values = [] diff --git a/spaces/Komeng/Stock_Prediction/README.md b/spaces/Komeng/Stock_Prediction/README.md deleted file mode 100644 index 320dd0e308be5eb89bd433df10170740ac16eff4..0000000000000000000000000000000000000000 --- a/spaces/Komeng/Stock_Prediction/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Stock Prediction -emoji: 💩 -colorFrom: yellow -colorTo: blue -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: bigscience-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/KyanChen/FunSR/models/rcan.py b/spaces/KyanChen/FunSR/models/rcan.py deleted file mode 100644 index 76f661d79f679ade86940effcee389a31ef07c68..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/FunSR/models/rcan.py +++ /dev/null @@ -1,204 +0,0 @@ -import math -from argparse import Namespace - -import torch -import torch.nn as nn - -from models import register - - -def default_conv(in_channels, out_channels, kernel_size, bias=True): - return nn.Conv2d( - in_channels, out_channels, kernel_size, - padding=(kernel_size//2), bias=bias) - -class MeanShift(nn.Conv2d): - def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1): - super(MeanShift, self).__init__(3, 3, kernel_size=1) - std = torch.Tensor(rgb_std) - self.weight.data = torch.eye(3).view(3, 3, 1, 1) - self.weight.data.div_(std.view(3, 1, 1, 1)) - self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) - self.bias.data.div_(std) - self.requires_grad = False - -class Upsampler(nn.Sequential): - def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True): - - m = [] - if (scale & (scale - 1)) == 0: # Is scale = 2^n? 
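- # Power-of-two scales: stack log2(scale) blocks of conv(n_feat -> 4*n_feat) +
- # PixelShuffle(2), each doubling the spatial resolution.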
- for _ in range(int(math.log(scale, 2))): - m.append(conv(n_feat, 4 * n_feat, 3, bias)) - m.append(nn.PixelShuffle(2)) - if bn: m.append(nn.BatchNorm2d(n_feat)) - if act: m.append(act()) - elif scale == 3: - m.append(conv(n_feat, 9 * n_feat, 3, bias)) - m.append(nn.PixelShuffle(3)) - if bn: m.append(nn.BatchNorm2d(n_feat)) - if act: m.append(act()) - else: - raise NotImplementedError - - super(Upsampler, self).__init__(*m) - -## Channel Attention (CA) Layer -class CALayer(nn.Module): - def __init__(self, channel, reduction=16): - super(CALayer, self).__init__() - # global average pooling: feature --> point - self.avg_pool = nn.AdaptiveAvgPool2d(1) - # feature channel downscale and upscale --> channel weight - self.conv_du = nn.Sequential( - nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True), - nn.ReLU(inplace=True), - nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True), - nn.Sigmoid() - ) - - def forward(self, x): - y = self.avg_pool(x) - y = self.conv_du(y) - return x * y - -## Residual Channel Attention Block (RCAB) -class RCAB(nn.Module): - def __init__( - self, conv, n_feat, kernel_size, reduction, - bias=True, bn=False, act=nn.ReLU(True), res_scale=1): - - super(RCAB, self).__init__() - modules_body = [] - for i in range(2): - modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias)) - if bn: modules_body.append(nn.BatchNorm2d(n_feat)) - if i == 0: modules_body.append(act) - modules_body.append(CALayer(n_feat, reduction)) - self.body = nn.Sequential(*modules_body) - self.res_scale = res_scale - - def forward(self, x): - res = self.body(x) - #res = self.body(x).mul(self.res_scale) - res += x - return res - -## Residual Group (RG) -class ResidualGroup(nn.Module): - def __init__(self, conv, n_feat, kernel_size, reduction, act, res_scale, n_resblocks): - super(ResidualGroup, self).__init__() - modules_body = [] - modules_body = [ - RCAB( - conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1) \ - for _ in range(n_resblocks)] - modules_body.append(conv(n_feat, n_feat, kernel_size)) - self.body = nn.Sequential(*modules_body) - - def forward(self, x): - res = self.body(x) - res += x - return res - -## Residual Channel Attention Network (RCAN) -class RCAN(nn.Module): - def __init__(self, args, conv=default_conv): - super(RCAN, self).__init__() - self.args = args - - n_resgroups = args.n_resgroups - n_resblocks = args.n_resblocks - n_feats = args.n_feats - kernel_size = 3 - reduction = args.reduction - scale = args.scale[0] - act = nn.ReLU(True) - - # RGB mean for DIV2K - rgb_mean = (0.4488, 0.4371, 0.4040) - rgb_std = (1.0, 1.0, 1.0) - self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std) - - # define head module - modules_head = [conv(args.n_colors, n_feats, kernel_size)] - - # define body module - modules_body = [ - ResidualGroup( - conv, n_feats, kernel_size, reduction, act=act, res_scale=args.res_scale, n_resblocks=n_resblocks) \ - for _ in range(n_resgroups)] - - modules_body.append(conv(n_feats, n_feats, kernel_size)) - - self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1) - - self.head = nn.Sequential(*modules_head) - self.body = nn.Sequential(*modules_body) - - if args.no_upsampling: - self.out_dim = n_feats - else: - self.out_dim = args.n_colors - # define tail module - modules_tail = [ - Upsampler(conv, scale, n_feats, act=False), - conv(n_feats, args.n_colors, kernel_size)] - self.tail = nn.Sequential(*modules_tail) - - def forward(self, x): - #x = self.sub_mean(x) - x = self.head(x) - - 
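- # Long skip connection: the residual-group output is added back onto the shallow
- # head features before the (optional) upsampling tail.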
res = self.body(x) - res += x - - if self.args.no_upsampling: - x = res - else: - x = self.tail(res) - #x = self.add_mean(x) - return x - - def load_state_dict(self, state_dict, strict=False): - own_state = self.state_dict() - for name, param in state_dict.items(): - if name in own_state: - if isinstance(param, nn.Parameter): - param = param.data - try: - own_state[name].copy_(param) - except Exception: - if name.find('tail') >= 0: - print('Replace pre-trained upsampler to new one...') - else: - raise RuntimeError('While copying the parameter named {}, ' - 'whose dimensions in the model are {} and ' - 'whose dimensions in the checkpoint are {}.' - .format(name, own_state[name].size(), param.size())) - elif strict: - if name.find('tail') == -1: - raise KeyError('unexpected key "{}" in state_dict' - .format(name)) - - if strict: - missing = set(own_state.keys()) - set(state_dict.keys()) - if len(missing) > 0: - raise KeyError('missing keys in state_dict: "{}"'.format(missing)) - - -@register('rcan') -def make_rcan(n_resgroups=10, n_resblocks=20, n_feats=64, reduction=16, - scale=2, no_upsampling=False, rgb_range=1): - args = Namespace() - args.n_resgroups = n_resgroups - args.n_resblocks = n_resblocks - args.n_feats = n_feats - args.reduction = reduction - - args.scale = [scale] - args.no_upsampling = no_upsampling - - args.rgb_range = rgb_range - args.res_scale = 1 - args.n_colors = 3 - return RCAN(args) diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py b/spaces/KyanChen/RSPrompter/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py deleted file mode 100644 index 1b76e6b45bb9be2584f8b3eca2e5e1c0809249fa..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from typing import List - -import torch -import torch.nn.functional as F -from mmengine.structures import InstanceData, PixelData -from torch import Tensor - -from mmdet.evaluation.functional import INSTANCE_OFFSET -from mmdet.registry import MODELS -from mmdet.structures import SampleList -from mmdet.structures.mask import mask2bbox -from mmdet.utils import OptConfigType, OptMultiConfig -from .base_panoptic_fusion_head import BasePanopticFusionHead - - -@MODELS.register_module() -class MaskFormerFusionHead(BasePanopticFusionHead): - """MaskFormer fusion head which postprocesses results for panoptic - segmentation, instance segmentation and semantic segmentation.""" - - def __init__(self, - num_things_classes: int = 80, - num_stuff_classes: int = 53, - test_cfg: OptConfigType = None, - loss_panoptic: OptConfigType = None, - init_cfg: OptMultiConfig = None, - **kwargs): - super().__init__( - num_things_classes=num_things_classes, - num_stuff_classes=num_stuff_classes, - test_cfg=test_cfg, - loss_panoptic=loss_panoptic, - init_cfg=init_cfg, - **kwargs) - - def loss(self, **kwargs): - """MaskFormerFusionHead has no training loss.""" - return dict() - - def panoptic_postprocess(self, mask_cls: Tensor, - mask_pred: Tensor) -> PixelData: - """Panoptic segmengation inference. - - Args: - mask_cls (Tensor): Classfication outputs of shape - (num_queries, cls_out_channels) for a image. - Note `cls_out_channels` should includes - background. - mask_pred (Tensor): Mask outputs of shape - (num_queries, h, w) for a image. 
- - Returns: - :obj:`PixelData`: Panoptic segment result of shape \ - (h, w), each element in Tensor means: \ - ``segment_id = _cls + instance_id * INSTANCE_OFFSET``. - """ - object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8) - iou_thr = self.test_cfg.get('iou_thr', 0.8) - filter_low_score = self.test_cfg.get('filter_low_score', False) - - scores, labels = F.softmax(mask_cls, dim=-1).max(-1) - mask_pred = mask_pred.sigmoid() - - keep = labels.ne(self.num_classes) & (scores > object_mask_thr) - cur_scores = scores[keep] - cur_classes = labels[keep] - cur_masks = mask_pred[keep] - - cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks - - h, w = cur_masks.shape[-2:] - panoptic_seg = torch.full((h, w), - self.num_classes, - dtype=torch.int32, - device=cur_masks.device) - if cur_masks.shape[0] == 0: - # We didn't detect any mask :( - pass - else: - cur_mask_ids = cur_prob_masks.argmax(0) - instance_id = 1 - for k in range(cur_classes.shape[0]): - pred_class = int(cur_classes[k].item()) - isthing = pred_class < self.num_things_classes - mask = cur_mask_ids == k - mask_area = mask.sum().item() - original_area = (cur_masks[k] >= 0.5).sum().item() - - if filter_low_score: - mask = mask & (cur_masks[k] >= 0.5) - - if mask_area > 0 and original_area > 0: - if mask_area / original_area < iou_thr: - continue - - if not isthing: - # different stuff regions of same class will be - # merged here, and stuff share the instance_id 0. - panoptic_seg[mask] = pred_class - else: - panoptic_seg[mask] = ( - pred_class + instance_id * INSTANCE_OFFSET) - instance_id += 1 - - return PixelData(sem_seg=panoptic_seg[None]) - - def semantic_postprocess(self, mask_cls: Tensor, - mask_pred: Tensor) -> PixelData: - """Semantic segmengation postprocess. - - Args: - mask_cls (Tensor): Classfication outputs of shape - (num_queries, cls_out_channels) for a image. - Note `cls_out_channels` should includes - background. - mask_pred (Tensor): Mask outputs of shape - (num_queries, h, w) for a image. - - Returns: - :obj:`PixelData`: Semantic segment result. - """ - # TODO add semantic segmentation result - raise NotImplementedError - - def instance_postprocess(self, mask_cls: Tensor, - mask_pred: Tensor) -> InstanceData: - """Instance segmengation postprocess. - - Args: - mask_cls (Tensor): Classfication outputs of shape - (num_queries, cls_out_channels) for a image. - Note `cls_out_channels` should includes - background. - mask_pred (Tensor): Mask outputs of shape - (num_queries, h, w) for a image. - - Returns: - :obj:`InstanceData`: Instance segmentation results. - - - scores (Tensor): Classification scores, has a shape - (num_instance, ) - - labels (Tensor): Labels of bboxes, has a shape - (num_instances, ). - - bboxes (Tensor): Has a shape (num_instances, 4), - the last dimension 4 arrange as (x1, y1, x2, y2). - - masks (Tensor): Has a shape (num_instances, H, W). 
- """ - max_per_image = self.test_cfg.get('max_per_image', 100) - num_queries = mask_cls.shape[0] - # shape (num_queries, num_class) - scores = F.softmax(mask_cls, dim=-1)[:, :-1] - # shape (num_queries * num_class, ) - labels = torch.arange(self.num_classes, device=mask_cls.device).\ - unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) - scores_per_image, top_indices = scores.flatten(0, 1).topk( - max_per_image, sorted=False) - labels_per_image = labels[top_indices] - - query_indices = top_indices // self.num_classes - mask_pred = mask_pred[query_indices] - - # extract things - is_thing = labels_per_image < self.num_things_classes - scores_per_image = scores_per_image[is_thing] - labels_per_image = labels_per_image[is_thing] - mask_pred = mask_pred[is_thing] - - mask_pred_binary = (mask_pred > 0).float() - mask_scores_per_image = (mask_pred.sigmoid() * - mask_pred_binary).flatten(1).sum(1) / ( - mask_pred_binary.flatten(1).sum(1) + 1e-6) - det_scores = scores_per_image * mask_scores_per_image - mask_pred_binary = mask_pred_binary.bool() - bboxes = mask2bbox(mask_pred_binary) - - results = InstanceData() - results.bboxes = bboxes - results.labels = labels_per_image - results.scores = det_scores - results.masks = mask_pred_binary - return results - - def predict(self, - mask_cls_results: Tensor, - mask_pred_results: Tensor, - batch_data_samples: SampleList, - rescale: bool = False, - **kwargs) -> List[dict]: - """Test segment without test-time aumengtation. - - Only the output of last decoder layers was used. - - Args: - mask_cls_results (Tensor): Mask classification logits, - shape (batch_size, num_queries, cls_out_channels). - Note `cls_out_channels` should includes background. - mask_pred_results (Tensor): Mask logits, shape - (batch_size, num_queries, h, w). - batch_data_samples (List[:obj:`DetDataSample`]): The Data - Samples. It usually includes information such as - `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. - rescale (bool): If True, return boxes in - original image space. Default False. - - Returns: - list[dict]: Instance segmentation \ - results and panoptic segmentation results for each \ - image. - - .. code-block:: none - - [ - { - 'pan_results': PixelData, - 'ins_results': InstanceData, - # semantic segmentation results are not supported yet - 'sem_results': PixelData - }, - ... - ] - """ - batch_img_metas = [ - data_sample.metainfo for data_sample in batch_data_samples - ] - panoptic_on = self.test_cfg.get('panoptic_on', True) - semantic_on = self.test_cfg.get('semantic_on', False) - instance_on = self.test_cfg.get('instance_on', False) - assert not semantic_on, 'segmantic segmentation '\ - 'results are not supported yet.' 
- - results = [] - for mask_cls_result, mask_pred_result, meta in zip( - mask_cls_results, mask_pred_results, batch_img_metas): - # remove padding - img_height, img_width = meta['img_shape'][:2] - mask_pred_result = mask_pred_result[:, :img_height, :img_width] - - if rescale: - # return result in original resolution - ori_height, ori_width = meta['ori_shape'][:2] - mask_pred_result = F.interpolate( - mask_pred_result[:, None], - size=(ori_height, ori_width), - mode='bilinear', - align_corners=False)[:, 0] - - result = dict() - if panoptic_on: - pan_results = self.panoptic_postprocess( - mask_cls_result, mask_pred_result) - result['pan_results'] = pan_results - - if instance_on: - ins_results = self.instance_postprocess( - mask_cls_result, mask_pred_result) - result['ins_results'] = ins_results - - if semantic_on: - sem_results = self.semantic_postprocess( - mask_cls_result, mask_pred_result) - result['sem_results'] = sem_results - - results.append(result) - - return results diff --git a/spaces/Laihiujin/OneFormer/oneformer/utils/events.py b/spaces/Laihiujin/OneFormer/oneformer/utils/events.py deleted file mode 100644 index d1d27ac6ecef656f1aa86649ceacb54470765821..0000000000000000000000000000000000000000 --- a/spaces/Laihiujin/OneFormer/oneformer/utils/events.py +++ /dev/null @@ -1,120 +0,0 @@ -import os -import wandb -from detectron2.utils import comm -from detectron2.utils.events import EventWriter, get_event_storage - - -def setup_wandb(cfg, args): - if comm.is_main_process(): - init_args = { - k.lower(): v - for k, v in cfg.WANDB.items() - if isinstance(k, str) and k not in ["config"] - } - # only include most related part to avoid too big table - # TODO: add configurable params to select which part of `cfg` should be saved in config - if "config_exclude_keys" in init_args: - init_args["config"] = cfg - init_args["config"]["cfg_file"] = args.config_file - else: - init_args["config"] = { - "model": cfg.MODEL, - "solver": cfg.SOLVER, - "cfg_file": args.config_file, - } - if ("name" not in init_args) or (init_args["name"] is None): - init_args["name"] = os.path.basename(args.config_file) - else: - init_args["name"] = init_args["name"] + '_' + os.path.basename(args.config_file) - wandb.init(**init_args) - - -class BaseRule(object): - def __call__(self, target): - return target - - -class IsIn(BaseRule): - def __init__(self, keyword: str): - self.keyword = keyword - - def __call__(self, target): - return self.keyword in target - - -class Prefix(BaseRule): - def __init__(self, keyword: str): - self.keyword = keyword - - def __call__(self, target): - return "/".join([self.keyword, target]) - - -class WandbWriter(EventWriter): - """ - Write all scalars to a tensorboard file. - """ - - def __init__(self): - """ - Args: - log_dir (str): the directory to save the output events - kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)` - """ - self._last_write = -1 - self._group_rules = [ - (IsIn("/"), BaseRule()), - (IsIn("loss"), Prefix("train")), - ] - - def write(self): - - storage = get_event_storage() - - def _group_name(scalar_name): - for (rule, op) in self._group_rules: - if rule(scalar_name): - return op(scalar_name) - return scalar_name - - stats = { - _group_name(name): scalars[0] - for name, scalars in storage.latest().items() - if scalars[1] > self._last_write - } - if len(stats) > 0: - self._last_write = max([v[1] for k, v in storage.latest().items()]) - - # storage.put_{image,histogram} is only meant to be used by - # tensorboard writer. 
So we access its internal fields directly from here. - if len(storage._vis_data) >= 1: - stats["image"] = [ - wandb.Image(img, caption=img_name) - for img_name, img, step_num in storage._vis_data - ] - # Storage stores all image data and rely on this writer to clear them. - # As a result it assumes only one writer will use its image data. - # An alternative design is to let storage store limited recent - # data (e.g. only the most recent image) that all writers can access. - # In that case a writer may not see all image data if its period is long. - storage.clear_images() - - if len(storage._histograms) >= 1: - - def create_bar(tag, bucket_limits, bucket_counts, **kwargs): - data = [ - [label, val] for (label, val) in zip(bucket_limits, bucket_counts) - ] - table = wandb.Table(data=data, columns=["label", "value"]) - return wandb.plot.bar(table, "label", "value", title=tag) - - stats["hist"] = [create_bar(**params) for params in storage._histograms] - - storage.clear_histograms() - - if len(stats) == 0: - return - wandb.log(stats, step=storage.iter) - - def close(self): - wandb.finish() \ No newline at end of file diff --git a/spaces/LarissaHung/text_generator/app.py b/spaces/LarissaHung/text_generator/app.py deleted file mode 100644 index 0783c314135c5755c74e999166ab87ff31bd6b93..0000000000000000000000000000000000000000 --- a/spaces/LarissaHung/text_generator/app.py +++ /dev/null @@ -1,14 +0,0 @@ -#libraries -import gradio as gr -from gradio.mix import Parallel - -title="My First Text Generator" -description="Input text." - -#variables, functions and parameters -model1 = gr.Interface.load("huggingface/gpt2") -model2 = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B") -model3 = gr.Interface.load("huggingface/EleutherAI/gpt-neo-1.3B") - -#functions, parameters and variables -gr.Parallel(model1, model2, model3,title=title,description=description).launch() \ No newline at end of file diff --git a/spaces/LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce/lib/infer_pack/models.py b/spaces/LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce/lib/infer_pack/models.py deleted file mode 100644 index 3665d03bc0514a6ed07d3372ea24717dae1e0a65..0000000000000000000000000000000000000000 --- a/spaces/LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce/lib/infer_pack/models.py +++ /dev/null @@ -1,1142 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - 
hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = 
modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv 
signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - 
resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - 
self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # 
self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - 
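        # The no-F0 variant decodes with the plain Generator (no NSF sine
        # excitation), since there is no pitch signal to drive a harmonic source.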
self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", 
self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, 
modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Logic06183/ML_Classifier_Hub/app.py b/spaces/Logic06183/ML_Classifier_Hub/app.py deleted file mode 100644 index 812941be8bb18d46e0ed4884b525bc677f33c791..0000000000000000000000000000000000000000 --- a/spaces/Logic06183/ML_Classifier_Hub/app.py +++ /dev/null @@ -1,141 +0,0 @@ -import streamlit as st -import numpy as np -import matplotlib.pyplot as plt -from sklearn import datasets -from sklearn.model_selection import train_test_split -from sklearn.decomposition import PCA -from sklearn.svm import SVC -from sklearn.neighbors import KNeighborsClassifier -from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier -from sklearn.linear_model import LogisticRegression -from sklearn.metrics import accuracy_score -from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler - -st.title('Streamlit Example') - -st.write(""" -# Explore different classifier and datasets -Which one is the best? 
-""") - -dataset_name = st.sidebar.selectbox( - 'Select Dataset', - ('Breast Cancer', 'Wine', 'Digits') -) - -st.write(f"## {dataset_name} Dataset") - -classifier_name = st.sidebar.selectbox( - 'Select classifier', - ('KNN', 'SVM', 'Random Forest', 'Gradient Boosting', 'Logistic Regression') -) - -scaler_name = st.sidebar.selectbox( - 'Select feature scaling method', - ('None', 'Standard Scaler', 'MinMax Scaler', 'Robust Scaler') -) - -def get_dataset(name): - data = None - if name == 'Wine': - data = datasets.load_wine() - elif name == 'Breast Cancer': - data = datasets.load_breast_cancer() - else: # Digits - data = datasets.load_digits() - X = data.data - y = data.target - return X, y - -X, y = get_dataset(dataset_name) -st.write('Shape of dataset:', X.shape) -st.write('number of classes:', len(np.unique(y))) - -def apply_scaling(scaler_name, X): - if scaler_name == 'Standard Scaler': - scaler = StandardScaler() - elif scaler_name == 'MinMax Scaler': - scaler = MinMaxScaler() - elif scaler_name == 'Robust Scaler': - scaler = RobustScaler() - else: - return X - - X_scaled = scaler.fit_transform(X) - return X_scaled - -X = apply_scaling(scaler_name, X) - -def add_parameter_ui(clf_name): - params = dict() - if clf_name == 'SVM': - C = st.sidebar.slider('C', 0.01, 10.0) - params['C'] = C - elif clf_name == 'KNN': - K = st.sidebar.slider('K', 1, 15) - params['K'] = K - elif clf_name == 'Random Forest': - max_depth = st.sidebar.slider('max_depth', 2, 15) - params['max_depth'] = max_depth - n_estimators = st.sidebar.slider('n_estimators', 1, 100) - params['n_estimators'] = n_estimators - elif clf_name == 'Gradient Boosting': - max_depth = st.sidebar.slider('max_depth', 2, 15) - params['max_depth'] = max_depth - n_estimators = st.sidebar.slider('n_estimators', 1, 100) - params['n_estimators'] = n_estimators - else: # Logistic Regression - C = st.sidebar.slider('C', 0.01, 10.0) - params['C'] = C - return params - -params = add_parameter_ui(classifier_name) - -def get_classifier(clf_name, params): - clf = None - if clf_name == 'SVM': - clf = SVC(C=params['C']) - elif clf_name == 'KNN': - clf = KNeighborsClassifier(n_neighbors=params['K']) - elif clf_name == 'Random Forest': - clf = RandomForestClassifier(n_estimators=params['n_estimators'], - max_depth=params['max_depth'], random_state=1234) - elif clf_name == 'Gradient Boosting': - clf = GradientBoostingClassifier(n_estimators=params['n_estimators'], - max_depth=params['max_depth'], random_state=1234) - else: # Logistic Regression - clf = LogisticRegression(C=params['C']) - return clf - -clf = get_classifier(classifier_name, params) - -#### CLASSIFICATION #### - -X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234) - -clf.fit(X_train, y_train) -y_pred = clf.predict(X_test) - -acc = accuracy_score(y_test, y_pred) - -st.write(f'Classifier = {classifier_name}') -st.write(f'Accuracy =', acc) - -#### PLOT DATASET #### -# Project the data onto the 2 primary principal components -pca = PCA(2) -X_projected = pca.fit_transform(X) - -x1 = X_projected[:, 0] -x2 = X_projected[:, 1] - -fig = plt.figure() -plt.scatter(x1, x2, - c=y, alpha=0.8, - cmap='viridis') - -plt.xlabel('Principal Component 1') -plt.ylabel('Principal Component 2') -plt.colorbar() - -st.pyplot(fig) diff --git a/spaces/LucasCodeBreak/MusicGen/tests/data/test_audio.py b/spaces/LucasCodeBreak/MusicGen/tests/data/test_audio.py deleted file mode 100644 index 40c0d5ed69eff92a766dc6d176e532f0df6c2b5e..0000000000000000000000000000000000000000 --- 
a/spaces/LucasCodeBreak/MusicGen/tests/data/test_audio.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from itertools import product -import random - -import numpy as np -import torch -import torchaudio - -from audiocraft.data.audio import audio_info, audio_read, audio_write, _av_read - -from ..common_utils import TempDirMixin, get_white_noise, save_wav - - -class TestInfo(TempDirMixin): - - def test_info_mp3(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - wav = get_white_noise(ch, int(sample_rate * duration)) - path = self.get_temp_path('sample_wav.mp3') - save_wav(path, wav, sample_rate) - info = audio_info(path) - assert info.sample_rate == sample_rate - assert info.channels == ch - # we cannot trust torchaudio for num_frames, so we don't check - - def _test_info_format(self, ext: str): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'sample_wav{ext}') - save_wav(path, wav, sample_rate) - info = audio_info(path) - assert info.sample_rate == sample_rate - assert info.channels == ch - assert np.isclose(info.duration, duration, atol=1e-5) - - def test_info_wav(self): - self._test_info_format('.wav') - - def test_info_flac(self): - self._test_info_format('.flac') - - def test_info_ogg(self): - self._test_info_format('.ogg') - - def test_info_m4a(self): - # TODO: generate m4a file programmatically - # self._test_info_format('.m4a') - pass - - -class TestRead(TempDirMixin): - - def test_read_full_wav(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - read_wav, read_sr = audio_read(path) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == wav.shape[1] - assert torch.allclose(read_wav, wav, rtol=1e-03, atol=1e-04) - - def test_read_partial_wav(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - read_duration = torch.rand(1).item() - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - read_frames = int(sample_rate * read_duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - read_wav, read_sr = audio_read(path, 0, read_duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == read_frames - assert torch.allclose(read_wav[..., 0:read_frames], wav[..., 0:read_frames], rtol=1e-03, atol=1e-04) - - def test_read_seek_time_wav(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - read_duration = 1. 
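            # Seeking into a 1 s file with a 1 s read window: without padding,
            # audio_read returns only the frames remaining after the seek point
            # (n_frames - seek_frames).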
- for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - seek_time = torch.rand(1).item() - read_wav, read_sr = audio_read(path, seek_time, read_duration) - seek_frames = int(sample_rate * seek_time) - expected_frames = n_frames - seek_frames - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == expected_frames - assert torch.allclose(read_wav, wav[..., seek_frames:], rtol=1e-03, atol=1e-04) - - def test_read_seek_time_wav_padded(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - read_duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - read_frames = int(sample_rate * read_duration) - wav = get_white_noise(ch, n_frames).clamp(-0.99, 0.99) - path = self.get_temp_path('sample_wav.wav') - save_wav(path, wav, sample_rate) - seek_time = torch.rand(1).item() - seek_frames = int(sample_rate * seek_time) - expected_frames = n_frames - seek_frames - read_wav, read_sr = audio_read(path, seek_time, read_duration, pad=True) - expected_pad_wav = torch.zeros(wav.shape[0], read_frames - expected_frames) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[1] == read_frames - assert torch.allclose(read_wav[..., :expected_frames], wav[..., seek_frames:], rtol=1e-03, atol=1e-04) - assert torch.allclose(read_wav[..., expected_frames:], expected_pad_wav) - - -class TestAvRead(TempDirMixin): - - def test_avread_seek_base(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 2. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'reference_a_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - for _ in range(100): - # seek will always load a full duration segment in the file - seek_time = random.uniform(0.0, 1.0) - seek_duration = random.uniform(0.001, 1.0) - read_wav, read_sr = _av_read(path, seek_time, seek_duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == int(seek_duration * sample_rate) - - def test_avread_seek_partial(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'reference_b_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - for _ in range(100): - # seek will always load a partial segment - seek_time = random.uniform(0.5, 1.) - seek_duration = 1. - expected_num_frames = n_frames - int(seek_time * sample_rate) - read_wav, read_sr = _av_read(path, seek_time, seek_duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == expected_num_frames - - def test_avread_seek_outofbound(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(sample_rate * duration) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path(f'reference_c_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - seek_time = 1.5 - read_wav, read_sr = _av_read(path, seek_time, 1.) 
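            # Seeking past the end of the file (1.5 s into a 1 s clip) should yield
            # an empty waveform: same channel count, zero frames.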
- assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == 0 - - def test_avread_seek_edge(self): - sample_rates = [8000, 16_000] - # some of these values will have - # int(((frames - 1) / sample_rate) * sample_rate) != (frames - 1) - n_frames = [1000, 1001, 1002] - channels = [1, 2] - for sample_rate, ch, frames in product(sample_rates, channels, n_frames): - duration = frames / sample_rate - wav = get_white_noise(ch, frames) - path = self.get_temp_path(f'reference_d_{sample_rate}_{ch}.wav') - save_wav(path, wav, sample_rate) - seek_time = (frames - 1) / sample_rate - seek_frames = int(seek_time * sample_rate) - read_wav, read_sr = _av_read(path, seek_time, duration) - assert read_sr == sample_rate - assert read_wav.shape[0] == wav.shape[0] - assert read_wav.shape[-1] == (frames - seek_frames) - - -class TestAudioWrite(TempDirMixin): - - def test_audio_write_wav(self): - torch.manual_seed(1234) - sample_rates = [8000, 16_000] - n_frames = [1000, 1001, 1002] - channels = [1, 2] - strategies = ["peak", "clip", "rms"] - formats = ["wav", "mp3"] - for sample_rate, ch, frames in product(sample_rates, channels, n_frames): - for format_, strategy in product(formats, strategies): - wav = get_white_noise(ch, frames) - path = self.get_temp_path(f'pred_{sample_rate}_{ch}') - audio_write(path, wav, sample_rate, format_, strategy=strategy) - read_wav, read_sr = torchaudio.load(f'{path}.{format_}') - if format_ == "wav": - assert read_wav.shape == wav.shape - - if format_ == "wav" and strategy in ["peak", "rms"]: - rescaled_read_wav = read_wav / read_wav.abs().max() * wav.abs().max() - # for a Gaussian, the typical max scale will be less than ~5x the std. - # The error when writing to disk will ~ 1/2**15, and when rescaling, 5x that. - # For RMS target, rescaling leaves more headroom by default, leading - # to a 20x rescaling typically - atol = (5 if strategy == "peak" else 20) / 2**15 - delta = (rescaled_read_wav - wav).abs().max() - assert torch.allclose(wav, rescaled_read_wav, rtol=0, atol=atol), (delta, atol) - formats = ["wav"] # faster unit tests diff --git a/spaces/MLVKU/Human_Object_Interaction/hotr/data/transforms/transforms.py b/spaces/MLVKU/Human_Object_Interaction/hotr/data/transforms/transforms.py deleted file mode 100644 index cf41a4dc07d9e0fbd77eb32550c087b48e7cdeed..0000000000000000000000000000000000000000 --- a/spaces/MLVKU/Human_Object_Interaction/hotr/data/transforms/transforms.py +++ /dev/null @@ -1,387 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. -""" -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from hotr.util.box_ops import box_xyxy_to_cxcywh -from hotr.util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? 
- target["size"] = torch.tensor([h, w]) - max_size = torch.as_tensor([w, h], dtype=torch.float32) - - fields = ["labels", "area", "iscrowd"] # add additional fields - if "inst_actions" in target.keys(): - fields.append("inst_actions") - - if "boxes" in target: - boxes = target["boxes"] - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "pair_boxes" in target or ("sub_boxes" in target and "obj_boxes" in target): - if "pair_boxes" in target: - pair_boxes = target["pair_boxes"] - hboxes = pair_boxes[:, :4] - oboxes = pair_boxes[:, 4:] - if ("sub_boxes" in target and "obj_boxes" in target): - hboxes = target["sub_boxes"] - oboxes = target["obj_boxes"] - - cropped_hboxes = hboxes - torch.as_tensor([j, i, j, i]) - cropped_hboxes = torch.min(cropped_hboxes.reshape(-1, 2, 2), max_size) - cropped_hboxes = cropped_hboxes.clamp(min=0) - hboxes = cropped_hboxes.reshape(-1, 4) - - obj_mask = (oboxes[:, 0] != -1) - if obj_mask.sum() != 0: - cropped_oboxes = oboxes[obj_mask] - torch.as_tensor([j, i, j, i]) - cropped_oboxes = torch.min(cropped_oboxes.reshape(-1, 2, 2), max_size) - cropped_oboxes = cropped_oboxes.clamp(min=0) - oboxes[obj_mask] = cropped_oboxes.reshape(-1, 4) - else: - cropped_oboxes = oboxes - - cropped_pair_boxes = torch.cat([hboxes, oboxes], dim=-1) - target["pair_boxes"] = cropped_pair_boxes - pair_fields = ["pair_boxes", "pair_actions", "pair_targets"] - - if "masks" in target: - # FIXME should we update the area here if there are no boxes[? - target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - if field in target: # added this because there is no 'iscrowd' field in v-coco dataset - target[field] = target[field][keep] - - # remove elements that have redundant area - if "boxes" in target and "labels" in target: - cropped_boxes = target['boxes'] - cropped_labels = target['labels'] - - cnr, keep_idx = [], [] - for idx, (cropped_box, cropped_lbl) in enumerate(zip(cropped_boxes, cropped_labels)): - if str((cropped_box, cropped_lbl)) not in cnr: - cnr.append(str((cropped_box, cropped_lbl))) - keep_idx.append(True) - else: keep_idx.append(False) - - for field in fields: - if field in target: - target[field] = target[field][keep_idx] - - # remove elements for which pair boxes have zero area - if "pair_boxes" in target: - cropped_hboxes = target["pair_boxes"][:, :4].reshape(-1, 2, 2) - cropped_oboxes = target["pair_boxes"][:, 4:].reshape(-1, 2, 2) - keep_h = torch.all(cropped_hboxes[:, 1, :] > cropped_hboxes[:, 0, :], dim=1) - keep_o = torch.all(cropped_oboxes[:, 1, :] > cropped_oboxes[:, 0, :], dim=1) - not_empty_o = torch.all(target["pair_boxes"][:, 4:] >= 0, dim=1) - discard_o = (~keep_o) & not_empty_o - if (discard_o).sum() > 0: - target["pair_boxes"][discard_o, 4:] = -1 - - for pair_field in pair_fields: - 
target[pair_field] = target[pair_field][keep_h] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - target["boxes"] = boxes - - if "pair_boxes" in target: - pair_boxes = target["pair_boxes"] - hboxes = pair_boxes[:, :4] - oboxes = pair_boxes[:, 4:] - - # human flip - hboxes = hboxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - - # object flip - obj_mask = (oboxes[:, 0] != -1) - if obj_mask.sum() != 0: - o_tmp = oboxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - oboxes[obj_mask] = o_tmp[obj_mask] - - pair_boxes = torch.cat([hboxes, oboxes], dim=-1) - target["pair_boxes"] = pair_boxes - - if "masks" in target: - target['masks'] = target['masks'].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - size = get_size(image.size, size, max_size) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "pair_boxes" in target: - hboxes = target["pair_boxes"][:, :4] - scaled_hboxes = hboxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - hboxes = scaled_hboxes - - oboxes = target["pair_boxes"][:, 4:] - obj_mask = (oboxes[:, 0] != -1) - if obj_mask.sum() != 0: - scaled_oboxes = oboxes[obj_mask] * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - oboxes[obj_mask] = scaled_oboxes - - target["pair_boxes"] = torch.cat([hboxes, oboxes], dim=-1) - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target['masks'] = interpolate( - target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - - return rescaled_image, target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? 
- target["size"] = torch.tensor(padded_image[::-1]) - if "masks" in target: - target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int): - self.min_size = min_size - self.max_size = max_size - - def __call__(self, img: PIL.Image.Image, target: dict): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - return crop(img, target, region) - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - - if "pair_boxes" in target: - hboxes = target["pair_boxes"][:, :4] - hboxes = box_xyxy_to_cxcywh(hboxes) - hboxes = hboxes / torch.tensor([w, h, w, h], dtype=torch.float32) - - oboxes = target["pair_boxes"][:, 4:] - obj_mask = (oboxes[:, 0] != -1) - if obj_mask.sum() != 0: - oboxes[obj_mask] = box_xyxy_to_cxcywh(oboxes[obj_mask]) - oboxes[obj_mask] = oboxes[obj_mask] / torch.tensor([w, h, 
w, h], dtype=torch.float32) - - pair_boxes = torch.cat([hboxes, oboxes], dim=-1) - target["pair_boxes"] = pair_boxes - - return image, target - -class ColorJitter(object): - def __init__(self, brightness=0, contrast=0, saturatio=0, hue=0): - self.color_jitter = T.ColorJitter(brightness, contrast, saturatio, hue) - - def __call__(self, img, target): - return self.color_jitter(img), target - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " {0}".format(t) - format_string += "\n)" - return format_string diff --git a/spaces/MechaXYZ/Audio-to-Text/app.py b/spaces/MechaXYZ/Audio-to-Text/app.py deleted file mode 100644 index e5b88e53c45c87cc26c020ed485739b9a710b5b4..0000000000000000000000000000000000000000 --- a/spaces/MechaXYZ/Audio-to-Text/app.py +++ /dev/null @@ -1,109 +0,0 @@ -import torch - -import gradio as gr -import pytube as pt -from transformers import pipeline - -MODEL_NAME = "openai/whisper-large-v2" - -device = 0 if torch.cuda.is_available() else "cpu" - -pipe = pipeline( - task="automatic-speech-recognition", - model=MODEL_NAME, - chunk_length_s=30, - device=device, - # return_timestamps=True -) - - -all_special_ids = pipe.tokenizer.all_special_ids -transcribe_token_id = all_special_ids[-5] -translate_token_id = all_special_ids[-6] - - -def transcribe(microphone, file_upload, task): - warn_output = "" - if (microphone is not None) and (file_upload is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. " - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - - elif (microphone is None) and (file_upload is None): - return "ERROR: You have to either use the microphone or upload an audio file" - - file = microphone if microphone is not None else file_upload - - pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="transcribe" else translate_token_id]] - text = pipe(file,return_timestamps=True)["text"] - - return warn_output + text - - -def _return_yt_html_embed(yt_url): - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe> </center>' - "
      " - ) - return HTML_str - - -def yt_transcribe(yt_url, task): - yt = pt.YouTube(yt_url) - html_embed_str = _return_yt_html_embed(yt_url) - stream = yt.streams.filter(only_audio=True)[0] - stream.download(filename="audio.mp3") - - pipe.model.config.forced_decoder_ids = [[2, transcribe_token_id if task=="transcribe" else translate_token_id]] - - text = pipe("audio.mp3")["text"] - - return html_embed_str, text - - -demo = gr.Blocks() - -mf_transcribe = gr.Interface( - fn=transcribe, - inputs=[ - gr.inputs.Audio(source="microphone", type="filepath", optional=True), - gr.inputs.Audio(source="upload", type="filepath", optional=True), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe"), - ], - outputs="text", - layout="horizontal", - theme="huggingface", - title="Audio-to-Text Playground: Transcribe Audio", - description=( - "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the" - f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files" - " of arbitrary length." - ), - allow_flagging="never", -) - -yt_transcribe = gr.Interface( - fn=yt_transcribe, - inputs=[ - gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL"), - gr.inputs.Radio(["transcribe", "translate"], label="Task", default="transcribe") - ], - outputs=["html", "text"], - layout="horizontal", - theme="huggingface", - title="Audio-to-Text Playground: Transcribe YouTube", - description=( - "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint" - f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of" - " arbitrary length." - ), - allow_flagging="never", -) - -with demo: - gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"]) - -demo.launch(enable_queue=True) - diff --git a/spaces/Mountchicken/MAERec-Gradio/mmocr/apis/inferencers/base_mmocr_inferencer.py b/spaces/Mountchicken/MAERec-Gradio/mmocr/apis/inferencers/base_mmocr_inferencer.py deleted file mode 100644 index 02ac643d9ffea8dddde098aa02038ebfdc1cce25..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/mmocr/apis/inferencers/base_mmocr_inferencer.py +++ /dev/null @@ -1,405 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import os.path as osp -from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union - -import mmcv -import mmengine -import numpy as np -from mmengine.dataset import Compose -from mmengine.infer.infer import BaseInferencer, ModelType -from mmengine.model.utils import revert_sync_batchnorm -from mmengine.registry import init_default_scope -from mmengine.structures import InstanceData -from rich.progress import track -from torch import Tensor - -from mmocr.utils import ConfigType - -InstanceList = List[InstanceData] -InputType = Union[str, np.ndarray] -InputsType = Union[InputType, Sequence[InputType]] -PredType = Union[InstanceData, InstanceList] -ImgType = Union[np.ndarray, Sequence[np.ndarray]] -ResType = Union[Dict, List[Dict], InstanceData, List[InstanceData]] - - -class BaseMMOCRInferencer(BaseInferencer): - """Base inferencer. - - Args: - model (str, optional): Path to the config file or the model name - defined in metafile. For example, it could be - "dbnet_resnet18_fpnc_1200e_icdar2015" or - "configs/textdet/dbnet/dbnet_resnet18_fpnc_1200e_icdar2015.py". 
- If model is not specified, user must provide the - `weights` saved by MMEngine which contains the config string. - Defaults to None. - weights (str, optional): Path to the checkpoint. If it is not specified - and model is a model name of metafile, the weights will be loaded - from metafile. Defaults to None. - device (str, optional): Device to run inference. If None, the available - device will be automatically used. Defaults to None. - scope (str, optional): The scope of the model. Defaults to "mmocr". - """ - - preprocess_kwargs: set = set() - forward_kwargs: set = set() - visualize_kwargs: set = { - 'return_vis', 'show', 'wait_time', 'draw_pred', 'pred_score_thr', - 'save_vis' - } - postprocess_kwargs: set = { - 'print_result', 'return_datasample', 'save_pred' - } - loading_transforms: list = ['LoadImageFromFile', 'LoadImageFromNDArray'] - - def __init__(self, - model: Union[ModelType, str, None] = None, - weights: Optional[str] = None, - device: Optional[str] = None, - scope: str = 'mmocr') -> None: - # A global counter tracking the number of images given in the form - # of ndarray, for naming the output images - self.num_unnamed_imgs = 0 - init_default_scope(scope) - super().__init__( - model=model, weights=weights, device=device, scope=scope) - self.model = revert_sync_batchnorm(self.model) - - def preprocess(self, inputs: InputsType, batch_size: int = 1, **kwargs): - """Process the inputs into a model-feedable format. - - Args: - inputs (InputsType): Inputs given by user. - batch_size (int): batch size. Defaults to 1. - - Yields: - Any: Data processed by the ``pipeline`` and ``collate_fn``. - """ - chunked_data = self._get_chunk_data(inputs, batch_size) - yield from map(self.collate_fn, chunked_data) - - def _get_chunk_data(self, inputs: Iterable, chunk_size: int): - """Get batch data from inputs. - - Args: - inputs (Iterable): An iterable dataset. - chunk_size (int): Equivalent to batch size. - - Yields: - list: batch data. - """ - inputs_iter = iter(inputs) - while True: - try: - chunk_data = [] - for _ in range(chunk_size): - inputs_ = next(inputs_iter) - pipe_out = self.pipeline(inputs_) - if pipe_out['data_samples'].get('img_path') is None: - pipe_out['data_samples'].set_metainfo( - dict(img_path=f'{self.num_unnamed_imgs}.jpg')) - self.num_unnamed_imgs += 1 - chunk_data.append((inputs_, pipe_out)) - yield chunk_data - except StopIteration: - if chunk_data: - yield chunk_data - break - - def __call__(self, - inputs: InputsType, - return_datasamples: bool = False, - batch_size: int = 1, - progress_bar: bool = True, - return_vis: bool = False, - show: bool = False, - wait_time: int = 0, - draw_pred: bool = True, - pred_score_thr: float = 0.3, - out_dir: str = 'results/', - save_vis: bool = False, - save_pred: bool = False, - print_result: bool = False, - **kwargs) -> dict: - """Call the inferencer. - - Args: - inputs (InputsType): Inputs for the inferencer. It can be a path - to image / image directory, or an array, or a list of these. - Note: If it's an numpy array, it should be in BGR order. - return_datasamples (bool): Whether to return results as - :obj:`BaseDataElement`. Defaults to False. - batch_size (int): Inference batch size. Defaults to 1. - progress_bar (bool): Whether to show a progress bar. Defaults to - True. - return_vis (bool): Whether to return the visualization result. - Defaults to False. - show (bool): Whether to display the visualization results in a - popup window. Defaults to False. - wait_time (float): The interval of show (s). Defaults to 0. 
- draw_pred (bool): Whether to draw predicted bounding boxes. - Defaults to True. - pred_score_thr (float): Minimum score of bboxes to draw. - Defaults to 0.3. - out_dir (str): Output directory of results. Defaults to 'results/'. - save_vis (bool): Whether to save the visualization results to - "out_dir". Defaults to False. - save_pred (bool): Whether to save the inference results to - "out_dir". Defaults to False. - print_result (bool): Whether to print the inference result w/o - visualization to the console. Defaults to False. - - **kwargs: Other keyword arguments passed to :meth:`preprocess`, - :meth:`forward`, :meth:`visualize` and :meth:`postprocess`. - Each key in kwargs should be in the corresponding set of - ``preprocess_kwargs``, ``forward_kwargs``, ``visualize_kwargs`` - and ``postprocess_kwargs``. - - Returns: - dict: Inference and visualization results, mapped from - "predictions" and "visualization". - """ - if (save_vis or save_pred) and not out_dir: - raise ValueError('out_dir must be specified when save_vis or ' - 'save_pred is True!') - if out_dir: - img_out_dir = osp.join(out_dir, 'vis') - pred_out_dir = osp.join(out_dir, 'preds') - else: - img_out_dir, pred_out_dir = '', '' - ( - preprocess_kwargs, - forward_kwargs, - visualize_kwargs, - postprocess_kwargs, - ) = self._dispatch_kwargs( - return_vis=return_vis, - show=show, - wait_time=wait_time, - draw_pred=draw_pred, - pred_score_thr=pred_score_thr, - save_vis=save_vis, - save_pred=save_pred, - print_result=print_result, - **kwargs) - - ori_inputs = self._inputs_to_list(inputs) - inputs = self.preprocess( - ori_inputs, batch_size=batch_size, **preprocess_kwargs) - results = {'predictions': [], 'visualization': []} - for ori_inputs, data in track( - inputs, description='Inference', disable=not progress_bar): - preds = self.forward(data, **forward_kwargs) - visualization = self.visualize( - ori_inputs, preds, img_out_dir=img_out_dir, **visualize_kwargs) - batch_res = self.postprocess( - preds, - visualization, - return_datasamples, - pred_out_dir=pred_out_dir, - **postprocess_kwargs) - results['predictions'].extend(batch_res['predictions']) - if return_vis and batch_res['visualization'] is not None: - results['visualization'].extend(batch_res['visualization']) - return results - - def _init_pipeline(self, cfg: ConfigType) -> Compose: - """Initialize the test pipeline.""" - pipeline_cfg = cfg.test_dataloader.dataset.pipeline - - # For inference, the key of ``instances`` is not used. - if 'meta_keys' in pipeline_cfg[-1]: - pipeline_cfg[-1]['meta_keys'] = tuple( - meta_key for meta_key in pipeline_cfg[-1]['meta_keys'] - if meta_key != 'instances') - - # Loading annotations is also not applicable - idx = self._get_transform_idx(pipeline_cfg, 'LoadOCRAnnotations') - if idx != -1: - del pipeline_cfg[idx] - - for transform in self.loading_transforms: - load_img_idx = self._get_transform_idx(pipeline_cfg, transform) - if load_img_idx != -1: - pipeline_cfg[load_img_idx]['type'] = 'InferencerLoader' - break - if load_img_idx == -1: - raise ValueError( - f'None of {self.loading_transforms} is found in the test ' - 'pipeline') - - return Compose(pipeline_cfg) - - def _get_transform_idx(self, pipeline_cfg: ConfigType, name: str) -> int: - """Returns the index of the transform in a pipeline. - - If the transform is not found, returns -1. 
- """ - for i, transform in enumerate(pipeline_cfg): - if transform['type'] == name: - return i - return -1 - - def visualize(self, - inputs: InputsType, - preds: PredType, - return_vis: bool = False, - show: bool = False, - wait_time: int = 0, - draw_pred: bool = True, - pred_score_thr: float = 0.3, - save_vis: bool = False, - img_out_dir: str = '') -> Union[List[np.ndarray], None]: - """Visualize predictions. - - Args: - inputs (List[Union[str, np.ndarray]]): Inputs for the inferencer. - preds (List[Dict]): Predictions of the model. - return_vis (bool): Whether to return the visualization result. - Defaults to False. - show (bool): Whether to display the image in a popup window. - Defaults to False. - wait_time (float): The interval of show (s). Defaults to 0. - draw_pred (bool): Whether to draw predicted bounding boxes. - Defaults to True. - pred_score_thr (float): Minimum score of bboxes to draw. - Defaults to 0.3. - save_vis (bool): Whether to save the visualization result. Defaults - to False. - img_out_dir (str): Output directory of visualization results. - If left as empty, no file will be saved. Defaults to ''. - - Returns: - List[np.ndarray] or None: Returns visualization results only if - applicable. - """ - if self.visualizer is None or not (show or save_vis or return_vis): - return None - - if getattr(self, 'visualizer') is None: - raise ValueError('Visualization needs the "visualizer" term' - 'defined in the config, but got None.') - - results = [] - - for single_input, pred in zip(inputs, preds): - if isinstance(single_input, str): - img_bytes = mmengine.fileio.get(single_input) - img = mmcv.imfrombytes(img_bytes, channel_order='rgb') - elif isinstance(single_input, np.ndarray): - img = single_input.copy()[:, :, ::-1] # to RGB - else: - raise ValueError('Unsupported input type: ' - f'{type(single_input)}') - img_name = osp.splitext(osp.basename(pred.img_path))[0] - - if save_vis and img_out_dir: - out_file = osp.splitext(img_name)[0] - out_file = f'{out_file}.jpg' - out_file = osp.join(img_out_dir, out_file) - else: - out_file = None - - visualization = self.visualizer.add_datasample( - img_name, - img, - pred, - show=show, - wait_time=wait_time, - draw_gt=False, - draw_pred=draw_pred, - pred_score_thr=pred_score_thr, - out_file=out_file, - ) - results.append(visualization) - - return results - - def postprocess( - self, - preds: PredType, - visualization: Optional[List[np.ndarray]] = None, - return_datasample: bool = False, - print_result: bool = False, - save_pred: bool = False, - pred_out_dir: str = '', - ) -> Union[ResType, Tuple[ResType, np.ndarray]]: - """Process the predictions and visualization results from ``forward`` - and ``visualize``. - - This method should be responsible for the following tasks: - - 1. Convert datasamples into a json-serializable dict if needed. - 2. Pack the predictions and visualization results and return them. - 3. Dump or log the predictions. - - Args: - preds (List[Dict]): Predictions of the model. - visualization (Optional[np.ndarray]): Visualized predictions. - return_datasample (bool): Whether to use Datasample to store - inference results. If False, dict will be used. - print_result (bool): Whether to print the inference result w/o - visualization to the console. Defaults to False. - save_pred (bool): Whether to save the inference result. Defaults to - False. - pred_out_dir: File to save the inference results w/o - visualization. If left as empty, no file will be saved. - Defaults to ''. 
- - Returns: - dict: Inference and visualization results with key ``predictions`` - and ``visualization``. - - - ``visualization`` (Any): Returned by :meth:`visualize`. - - ``predictions`` (dict or DataSample): Returned by - :meth:`forward` and processed in :meth:`postprocess`. - If ``return_datasample=False``, it usually should be a - json-serializable dict containing only basic data elements such - as strings and numbers. - """ - result_dict = {} - results = preds - if not return_datasample: - results = [] - for pred in preds: - result = self.pred2dict(pred) - if save_pred and pred_out_dir: - pred_name = osp.splitext(osp.basename(pred.img_path))[0] - pred_name = f'{pred_name}.json' - pred_out_file = osp.join(pred_out_dir, pred_name) - mmengine.dump(result, pred_out_file) - results.append(result) - # Add img to the results after printing and dumping - result_dict['predictions'] = results - if print_result: - print(result_dict) - result_dict['visualization'] = visualization - return result_dict - - def pred2dict(self, data_sample: InstanceData) -> Dict: - """Extract elements necessary to represent a prediction into a - dictionary. - - It's better to contain only basic data elements such as strings and - numbers in order to guarantee it's json-serializable. - """ - raise NotImplementedError - - def _array2list(self, array: Union[Tensor, np.ndarray, - List]) -> List[float]: - """Convert a tensor or numpy array to a list. - - Args: - array (Union[Tensor, np.ndarray]): The array to be converted. - - Returns: - List[float]: The converted list. - """ - if isinstance(array, Tensor): - return array.detach().cpu().numpy().tolist() - if isinstance(array, np.ndarray): - return array.tolist() - if isinstance(array, list): - array = [self._array2list(arr) for arr in array] - return array diff --git a/spaces/NATSpeech/DiffSpeech/tasks/tts/diffspeech.py b/spaces/NATSpeech/DiffSpeech/tasks/tts/diffspeech.py deleted file mode 100644 index 283bf9b62fed0c5f68a9f82887543b9413dd8955..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/tasks/tts/diffspeech.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch - -from modules.tts.diffspeech.shallow_diffusion_tts import GaussianDiffusion -from tasks.tts.fs2_orig import FastSpeech2OrigTask - -import utils -from utils.commons.hparams import hparams -from utils.commons.ckpt_utils import load_ckpt -from utils.audio.pitch.utils import denorm_f0 - - -class DiffSpeechTask(FastSpeech2OrigTask): - def build_tts_model(self): - # get min and max - # import torch - # from tqdm import tqdm - # v_min = torch.ones([80]) * 100 - # v_max = torch.ones([80]) * -100 - # for i, ds in enumerate(tqdm(self.dataset_cls('train'))): - # v_max = torch.max(torch.max(ds['mel'].reshape(-1, 80), 0)[0], v_max) - # v_min = torch.min(torch.min(ds['mel'].reshape(-1, 80), 0)[0], v_min) - # if i % 100 == 0: - # print(i, v_min, v_max) - # print('final', v_min, v_max) - dict_size = len(self.token_encoder) - self.model = GaussianDiffusion(dict_size, hparams) - if hparams['fs2_ckpt'] != '': - load_ckpt(self.model.fs2, hparams['fs2_ckpt'], 'model', strict=True) - # for k, v in self.model.fs2.named_parameters(): - # if 'predictor' not in k: - # v.requires_grad = False - # or - for k, v in self.model.fs2.named_parameters(): - v.requires_grad = False - - def build_optimizer(self, model): - self.optimizer = optimizer = torch.optim.AdamW( - filter(lambda p: p.requires_grad, model.parameters()), - lr=hparams['lr'], - betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']), - 
weight_decay=hparams['weight_decay']) - return optimizer - - def build_scheduler(self, optimizer): - return torch.optim.lr_scheduler.StepLR(optimizer, hparams['decay_steps'], gamma=0.5) - - def run_model(self, sample, infer=False, *args, **kwargs): - txt_tokens = sample['txt_tokens'] # [B, T_t] - spk_embed = sample.get('spk_embed') - spk_id = sample.get('spk_ids') - if not infer: - target = sample['mels'] # [B, T_s, 80] - mel2ph = sample['mel2ph'] # [B, T_s] - f0 = sample.get('f0') - uv = sample.get('uv') - output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, - ref_mels=target, f0=f0, uv=uv, infer=False) - losses = {} - if 'diff_loss' in output: - losses['mel'] = output['diff_loss'] - self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses) - if hparams['use_pitch_embed']: - self.add_pitch_loss(output, sample, losses) - return losses, output - else: - use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur']) - use_gt_f0 = kwargs.get('infer_use_gt_f0', hparams['use_gt_f0']) - mel2ph, uv, f0 = None, None, None - if use_gt_dur: - mel2ph = sample['mel2ph'] - if use_gt_f0: - f0 = sample['f0'] - uv = sample['uv'] - output = self.model(txt_tokens, mel2ph=mel2ph, spk_embed=spk_embed, spk_id=spk_id, - ref_mels=None, f0=f0, uv=uv, infer=True) - return output - - def save_valid_result(self, sample, batch_idx, model_out): - sr = hparams['audio_sample_rate'] - f0_gt = None - # mel_out = model_out['mel_out'] - if sample.get('f0') is not None: - f0_gt = denorm_f0(sample['f0'][0].cpu(), sample['uv'][0].cpu()) - # self.plot_mel(batch_idx, sample['mels'], mel_out, f0s=f0_gt) - if self.global_step > 0: - # wav_pred = self.vocoder.spec2wav(mel_out[0].cpu(), f0=f0_gt) - # self.logger.add_audio(f'wav_val_{batch_idx}', wav_pred, self.global_step, sr) - # with gt duration - model_out = self.run_model(sample, infer=True, infer_use_gt_dur=True) - dur_info = self.get_plot_dur_info(sample, model_out) - del dur_info['dur_pred'] - wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_gdur_{batch_idx}', wav_pred, self.global_step, sr) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'diffmel_gdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) - self.plot_mel(batch_idx, sample['mels'], model_out['fs2_mel'][0], f'fs2mel_gdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) # gt mel vs. 
fs2 mel - - # with pred duration - if not hparams['use_gt_dur']: - model_out = self.run_model(sample, infer=True, infer_use_gt_dur=False) - dur_info = self.get_plot_dur_info(sample, model_out) - self.plot_mel(batch_idx, sample['mels'], model_out['mel_out'][0], f'mel_pdur_{batch_idx}', - dur_info=dur_info, f0s=f0_gt) - wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu(), f0=f0_gt) - self.logger.add_audio(f'wav_pdur_{batch_idx}', wav_pred, self.global_step, sr) - # gt wav - if self.global_step <= hparams['valid_infer_interval']: - mel_gt = sample['mels'][0].cpu() - wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt) - self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, sr) diff --git a/spaces/NATSpeech/DiffSpeech/utils/text/encoding.py b/spaces/NATSpeech/DiffSpeech/utils/text/encoding.py deleted file mode 100644 index f09f514613fd44a27450fe7c04cbdf5ebfbe78a8..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/utils/text/encoding.py +++ /dev/null @@ -1,9 +0,0 @@ -import chardet - - -def get_encoding(file): - with open(file, 'rb') as f: - encoding = chardet.detect(f.read())['encoding'] - if encoding == 'GB2312': - encoding = 'GB18030' - return encoding diff --git a/spaces/Nadaal/chatgpt-demo/app.py b/spaces/Nadaal/chatgpt-demo/app.py deleted file mode 100644 index 3a56aadf0c5972b89e657ebff6239406e4dd7319..0000000000000000000000000000000000000000 --- a/spaces/Nadaal/chatgpt-demo/app.py +++ /dev/null @@ -1,123 +0,0 @@ -import gradio as gr -import os -import openai -import requests -import json - -openai.api_key = os.environ.get("OPENAI_API_KEY") - -prompt_templates = {"Default ChatGPT": ""} - -def get_empty_state(): - return {"total_tokens": 0, "messages": []} - -def download_prompt_templates(): - url = "https://raw.githubusercontent.com/f/awesome-chatgpt-prompts/main/prompts.csv" - response = requests.get(url) - - for line in response.text.splitlines()[1:]: - act, prompt = line.split('","') - prompt_templates[act.replace('"', '')] = prompt.replace('"', '') - - choices = list(prompt_templates.keys()) - return gr.update(value=choices[0], choices=choices) - -def on_token_change(user_token): - openai.api_key = user_token or os.environ.get("OPENAI_API_KEY") - -def on_prompt_template_change(prompt_template): - if not isinstance(prompt_template, str): return - return prompt_templates[prompt_template] - -def submit_message(user_token, prompt, prompt_template, temperature, max_tokens, state): - - history = state['messages'] - - if not prompt: - return gr.update(value='', visible=state['total_tokens'] < 1_000), [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)], f"Total tokens used: {state['total_tokens']} / 3000", state - - prompt_template = prompt_templates[prompt_template] - - system_prompt = [] - if prompt_template: - system_prompt = [{ "role": "system", "content": prompt_template }] - - prompt_msg = { "role": "user", "content": prompt } - - try: - completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + history + [prompt_msg], temperature=temperature, max_tokens=max_tokens) - - history.append(prompt_msg) - history.append(completion.choices[0].message.to_dict()) - - state['total_tokens'] += completion['usage']['total_tokens'] - - except Exception as e: - history.append(prompt_msg) - history.append({ - "role": "system", - "content": f"Error: {e}" - }) - - total_tokens_used_msg = f"Total tokens used: {state['total_tokens']} / 3000" if not user_token else "" - chat_messages = 
[(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)] - input_visibility = user_token or state['total_tokens'] < 3000 - - return gr.update(value='', visible=input_visibility), chat_messages, total_tokens_used_msg, state - -def clear_conversation(): - return gr.update(value=None, visible=True), None, "", get_empty_state() - -css = """ - #col-container {max-width: 80%; margin-left: auto; margin-right: auto;} - #chatbox {min-height: 400px;} - #header {text-align: center;} - #prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;} - #total_tokens_str {text-align: right; font-size: 0.8em; color: #666; height: 1em;} - #label {font-size: 0.8em; padding: 0.5em; margin: 0;} - .message { font-size: 1.2em; } - """ - -with gr.Blocks(css=css) as demo: - - state = gr.State(get_empty_state()) - - - with gr.Column(elem_id="col-container"): - gr.Markdown("""## OpenAI ChatGPT Demo - Using the ofiicial API (gpt-3.5-turbo model)
      - Prompt templates from [awesome-chatgpt-prompts](https://github.com/f/awesome-chatgpt-prompts).
      - Current limit is 3000 tokens per conversation.""", - elem_id="header") - - with gr.Row(): - with gr.Column(): - chatbot = gr.Chatbot(elem_id="chatbox") - input_message = gr.Textbox(show_label=False, placeholder="Enter text and press enter", visible=True).style(container=False) - btn_submit = gr.Button("Submit") - total_tokens_str = gr.Markdown(elem_id="total_tokens_str") - btn_clear_conversation = gr.Button("🔃 Start New Conversation") - with gr.Column(): - prompt_template = gr.Dropdown(label="Set a custom insruction for the chatbot:", choices=list(prompt_templates.keys())) - prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview") - gr.Markdown("Enter your own OpenAI API Key to remove the 3000 token limit. You can get it [here](https://platform.openai.com/account/api-keys).", elem_id="label") - user_token = gr.Textbox(placeholder="OpenAI API Key", type="password", show_label=False) - with gr.Accordion("Advanced parameters", open=False): - temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, interactive=True, label="Temperature (higher = more creative/chaotic)") - max_tokens = gr.Slider(minimum=100, maximum=4096, value=1000, step=1, interactive=True, label="Max tokens per response") - - gr.HTML('''


      [Duplicate Space badge] You can duplicate this Space.
      Don't forget to set your own OpenAI API Key environment variable in Settings.
      [visitors badge]
      ''') - - btn_submit.click(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state]) - input_message.submit(submit_message, [user_token, input_message, prompt_template, temperature, max_tokens, state], [input_message, chatbot, total_tokens_str, state]) - btn_clear_conversation.click(clear_conversation, [], [input_message, chatbot, total_tokens_str, state]) - prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview]) - user_token.change(on_token_change, inputs=[user_token], outputs=[]) - - - demo.load(download_prompt_templates, inputs=None, outputs=[prompt_template]) - - -demo.launch(debug=True, height='800px') diff --git a/spaces/OAOA/DifFace/basicsr/data/vimeo90k_dataset.py b/spaces/OAOA/DifFace/basicsr/data/vimeo90k_dataset.py deleted file mode 100644 index e5e33e1082667aeee61fecf2436fb287e82e0936..0000000000000000000000000000000000000000 --- a/spaces/OAOA/DifFace/basicsr/data/vimeo90k_dataset.py +++ /dev/null @@ -1,199 +0,0 @@ -import random -import torch -from pathlib import Path -from torch.utils import data as data - -from basicsr.data.transforms import augment, paired_random_crop -from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor -from basicsr.utils.registry import DATASET_REGISTRY - - -@DATASET_REGISTRY.register() -class Vimeo90KDataset(data.Dataset): - """Vimeo90K dataset for training. - - The keys are generated from a meta info txt file. - basicsr/data/meta_info/meta_info_Vimeo90K_train_GT.txt - - Each line contains the following items, separated by a white space. - - 1. clip name; - 2. frame number; - 3. image shape - - Examples: - - :: - - 00001/0001 7 (256,448,3) - 00001/0002 7 (256,448,3) - - - Key examples: "00001/0001" - - GT (gt): Ground-Truth; - - LQ (lq): Low-Quality, e.g., low-resolution/blurry/noisy/compressed frames. - - The neighboring frame list for different num_frame: - - :: - - num_frame | frame list - 1 | 4 - 3 | 3,4,5 - 5 | 2,3,4,5,6 - 7 | 1,2,3,4,5,6,7 - - Args: - opt (dict): Config for train dataset. It contains the following keys: - dataroot_gt (str): Data root path for gt. - dataroot_lq (str): Data root path for lq. - meta_info_file (str): Path for meta information file. - io_backend (dict): IO backend type and other kwarg. - num_frame (int): Window size for input frames. - gt_size (int): Cropped patched size for gt patches. - random_reverse (bool): Random reverse input frames. - use_hflip (bool): Use horizontal flips. - use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation). - scale (bool): Scale, which will be added automatically. 
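A quick illustrative sketch (not part of the original docstring): the frame-list table above is just a centred window over the seven frames im1.png–im7.png, and the constructor below derives it with the same arithmetic:

    # mirrors self.neighbor_list in __init__ below
    def neighbor_list(num_frame: int) -> list:
        return [i + (9 - num_frame) // 2 for i in range(num_frame)]

    assert neighbor_list(1) == [4]
    assert neighbor_list(3) == [3, 4, 5]
    assert neighbor_list(5) == [2, 3, 4, 5, 6]
    assert neighbor_list(7) == [1, 2, 3, 4, 5, 6, 7]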
- """ - - def __init__(self, opt): - super(Vimeo90KDataset, self).__init__() - self.opt = opt - self.gt_root, self.lq_root = Path(opt['dataroot_gt']), Path(opt['dataroot_lq']) - - with open(opt['meta_info_file'], 'r') as fin: - self.keys = [line.split(' ')[0] for line in fin] - - # file client (io backend) - self.file_client = None - self.io_backend_opt = opt['io_backend'] - self.is_lmdb = False - if self.io_backend_opt['type'] == 'lmdb': - self.is_lmdb = True - self.io_backend_opt['db_paths'] = [self.lq_root, self.gt_root] - self.io_backend_opt['client_keys'] = ['lq', 'gt'] - - # indices of input images - self.neighbor_list = [i + (9 - opt['num_frame']) // 2 for i in range(opt['num_frame'])] - - # temporal augmentation configs - self.random_reverse = opt['random_reverse'] - logger = get_root_logger() - logger.info(f'Random reverse is {self.random_reverse}.') - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - # random reverse - if self.random_reverse and random.random() < 0.5: - self.neighbor_list.reverse() - - scale = self.opt['scale'] - gt_size = self.opt['gt_size'] - key = self.keys[index] - clip, seq = key.split('/') # key example: 00001/0001 - - # get the GT frame (im4.png) - if self.is_lmdb: - img_gt_path = f'{key}/im4' - else: - img_gt_path = self.gt_root / clip / seq / 'im4.png' - img_bytes = self.file_client.get(img_gt_path, 'gt') - img_gt = imfrombytes(img_bytes, float32=True) - - # get the neighboring LQ frames - img_lqs = [] - for neighbor in self.neighbor_list: - if self.is_lmdb: - img_lq_path = f'{clip}/{seq}/im{neighbor}' - else: - img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png' - img_bytes = self.file_client.get(img_lq_path, 'lq') - img_lq = imfrombytes(img_bytes, float32=True) - img_lqs.append(img_lq) - - # randomly crop - img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale, img_gt_path) - - # augmentation - flip, rotate - img_lqs.append(img_gt) - img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot']) - - img_results = img2tensor(img_results) - img_lqs = torch.stack(img_results[0:-1], dim=0) - img_gt = img_results[-1] - - # img_lqs: (t, c, h, w) - # img_gt: (c, h, w) - # key: str - return {'lq': img_lqs, 'gt': img_gt, 'key': key} - - def __len__(self): - return len(self.keys) - - -@DATASET_REGISTRY.register() -class Vimeo90KRecurrentDataset(Vimeo90KDataset): - - def __init__(self, opt): - super(Vimeo90KRecurrentDataset, self).__init__(opt) - - self.flip_sequence = opt['flip_sequence'] - self.neighbor_list = [1, 2, 3, 4, 5, 6, 7] - - def __getitem__(self, index): - if self.file_client is None: - self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt) - - # random reverse - if self.random_reverse and random.random() < 0.5: - self.neighbor_list.reverse() - - scale = self.opt['scale'] - gt_size = self.opt['gt_size'] - key = self.keys[index] - clip, seq = key.split('/') # key example: 00001/0001 - - # get the neighboring LQ and GT frames - img_lqs = [] - img_gts = [] - for neighbor in self.neighbor_list: - if self.is_lmdb: - img_lq_path = f'{clip}/{seq}/im{neighbor}' - img_gt_path = f'{clip}/{seq}/im{neighbor}' - else: - img_lq_path = self.lq_root / clip / seq / f'im{neighbor}.png' - img_gt_path = self.gt_root / clip / seq / f'im{neighbor}.png' - # LQ - img_bytes = self.file_client.get(img_lq_path, 'lq') - img_lq = imfrombytes(img_bytes, float32=True) - # GT - img_bytes = 
self.file_client.get(img_gt_path, 'gt') - img_gt = imfrombytes(img_bytes, float32=True) - - img_lqs.append(img_lq) - img_gts.append(img_gt) - - # randomly crop - img_gts, img_lqs = paired_random_crop(img_gts, img_lqs, gt_size, scale, img_gt_path) - - # augmentation - flip, rotate - img_lqs.extend(img_gts) - img_results = augment(img_lqs, self.opt['use_hflip'], self.opt['use_rot']) - - img_results = img2tensor(img_results) - img_lqs = torch.stack(img_results[:7], dim=0) - img_gts = torch.stack(img_results[7:], dim=0) - - if self.flip_sequence: # flip the sequence: 7 frames to 14 frames - img_lqs = torch.cat([img_lqs, img_lqs.flip(0)], dim=0) - img_gts = torch.cat([img_gts, img_gts.flip(0)], dim=0) - - # img_lqs: (t, c, h, w) - # img_gt: (c, h, w) - # key: str - return {'lq': img_lqs, 'gt': img_gts, 'key': key} - - def __len__(self): - return len(self.keys) diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/unsupervised_quality_estimation/meteor.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/unsupervised_quality_estimation/meteor.py deleted file mode 100644 index 2ee0448cf1f167f6f3ecee56ad807922cffb0956..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/unsupervised_quality_estimation/meteor.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import argparse -import math -import os -import subprocess -import sys -import tempfile -from collections import defaultdict -from itertools import combinations - - -def read_translations(path, n_repeats): - segment_counter = 0 - segment_translations = [] - translations = defaultdict(list) - for line in open(path): - segment_translations.append(" ".join(line.split())) - if len(segment_translations) == n_repeats: - translations[segment_counter] = segment_translations - segment_translations = [] - segment_counter += 1 - return translations - - -def generate_input(translations, n_repeats): - _, ref_path = tempfile.mkstemp() - _, mt_path = tempfile.mkstemp() - ref_fh = open(ref_path, "w") - mt_fh = open(mt_path, "w") - for segid in sorted(translations.keys()): - assert len(translations[segid]) == n_repeats - indexes = combinations(range(n_repeats), 2) - for idx1, idx2 in indexes: - mt_fh.write(translations[segid][idx1].strip() + "\n") - ref_fh.write(translations[segid][idx2].strip() + "\n") - sys.stderr.write("\nSaved translations to %s and %s" % (ref_path, mt_path)) - return ref_path, mt_path - - -def run_meteor(ref_path, mt_path, metric_path, lang="en"): - _, out_path = tempfile.mkstemp() - subprocess.call( - [ - "java", - "-Xmx2G", - "-jar", - metric_path, - mt_path, - ref_path, - "-p", - "0.5 0.2 0.6 0.75", # default parameters, only changed alpha to give equal weight to P and R - "-norm", - "-l", - lang, - ], - stdout=open(out_path, "w"), - ) - os.remove(ref_path) - os.remove(mt_path) - sys.stderr.write("\nSaved Meteor output to %s" % out_path) - return out_path - - -def read_output(meteor_output_path, n_repeats): - n_combinations = math.factorial(n_repeats) / ( - math.factorial(2) * math.factorial(n_repeats - 2) - ) - raw_scores = [] - average_scores = [] - for line in open(meteor_output_path): - if not line.startswith("Segment "): - continue - score = float(line.strip().split("\t")[1]) - raw_scores.append(score) - if len(raw_scores) == n_combinations: - average_scores.append(sum(raw_scores) / n_combinations) 
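An illustrative aside (the value of n_repeats here is arbitrary, not from the script): each source segment has n_repeats sampled translations, Meteor is run on every unordered pair of them, and the per-segment score is the mean over those C(n_repeats, 2) pairs, which is exactly what n_combinations counts above:

    from itertools import combinations
    from math import comb

    n_repeats = 4                                    # hypothetical: 4 sampled translations per segment
    pairs = list(combinations(range(n_repeats), 2))  # same pairing as generate_input()
    assert len(pairs) == comb(n_repeats, 2) == 6     # matches n_combinations in read_output()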
- raw_scores = [] - os.remove(meteor_output_path) - return average_scores - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument("-i", "--infile") - parser.add_argument("-n", "--repeat_times", type=int) - parser.add_argument("-m", "--meteor") - parser.add_argument("-o", "--output") - args = parser.parse_args() - - translations = read_translations(args.infile, args.repeat_times) - sys.stderr.write("\nGenerating input for Meteor...") - ref_path, mt_path = generate_input(translations, args.repeat_times) - sys.stderr.write("\nRunning Meteor...") - out_path = run_meteor(ref_path, mt_path, args.meteor) - sys.stderr.write("\nReading output...") - scores = read_output(out_path, args.repeat_times) - sys.stderr.write("\nWriting results...") - with open(args.output, "w") as o: - for scr in scores: - o.write("{}\n".format(scr)) - o.close() - - -if __name__ == "__main__": - main() diff --git a/spaces/OFA-Sys/OFA-Image_Caption/models/ofa/unify_transformer_layer.py b/spaces/OFA-Sys/OFA-Image_Caption/models/ofa/unify_transformer_layer.py deleted file mode 100644 index c02410548106e177be4ead10dbc8facdf5947e1f..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-Image_Caption/models/ofa/unify_transformer_layer.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from typing import Dict, List, Optional - -import torch -import torch.nn as nn -from fairseq import utils -from fairseq.modules import LayerNorm -from fairseq.modules.fairseq_dropout import FairseqDropout -from fairseq.modules.quant_noise import quant_noise -from torch import Tensor - -from .unify_multihead_attention import MultiheadAttention - - -def drop_path(x, drop_prob: float = 0.0, training: bool = False): - """ - Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). - Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, - however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... - See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the - layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the - argument. - """ - if drop_prob == 0.0 or not training: - return x - keep_prob = 1 - drop_prob - shape = (1, x.shape[1], 1) - random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) - random_tensor.floor_() # binarize - output = x.div(keep_prob) * random_tensor - return output - - -class DropPath(nn.Module): - """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" - - def __init__(self, drop_prob=None): - super().__init__() - self.drop_prob = drop_prob - - def forward(self, x): - return drop_path(x, self.drop_prob, self.training) - - def extra_repr(self) -> str: - return "p={}".format(self.drop_prob) - - -class TransformerEncoderLayer(nn.Module): - """Encoder layer block. - - In the original paper each operation (multi-head attention or FFN) is - postprocessed with: `dropout -> add residual -> layernorm`. In the - tensor2tensor code they suggest that learning is more robust when - preprocessing each layer with layernorm and postprocessing with: - `dropout -> add residual`. 
We default to the approach in the paper, but the - tensor2tensor approach can be enabled by setting - *args.encoder_normalize_before* to ``True``. - - Args: - args (argparse.Namespace): parsed command-line arguments - """ - - def __init__(self, args, drop_path_rate=0.0): - super().__init__() - self.args = args - self.embed_dim = args.encoder_embed_dim - self.quant_noise = getattr(args, 'quant_noise_pq', 0) - self.quant_noise_block_size = getattr(args, 'quant_noise_pq_block_size', 8) or 8 - self.self_attn = self.build_self_attention(self.embed_dim, args) - self.self_attn_layer_norm = LayerNorm(self.embed_dim) - self.dropout_module = FairseqDropout( - args.dropout, module_name=self.__class__.__name__ - ) - self.activation_fn = utils.get_activation_fn( - activation=getattr(args, 'activation_fn', 'relu') or "relu" - ) - activation_dropout_p = getattr(args, "activation_dropout", 0) or 0 - if activation_dropout_p == 0: - # for backwards compatibility with models that use args.relu_dropout - activation_dropout_p = getattr(args, "relu_dropout", 0) or 0 - self.activation_dropout_module = FairseqDropout( - float(activation_dropout_p), module_name=self.__class__.__name__ - ) - self.normalize_before = args.encoder_normalize_before - self.fc1 = self.build_fc1( - self.embed_dim, - args.encoder_ffn_embed_dim, - self.quant_noise, - self.quant_noise_block_size, - ) - self.fc2 = self.build_fc2( - args.encoder_ffn_embed_dim, - self.embed_dim, - self.quant_noise, - self.quant_noise_block_size, - ) - - self.attn_ln = LayerNorm(self.embed_dim) if getattr(args, 'scale_attn', False) else None - self.nh = self.self_attn.num_heads - self.head_dim = self.self_attn.head_dim - - self.ffn_layernorm = LayerNorm(args.encoder_ffn_embed_dim) if getattr(args, 'scale_fc', False) else None - self.w_resid = nn.Parameter(torch.ones(self.embed_dim, ), requires_grad=True) if getattr(args, 'scale_resids', False) else None - - self.final_layer_norm = LayerNorm(self.embed_dim) - - self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise( - nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size - ) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise( - nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size - ) - - def build_self_attention(self, embed_dim, args): - return MultiheadAttention( - embed_dim, - args.encoder_attention_heads, - dropout=args.attention_dropout, - self_attention=True, - q_noise=self.quant_noise, - qn_block_size=self.quant_noise_block_size, - scale_factor=args.attn_scale_factor, - scale_heads=getattr(args, 'scale_heads', False) - ) - - def residual_connection(self, x, residual): - return residual + self.drop_path(x) - - def upgrade_state_dict_named(self, state_dict, name): - """ - Rename layer norm states from `...layer_norms.0.weight` to - `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to - `...final_layer_norm.weight` - """ - layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} - for old, new in layer_norm_map.items(): - for m in ("weight", "bias"): - k = "{}.layer_norms.{}.{}".format(name, old, m) - if k in state_dict: - state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] - del state_dict[k] - if "{}.{}.{}".format(name, new, m) not in state_dict and "{}.{}".format(new, m) in self.state_dict(): - state_dict[ - "{}.{}.{}".format(name, new, m) - ] = 
self.state_dict()["{}.{}".format(new, m)] - - prefix = name + "." if name != "" else "" - for param_name, param_tensor in self.state_dict().items(): - if (prefix + param_name) not in state_dict and param_name in self.state_dict(): - state_dict[prefix + param_name] = self.state_dict()[param_name] - - def forward( - self, - x, - encoder_padding_mask: Optional[Tensor], - attn_mask: Optional[Tensor] = None, - self_attn_bias: Optional[Tensor] = None - ): - """ - Args: - x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` - encoder_padding_mask (ByteTensor): binary ByteTensor of shape - `(batch, seq_len)` where padding elements are indicated by ``1``. - attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`, - where `tgt_len` is the length of output and `src_len` is the - length of input, though here both are equal to `seq_len`. - `attn_mask[tgt_i, src_j] = 1` means that when calculating the - embedding for `tgt_i`, we exclude (mask out) `src_j`. This is - useful for strided self-attention. - - Returns: - encoded output of shape `(seq_len, batch, embed_dim)` - """ - # anything in original attn_mask = 1, becomes -1e8 - # anything in original attn_mask = 0, becomes 0 - # Note that we cannot use -inf here, because at some edge cases, - # the attention weight (before softmax) for some padded element in query - # will become -inf, which results in NaN in model parameters - if attn_mask is not None: - attn_mask = attn_mask.masked_fill( - attn_mask.to(torch.bool), - -1e8 if x.dtype == torch.float32 else -1e4 - ) - - residual = x - if self.normalize_before: - x = self.self_attn_layer_norm(x) - x, _ = self.self_attn( - query=x, - key=x, - value=x, - key_padding_mask=encoder_padding_mask, - need_weights=False, - attn_mask=attn_mask, - attn_bias=self_attn_bias - ) - if self.attn_ln is not None: - x = self.attn_ln(x) - x = self.dropout_module(x) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.self_attn_layer_norm(x) - - residual = x - if self.normalize_before: - x = self.final_layer_norm(x) - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - if self.ffn_layernorm is not None: - x = self.ffn_layernorm(x) - x = self.fc2(x) - x = self.dropout_module(x) - if self.w_resid is not None: - residual = torch.mul(self.w_resid, residual) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.final_layer_norm(x) - return x - - -class TransformerDecoderLayer(nn.Module): - """Decoder layer block. - - In the original paper each operation (multi-head attention, encoder - attention or FFN) is postprocessed with: `dropout -> add residual -> - layernorm`. In the tensor2tensor code they suggest that learning is more - robust when preprocessing each layer with layernorm and postprocessing with: - `dropout -> add residual`. We default to the approach in the paper, but the - tensor2tensor approach can be enabled by setting - *args.decoder_normalize_before* to ``True``. - - Args: - args (argparse.Namespace): parsed command-line arguments - no_encoder_attn (bool, optional): whether to attend to encoder outputs - (default: False). 
- """ - - def __init__( - self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, drop_path_rate=0.0 - ): - super().__init__() - self.embed_dim = args.decoder_embed_dim - self.dropout_module = FairseqDropout( - args.dropout, module_name=self.__class__.__name__ - ) - self.quant_noise = getattr(args, "quant_noise_pq", 0) - self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) - - self.cross_self_attention = getattr(args, "cross_self_attention", False) - - self.self_attn = self.build_self_attention( - self.embed_dim, - args, - add_bias_kv=add_bias_kv, - add_zero_attn=add_zero_attn, - ) - self.self_attn_ln = LayerNorm(self.embed_dim) if getattr(args, 'scale_attn', False) else None - self.cross_attn_ln = LayerNorm(self.embed_dim) if getattr(args, 'scale_attn', False) else None - self.nh = self.self_attn.num_heads - self.head_dim = self.self_attn.head_dim - - self.activation_fn = utils.get_activation_fn( - activation=str(args.activation_fn) - if getattr(args, "activation_fn", None) is not None - else "relu" - ) - activation_dropout_p = getattr(args, "activation_dropout", 0) or 0 - if activation_dropout_p == 0: - # for backwards compatibility with models that use args.relu_dropout - activation_dropout_p = getattr(args, "relu_dropout", 0) or 0 - self.activation_dropout_module = FairseqDropout( - float(activation_dropout_p), module_name=self.__class__.__name__ - ) - self.normalize_before = args.decoder_normalize_before - - # use layerNorm rather than FusedLayerNorm for exporting. - # char_inputs can be used to determint this. - # TODO remove this once we update apex with the fix - export = getattr(args, "char_inputs", False) - self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export) - - if no_encoder_attn: - self.encoder_attn = None - self.encoder_attn_layer_norm = None - else: - self.encoder_attn = self.build_encoder_attention(self.embed_dim, args) - self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export) - - self.ffn_layernorm = LayerNorm(args.decoder_ffn_embed_dim) if getattr(args, 'scale_fc', False) else None - self.w_resid = nn.Parameter(torch.ones(self.embed_dim, ), requires_grad=True) if getattr(args, 'scale_resids', False) else None - - self.fc1 = self.build_fc1( - self.embed_dim, - args.decoder_ffn_embed_dim, - self.quant_noise, - self.quant_noise_block_size, - ) - self.fc2 = self.build_fc2( - args.decoder_ffn_embed_dim, - self.embed_dim, - self.quant_noise, - self.quant_noise_block_size, - ) - - self.final_layer_norm = LayerNorm(self.embed_dim, export=export) - self.need_attn = True - - self.onnx_trace = False - - self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() - - def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) - - def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size): - return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size) - - def build_self_attention( - self, embed_dim, args, add_bias_kv=False, add_zero_attn=False - ): - return MultiheadAttention( - embed_dim, - args.decoder_attention_heads, - dropout=args.attention_dropout, - add_bias_kv=add_bias_kv, - add_zero_attn=add_zero_attn, - self_attention=not getattr(args, "cross_self_attention", False), - q_noise=self.quant_noise, - qn_block_size=self.quant_noise_block_size, - scale_factor=args.attn_scale_factor, - scale_heads=getattr(args, 'scale_heads', False) - ) - - def build_encoder_attention(self, 
embed_dim, args): - return MultiheadAttention( - embed_dim, - args.decoder_attention_heads, - kdim=getattr(args, "encoder_embed_dim", None), - vdim=getattr(args, "encoder_embed_dim", None), - dropout=args.attention_dropout, - encoder_decoder_attention=True, - q_noise=self.quant_noise, - qn_block_size=self.quant_noise_block_size, - scale_factor=args.attn_scale_factor, - scale_heads=getattr(args, 'scale_heads', False) - ) - - def prepare_for_onnx_export_(self): - self.onnx_trace = True - - def residual_connection(self, x, residual): - return residual + self.drop_path(x) - - def forward( - self, - x, - encoder_out: Optional[torch.Tensor] = None, - encoder_padding_mask: Optional[torch.Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, - prev_self_attn_state: Optional[List[torch.Tensor]] = None, - prev_attn_state: Optional[List[torch.Tensor]] = None, - self_attn_mask: Optional[torch.Tensor] = None, - self_attn_padding_mask: Optional[torch.Tensor] = None, - need_attn: bool = False, - need_head_weights: bool = False, - self_attn_bias: Optional[Tensor] = None, - cross_attn_bias: Optional[Tensor] = None - ): - """ - Args: - x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` - encoder_padding_mask (ByteTensor, optional): binary - ByteTensor of shape `(batch, src_len)` where padding - elements are indicated by ``1``. - need_attn (bool, optional): return attention weights - need_head_weights (bool, optional): return attention weights - for each head (default: return average over heads). - - Returns: - encoded output of shape `(seq_len, batch, embed_dim)` - """ - if need_head_weights: - need_attn = True - - residual = x - if self.normalize_before: - x = self.self_attn_layer_norm(x) - if prev_self_attn_state is not None: - prev_key, prev_value = prev_self_attn_state[:2] - saved_state: Dict[str, Optional[Tensor]] = { - "prev_key": prev_key, - "prev_value": prev_value, - } - if len(prev_self_attn_state) >= 3: - saved_state["prev_key_padding_mask"] = prev_self_attn_state[2] - assert incremental_state is not None - self.self_attn._set_input_buffer(incremental_state, saved_state) - _self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state) - if self.cross_self_attention and not ( - incremental_state is not None - and _self_attn_input_buffer is not None - and "prev_key" in _self_attn_input_buffer - ): - if self_attn_mask is not None: - assert encoder_out is not None - self_attn_mask = torch.cat( - (x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1 - ) - if self_attn_padding_mask is not None: - if encoder_padding_mask is None: - assert encoder_out is not None - encoder_padding_mask = self_attn_padding_mask.new_zeros( - encoder_out.size(1), encoder_out.size(0) - ) - self_attn_padding_mask = torch.cat( - (encoder_padding_mask, self_attn_padding_mask), dim=1 - ) - assert encoder_out is not None - y = torch.cat((encoder_out, x), dim=0) - else: - y = x - - x, attn = self.self_attn( - query=x, - key=y, - value=y, - key_padding_mask=self_attn_padding_mask, - incremental_state=incremental_state, - need_weights=False, - attn_mask=self_attn_mask, - attn_bias=self_attn_bias - ) - if self.self_attn_ln is not None: - x = self.self_attn_ln(x) - x = self.dropout_module(x) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.self_attn_layer_norm(x) - - if self.encoder_attn is not None and encoder_out is not None: - residual = x - if self.normalize_before: - x = self.encoder_attn_layer_norm(x) - 
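- # Restore any cached cross-attention key/value states (prev_attn_state) into the encoder-attention buffer so incremental decoding can reuse them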
if prev_attn_state is not None: - prev_key, prev_value = prev_attn_state[:2] - saved_state: Dict[str, Optional[Tensor]] = { - "prev_key": prev_key, - "prev_value": prev_value, - } - if len(prev_attn_state) >= 3: - saved_state["prev_key_padding_mask"] = prev_attn_state[2] - assert incremental_state is not None - self.encoder_attn._set_input_buffer(incremental_state, saved_state) - - x, attn = self.encoder_attn( - query=x, - key=encoder_out, - value=encoder_out, - key_padding_mask=encoder_padding_mask, - incremental_state=incremental_state, - static_kv=True, - need_weights=need_attn or (not self.training and self.need_attn), - need_head_weights=need_head_weights, - attn_bias=cross_attn_bias - ) - if self.cross_attn_ln is not None: - x = self.cross_attn_ln(x) - x = self.dropout_module(x) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.encoder_attn_layer_norm(x) - - residual = x - if self.normalize_before: - x = self.final_layer_norm(x) - - x = self.activation_fn(self.fc1(x)) - x = self.activation_dropout_module(x) - if self.ffn_layernorm is not None: - x = self.ffn_layernorm(x) - x = self.fc2(x) - x = self.dropout_module(x) - if self.w_resid is not None: - residual = torch.mul(self.w_resid, residual) - x = self.residual_connection(x, residual) - if not self.normalize_before: - x = self.final_layer_norm(x) - if self.onnx_trace and incremental_state is not None: - saved_state = self.self_attn._get_input_buffer(incremental_state) - assert saved_state is not None - if self_attn_padding_mask is not None: - self_attn_state = [ - saved_state["prev_key"], - saved_state["prev_value"], - saved_state["prev_key_padding_mask"], - ] - else: - self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]] - return x, attn, self_attn_state - return x, attn, None - - def make_generation_fast_(self, need_attn: bool = False, **kwargs): - self.need_attn = need_attn - - def upgrade_state_dict_named(self, state_dict, name): - """ - Rename layer norm states from `...layer_norms.0.weight` to - `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to - `...final_layer_norm.weight` - """ - # update layer norms - layer_norm_map = { - "0": "self_attn_layer_norm", - "1": "encoder_attn_layer_norm", - "2": "final_layer_norm", - } - for old, new in layer_norm_map.items(): - for m in ("weight", "bias"): - k = "{}.layer_norms.{}.{}".format(name, old, m) - if k in state_dict: - state_dict[ - "{}.{}.{}".format(name, new, m) - ] = state_dict[k] - del state_dict[k] - if "{}.{}.{}".format(name, new, m) not in state_dict and "{}.{}".format(new, m) in self.state_dict(): - state_dict[ - "{}.{}.{}".format(name, new, m) - ] = self.state_dict()["{}.{}".format(new, m)] - - prefix = name + "." if name != "" else "" - for param_name, param_tensor in self.state_dict().items(): - if (prefix + param_name) not in state_dict and param_name in self.state_dict(): - state_dict[prefix + param_name] = self.state_dict()[param_name] \ No newline at end of file diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py deleted file mode 100644 index 2e0fc2bd29aedb0b477b7cc8e2c3b606acdd454a..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/discriminative_reranking_nmt/drnmt_rerank.py +++ /dev/null @@ -1,364 +0,0 @@ -#!/usr/bin/env python3 -u -# Copyright (c) Facebook, Inc. and its affiliates. 
-# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Score raw text with a trained model. -""" - -from collections import namedtuple -import logging -from multiprocessing import Pool -import sys -import os -import random - -import numpy as np -import sacrebleu -import torch - -from fairseq import checkpoint_utils, options, utils - - -logger = logging.getLogger("fairseq_cli.drnmt_rerank") -logger.setLevel(logging.INFO) - -Batch = namedtuple("Batch", "ids src_tokens src_lengths") - - -pool_init_variables = {} - - -def init_loaded_scores(mt_scores, model_scores, hyp, ref): - global pool_init_variables - pool_init_variables["mt_scores"] = mt_scores - pool_init_variables["model_scores"] = model_scores - pool_init_variables["hyp"] = hyp - pool_init_variables["ref"] = ref - - -def parse_fairseq_gen(filename, task): - source = {} - hypos = {} - scores = {} - with open(filename, "r", encoding="utf-8") as f: - for line in f: - line = line.strip() - if line.startswith("S-"): # source - uid, text = line.split("\t", 1) - uid = int(uid[2:]) - source[uid] = text - elif line.startswith("D-"): # hypo - uid, score, text = line.split("\t", 2) - uid = int(uid[2:]) - if uid not in hypos: - hypos[uid] = [] - scores[uid] = [] - hypos[uid].append(text) - scores[uid].append(float(score)) - else: - continue - - source_out = [source[i] for i in range(len(hypos))] - hypos_out = [h for i in range(len(hypos)) for h in hypos[i]] - scores_out = [s for i in range(len(scores)) for s in scores[i]] - - return source_out, hypos_out, scores_out - - -def read_target(filename): - with open(filename, "r", encoding="utf-8") as f: - output = [line.strip() for line in f] - return output - - -def make_batches(args, src, hyp, task, max_positions, encode_fn): - assert len(src) * args.beam == len( - hyp - ), f"Expect {len(src) * args.beam} hypotheses for {len(src)} source sentences with beam size {args.beam}. Got {len(hyp)} hypotheses intead." 
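- # Encode each hypothesis (and, when task.cfg.include_src is set, its source sentence) with the reranker's source dictionary before batching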
- hyp_encode = [ - task.source_dictionary.encode_line(encode_fn(h), add_if_not_exist=False).long() - for h in hyp - ] - if task.cfg.include_src: - src_encode = [ - task.source_dictionary.encode_line( - encode_fn(s), add_if_not_exist=False - ).long() - for s in src - ] - tokens = [(src_encode[i // args.beam], h) for i, h in enumerate(hyp_encode)] - lengths = [(t1.numel(), t2.numel()) for t1, t2 in tokens] - else: - tokens = [(h,) for h in hyp_encode] - lengths = [(h.numel(),) for h in hyp_encode] - - itr = task.get_batch_iterator( - dataset=task.build_dataset_for_inference(tokens, lengths), - max_tokens=args.max_tokens, - max_sentences=args.batch_size, - max_positions=max_positions, - ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, - ).next_epoch_itr(shuffle=False) - - for batch in itr: - yield Batch( - ids=batch["id"], - src_tokens=batch["net_input"]["src_tokens"], - src_lengths=batch["net_input"]["src_lengths"], - ) - - -def decode_rerank_scores(args): - if args.max_tokens is None and args.batch_size is None: - args.batch_size = 1 - - logger.info(args) - - use_cuda = torch.cuda.is_available() and not args.cpu - - # Load ensemble - logger.info("loading model(s) from {}".format(args.path)) - models, _model_args, task = checkpoint_utils.load_model_ensemble_and_task( - [args.path], arg_overrides=eval(args.model_overrides), - ) - - for model in models: - if args.fp16: - model.half() - if use_cuda: - model.cuda() - - # Initialize generator - generator = task.build_generator(args) - - # Handle tokenization and BPE - tokenizer = task.build_tokenizer(args) - bpe = task.build_bpe(args) - - def encode_fn(x): - if tokenizer is not None: - x = tokenizer.encode(x) - if bpe is not None: - x = bpe.encode(x) - return x - - max_positions = utils.resolve_max_positions( - task.max_positions(), *[model.max_positions() for model in models] - ) - - src, hyp, mt_scores = parse_fairseq_gen(args.in_text, task) - model_scores = {} - logger.info("decode reranker score") - for batch in make_batches(args, src, hyp, task, max_positions, encode_fn): - src_tokens = batch.src_tokens - src_lengths = batch.src_lengths - if use_cuda: - src_tokens = src_tokens.cuda() - src_lengths = src_lengths.cuda() - - sample = { - "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths}, - } - scores = task.inference_step(generator, models, sample) - - for id, sc in zip(batch.ids.tolist(), scores.tolist()): - model_scores[id] = sc[0] - - model_scores = [model_scores[i] for i in range(len(model_scores))] - - return src, hyp, mt_scores, model_scores - - -def get_score(mt_s, md_s, w1, lp, tgt_len): - return mt_s / (tgt_len ** lp) * w1 + md_s - - -def get_best_hyps(mt_scores, md_scores, hypos, fw_weight, lenpen, beam): - assert len(mt_scores) == len(md_scores) and len(mt_scores) == len(hypos) - hypo_scores = [] - best_hypos = [] - best_scores = [] - offset = 0 - for i in range(len(hypos)): - tgt_len = len(hypos[i].split()) - hypo_scores.append( - get_score(mt_scores[i], md_scores[i], fw_weight, lenpen, tgt_len) - ) - - if (i + 1) % beam == 0: - max_i = np.argmax(hypo_scores) - best_hypos.append(hypos[offset + max_i]) - best_scores.append(hypo_scores[max_i]) - hypo_scores = [] - offset += beam - return best_hypos, best_scores - - -def eval_metric(args, hypos, ref): - if args.metric == "bleu": - score = sacrebleu.corpus_bleu(hypos, [ref]).score - else: - score = sacrebleu.corpus_ter(hypos, [ref]).score - - return score - - -def score_target_hypo(args, fw_weight, lp): - mt_scores = pool_init_variables["mt_scores"] - 
model_scores = pool_init_variables["model_scores"] - hyp = pool_init_variables["hyp"] - ref = pool_init_variables["ref"] - best_hypos, _ = get_best_hyps( - mt_scores, model_scores, hyp, fw_weight, lp, args.beam - ) - rerank_eval = None - if ref: - rerank_eval = eval_metric(args, best_hypos, ref) - print(f"fw_weight {fw_weight}, lenpen {lp}, eval {rerank_eval}") - - return rerank_eval - - -def print_result(best_scores, best_hypos, output_file): - for i, (s, h) in enumerate(zip(best_scores, best_hypos)): - print(f"{i}\t{s}\t{h}", file=output_file) - - -def main(args): - utils.import_user_module(args) - - src, hyp, mt_scores, model_scores = decode_rerank_scores(args) - - assert ( - not args.tune or args.target_text is not None - ), "--target-text has to be set when tuning weights" - if args.target_text: - ref = read_target(args.target_text) - assert len(src) == len( - ref - ), f"different numbers of source and target sentences ({len(src)} vs. {len(ref)})" - - orig_best_hypos = [hyp[i] for i in range(0, len(hyp), args.beam)] - orig_eval = eval_metric(args, orig_best_hypos, ref) - - if args.tune: - logger.info("tune weights for reranking") - - random_params = np.array( - [ - [ - random.uniform( - args.lower_bound_fw_weight, args.upper_bound_fw_weight - ), - random.uniform(args.lower_bound_lenpen, args.upper_bound_lenpen), - ] - for k in range(args.num_trials) - ] - ) - - logger.info("launching pool") - with Pool( - 32, - initializer=init_loaded_scores, - initargs=(mt_scores, model_scores, hyp, ref), - ) as p: - rerank_scores = p.starmap( - score_target_hypo, - [ - (args, random_params[i][0], random_params[i][1],) - for i in range(args.num_trials) - ], - ) - if args.metric == "bleu": - best_index = np.argmax(rerank_scores) - else: - best_index = np.argmin(rerank_scores) - best_fw_weight = random_params[best_index][0] - best_lenpen = random_params[best_index][1] - else: - assert ( - args.lenpen is not None and args.fw_weight is not None - ), "--lenpen and --fw-weight should be set" - best_fw_weight, best_lenpen = args.fw_weight, args.lenpen - - best_hypos, best_scores = get_best_hyps( - mt_scores, model_scores, hyp, best_fw_weight, best_lenpen, args.beam - ) - - if args.results_path is not None: - os.makedirs(args.results_path, exist_ok=True) - output_path = os.path.join( - args.results_path, "generate-{}.txt".format(args.gen_subset), - ) - with open(output_path, "w", buffering=1, encoding="utf-8") as o: - print_result(best_scores, best_hypos, o) - else: - print_result(best_scores, best_hypos, sys.stdout) - - if args.target_text: - rerank_eval = eval_metric(args, best_hypos, ref) - print(f"before reranking, {args.metric.upper()}:", orig_eval) - print( - f"after reranking with fw_weight={best_fw_weight}, lenpen={best_lenpen}, {args.metric.upper()}:", - rerank_eval, - ) - - -def cli_main(): - parser = options.get_generation_parser(interactive=True) - - parser.add_argument( - "--in-text", - default=None, - required=True, - help="text from fairseq-interactive output, containing source sentences and hypotheses", - ) - parser.add_argument("--target-text", default=None, help="reference text") - parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu") - parser.add_argument( - "--tune", - action="store_true", - help="if set, tune weights on fw scores and lenpen instead of applying fixed weights for reranking", - ) - parser.add_argument( - "--lower-bound-fw-weight", - default=0.0, - type=float, - help="lower bound of search space", - ) - parser.add_argument( - 
"--upper-bound-fw-weight", - default=3, - type=float, - help="upper bound of search space", - ) - parser.add_argument( - "--lower-bound-lenpen", - default=0.0, - type=float, - help="lower bound of search space", - ) - parser.add_argument( - "--upper-bound-lenpen", - default=3, - type=float, - help="upper bound of search space", - ) - parser.add_argument( - "--fw-weight", type=float, default=None, help="weight on the fw model score" - ) - parser.add_argument( - "--num-trials", - default=1000, - type=int, - help="number of trials to do for random search", - ) - - args = options.parse_args_and_arch(parser) - main(args) - - -if __name__ == "__main__": - cli_main() diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/pay_less_attention_paper/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/pay_less_attention_paper/README.md deleted file mode 100644 index 5adab11f4dc3461f9e7126ac391b04e703616e6b..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/pay_less_attention_paper/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# Pay Less Attention with Lightweight and Dynamic Convolutions (Wu et al., 2019) - -This page contains pointers to pre-trained models as well as instructions on how to train new models for [our paper](https://arxiv.org/abs/1901.10430). - -## Citation: -```bibtex -@inproceedings{wu2018pay, - title = {Pay Less Attention with Lightweight and Dynamic Convolutions}, - author = {Felix Wu and Angela Fan and Alexei Baevski and Yann Dauphin and Michael Auli}, - booktitle = {International Conference on Learning Representations}, - year = {2019}, - url = {https://arxiv.org/abs/1901.10430}, -} -``` - -## Translation - -### Pre-trained models -For some datasets we release models without GLUs which are faster at inference. - -Model | Description | Dataset | Download ----|---|---|--- -`lightconv.no_glu.iwslt14.de-en` | LightConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.lightconv.tar.gz)
      IWSLT14 test:
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2) -`dynamicconv.no_glu.iwslt14.de-en` | DynamicConv (without GLUs) | [IWSLT14 German-English](https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/iwslt14.de-en.dynamicconv.tar.gz)
      IWSLT14 test:
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/iwslt14.de-en.test.tar.bz2) -`lightconv.no_glu.wmt16.en-de` | LightConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv.tar.gz)
      newstest2014 (shared vocab):
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`dynamicconv.no_glu.wmt16.en-de` | DynamicConv (without GLUs) | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv.tar.gz)
      newstest2014 (shared vocab):
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`lightconv.glu.wmt16.en-de` | LightConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.lightconv-glu.tar.gz)
      newstest2014 (shared vocab):
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`dynamicconv.glu.wmt16.en-de` | DynamicConv | [WMT16 English-German](https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt16.en-de.joined-dict.dynamicconv-glu.tar.gz)
      newstest2014 (shared vocab):
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt16.en-de.joined-dict.newstest2014.tar.bz2) -`lightconv.glu.wmt14.en-fr` | LightConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.lightconv-glu.tar.gz)
      newstest2014:
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2) -`dynamicconv.glu.wmt14.en-fr` | DynamicConv | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt14.en-fr.joined-dict.dynamicconv-glu.tar.gz)
      newstest2014:
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-fr.joined-dict.newstest2014.tar.bz2) -`lightconv.glu.wmt17.zh-en` | LightConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.lightconv-glu.tar.gz)
      newstest2017:
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2) -`dynamicconv.glu.wmt17.zh-en` | DynamicConv | [WMT17 Chinese-English](http://statmt.org/wmt17/translation-task.html#Download) | model:
      [download (.tar.gz)](https://dl.fbaipublicfiles.com/fairseq/models/dynamicconv/wmt17.zh-en.dynamicconv-glu.tar.gz)
      newstest2017:
      [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.zh-en.newstest2017.tar.bz2) - -### Memory-Efficient CUDA Kernels - -Since the PyTorch implementations of Light/Dynamic conv are quite memory intensive, we have developed CUDA kernels that implement the light and dynamic convolution operator in a memory-efficient and performant manner. For large sequence lengths, these kernels save about 50% memory compared to the PyTorch equivalent. - -To install the kernels, use the commands below. Once installed, they will automatically be used in place of the PyTorch implementations whenever a light or dynamic convolution is used. - -```sh -# to install lightconv -cd fairseq/modules/lightconv_layer -python cuda_function_gen.py -python setup.py install - -# to install dynamicconv -cd fairseq/modules/dynamicconv_layer -python cuda_function_gen.py -python setup.py install -``` - -### Example usage (torch.hub) - -We require a few additional Python dependencies for preprocessing: -```bash -pip install sacremoses subword_nmt -``` - -Interactive translation via PyTorch Hub: -```python -import torch - -# List available models -torch.hub.list('pytorch/fairseq') # [..., 'lightconv.glu.wmt17.zh-en', ... ] - -# Load a transformer trained on WMT'16 En-De -zh2en = torch.hub.load('pytorch/fairseq', 'lightconv.glu.wmt17.zh-en', tokenizer='moses', bpe='subword_nmt') - -# The underlying model is available under the *models* attribute -assert isinstance(zh2en.models[0], fairseq.models.lightconv.LightConvModel) - -# Translate a sentence -zh2en.translate('你好 世界') -# 'Hello World' -``` - -Loading custom models: -```python -from fairseq.models.lightconv import LightConvModel -en2fr = LightConvModel.from_pretrained( - '/path/to/checkpoints', - checkpoint_file='checkpoint_best.pt', - data_name_or_path='data-bin/wmt14_en_fr', - bpe='subword_nmt', - bpe_codes='data-bin/wmt14_en_fr/en.code' -) -en2fr.translate('Hello world!') -# 'Bonjour le monde' -``` - -### Preprocessing the training datasets - -Please follow the instructions in [`examples/translation/README.md`](../translation/README.md) to preprocess the data. - -### Training and evaluation options: -To use the model without GLU, please set `--encoder-glu 0 --decoder-glu 0`. -For LightConv, please use `--encoder-conv-type lightweight --decoder-conv-type lightweight`, otherwise the default is DynamicConv. -For best BLEU results, lenpen may need to be manually tuned. - -To use the CUDA kernels, first install the PyTorch modules using the commands -above. Once the CUDA modules are installed, they will automatically be used -instead of the PyTorch modules. 
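If you want to confirm which implementation will actually run, a quick import probe is enough. The sketch below is illustrative only: the extension module names (`lightconv_cuda`, `dynamicconv_cuda`) are assumptions based on the build directories above.

```python
# Minimal sketch: check whether the compiled CUDA extensions from the setup.py
# steps above are importable. The module names are assumptions based on those
# build directories; if an import fails, the pure-PyTorch convolution is used.
import importlib

for name in ("lightconv_cuda", "dynamicconv_cuda"):
    try:
        importlib.import_module(name)
        print(f"{name}: CUDA kernel available")
    except ImportError:
        print(f"{name}: not found, falling back to the PyTorch implementation")
```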
- -### IWSLT14 De-En -Training and evaluating DynamicConv (without GLU) on a GPU: -```sh -# Training -SAVE="save/dynamic_conv_iwslt" -mkdir -p $SAVE -CUDA_VISIBLE_DEVICES=0 $(which fairseq-train) data-bin/iwslt14.tokenized.de-en \ - --clip-norm 0 --optimizer adam --lr 0.0005 \ - --source-lang de --target-lang en --max-tokens 4000 --no-progress-bar \ - --log-interval 100 --stop-min-lr '1e-09' --weight-decay 0.0001 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --lr-scheduler inverse_sqrt \ - --ddp-backend=legacy_ddp \ - --max-update 50000 --warmup-updates 4000 --warmup-init-lr '1e-07' \ - --adam-betas '(0.9, 0.98)' --keep-last-epochs 10 \ - -a lightconv_iwslt_de_en --save-dir $SAVE \ - --dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \ - --encoder-glu 0 --decoder-glu 0 -python scripts/average_checkpoints.py --inputs $SAVE \ - --num-epoch-checkpoints 10 --output "${SAVE}/checkpoint_last10_avg.pt" - -# Evaluation -CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/iwslt14.tokenized.de-en --path "${SAVE}/checkpoint_last10_avg.pt" --batch-size 128 --beam 4 --remove-bpe --lenpen 1 --gen-subset test --quiet -``` - -### WMT16 En-De -Training and evaluating DynamicConv (with GLU) on WMT16 En-De using cosine scheduler on one machine with 8 V100 GPUs: -```sh -# Training -SAVE="save/dynamic_conv_wmt16en2de" -mkdir -p $SAVE -python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \ - data-bin/wmt16_en_de_bpe32k --fp16 --log-interval 100 --no-progress-bar \ - --max-update 30000 --share-all-embeddings --optimizer adam \ - --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \ - --ddp-backend=legacy_ddp --max-tokens 3584 \ - --lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \ - --lr-shrink 1 --lr 0.001 --min-lr 1e-7 --warmup-init-lr 1e-07 \ - --t-mult 1 --lr-period-updates 20000 \ - --arch lightconv_wmt_en_de_big --save-dir $SAVE \ - --dropout 0.3 --attention-dropout 0.1 --weight-dropout 0.1 \ - --encoder-glu 1 --decoder-glu 1 - -# Evaluation -CUDA_VISIBLE_DEVICES=0 fairseq-generate data-bin/wmt16.en-de.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" --batch-size 128 --beam 5 --remove-bpe --lenpen 0.5 --gen-subset test > wmt16_gen.txt -bash scripts/compound_split_bleu.sh wmt16_gen.txt -``` - -### WMT14 En-Fr -Training DynamicConv (with GLU) on WMT14 En-Fr using cosine scheduler on one machine with 8 V100 GPUs: -```sh -# Training -SAVE="save/dynamic_conv_wmt14en2fr" -mkdir -p $SAVE -python -m torch.distributed.launch --nproc_per_node 8 $(which fairseq-train) \ - data-bin/wmt14_en_fr --fp16 --log-interval 100 --no-progress-bar \ - --max-update 30000 --share-all-embeddings --optimizer adam \ - --adam-betas '(0.9, 0.98)' --clip-norm 0.0 --weight-decay 0.0 \ - --criterion label_smoothed_cross_entropy --label-smoothing 0.1 \ - --stop-min-lr 1e-09 --update-freq 16 --attention-dropout 0.1 --keep-last-epochs 10 \ - --ddp-backend=legacy_ddp --max-tokens 3584 \ - --lr-scheduler cosine --warmup-init-lr 1e-7 --warmup-updates 10000 \ - --lr-shrink 1 --lr 0.001 --min-lr 1e-7 --warmup-init-lr 1e-07 \ - --t-mult 1 --lr-period-updates 70000 \ - --arch lightconv_wmt_en_fr_big --save-dir $SAVE \ - --dropout 0.1 --attention-dropout 0.1 --weight-dropout 0.1 \ - --encoder-glu 1 --decoder-glu 1 - -# Evaluation -CUDA_VISIBLE_DEVICES=0 fairseq-generate 
data-bin/wmt14.en-fr.joined-dict.newstest2014 --path "${SAVE}/checkpoint_best.pt" --batch-size 128 --beam 5 --remove-bpe --lenpen 0.9 --gen-subset test -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/shuffled_word_order/README.md b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/shuffled_word_order/README.md deleted file mode 100644 index f20483849a8ca33bf349b57882a79155ba593bf1..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/shuffled_word_order/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# Masked Language Modeling and the Distributional Hypothesis: Order Word Matters Pre-training for Little - -[https://arxiv.org/abs/2104.06644](https://arxiv.org/abs/2104.06644) - -## Introduction - -In this work, we pre-train [RoBERTa](../roberta) base on various word shuffled variants of BookWiki corpus (16GB). We observe that a word shuffled pre-trained model achieves surprisingly good scores on GLUE, PAWS and several parametric probing tasks. Please read our paper for more details on the experiments. - -## Pre-trained models - -| Model | Description | Download | -| ------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | -| `roberta.base.orig` | RoBERTa (base) trained on natural corpus | [roberta.base.orig.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.orig.tar.gz) | -| `roberta.base.shuffle.n1` | RoBERTa (base) trained on n=1 gram sentence word shuffled data | [roberta.base.shuffle.n1.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n1.tar.gz) | -| `roberta.base.shuffle.n2` | RoBERTa (base) trained on n=2 gram sentence word shuffled data | [roberta.base.shuffle.n2.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n2.tar.gz) | -| `roberta.base.shuffle.n3` | RoBERTa (base) trained on n=3 gram sentence word shuffled data | [roberta.base.shuffle.n3.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n3.tar.gz) | -| `roberta.base.shuffle.n4` | RoBERTa (base) trained on n=4 gram sentence word shuffled data | [roberta.base.shuffle.n4.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n4.tar.gz) | -| `roberta.base.shuffle.512` | RoBERTa (base) trained on unigram 512 word block shuffled data | [roberta.base.shuffle.512.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.512.tar.gz) | -| `roberta.base.shuffle.corpus` | RoBERTa (base) trained on unigram corpus word shuffled data | [roberta.base.shuffle.corpus.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.corpus.tar.gz) | -| `roberta.base.shuffle.corpus_uniform` | RoBERTa (base) trained on unigram corpus word shuffled data, where all words are uniformly sampled | [roberta.base.shuffle.corpus_uniform.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.corpus_uniform.tar.gz) | -| `roberta.base.nopos` | RoBERTa (base) without positional embeddings, trained on natural corpus | [roberta.base.nopos.tar.gz](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.nopos.tar.gz) | - -## Results - -[GLUE (Wang et al, 2019)](https://gluebenchmark.com/) & [PAWS (Zhang et al, 2019)](https://github.com/google-research-datasets/paws) _(dev set, single model, 
single-task fine-tuning, median of 5 seeds)_ - -| name | CoLA | MNLI | MRPC | PAWS | QNLI | QQP | RTE | SST-2 | -| :----------------------------------- | ----: | ----: | ----: | ----: | ----: | ----: | ----: | ----: | -| `roberta.base.orig` | 61.4 | 86.11 | 89.19 | 94.46 | 92.53 | 91.26 | 74.64 | 93.92 | -| `roberta.base.shuffle.n1` | 35.15 | 82.64 | 86 | 89.97 | 89.02 | 91.01 | 69.02 | 90.47 | -| `roberta.base.shuffle.n2` | 54.37 | 83.43 | 86.24 | 93.46 | 90.44 | 91.36 | 70.83 | 91.79 | -| `roberta.base.shuffle.n3` | 48.72 | 83.85 | 86.36 | 94.05 | 91.69 | 91.24 | 70.65 | 92.02 | -| `roberta.base.shuffle.n4` | 58.64 | 83.77 | 86.98 | 94.32 | 91.69 | 91.4 | 70.83 | 92.48 | -| `roberta.base.shuffle.512` | 12.76 | 77.52 | 79.61 | 84.77 | 85.19 | 90.2 | 56.52 | 86.34 | -| `roberta.base.shuffle.corpus` | 0 | 71.9 | 70.52 | 58.52 | 71.11 | 85.52 | 53.99 | 83.35 | -| `roberta.base.shuffle.corpus_random` | 9.19 | 72.33 | 70.76 | 58.42 | 77.76 | 85.93 | 53.99 | 84.04 | -| `roberta.base.nopos` | 0 | 63.5 | 72.73 | 57.08 | 77.72 | 87.87 | 54.35 | 83.24 | - -For more results on probing tasks, please refer to [our paper](https://arxiv.org/abs/2104.06644). - -## Example Usage - -Follow the same usage as in [RoBERTa](https://github.com/pytorch/fairseq/tree/main/examples/roberta) to load and test your models: - -```python -# Download roberta.base.shuffle.n1 model -wget https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n1.tar.gz -tar -xzvf roberta.base.shuffle.n1.tar.gz - -# Load the model in fairseq -from fairseq.models.roberta import RoBERTaModel -roberta = RoBERTaModel.from_pretrained('/path/to/roberta.base.shuffle.n1', checkpoint_file='model.pt') -roberta.eval() # disable dropout (or leave in train mode to finetune) -``` - -**Note**: The model trained without positional embeddings (`roberta.base.nopos`) is a modified `RoBERTa` model, where the positional embeddings are not used. Thus, the typical `from_pretrained` method on fairseq version of RoBERTa will not be able to load the above model weights. To do so, construct a new `RoBERTaModel` object by setting the flag `use_positional_embeddings` to `False` (or [in the latest code](https://github.com/pytorch/fairseq/blob/main/fairseq/models/roberta/model.py#L543), set `no_token_positional_embeddings` to `True`), and then load the individual weights. - -## Fine-tuning Evaluation - -We provide the trained fine-tuned models on MNLI here for each model above for quick evaluation (1 seed for each model). Please refer to [finetuning details](README.finetuning.md) for the parameters of these models. Follow [RoBERTa](https://github.com/pytorch/fairseq/tree/main/examples/roberta) instructions to evaluate these models. 
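As a quick sanity check, one of these MNLI checkpoints can be loaded and queried through the standard RoBERTa hub interface. The sketch below is illustrative only: the checkpoint path, the `model.pt` file name, the `MNLI-bin` data directory, and the example sentence pair are assumptions, and recent fairseq exports the class as `RobertaModel`.

```python
# Minimal sketch, assuming the archive extracts to a directory containing
# `model.pt` and that MNLI was preprocessed to `MNLI-bin` per the RoBERTa
# fine-tuning instructions linked above.
from fairseq.models.roberta import RobertaModel

roberta = RobertaModel.from_pretrained(
    "/path/to/roberta.base.shuffle.n1.mnli",  # assumed extraction path
    checkpoint_file="model.pt",
    data_name_or_path="MNLI-bin",
)
roberta.eval()  # disable dropout

# Hypothetical premise/hypothesis pair, just to exercise the classification head
tokens = roberta.encode(
    "Shuffled pre-training still transfers to NLI.",
    "Word order during pre-training does not matter much.",
)
pred = roberta.predict("sentence_classification_head", tokens).argmax().item()
print(pred)  # index into the task's label dictionary
```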
- -| Model | MNLI M Dev Accuracy | Link | -| :----------------------------------------- | :------------------ | :--------------------------------------------------------------------------------------------------------------- | -| `roberta.base.orig.mnli` | 86.14 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.orig.mnli.tar.gz) | -| `roberta.base.shuffle.n1.mnli` | 82.55 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n1.mnli.tar.gz) | -| `roberta.base.shuffle.n2.mnli` | 83.21 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n2.mnli.tar.gz) | -| `roberta.base.shuffle.n3.mnli` | 83.89 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n3.mnli.tar.gz) | -| `roberta.base.shuffle.n4.mnli` | 84.00 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.n4.mnli.tar.gz) | -| `roberta.base.shuffle.512.mnli` | 77.22 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.512.mnli.tar.gz) | -| `roberta.base.shuffle.corpus.mnli` | 71.88 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.corpus.mnli.tar.gz) | -| `roberta.base.shuffle.corpus_uniform.mnli` | 72.46 | [Download](https://dl.fbaipublicfiles.com/unnatural_pretraining/roberta.base.shuffle.corpus_uniform.mnli.tar.gz) | - -## Citation - -```bibtex -@misc{sinha2021masked, - title={Masked Language Modeling and the Distributional Hypothesis: Order Word Matters Pre-training for Little}, - author={Koustuv Sinha and Robin Jia and Dieuwke Hupkes and Joelle Pineau and Adina Williams and Douwe Kiela}, - year={2021}, - eprint={2104.06644}, - archivePrefix={arXiv}, - primaryClass={cs.CL} -} -``` diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py deleted file mode 100644 index 724c6912a62d48fc61988cac1434a4f5c8754521..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/simultaneous_translation/utils/p_choose_strategy.py +++ /dev/null @@ -1,126 +0,0 @@ -from typing import Optional, Dict -from torch import Tensor -import torch - - -def waitk_p_choose( - tgt_len: int, - src_len: int, - bsz: int, - waitk_lagging: int, - key_padding_mask: Optional[Tensor] = None, - incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None -): - - max_src_len = src_len - if incremental_state is not None: - # Retrieve target length from incremental states - # For inference the length of query is always 1 - max_tgt_len = incremental_state["steps"]["tgt"] - assert max_tgt_len is not None - max_tgt_len = int(max_tgt_len) - else: - max_tgt_len = tgt_len - - if max_src_len < waitk_lagging: - if incremental_state is not None: - max_tgt_len = 1 - return torch.zeros( - bsz, max_tgt_len, max_src_len - ) - - # Assuming the p_choose looks like this for wait k=3 - # src_len = 6, max_tgt_len = 5 - # [0, 0, 1, 0, 0, 0, 0] - # [0, 0, 0, 1, 0, 0, 0] - # [0, 0, 0, 0, 1, 0, 0] - # [0, 0, 0, 0, 0, 1, 0] - # [0, 0, 0, 0, 0, 0, 1] - # linearize the p_choose matrix: - # [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0...] - # The indices of linearized matrix that equals 1 is - # 2 + 6 * 0 - # 3 + 6 * 1 - # ... 
- # n + src_len * n + k - 1 = n * (src_len + 1) + k - 1 - # n from 0 to max_tgt_len - 1 - # - # First, generate the indices (activate_indices_offset: bsz, max_tgt_len) - # Second, scatter a zeros tensor (bsz, max_tgt_len * src_len) - # with activate_indices_offset - # Third, resize the tensor to (bsz, max_tgt_len, src_len) - - activate_indices_offset = ( - ( - torch.arange(max_tgt_len) * (max_src_len + 1) - + waitk_lagging - 1 - ) - .unsqueeze(0) - .expand(bsz, max_tgt_len) - .long() - ) - - if key_padding_mask is not None: - if key_padding_mask[:, 0].any(): - # Left padding - activate_indices_offset += ( - key_padding_mask.sum(dim=1, keepdim=True) - ) - - # Need to clamp the indices that are too large - activate_indices_offset = ( - activate_indices_offset - .clamp( - 0, - min( - [ - max_tgt_len, - max_src_len - waitk_lagging + 1 - ] - ) * max_src_len - 1 - ) - ) - - p_choose = torch.zeros(bsz, max_tgt_len * max_src_len) - - p_choose = p_choose.scatter( - 1, - activate_indices_offset, - 1.0 - ).view(bsz, max_tgt_len, max_src_len) - - if key_padding_mask is not None: - p_choose = p_choose.to(key_padding_mask) - p_choose = p_choose.masked_fill(key_padding_mask.unsqueeze(1), 0) - - if incremental_state is not None: - p_choose = p_choose[:, -1:] - - return p_choose.float() - - -def learnable_p_choose( - energy, - noise_mean: float = 0.0, - noise_var: float = 0.0, - training: bool = True -): - """ - Calculating step wise prob for reading and writing - 1 to read, 0 to write - energy: bsz, tgt_len, src_len - """ - - noise = 0 - if training: - # add noise here to encourage discretness - noise = ( - torch.normal(noise_mean, noise_var, energy.size()) - .type_as(energy) - .to(energy.device) - ) - - p_choose = torch.sigmoid(energy + noise) - - # p_choose: bsz * self.num_heads, tgt_len, src_len - return p_choose diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/dataclass/constants.py b/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/dataclass/constants.py deleted file mode 100644 index 4f159cfe9ac72b0524228fe290181c6898787265..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/fairseq/dataclass/constants.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -from enum import Enum, EnumMeta -from typing import List - - -class StrEnumMeta(EnumMeta): - # this is workaround for submitit pickling leading to instance checks failing in hydra for StrEnum, see - # https://github.com/facebookresearch/hydra/issues/1156 - @classmethod - def __instancecheck__(cls, other): - return "enum" in str(type(other)) - - -class StrEnum(Enum, metaclass=StrEnumMeta): - def __str__(self): - return self.value - - def __eq__(self, other: str): - return self.value == other - - def __repr__(self): - return self.value - - def __hash__(self): - return hash(str(self)) - - -def ChoiceEnum(choices: List[str]): - """return the Enum class used to enforce list of choices""" - return StrEnum("Choices", {k: k for k in choices}) - - -LOG_FORMAT_CHOICES = ChoiceEnum(["json", "none", "simple", "tqdm"]) -DDP_BACKEND_CHOICES = ChoiceEnum([ - "c10d", # alias for pytorch_ddp - "fully_sharded", # FullyShardedDataParallel from fairscale - "legacy_ddp", - "no_c10d", # alias for legacy_ddp - "pytorch_ddp", - "slow_mo", -]) -DDP_COMM_HOOK_CHOICES = ChoiceEnum(["none", "fp16"]) -DATASET_IMPL_CHOICES = ChoiceEnum(["raw", "lazy", "cached", "mmap", "fasta", "huffman"]) -GENERATION_CONSTRAINTS_CHOICES = ChoiceEnum(["ordered", "unordered"]) -GENERATION_DECODING_FORMAT_CHOICES = ChoiceEnum( - ["unigram", "ensemble", "vote", "dp", "bs"] -) -ZERO_SHARDING_CHOICES = ChoiceEnum(["none", "os"]) -PIPELINE_CHECKPOINT_CHOICES = ChoiceEnum(["always", "never", "except_last"]) -PRINT_ALIGNMENT_CHOICES = ChoiceEnum(["hard", "soft"]) diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_valid_subset_checks.py b/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_valid_subset_checks.py deleted file mode 100644 index 3e9191bda66fccfebba34920f88bf7b1efea5f7e..0000000000000000000000000000000000000000 --- a/spaces/OFA-Sys/OFA-vqa/fairseq/tests/test_valid_subset_checks.py +++ /dev/null @@ -1,138 +0,0 @@ -import os -import shutil -import tempfile -import unittest - -from fairseq import options -from fairseq.dataclass.utils import convert_namespace_to_omegaconf -from fairseq.data.data_utils import raise_if_valid_subsets_unintentionally_ignored -from .utils import create_dummy_data, preprocess_lm_data, train_language_model - - -def make_lm_config( - data_dir=None, - extra_flags=None, - task="language_modeling", - arch="transformer_lm_gpt2_tiny", -): - task_args = [task] - if data_dir is not None: - task_args += [data_dir] - train_parser = options.get_training_parser() - train_args = options.parse_args_and_arch( - train_parser, - [ - "--task", - *task_args, - "--arch", - arch, - "--optimizer", - "adam", - "--lr", - "0.0001", - "--max-tokens", - "500", - "--tokens-per-sample", - "500", - "--save-dir", - data_dir, - "--max-epoch", - "1", - ] - + (extra_flags or []), - ) - cfg = convert_namespace_to_omegaconf(train_args) - return cfg - - -def write_empty_file(path): - with open(path, "w"): - pass - assert os.path.exists(path) - - -class TestValidSubsetsErrors(unittest.TestCase): - """Test various filesystem, clarg combinations and ensure that error raising happens as expected""" - - def _test_case(self, paths, extra_flags): - with tempfile.TemporaryDirectory() as data_dir: - [ - write_empty_file(os.path.join(data_dir, f"{p}.bin")) - for p in paths + ["train"] - ] - cfg = make_lm_config(data_dir, extra_flags=extra_flags) - raise_if_valid_subsets_unintentionally_ignored(cfg) - - def test_default_raises(self): - with self.assertRaises(ValueError): - self._test_case(["valid", "valid1"], []) - with self.assertRaises(ValueError): - 
self._test_case( - ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] - ) - - def partially_specified_valid_subsets(self): - with self.assertRaises(ValueError): - self._test_case( - ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] - ) - # Fix with ignore unused - self._test_case( - ["valid", "valid1", "valid2"], - ["--valid-subset", "valid,valid1", "--ignore-unused-valid-subsets"], - ) - - def test_legal_configs(self): - self._test_case(["valid"], []) - self._test_case(["valid", "valid1"], ["--ignore-unused-valid-subsets"]) - self._test_case(["valid", "valid1"], ["--combine-val"]) - self._test_case(["valid", "valid1"], ["--valid-subset", "valid,valid1"]) - self._test_case(["valid", "valid1"], ["--valid-subset", "valid1"]) - self._test_case( - ["valid", "valid1"], ["--combine-val", "--ignore-unused-valid-subsets"] - ) - self._test_case( - ["valid1"], ["--valid-subset", "valid1"] - ) # valid.bin doesn't need to be ignored. - - def test_disable_validation(self): - self._test_case([], ["--disable-validation"]) - self._test_case(["valid", "valid1"], ["--disable-validation"]) - - def test_dummy_task(self): - cfg = make_lm_config(task="dummy_lm") - raise_if_valid_subsets_unintentionally_ignored(cfg) - - def test_masked_dummy_task(self): - cfg = make_lm_config(task="dummy_masked_lm") - raise_if_valid_subsets_unintentionally_ignored(cfg) - - -class TestCombineValidSubsets(unittest.TestCase): - def _train(self, extra_flags): - with self.assertLogs() as logs: - with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: - create_dummy_data(data_dir, num_examples=20) - preprocess_lm_data(data_dir) - - shutil.copyfile(f"{data_dir}/valid.bin", f"{data_dir}/valid1.bin") - shutil.copyfile(f"{data_dir}/valid.idx", f"{data_dir}/valid1.idx") - train_language_model( - data_dir, - "transformer_lm", - ["--max-update", "0", "--log-format", "json"] + extra_flags, - run_validation=False, - ) - return [x.message for x in logs.records] - - def test_combined(self): - flags = ["--combine-valid-subsets"] - logs = self._train(flags) - assert any(["valid1" in x for x in logs]) # loaded 100 examples from valid1 - assert not any(["valid1_ppl" in x for x in logs]) # metrics are combined - - def test_subsets(self): - flags = ["--valid-subset", "valid,valid1"] - logs = self._train(flags) - assert any(["valid_ppl" in x for x in logs]) # loaded 100 examples from valid1 - assert any(["valid1_ppl" in x for x in logs]) # metrics are combined diff --git a/spaces/OIUGLK/bingo/README.md b/spaces/OIUGLK/bingo/README.md deleted file mode 100644 index 218767d1d7debd26932ffddca2ec0f421c0171a9..0000000000000000000000000000000000000000 --- a/spaces/OIUGLK/bingo/README.md +++ /dev/null @@ -1,195 +0,0 @@ ---- -title: bingo -emoji: 📉 -colorFrom: red -colorTo: red -sdk: docker -pinned: true -license: mit -duplicated_from: hf4all/bingo ---- - -
      - -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -
      - -## 演示站点 - -https://bing.github1s.tk - - - -[![img](./docs/images/demo.png)](https://bing.github1s.tk) - -## 功能和特点 - -- 完全基于 Next.js 重写,高度还原 New Bing Web 版 UI,使用体验和 Bing AI 基本一致。 -- 支持 Docker 构建,方便快捷地部署和访问。 -- Cookie 可全局配置,全局共享。 -- 支持持续语音对话 - -## RoadMap - - - [x] 支持 wss 转发 - - [x] 支持一键部署 - - [x] 优化移动端展示 - - [x] 支持画图 - - [x] 支持语音输入(支持语音指令,目前仅支持 PC 版 Edge 及 Chrome 浏览器) - - [x] 支持语音输出(需要手动开启) - - [x] 支持图片输入 - - [x] 支持自定义域名 - - [ ] 支持历史记录 - - [ ] 适配深色模式 - - [ ] 支持内置提示词 - - [ ] 支持离线访问 - - [ ] 国际化翻译 - -## 一键部署 -你也可以一键部署自己的 New Bing AI 到 🤗 HuggingFace 。 - -### 部署到 Huggingface -1. 点击此图标 -[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic),配置可以不改。 - -2. 部署署完成后,点击“设置” 》“站点域名”,点一下,复制一下 HF 域名信息,然后分享给别人即可。 - -> Huggingface 不支持绑定自己的域名,不过我们可以使用曲线救国的方式来达到这个目的 -> 1. 方式二,借助 Cloudflare Workers [部署Cloudflare Workers](#使用Cloudflare-Workers自定义域名) -> 2. 方式一,借助 Github Pages 及 iframe [如何绑定域名](https://github.com/weaigc/bingo/issues/4) - -### 使用Cloudflare Workers自定义域名 - -> 核心代码 [worker.js](./cloudflare/worker.js) - -- [注册 Cloudflare 账号](https://dash.cloudflare.com/sign-up) - -- 添加一个新的网站,需要你有自己的域名并且将域名`Name Server`托管给 Cloudflare 才行(更多信息可自行 Google) - -- 通过左侧菜单进入「Workers」,并点击「Create a Worker」。 - -- 创建 Worker 服务,复制 [worker.js](./cloudflare/worker.js) 全部代码,粘贴至创建的服务中,根据注释进行改动,保存并部署。 - -- 触发器 中自定义访问域名。 - -### 部署其它平台 -
      - -由于其他平台目前遭到 New Bing 封杀,会遇到很多问题,不再做推荐,有需要的可以自行查看 - - -#### 部署到 Netlify -[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo) - -#### 部署到 Vercel -如果你是 Vercel 付费用户,可以点以下链接一键部署到 Vercel。免费版本有[接口超时限制](https://vercel.com/docs/concepts/limits/overview),不推荐使用 - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example) - -#### 部署到 Render - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo) -
      - -## 环境和依赖 - -- Node.js >= 18 -- Bing AI 的[身份信息](#如何获取-BING_HEADER)) - -## 安装和使用 - -* 使用 Node 启动 - -```bash -git clone https://github.com/weaigc/bingo.git -npm i # 推荐使用 pnpm i -npm run build -npm run start -``` - -* 使用 Docker 启动 -```bash -docker pull weaigc/bingo -docker run --rm -it -p 7860:7860 weaigc/bingo -# 或者 -docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo -``` - -## 如何获取 BING_HEADER -> 配置了 BING_HEADER 意味着你将自己的账号共享给所有使用此服务的人,如果不需要免登录画图的功能,不建议设置此变量 - -打开 https://www.bing.com 并登录,然后访问 https://www.bing.com/turing/captcha/challenge,通过人机校验,然后 - -![BING HEADER](./docs/images/curl.png) - -> 复制出来的内容应该如下所示。确认格式无误后,打开 https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 ,粘贴进去,点击“转成 BING_HEADER 并复制”,然后从剪切板粘贴即可得到。(你也可以先在网页上进行验证) - -以下是格式参考,需要注意的是,网页端保存的格式是以`curl`开头, 而服务端配置的 `BING_HEADER` 是 `base64` 格式,两者不能互通。 -
      -正常格式/网页端保存的格式(格式仅供参考) - -``` -curl 'https://www.bing.com/turing/captcha/challenge' \ - -H 'authority: www.bing.com' \ - -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \ - -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \ - -H 'cache-control: max-age=0' \ - -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; _RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; 
USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \ - -H 'dnt: 1' \ - -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \ - -H 'sec-ch-ua-arch: "x86"' \ - -H 'sec-ch-ua-bitness: "64"' \ - -H 'sec-ch-ua-full-version: "116.0.1938.29"' \ - -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \ - -H 'sec-ch-ua-mobile: ?0' \ - -H 'sec-ch-ua-model: ""' \ - -H 'sec-ch-ua-platform: "Windows"' \ - -H 'sec-ch-ua-platform-version: "15.0.0"' \ - -H 'sec-fetch-dest: document' \ - -H 'sec-fetch-mode: navigate' \ - -H 'sec-fetch-site: none' \ - -H 'sec-fetch-user: ?1' \ - -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \ - -H 'sec-ms-gec-version: 1-116.0.1938.29' \ - -H 'upgrade-insecure-requests: 1' \ - -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \ - -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \ - -H 'x-edge-shopping-flag: 1' \ - --compressed -``` -
      - -
      -转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式) - -``` -Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptSTBmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5ODdr
ZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgICAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA== -``` -
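If you prefer not to rely on the web converter, the transformation is conceptually just base64-encoding the copied `curl` command. The snippet below is a rough local sketch under that assumption; the file name is arbitrary, and the official converter page may normalize whitespace slightly differently, so treat its output as the reference.

```python
import base64
from pathlib import Path

# The curl command copied from the browser, saved to a local file (name is arbitrary).
curl_text = Path("bing_header_curl.txt").read_text(encoding="utf-8")

# Collapse the multi-line curl command into a single line before encoding.
one_line = " ".join(line.strip() for line in curl_text.splitlines() if line.strip())

# BING_HEADER is the base64 form of that single-line curl command.
bing_header = base64.b64encode(one_line.encode("utf-8")).decode("ascii")
print(bing_header)
```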
      - - -## 鸣谢 - - 感谢 [EdgeGPT](https://github.com/acheong08/EdgeGPT) 提供的代理 API 的方法。 - - 感谢 [Vercel AI](https://github.com/vercel-labs/ai-chatbot) 提供的基础脚手架和 [ChatHub](https://github.com/chathub-dev/chathub) [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) 提供的部分代码。 - - -## 答疑及交流 - - - -## License - -MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE). - - diff --git a/spaces/ORI-Muchim/BarKeYaeTTS/text/symbols.py b/spaces/ORI-Muchim/BarKeYaeTTS/text/symbols.py deleted file mode 100644 index 8648bd1e2ac0cfe99e0eaab6540c56baf668fe14..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/BarKeYaeTTS/text/symbols.py +++ /dev/null @@ -1,74 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -'''# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? ' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚αᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/ORI-Muchim/RaidenTTS/text/cleaners.py b/spaces/ORI-Muchim/RaidenTTS/text/cleaners.py deleted file mode 100644 index b155dfeca776469dc1ab4286497f43d674c82897..0000000000000000000000000000000000000000 --- a/spaces/ORI-Muchim/RaidenTTS/text/cleaners.py +++ /dev/null @@ -1,11 +0,0 @@ -import re -from text.korean import latin_to_hangul, number_to_hangul, divide_hangul, korean_to_lazy_ipa, korean_to_ipa - -def korean_cleaners(text): - '''Pipeline for Korean text''' - text = latin_to_hangul(text) - text = number_to_hangul(text) - text = divide_hangul(text) - if re.match('[\u3131-\u3163]', text[-1]): - text += '.' 
- return text diff --git a/spaces/OgiKazus/vits-uma-genshin-honkai/text/__init__.py b/spaces/OgiKazus/vits-uma-genshin-honkai/text/__init__.py deleted file mode 100644 index 663c4b6416affb53c9dc56dddbc8b2b65d4bf518..0000000000000000000000000000000000000000 --- a/spaces/OgiKazus/vits-uma-genshin-honkai/text/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence, clean_text - - -def cleaned_text_to_sequence(cleaned_text): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()] - return sequence - - -def sequence_to_text(sequence): - '''Converts a sequence of IDs back to a string''' - result = '' - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/app.py b/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/app.py deleted file mode 100644 index c85a71be15db61a169854185062bc38b0f8b61f8..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGLMCN/app.py +++ /dev/null @@ -1,266 +0,0 @@ -import logging -import os -import uuid - -import gradio as gr - -from llmriddles.questions import QuestionExecutor -from llmriddles.questions import list_ordered_questions - -_QUESTION_SESSIONS = {} -count = 0 -_QUESTIONS = list_ordered_questions() -_LANG = os.environ.get('QUESTION_LANG', 'cn') -assert _LANG in ['cn', 'en'], _LANG -_LLM = os.environ.get('QUESTION_LLM', 'chatgpt') -assert _LLM in ['chatgpt', 'chatglm', 'mistral-7b'], _LLM -_LLM_KEY = os.environ.get('QUESTION_LLM_KEY', None) -_DEBUG = os.environ.get('DEBUG', 'false').lower() == 'true' - -if _DEBUG: - logging.getLogger().setLevel(logging.INFO) -else: - logging.getLogger().setLevel(logging.WARNING) -if _LANG == "cn": - title = "完蛋!我被 LLM 拿捏了" - requirement_ph = """ -

      欢迎来到 LLM Riddles!

      -

      你将通过本游戏对大语言模型产生更深刻的理解。在本游戏中,你需要构造一个提给语言大模型的问题,使得它回复的答案符合题目要求。点击\"下一题\" 即可开始游戏。

      - """ - requirement_label = "游戏须知/说明" - question_ph = "你对大语言模型的提问(例如:请你输出1+1=3)" - question_label = "玩家提问栏" - answer_ph = "大语言模型的回答" - answer_label = "大语言模型回答栏" - submit_label = "提交" - next_label = "下一题" - api_ph = "你个人的大语言模型 API Key (例如:ChatGPT)" - api_label = "API key" - predict_label = "结果正确性" - explanation_label = "结果详细解释" - game_cleared_label = "

      祝贺!你已成功通关!

      " - correct_label = "正确" - wrong_label = "错误" - api_error_info = "请在提交问题之前先输入你的 API Key" - try_again_label = "再玩一次" - select_label = "选择关卡(投机取巧需谨慎)" - title_markdown = """ -
      - Banner Image -
      -

      🎭LLM Riddles:完蛋!我被 LLM 拿捏了

      -
      更多不同语言模型的在线试玩 demo 可以访问 GitHub源代码仓库获取
      -
      如果你喜欢这个项目,请给我们在 GitHub 点个 star ✨ 代码仓库传送门 。我们将会持续保持更新。再次感谢游戏 原作者 的奇思妙想!
      -
      注意:算法模型的输出可能包含一定的随机性。相关结果不代表任何开发者和相关 AI 服务的态度和意见。本项目开发者不对生成结果作任何保证,仅供娱乐。
      - """ - tos_markdown = """ - ### 使用条款 - 玩家使用本服务须同意以下条款: - 该服务是一项探索性研究预览版,仅供非商业用途。它仅提供有限的安全措施,并可能生成令人反感的内容。不得将其用于任何非法、有害、暴力、种族主义等目的。该服务可能会收集玩家对话数据以供未来研究之用。 - 如果您的游玩体验有不佳之处,请发送邮件至 opendilab@pjlab.org.cn ! 我们将删除相关信息,并不断改进这个项目。 - 为了获得最佳体验,请使用台式电脑进行此预览版游戏,因为移动设备可能会影响可视化效果。 - **版权所有 2023 OpenDILab。** - """ -elif _LANG == "en": - title = "LLM Riddles: Oops! Rolling in LLM." - requirement_ph = """ -

      Welcome to LLM Riddles!

      -

      In this game, you'll gain a deeper understanding of language models. Your challenge is to create a question to ask a language model in a way that the answer it provides meets specific criteria. Click \'Next\' to Start

      - """ - requirement_label = "Game Requirements" - question_ph = "Your Question for LLM (e.g. Please print 1+1=3)" - question_label = "Question" - answer_ph = "Answer From LLM" - answer_label = "Answer" - submit_label = "Submit" - next_label = "Next" - api_ph = "Your API Key (e.g. ChatGPT)" - api_label = "API key" - predict_label = "Correctness" - explanation_label = "Explanation" - game_cleared_label = "

      Congratulations!

      " - correct_label = "Correct" - wrong_label = "Wrong" - api_error_info = "Please Enter API Key Before Submitting Question." - try_again_label = "Try Again" - select_label = "Select level" - title_markdown = """ -
      - Banner Image -
      -

      🎭LLM Riddles: Oops! Rolling in LLM.

      -
      If you like our project, please give us a star ✨ on GitHub for latest update (Code Link) . Thanks for the interesting idea of the original game author .
      -
      Notice: The output is generated by algorithm scheme and may involve some randomness. It does not represent the attitudes and opinions of any developers and AI services in this project. We do not make any guarantees about the generated content.
      - """ - tos_markdown = """ - ### Terms of use - By using this service, players are required to agree to the following terms: - The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. - Please send email to opendilab@pjlab.org.cn if you get any inappropriate answer! We will delete those and keep improving our moderator. - For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality. - **Copyright 2023 OpenDILab.** - """ -else: - raise KeyError("invalid _LANG: {}".format(_LANG)) - - -def _need_api_key(): - return (_LLM == 'chatgpt' or _LLM == 'chatglm') and _LLM_KEY is None - - -def _get_api_key_cfgs(api_key): - if _LLM == 'chatgpt': - return {'api_key': api_key} - elif _LLM == 'chatglm': - return {'api_key': api_key} - else: - return {} - - -if __name__ == '__main__': - with gr.Blocks(title=title, theme='ParityError/Interstellar') as demo: - gr.Markdown(title_markdown) - - with gr.Row(): - gr_requirement = gr.HTML(value=requirement_ph, label=requirement_label) - with gr.Row(): - with gr.Column(): - gr_question = gr.TextArea(placeholder=question_ph, label=question_label) - gr_api_key = gr.Text(placeholder=api_ph, label=api_label, type='password', visible=_need_api_key()) - with gr.Row(): - gr_submit = gr.Button(submit_label, interactive=False) - gr_next = gr.Button(next_label) - with gr.Row(): - gr_select = gr.Radio( - choices=[(QuestionExecutor(q, _LANG).question_name, i) for i, q in enumerate(_QUESTIONS)], - label=select_label - ) - - with gr.Column(): - gr_uuid = gr.Text(value='', visible=False) - gr_predict = gr.Label(label=predict_label) - gr_answer = gr.TextArea(label=answer_label, lines=3) - gr_explanation = gr.TextArea(label=explanation_label, lines=1) - gr.Markdown(tos_markdown) - - def _postprocess_question_text(question_text): - if _LANG == 'cn': - idx = question_text.find(',') - question_title = question_text[:idx] - former, latter = question_title.split('(') - question_title = former + ':' + latter[:-1] - question_text = f"

      {question_title}

      {question_text[idx+1:]}

      " - elif _LANG == 'en': - idx = question_text.find(',') - question_text = f"

      {question_text[:idx]}

      {question_text[idx+1:]}

      " - return question_text - - - def _radio_select(uuid_, select_qid): - global count - if not uuid_: - uuid_ = str(uuid.uuid4()) - count += 1 - logging.info(f'Player {count} starts the game now') - global _QUESTION_SESSIONS - if uuid_ not in _QUESTION_SESSIONS: - _QUESTION_SESSIONS[uuid_] = set(), select_qid - else: - _exists, _ = _QUESTION_SESSIONS[uuid_] - _QUESTION_SESSIONS[uuid_] = _exists, select_qid - - executor = QuestionExecutor(_QUESTIONS[select_qid], _LANG) - question_text = _postprocess_question_text(executor.question_text) - return question_text, '', '', {}, '', \ - gr.Button(submit_label, interactive=True), \ - gr.Button(next_label, interactive=False), \ - uuid_ - - gr_select.select( - _radio_select, - inputs=[gr_uuid, gr_select], - outputs=[ - gr_requirement, gr_question, gr_answer, - gr_predict, gr_explanation, gr_submit, gr_next, gr_uuid, - ], - ) - - - def _next_question(uuid_): - global count - if not uuid_: - uuid_ = str(uuid.uuid4()) - count += 1 - logging.info(f'Player {count} starts the game now') - global _QUESTION_SESSIONS - if uuid_ in _QUESTION_SESSIONS: - _exists, _qid = _QUESTION_SESSIONS[uuid_] - else: - _exists, _qid = set(), -1 - _qid += 1 - _QUESTION_SESSIONS[uuid_] = _exists, _qid - - if _qid >= len(_QUESTIONS): - del _QUESTION_SESSIONS[uuid_] - logging.info(f'Player {count} has passed the game now') - return game_cleared_label, '', '', {}, '', \ - gr.Button(submit_label, interactive=False), \ - gr.Button(try_again_label, interactive=True), \ - '', \ - gr.Radio( - choices=[(QuestionExecutor(q, _LANG).question_name, i) for i, q in enumerate(_QUESTIONS)], - label=select_label - ) - else: - executor = QuestionExecutor(_QUESTIONS[_qid], _LANG) - question_text = _postprocess_question_text(executor.question_text) - return question_text, '', '', {}, '', \ - gr.Button(submit_label, interactive=True), \ - gr.Button(next_label, interactive=False), \ - uuid_, \ - gr.Radio( - choices=[(QuestionExecutor(q, _LANG).question_name, i) for i, q in enumerate(_QUESTIONS)], - value=_qid, - label=select_label, - ) - - - gr_next.click( - fn=_next_question, - inputs=[gr_uuid], - outputs=[ - gr_requirement, gr_question, gr_answer, - gr_predict, gr_explanation, gr_submit, gr_next, - gr_uuid, gr_select, - ], - ) - - - def _submit_answer(qs_text: str, api_key: str, uuid_: str): - global _QUESTION_SESSIONS - if _need_api_key() and not api_key: - raise gr.Error(api_error_info) - - _exists, _qid = _QUESTION_SESSIONS[uuid_] - executor = QuestionExecutor( - _QUESTIONS[_qid], _LANG, - llm=_LLM, llm_cfgs=_get_api_key_cfgs(api_key) if _need_api_key() else {'api_key': _LLM_KEY} - ) - answer_text, correctness, explanation = executor.check(qs_text) - labels = {correct_label: 1.0} if correctness else {wrong_label: 1.0} - if correctness: - _QUESTION_SESSIONS[uuid_] = (_exists | {_qid}), _qid - return answer_text, labels, explanation, gr.Button(next_label, interactive=True), uuid_ - else: - return answer_text, labels, explanation, gr.Button(next_label, interactive=False), uuid_ - - - gr_submit.click( - _submit_answer, - inputs=[gr_question, gr_api_key, gr_uuid], - outputs=[gr_answer, gr_predict, gr_explanation, gr_next, gr_uuid], - ) - - concurrency = int(os.environ.get('CONCURRENCY', os.cpu_count())) - favicon_path = os.path.join(os.path.dirname(__file__), 'llmriddles', 'assets', 'avatar.png') - demo.queue().launch(max_threads=concurrency, favicon_path=favicon_path, share=True) diff --git a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTCN/llmriddles/questions/level1.py 
b/spaces/OpenDILabCommunity/LLMRiddlesChatGPTCN/llmriddles/questions/level1.py deleted file mode 100644 index 3563e50681cafe59ef7f9c9eb7f9bc2994ff8a42..0000000000000000000000000000000000000000 --- a/spaces/OpenDILabCommunity/LLMRiddlesChatGPTCN/llmriddles/questions/level1.py +++ /dev/null @@ -1,204 +0,0 @@ -from .question import register_question - - -def count_english_words(text: str): - return len(text.split(' ')) - - -def count_chinese_words(text: str): - return len(text) - - -def check_if_chinese(text: str): - return all('\u4e00' <= char <= '\u9fff' for char in text) - - -def count_words(text: str, contain_punctuation: bool = False): - chinese_words = [] - english_words = [] - other_words = [] - temp_english_words = [] - for char in text: - if '\u4e00' <= char <= '\u9fff': - chinese_words.append(char) - if len(temp_english_words) > 0: - english_words.append(''.join(temp_english_words)) - temp_english_words = [] - else: - if char.isalpha(): - temp_english_words.append(char) - else: - if len(temp_english_words) > 0: - english_words.append(''.join(temp_english_words)) - temp_english_words = [] - other_words.append(char) - if contain_punctuation: - return len(chinese_words) + len(english_words) + len(other_words) - else: - return len(chinese_words) + len(english_words) - - -CN_TEXT_1 = """ -第一章第一题(初来乍到),请你构造一个问题使模型的回答是一字不差的“1+1=3”(不需要引号)。 - -请在下面的输入框内填写你的构造并点击按钮提交。 -""" - -EN_TEXT_1 = """ -For the first question in chapter 1, please construct a query for the model that will result in the exact answer "1+1=3" (no quotes needed). - -Please enter your query below and click the submit button -""" - - -def _checker_1(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - if answer_text == '1+1=3': - return True, None - else: - return False, None - - -register_question( - { - 'cn': CN_TEXT_1, - 'en': EN_TEXT_1, - }, - checkers=_checker_1, - name={'cn': '1-1 初来乍到', 'en': '1-1'}, -) - -CN_TEXT_2 = """ -第一章第二题(小试牛刀),请你输入三个字(及)以内的问题,使模型的回答在30个字以上。 - -请在下面的输入框内填写你的问题并点击按钮提交。 -""" - -EN_TEXT_2 = """ -For the second question in chapter 1, please enter a question within three words so that the model’s answer is more than 30 words. - -Please enter your query below and click the submit button -""" - - -def _checker_2(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - - if count_words(user_text) > 3: - return False, "用户的问题长度应该在三个字及以内" if lang == 'cn' else 'Question should be within three words.' - elif count_words(answer_text) <= 30: - return False, "大语言模型的答案应该超过30个字" if lang == 'cn' else 'cAnswer should be more than 30 words.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_2, - 'en': EN_TEXT_2, - }, - checkers=_checker_2, - name={'cn': '1-2 小试牛刀', 'en': '1-2'}, -) - -CN_TEXT_3 = """ -第一章第三题(短说长话),请你输入一个字的问题,使模型的回答在100个字以上。 - -请在下面的输入框内填写你的问题并点击按钮提交。 -""" - -EN_TEXT_3 = """ -For the third question in chapter 1, please enter a one-word question so that the model’s answer is more than 100 words. - -Please enter your query below and click the submit button -""" - - -def _checker_3(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - - if count_words(user_text) > 1: - return False, "用户的问题长度应该在一个字及以内" if lang == 'cn' else 'Question should be one word.' 
- elif count_words(answer_text) <= 100: - return False, "大语言模型的答案应该超过100个字" if lang == 'cn' else 'Answer should be more than 100 words.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_3, - 'en': EN_TEXT_3, - }, - checkers=_checker_3, - name={'cn': '1-3 短说长话', 'en': '1-3'} -) - -CN_TEXT_4 = """ -第一章第四题(短说短话),请输入一个字的问题,使模型的回答字数小于20个字。 - -请在下面的输入框内填写你的问题并点击按钮提交。 -""" - -EN_TEXT_4 = """ -For the fourth question in chapter 1, please enter a one-word question so that the model’s answer is less than 20 words. - -Please enter your query below and click the submit button -""" - - -def _checker_4(question_text: str, user_text: str, answer_text: str, lang: str): - _ = question_text, lang - answer_text = answer_text.strip() - user_text = user_text.strip() - - if count_words(user_text) > 1: - return False, "用户的问题长度应该在一个字及以内" if lang == 'cn' else 'Question should be one word.' - elif count_words(answer_text) >= 20: - return False, "大语言模型的答案应该小于20个字" if lang == 'cn' else 'Answer should be less than 20 words.' - else: - return True, None - - -register_question( - { - 'cn': CN_TEXT_4, - 'en': EN_TEXT_4, - }, - checkers=_checker_4, - name={'cn': '1-4 短说短话', 'en': '1-4'}, -) - -# CN_TEXT_5 = """ -# 第一章第五题(回文不变),请输入一个本身不是回文串的问题,使无论正着问还是倒着问,模型的回答是一样的。 - -# 请在下面的输入框内填写你的问题并点击按钮提交。 -# """ - -# EN_TEXT_5 = """ -# For the fourth question in chapter 1, please enter a question that is not a palindrome string so that the model's answer is the same whether it is asked forward or backward. - -# Please enter your query below and click the submit button -# """ - -# def _checker_5(question_text: str, answer_text: str, lang: str): -# _ = question_text, lang -# answer_text = answer_text.strip() - -# if count_words(question_text) > 0: -# return False, 'Question should be one word.' -# elif count_words(answer_text) >= 20: -# return False, 'Answer should be less than 20 words.' -# else: -# return True, None - -# register_question({ -# 'cn': CN_TEXT_5, -# 'en': EN_TEXT_5, -# }, _checker_5) diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/README_D2.md b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/README_D2.md deleted file mode 100644 index a88ad7e21ce1d8651ec0d73848ce6dcd17f19d00..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/README_D2.md +++ /dev/null @@ -1,62 +0,0 @@ - - -Detectron2 is Facebook AI Research's next generation software system -that implements state-of-the-art object detection algorithms. -It is a ground-up rewrite of the previous version, -[Detectron](https://github.com/facebookresearch/Detectron/), -and it originates from [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/). - -
      - -
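For a quick first impression before the sections below, here is a minimal inference sketch using the standard Detectron2 API (a sketch only: it assumes detectron2 and OpenCV are installed, a local `input.jpg` exists, and a COCO-pretrained Mask R-CNN is downloaded from the model zoo on first run):

```python
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

# Build a config for a COCO-pretrained Mask R-CNN and create a predictor.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # confidence threshold for reported detections
# cfg.MODEL.DEVICE = "cpu"                   # uncomment if no GPU is available

predictor = DefaultPredictor(cfg)
outputs = predictor(cv2.imread("input.jpg"))  # BGR image, as expected by DefaultPredictor
print(outputs["instances"].pred_classes)      # predicted COCO class indices
print(outputs["instances"].pred_boxes)        # predicted boxes
```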
      - -### What's New -* It is powered by the [PyTorch](https://pytorch.org) deep learning framework. -* Includes more features such as panoptic segmentation, Densepose, Cascade R-CNN, rotated bounding boxes, PointRend, - DeepLab, etc. -* Can be used as a library to support [different projects](projects/) on top of it. - We'll open source more research projects in this way. -* It [trains much faster](https://detectron2.readthedocs.io/notes/benchmarks.html). -* Models can be exported to TorchScript format or Caffe2 format for deployment. - -See our [blog post](https://ai.facebook.com/blog/-detectron2-a-pytorch-based-modular-object-detection-library-/) -to see more demos and learn about detectron2. - -## Installation - -See [INSTALL.md](INSTALL.md). - -## Getting Started - -Follow the [installation instructions](https://detectron2.readthedocs.io/tutorials/install.html) to -install detectron2. - -See [Getting Started with Detectron2](https://detectron2.readthedocs.io/tutorials/getting_started.html), -and the [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -to learn about basic usage. - -Learn more at our [documentation](https://detectron2.readthedocs.org). -And see [projects/](projects/) for some projects that are built on top of detectron2. - -## Model Zoo and Baselines - -We provide a large set of baseline results and trained models available for download in the [Detectron2 Model Zoo](MODEL_ZOO.md). - - -## License - -Detectron2 is released under the [Apache 2.0 license](LICENSE). - -## Citing Detectron2 - -If you use Detectron2 in your research or wish to refer to the baseline results published in the [Model Zoo](MODEL_ZOO.md), please use the following BibTeX entry. - -```BibTeX -@misc{wu2019detectron2, - author = {Yuxin Wu and Alexander Kirillov and Francisco Massa and - Wan-Yen Lo and Ross Girshick}, - title = {Detectron2}, - howpublished = {\url{https://github.com/facebookresearch/detectron2}}, - year = {2019} -} -``` diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/paper_runfiles/generate_val_test.sh b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/paper_runfiles/generate_val_test.sh deleted file mode 100644 index d9b2a370ceeeb8f401706f4303298db13e5fad91..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/paper_runfiles/generate_val_test.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# !!! 
file set to make test_large_30k from the vanilla test_large: configs/test_large_30k.lst - -# paths to data are valid for mml7 -PLACES_ROOT="/data/inpainting/Places365" -OUT_DIR="/data/inpainting/paper_data/Places365_val_test" - -source "$(dirname $0)/env.sh" - -for datadir in test_large_30k # val_large -do - for conf in random_thin_256 random_medium_256 random_thick_256 random_thin_512 random_medium_512 random_thick_512 - do - "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ - "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 8 - - "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" - done - - for conf in segm_256 segm_512 - do - "$BINDIR/gen_mask_dataset.py" "$CONFIGDIR/data_gen/${conf}.yaml" \ - "$PLACES_ROOT/$datadir" "$OUT_DIR/$datadir/$conf" --n-jobs 2 - - "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats" - done -done diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/report_from_tb.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/report_from_tb.py deleted file mode 100644 index 9a444e6cd8027f88bd34adfc0b1dd000bbb4b2be..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/report_from_tb.py +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env python3 - -import glob -import os -import re - -import tensorflow as tf -from torch.utils.tensorboard import SummaryWriter - - -GROUPING_RULES = [ - re.compile(r'^(?Ptrain|test|val|extra_val_.*?(256|512))_(?P.*)', re.I) -] - - -DROP_RULES = [ - re.compile(r'_std$', re.I) -] - - -def need_drop(tag): - for rule in DROP_RULES: - if rule.search(tag): - return True - return False - - -def get_group_and_title(tag): - for rule in GROUPING_RULES: - match = rule.search(tag) - if match is None: - continue - return match.group('group'), match.group('title') - return None, None - - -def main(args): - os.makedirs(args.outdir, exist_ok=True) - - ignored_events = set() - - for orig_fname in glob.glob(args.inglob): - cur_dirpath = os.path.dirname(orig_fname) # remove filename, this should point to "version_0" directory - subdirname = os.path.basename(cur_dirpath) # == "version_0" most of time - exp_root_path = os.path.dirname(cur_dirpath) # remove "version_0" - exp_name = os.path.basename(exp_root_path) - - writers_by_group = {} - - for e in tf.compat.v1.train.summary_iterator(orig_fname): - for v in e.summary.value: - if need_drop(v.tag): - continue - - cur_group, cur_title = get_group_and_title(v.tag) - if cur_group is None: - if v.tag not in ignored_events: - print(f'WARNING: Could not detect group for {v.tag}, ignoring it') - ignored_events.add(v.tag) - continue - - cur_writer = writers_by_group.get(cur_group, None) - if cur_writer is None: - if args.include_version: - cur_outdir = os.path.join(args.outdir, exp_name, f'{subdirname}_{cur_group}') - else: - cur_outdir = os.path.join(args.outdir, exp_name, cur_group) - cur_writer = SummaryWriter(cur_outdir) - writers_by_group[cur_group] = cur_writer - - cur_writer.add_scalar(cur_title, v.simple_value, global_step=e.step, walltime=e.wall_time) - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('inglob', type=str) - aparser.add_argument('outdir', type=str) - aparser.add_argument('--include-version', action='store_true', - help='Include subdirectory name e.g. 
"version_0" into output path') - - main(aparser.parse_args()) diff --git a/spaces/OpenGVLab/all-seeing/utils.py b/spaces/OpenGVLab/all-seeing/utils.py deleted file mode 100644 index 311cd84944857e93ccf5654c924893807e36858c..0000000000000000000000000000000000000000 --- a/spaces/OpenGVLab/all-seeing/utils.py +++ /dev/null @@ -1,34 +0,0 @@ -import requests -from PIL import Image,ImageDraw -from io import BytesIO -import random -import os - - -def imread(path): - if path.startswith('http') or path.startswith('https'): - response = requests.get(path) - image = Image.open(BytesIO(response.content)).convert('RGB') - else: - image = Image.open(path).convert('RGB') - return image - -def random_image(root_path): - img_list = os.listdir(root_path) - img_item = random.sample(img_list, 1)[0] - return Image.open(os.path.join(root_path, img_item)) - -def draw_points_to_image(image:Image.Image,points:list,radius=16,color = (255, 0, 0)): - draw = ImageDraw.Draw(image) - for [x,y] in points: - draw.ellipse((x - radius, y - radius, x + radius,y + radius), fill=color) - return image - -def in_rectangle(bbox,points): - for point in points: - if min(max(point[0],bbox[0]),bbox[0]+bbox[2]) != point[0] or min(max(point[1],bbox[1]),bbox[1]+bbox[3]) != point[1] : - return False - - return True - - diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/parallel/data_parallel.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/parallel/data_parallel.py deleted file mode 100644 index 79b5f69b654cf647dc7ae9174223781ab5c607d2..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/parallel/data_parallel.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from itertools import chain - -from torch.nn.parallel import DataParallel - -from .scatter_gather import scatter_kwargs - - -class MMDataParallel(DataParallel): - """The DataParallel module that supports DataContainer. - - MMDataParallel has two main differences with PyTorch DataParallel: - - - It supports a custom type :class:`DataContainer` which allows more - flexible control of input data during both GPU and CPU inference. - - It implement two more APIs ``train_step()`` and ``val_step()``. - - Args: - module (:class:`nn.Module`): Module to be encapsulated. - device_ids (list[int]): Device IDS of modules to be scattered to. - Defaults to None when GPU is not available. - output_device (str | int): Device ID for output. Defaults to None. - dim (int): Dimension used to scatter the data. Defaults to 0. - """ - - def __init__(self, *args, dim=0, **kwargs): - super(MMDataParallel, self).__init__(*args, dim=dim, **kwargs) - self.dim = dim - - def forward(self, *inputs, **kwargs): - """Override the original forward function. - - The main difference lies in the CPU inference where the data in - :class:`DataContainers` will still be gathered. 
- """ - if not self.device_ids: - # We add the following line thus the module could gather and - # convert data containers as those in GPU inference - inputs, kwargs = self.scatter(inputs, kwargs, [-1]) - return self.module(*inputs[0], **kwargs[0]) - else: - return super().forward(*inputs, **kwargs) - - def scatter(self, inputs, kwargs, device_ids): - return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim) - - def train_step(self, *inputs, **kwargs): - if not self.device_ids: - # We add the following line thus the module could gather and - # convert data containers as those in GPU inference - inputs, kwargs = self.scatter(inputs, kwargs, [-1]) - return self.module.train_step(*inputs[0], **kwargs[0]) - - assert len(self.device_ids) == 1, \ - ('MMDataParallel only supports single GPU training, if you need to' - ' train with multiple GPUs, please use MMDistributedDataParallel' - 'instead.') - - for t in chain(self.module.parameters(), self.module.buffers()): - if t.device != self.src_device_obj: - raise RuntimeError( - 'module must have its parameters and buffers ' - f'on device {self.src_device_obj} (device_ids[0]) but ' - f'found one of them on device: {t.device}') - - inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) - return self.module.train_step(*inputs[0], **kwargs[0]) - - def val_step(self, *inputs, **kwargs): - if not self.device_ids: - # We add the following line thus the module could gather and - # convert data containers as those in GPU inference - inputs, kwargs = self.scatter(inputs, kwargs, [-1]) - return self.module.val_step(*inputs[0], **kwargs[0]) - - assert len(self.device_ids) == 1, \ - ('MMDataParallel only supports single GPU training, if you need to' - ' train with multiple GPUs, please use MMDistributedDataParallel' - ' instead.') - - for t in chain(self.module.parameters(), self.module.buffers()): - if t.device != self.src_device_obj: - raise RuntimeError( - 'module must have its parameters and buffers ' - f'on device {self.src_device_obj} (device_ids[0]) but ' - f'found one of them on device: {t.device}') - - inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids) - return self.module.val_step(*inputs[0], **kwargs[0]) diff --git a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/utils/path.py b/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/utils/path.py deleted file mode 100644 index 7dab4b3041413b1432b0f434b8b14783097d33c6..0000000000000000000000000000000000000000 --- a/spaces/PAIR/Text2Video-Zero/annotator/uniformer/mmcv/utils/path.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import os -import os.path as osp -from pathlib import Path - -from .misc import is_str - - -def is_filepath(x): - return is_str(x) or isinstance(x, Path) - - -def fopen(filepath, *args, **kwargs): - if is_str(filepath): - return open(filepath, *args, **kwargs) - elif isinstance(filepath, Path): - return filepath.open(*args, **kwargs) - raise ValueError('`filepath` should be a string or a Path') - - -def check_file_exist(filename, msg_tmpl='file "{}" does not exist'): - if not osp.isfile(filename): - raise FileNotFoundError(msg_tmpl.format(filename)) - - -def mkdir_or_exist(dir_name, mode=0o777): - if dir_name == '': - return - dir_name = osp.expanduser(dir_name) - os.makedirs(dir_name, mode=mode, exist_ok=True) - - -def symlink(src, dst, overwrite=True, **kwargs): - if os.path.lexists(dst) and overwrite: - os.remove(dst) - os.symlink(src, dst, **kwargs) - - -def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True): - """Scan a directory to find the interested files. - - Args: - dir_path (str | obj:`Path`): Path of the directory. - suffix (str | tuple(str), optional): File suffix that we are - interested in. Default: None. - recursive (bool, optional): If set to True, recursively scan the - directory. Default: False. - case_sensitive (bool, optional) : If set to False, ignore the case of - suffix. Default: True. - - Returns: - A generator for all the interested files with relative paths. - """ - if isinstance(dir_path, (str, Path)): - dir_path = str(dir_path) - else: - raise TypeError('"dir_path" must be a string or Path object') - - if (suffix is not None) and not isinstance(suffix, (str, tuple)): - raise TypeError('"suffix" must be a string or tuple of strings') - - if suffix is not None and not case_sensitive: - suffix = suffix.lower() if isinstance(suffix, str) else tuple( - item.lower() for item in suffix) - - root = dir_path - - def _scandir(dir_path, suffix, recursive, case_sensitive): - for entry in os.scandir(dir_path): - if not entry.name.startswith('.') and entry.is_file(): - rel_path = osp.relpath(entry.path, root) - _rel_path = rel_path if case_sensitive else rel_path.lower() - if suffix is None or _rel_path.endswith(suffix): - yield rel_path - elif recursive and os.path.isdir(entry.path): - # scan recursively if entry.path is a directory - yield from _scandir(entry.path, suffix, recursive, - case_sensitive) - - return _scandir(dir_path, suffix, recursive, case_sensitive) - - -def find_vcs_root(path, markers=('.git', )): - """Finds the root directory (including itself) of specified markers. - - Args: - path (str): Path of directory or file. - markers (list[str], optional): List of file or directory names. - - Returns: - The directory contained one of the markers or None if not found. 
- """ - if osp.isfile(path): - path = osp.dirname(path) - - prev, cur = None, osp.abspath(osp.expanduser(path)) - while cur != prev: - if any(osp.exists(osp.join(cur, marker)) for marker in markers): - return cur - prev, cur = cur, osp.split(cur)[0] - return None diff --git a/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/model.py b/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/model.py deleted file mode 100644 index 901cb7a86ea5b13912ff2a98680f368d18e36d9f..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/models/stylegan2/model.py +++ /dev/null @@ -1,768 +0,0 @@ -import math -import random -import torch -from torch import nn -from torch.nn import functional as F -import numpy as np - -from models.stylegan2.op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer('kernel', kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True, dilation=1 ## modified - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - self.dilation = dilation ## modified - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - dilation=self.dilation, ## modified - ) - - return out - - def __repr__(self): - return ( - f"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]}," - f" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding}, dilation={self.dilation})" ## modified - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - 
- if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - dilation=1, ##### modified - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - self.dilation = dilation ##### modified - - if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) - - # to simulate transconv + blur - # we use dilated transposed conv with blur kernel as weight + dilated transconv - if dilation > 1: ##### modified - blur_weight = torch.randn(1, 1, 3, 3) * 0 + 1 - blur_weight[:,:,0,1] = 2 - blur_weight[:,:,1,0] = 2 - blur_weight[:,:,1,2] = 2 - blur_weight[:,:,2,1] = 2 - blur_weight[:,:,1,1] = 4 - blur_weight = blur_weight / 16.0 - self.register_buffer("blur_weight", blur_weight) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 + dilation - 1 ##### modified - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' - f'upsample={self.upsample}, downsample={self.downsample})' - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - - if self.dilation > 1: ##### modified - # to simulate out = self.blur(out) - out = 
F.conv_transpose2d( - input, self.blur_weight.repeat(batch*in_channel,1,1,1), padding=0, groups=batch*in_channel, dilation=self.dilation//2) - # to simulate the next line - out = F.conv_transpose2d( - out, weight, padding=self.dilation, groups=batch, dilation=self.dilation//2) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - return out - - out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch, dilation=self.dilation) ##### modified - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - else: ##### modified, to make the resolution matches - batch, _, height, width = image.shape - _, _, height1, width1 = noise.shape - if height != height1 or width != width1: - noise = F.adaptive_avg_pool2d(noise, (height, width)) - - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size)) - - def forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - dilation=1, ##### modified - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - dilation=dilation, ##### modified - ) - - self.noise = NoiseInjection() - self.activate = FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1], dilation=1): ##### modified - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - self.dilation = dilation ##### modified - if dilation > 1: ##### modified - blur_weight = torch.randn(1, 1, 3, 3) * 0 + 1 - blur_weight[:,:,0,1] = 2 - blur_weight[:,:,1,0] = 2 - blur_weight[:,:,1,2] = 2 - blur_weight[:,:,2,1] = 2 - blur_weight[:,:,1,1] = 4 - blur_weight = blur_weight / 16.0 - self.register_buffer("blur_weight", blur_weight) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - if self.dilation == 1: - skip = 
self.upsample(skip) - else: ##### modified, to simulate skip = self.upsample(skip) - batch, in_channel, _, _ = skip.shape - skip = F.conv2d(skip, self.blur_weight.repeat(in_channel,1,1,1), - padding=self.dilation//2, groups=in_channel, dilation=self.dilation//2) - - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - ): - super().__init__() - - self.size = size - - self.style_dim = style_dim - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu' - ) - ) - - self.style = nn.Sequential(*layers) - - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel, dilation=8 ##### modified - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res] - self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape)) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - dilation=max(1, 32 // (2**(i-1))) ##### modified - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel, dilation=max(1, 32 // (2**i)) ##### modified - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim, dilation=max(1, 32 // (2**(i-1))))) ##### modified - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - # styles is the latent code w+ - # first_layer_feature is the first-layer input feature f - # first_layer_feature_ind indicate which layer of G accepts f (should always=0, the first layer) - # skip_layer_feature is the encoder features sent by skip connection - # fusion_block is the network to fuse the encoder feature and decoder feature - # zero_noise is to force the noise to be zero (to avoid flickers for videos) - # editing_w is the editing vector v used in video face editing - def forward( - self, - styles, - return_latents=False, - return_features=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - first_layer_feature = None, ##### modified - first_layer_feature_ind = 0, 
##### modified - skip_layer_feature = None, ##### modified - fusion_block = None, ##### modified - zero_noise = False, ##### modified - editing_w = None, ##### modified - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if zero_noise: - noise = [ - getattr(self.noises, f'noise_{i}') * 0.0 for i in range(self.num_layers) - ] - elif noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, f'noise_{i}') for i in range(self.num_layers) - ] - - if truncation < 1: - style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.n_latent - - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles[0] - - else: - if inject_index is None: - inject_index = random.randint(1, self.n_latent - 1) - - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) - - latent = torch.cat([latent, latent2], 1) - - # w+ + v for video face editing - if editing_w is not None: ##### modified - latent = latent + editing_w - - # the original StyleGAN - if first_layer_feature is None: ##### modified - out = self.input(latent) - out = F.adaptive_avg_pool2d(out, 32) ##### modified - out = self.conv1(out, latent[:, 0], noise=noise[0]) - skip = self.to_rgb1(out, latent[:, 1]) - # the default StyleGANEX, replacing the first layer of G - elif first_layer_feature_ind == 0: ##### modified - out = first_layer_feature[0] ##### modified - out = self.conv1(out, latent[:, 0], noise=noise[0]) - skip = self.to_rgb1(out, latent[:, 1]) - # maybe we can also use the second layer of G to accept f? 
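
A minimal usage sketch of the first-layer replacement handled in the branch above, assuming the fused ops this model file depends on (FusedLeakyReLU and friends) are importable; the 1024 output size, 512 style_dim and the 32x32 feature resolution are illustrative assumptions, not values fixed by the code:

import torch

g = Generator(size=1024, style_dim=512, n_mlp=8).eval()   # the class defined in this file
w_plus = torch.randn(1, g.n_latent, 512)                  # latent code w+ (18 x 512 for size=1024)
f = torch.randn(1, 512, 32, 32)                           # first-layer feature f (channels must match channels[4])

with torch.no_grad():
    image, _ = g(
        [w_plus],
        input_is_latent=True,
        first_layer_feature=[f],     # replaces the pooled constant input
        first_layer_feature_ind=0,   # f enters at the first layer
        zero_noise=True,             # deterministic noise, e.g. for video frames
    )
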
- else: ##### modified - out = first_layer_feature[0] ##### modified - skip = first_layer_feature[1] ##### modified - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - # these layers accepts skipped encoder layer, use fusion block to fuse the encoder feature and decoder feature - if skip_layer_feature and fusion_block and i//2 < len(skip_layer_feature) and i//2 < len(fusion_block): - if editing_w is None: - out, skip = fusion_block[i//2](skip_layer_feature[i//2], out, skip) - else: - out, skip = fusion_block[i//2](skip_layer_feature[i//2], out, skip, editing_w[:,i]) - out = conv1(out, latent[:, i], noise=noise1) - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - - i += 2 - - image = skip - - if return_latents: - return image, latent - elif return_features: - return image, out - else: - return image, None - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - dilation=1, ## modified - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 + dilation-1 ## modified - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - dilation=dilation, ## modified - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class Discriminator(nn.Module): - def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], img_channel=3): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(img_channel, channels[size], 1)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'), - EqualLinear(channels[4], 1), - ) - - self.size = size ##### modified - - def forward(self, input): - # for input that not satisfies the target size, we crop it to extract a small image of the target size. 
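
A standalone sketch of the random-crop step described in the comment above, with illustrative values: inputs larger than the discriminator's native size are scored on a random size x size patch.

import torch

size = 256                                  # discriminator resolution, assumed for the example
x = torch.randn(2, 3, 512, 384)             # an input larger than `size`
_, _, h, w = x.shape
i = torch.randint(0, h + 1 - size, size=(1,)).item()
j = torch.randint(0, w + 1 - size, size=(1,)).item()
patch = x[:, :, i:i + size, j:j + size]     # what self.convs actually sees
assert patch.shape[-2:] == (size, size)
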
- _, _, h, w = input.shape ##### modified - i, j = torch.randint(0, h+1-self.size, size=(1,)).item(), torch.randint(0, w+1-self.size, size=(1,)).item() ##### modified - out = self.convs(input[:,:,i:i+self.size,j:j+self.size]) ##### modified - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out \ No newline at end of file diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/__init__.py b/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/PSLD/PSLD/diffusion-posterior-sampling/guided_diffusion/fp16_util.py b/spaces/PSLD/PSLD/diffusion-posterior-sampling/guided_diffusion/fp16_util.py deleted file mode 100644 index c1961650ea28affa1fe64a1794f6342d355050aa..0000000000000000000000000000000000000000 --- a/spaces/PSLD/PSLD/diffusion-posterior-sampling/guided_diffusion/fp16_util.py +++ /dev/null @@ -1,234 +0,0 @@ -""" -Helpers to train with 16-bit precision. -""" - -import numpy as np -import torch as th -import torch.nn as nn -from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors - -INITIAL_LOG_LOSS_SCALE = 20.0 - - -def convert_module_to_f16(l): - """ - Convert primitive modules to float16. - """ - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): - l.weight.data = l.weight.data.half() - if l.bias is not None: - l.bias.data = l.bias.data.half() - - -def convert_module_to_f32(l): - """ - Convert primitive modules to float32, undoing convert_module_to_f16(). - """ - if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)): - l.weight.data = l.weight.data.float() - if l.bias is not None: - l.bias.data = l.bias.data.float() - - -def make_master_params(param_groups_and_shapes): - """ - Copy model parameters into a (differently-shaped) list of full-precision - parameters. - """ - master_params = [] - for param_group, shape in param_groups_and_shapes: - master_param = nn.Parameter( - _flatten_dense_tensors( - [param.detach().float() for (_, param) in param_group] - ).view(shape) - ) - master_param.requires_grad = True - master_params.append(master_param) - return master_params - - -def model_grads_to_master_grads(param_groups_and_shapes, master_params): - """ - Copy the gradients from the model parameters into the master parameters - from make_master_params(). - """ - for master_param, (param_group, shape) in zip( - master_params, param_groups_and_shapes - ): - master_param.grad = _flatten_dense_tensors( - [param_grad_or_zeros(param) for (_, param) in param_group] - ).view(shape) - - -def master_params_to_model_params(param_groups_and_shapes, master_params): - """ - Copy the master parameter data back into the model parameters. - """ - # Without copying to a list, if a generator is passed, this will - # silently not copy any parameters. 
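
A tiny standalone illustration of the pitfall the comment above warns about: a generator can only be iterated once, so a later zip over an already-consumed generator silently yields nothing and the copy loop becomes a no-op (toy values, not the real parameter groups).

params = ((name, value) for name, value in [("w", 1.0), ("b", 2.0)])  # a generator, not a list
first_pass = list(params)      # something iterates it once...
second_pass = list(params)     # ...and any later loop or zip over it sees nothing
assert first_pass == [("w", 1.0), ("b", 2.0)] and second_pass == []
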
- for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes): - for (_, param), unflat_master_param in zip( - param_group, unflatten_master_params(param_group, master_param.view(-1)) - ): - param.detach().copy_(unflat_master_param) - - -def unflatten_master_params(param_group, master_param): - return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group]) - - -def get_param_groups_and_shapes(named_model_params): - named_model_params = list(named_model_params) - scalar_vector_named_params = ( - [(n, p) for (n, p) in named_model_params if p.ndim <= 1], - (-1), - ) - matrix_named_params = ( - [(n, p) for (n, p) in named_model_params if p.ndim > 1], - (1, -1), - ) - return [scalar_vector_named_params, matrix_named_params] - - -def master_params_to_state_dict( - model, param_groups_and_shapes, master_params, use_fp16 -): - if use_fp16: - state_dict = model.state_dict() - for master_param, (param_group, _) in zip( - master_params, param_groups_and_shapes - ): - for (name, _), unflat_master_param in zip( - param_group, unflatten_master_params(param_group, master_param.view(-1)) - ): - assert name in state_dict - state_dict[name] = unflat_master_param - else: - state_dict = model.state_dict() - for i, (name, _value) in enumerate(model.named_parameters()): - assert name in state_dict - state_dict[name] = master_params[i] - return state_dict - - -def state_dict_to_master_params(model, state_dict, use_fp16): - if use_fp16: - named_model_params = [ - (name, state_dict[name]) for name, _ in model.named_parameters() - ] - param_groups_and_shapes = get_param_groups_and_shapes(named_model_params) - master_params = make_master_params(param_groups_and_shapes) - else: - master_params = [state_dict[name] for name, _ in model.named_parameters()] - return master_params - - -def zero_master_grads(master_params): - for param in master_params: - param.grad = None - - -def zero_grad(model_params): - for param in model_params: - # Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group - if param.grad is not None: - param.grad.detach_() - param.grad.zero_() - - -def param_grad_or_zeros(param): - if param.grad is not None: - return param.grad.data.detach() - else: - return th.zeros_like(param) - - -class MixedPrecisionTrainer: - def __init__( - self, - *, - model, - use_fp16=False, - fp16_scale_growth=1e-3, - initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE, - ): - self.model = model - self.use_fp16 = use_fp16 - self.fp16_scale_growth = fp16_scale_growth - - self.model_params = list(self.model.parameters()) - self.master_params = self.model_params - self.param_groups_and_shapes = None - self.lg_loss_scale = initial_lg_loss_scale - - if self.use_fp16: - self.param_groups_and_shapes = get_param_groups_and_shapes( - self.model.named_parameters() - ) - self.master_params = make_master_params(self.param_groups_and_shapes) - self.model.convert_to_fp16() - - def zero_grad(self): - zero_grad(self.model_params) - - def backward(self, loss: th.Tensor): - if self.use_fp16: - loss_scale = 2 ** self.lg_loss_scale - (loss * loss_scale).backward() - else: - loss.backward() - - def optimize(self, opt: th.optim.Optimizer): - if self.use_fp16: - return self._optimize_fp16(opt) - else: - return self._optimize_normal(opt) - - def _optimize_fp16(self, opt: th.optim.Optimizer): - logger.logkv_mean("lg_loss_scale", self.lg_loss_scale) - model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params) - grad_norm, param_norm = 
self._compute_norms(grad_scale=2 ** self.lg_loss_scale) - if check_overflow(grad_norm): - self.lg_loss_scale -= 1 - logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}") - zero_master_grads(self.master_params) - return False - - logger.logkv_mean("grad_norm", grad_norm) - logger.logkv_mean("param_norm", param_norm) - - self.master_params[0].grad.mul_(1.0 / (2 ** self.lg_loss_scale)) - opt.step() - zero_master_grads(self.master_params) - master_params_to_model_params(self.param_groups_and_shapes, self.master_params) - self.lg_loss_scale += self.fp16_scale_growth - return True - - def _optimize_normal(self, opt: th.optim.Optimizer): - grad_norm, param_norm = self._compute_norms() - logger.logkv_mean("grad_norm", grad_norm) - logger.logkv_mean("param_norm", param_norm) - opt.step() - return True - - def _compute_norms(self, grad_scale=1.0): - grad_norm = 0.0 - param_norm = 0.0 - for p in self.master_params: - with th.no_grad(): - param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2 - if p.grad is not None: - grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2 - return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm) - - def master_params_to_state_dict(self, master_params): - return master_params_to_state_dict( - self.model, self.param_groups_and_shapes, master_params, self.use_fp16 - ) - - def state_dict_to_master_params(self, state_dict): - return state_dict_to_master_params(self.model, state_dict, self.use_fp16) - - -def check_overflow(value): - return (value == float("inf")) or (value == -float("inf")) or (value != value) diff --git a/spaces/PeepDaSlan9/Bark-Voice-Cloning/bark/generation.py b/spaces/PeepDaSlan9/Bark-Voice-Cloning/bark/generation.py deleted file mode 100644 index ad474d770235c7b665218e64699fb0b0b1b8cc3f..0000000000000000000000000000000000000000 --- a/spaces/PeepDaSlan9/Bark-Voice-Cloning/bark/generation.py +++ /dev/null @@ -1,864 +0,0 @@ -import contextlib -import gc -import os -import re -import requests -import gc -import sys - -from encodec import EncodecModel -import funcy -import logging -import numpy as np -from scipy.special import softmax -import torch -import torch.nn.functional as F -import tqdm -from transformers import BertTokenizer -from huggingface_hub import hf_hub_download, hf_hub_url - -from .model import GPTConfig, GPT -from .model_fine import FineGPT, FineGPTConfig -from .settings import initenv - -initenv(sys.argv) -global_force_cpu = os.environ.get("BARK_FORCE_CPU", False) -if ( - global_force_cpu != True and - torch.cuda.is_available() and - hasattr(torch.cuda, "amp") and - hasattr(torch.cuda.amp, "autocast") and - hasattr(torch.cuda, "is_bf16_supported") and - torch.cuda.is_bf16_supported() -): - autocast = funcy.partial(torch.cuda.amp.autocast, dtype=torch.bfloat16) -else: - @contextlib.contextmanager - def autocast(): - yield - - -# hold models in global scope to lazy load -global models -models = {} - -global models_devices -models_devices = {} - - -CONTEXT_WINDOW_SIZE = 1024 - -SEMANTIC_RATE_HZ = 49.9 -SEMANTIC_VOCAB_SIZE = 10_000 - -CODEBOOK_SIZE = 1024 -N_COARSE_CODEBOOKS = 2 -N_FINE_CODEBOOKS = 8 -COARSE_RATE_HZ = 75 - -SAMPLE_RATE = 24_000 - - -SUPPORTED_LANGS = [ - ("English", "en"), - ("German", "de"), - ("Spanish", "es"), - ("French", "fr"), - ("Hindi", "hi"), - ("Italian", "it"), - ("Japanese", "ja"), - ("Korean", "ko"), - ("Polish", "pl"), - ("Portuguese", "pt"), - ("Russian", "ru"), - ("Turkish", "tr"), - ("Chinese", "zh"), -] - -ALLOWED_PROMPTS = {"announcer"} -for _, lang in SUPPORTED_LANGS: - 
for prefix in ("", f"v2{os.path.sep}"): - for n in range(10): - ALLOWED_PROMPTS.add(f"{prefix}{lang}_speaker_{n}") - - -logger = logging.getLogger(__name__) - - -CUR_PATH = os.path.dirname(os.path.abspath(__file__)) - - -#default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache") -#CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0") -#CACHE_DIR = os.path.join(os.getcwd(), "models" -CACHE_DIR = "./models" - - -def _cast_bool_env_var(s): - return s.lower() in ('true', '1', 't') - -USE_SMALL_MODELS = _cast_bool_env_var(os.environ.get("SUNO_USE_SMALL_MODELS", "False")) -GLOBAL_ENABLE_MPS = _cast_bool_env_var(os.environ.get("SUNO_ENABLE_MPS", "False")) -OFFLOAD_CPU = _cast_bool_env_var(os.environ.get("SUNO_OFFLOAD_CPU", "False")) - -REMOTE_MODEL_PATHS = { - "text_small": { - "repo_id": "suno/bark", - "file_name": "text.pt", - }, - "coarse_small": { - "repo_id": "suno/bark", - "file_name": "coarse.pt", - }, - "fine_small": { - "repo_id": "suno/bark", - "file_name": "fine.pt", - }, - "text": { - "repo_id": "suno/bark", - "file_name": "text_2.pt", - }, - "coarse": { - "repo_id": "suno/bark", - "file_name": "coarse_2.pt", - }, - "fine": { - "repo_id": "suno/bark", - "file_name": "fine_2.pt", - }, -} - - -if not hasattr(torch.nn.functional, 'scaled_dot_product_attention') and torch.cuda.is_available(): - logger.warning( - "torch version does not support flash attention. You will get faster" + - " inference speed by upgrade torch to newest nightly version." - ) - - -def grab_best_device(use_gpu=True): - if torch.cuda.device_count() > 0 and use_gpu: - device = "cuda" - elif torch.backends.mps.is_available() and use_gpu and GLOBAL_ENABLE_MPS: - device = "mps" - else: - device = "cpu" - return device - - -def _get_ckpt_path(model_type, use_small=False): - key = model_type - if use_small or USE_SMALL_MODELS: - key += "_small" - return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"]) - -""" -def _download(from_hf_path, file_name, destfilename): - os.makedirs(CACHE_DIR, exist_ok=True) - hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR, local_dir_use_symlinks=False) - # Bug in original repo? Downloaded name differs from expected... 
- if not os.path.exists(destfilename): - localname = os.path.join(CACHE_DIR, file_name) - os.rename(localname, destfilename) -""" -def _download(from_hf_path, file_name): - os.makedirs(CACHE_DIR, exist_ok=True) - hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR) - - -class InferenceContext: - def __init__(self, benchmark=False): - # we can't expect inputs to be the same length, so disable benchmarking by default - self._chosen_cudnn_benchmark = benchmark - self._cudnn_benchmark = None - - def __enter__(self): - self._cudnn_benchmark = torch.backends.cudnn.benchmark - torch.backends.cudnn.benchmark = self._chosen_cudnn_benchmark - - def __exit__(self, exc_type, exc_value, exc_traceback): - torch.backends.cudnn.benchmark = self._cudnn_benchmark - - -if torch.cuda.is_available(): - torch.backends.cuda.matmul.allow_tf32 = True - torch.backends.cudnn.allow_tf32 = True - - -@contextlib.contextmanager -def _inference_mode(): - with InferenceContext(), torch.inference_mode(), torch.no_grad(), autocast(): - yield - - -def _clear_cuda_cache(): - if torch.cuda.is_available(): - torch.cuda.empty_cache() - torch.cuda.synchronize() - - -def clean_models(model_key=None): - global models - model_keys = [model_key] if model_key is not None else models.keys() - for k in model_keys: - if k in models: - del models[k] - _clear_cuda_cache() - gc.collect() - - -def _load_model(ckpt_path, device, use_small=False, model_type="text"): - if model_type == "text": - ConfigClass = GPTConfig - ModelClass = GPT - elif model_type == "coarse": - ConfigClass = GPTConfig - ModelClass = GPT - elif model_type == "fine": - ConfigClass = FineGPTConfig - ModelClass = FineGPT - else: - raise NotImplementedError() - - # Force-remove Models to allow running on >12Gb GPU - # CF: Probably not needed anymore - #global models - #models.clear() - #gc.collect() - #torch.cuda.empty_cache() - # to here... - - model_key = f"{model_type}_small" if use_small or USE_SMALL_MODELS else model_type - model_info = REMOTE_MODEL_PATHS[model_key] - if not os.path.exists(ckpt_path): - logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.") - ## added next two lines to make it super clear which model is being downloaded - remote_filename = hf_hub_url(model_info["repo_id"], model_info["file_name"]) - print(f"Downloading {model_key} {model_info['repo_id']} remote model file {remote_filename} {model_info['file_name']} to {CACHE_DIR}") - _download(model_info["repo_id"], model_info["file_name"]) - # add next line to make it super clear which model is being loaded - print(f"Loading {model_key} model from {ckpt_path} to {device}") # added - checkpoint = torch.load(ckpt_path, map_location=device) - # this is a hack - model_args = checkpoint["model_args"] - if "input_vocab_size" not in model_args: - model_args["input_vocab_size"] = model_args["vocab_size"] - model_args["output_vocab_size"] = model_args["vocab_size"] - del model_args["vocab_size"] - gptconf = ConfigClass(**checkpoint["model_args"]) - model = ModelClass(gptconf) - state_dict = checkpoint["model"] - # fixup checkpoint - unwanted_prefix = "_orig_mod." 
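
The fix-up loop below strips this prefix; state dicts saved from a torch.compile()'d model typically carry it on every key. A toy standalone version with hypothetical key names:

toy_state_dict = {"_orig_mod.lm_head.weight": 0, "wte.weight": 1}  # hypothetical keys
prefix = "_orig_mod."
for k in list(toy_state_dict):
    if k.startswith(prefix):
        toy_state_dict[k[len(prefix):]] = toy_state_dict.pop(k)
assert set(toy_state_dict) == {"lm_head.weight", "wte.weight"}
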
- for k, v in list(state_dict.items()): - if k.startswith(unwanted_prefix): - state_dict[k[len(unwanted_prefix) :]] = state_dict.pop(k) - extra_keys = set(state_dict.keys()) - set(model.state_dict().keys()) - extra_keys = set([k for k in extra_keys if not k.endswith(".attn.bias")]) - missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) - missing_keys = set([k for k in missing_keys if not k.endswith(".attn.bias")]) - if len(extra_keys) != 0: - raise ValueError(f"extra keys found: {extra_keys}") - if len(missing_keys) != 0: - raise ValueError(f"missing keys: {missing_keys}") - model.load_state_dict(state_dict, strict=False) - n_params = model.get_num_params() - val_loss = checkpoint["best_val_loss"].item() - logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss") - model.eval() - model.to(device) - del checkpoint, state_dict - _clear_cuda_cache() - if model_type == "text": - tokenizer = BertTokenizer.from_pretrained("bert-base-multilingual-cased") - return { - "model": model, - "tokenizer": tokenizer, - } - return model - - -def _load_codec_model(device): - model = EncodecModel.encodec_model_24khz() - model.set_target_bandwidth(6.0) - model.eval() - model.to(device) - _clear_cuda_cache() - return model - - -def load_model(use_gpu=True, use_small=False, force_reload=False, model_type="text"): - _load_model_f = funcy.partial(_load_model, model_type=model_type, use_small=use_small) - if model_type not in ("text", "coarse", "fine"): - raise NotImplementedError() - global models - global models_devices - device = grab_best_device(use_gpu=use_gpu) - model_key = f"{model_type}" - if OFFLOAD_CPU: - models_devices[model_key] = device - device = "cpu" - if model_key not in models or force_reload: - ckpt_path = _get_ckpt_path(model_type, use_small=use_small) - clean_models(model_key=model_key) - model = _load_model_f(ckpt_path, device) - models[model_key] = model - if model_type == "text": - models[model_key]["model"].to(device) - else: - models[model_key].to(device) - return models[model_key] - - -def load_codec_model(use_gpu=True, force_reload=False): - global models - global models_devices - device = grab_best_device(use_gpu=use_gpu) - if device == "mps": - # encodec doesn't support mps - device = "cpu" - model_key = "codec" - if OFFLOAD_CPU: - models_devices[model_key] = device - device = "cpu" - if model_key not in models or force_reload: - clean_models(model_key=model_key) - model = _load_codec_model(device) - models[model_key] = model - models[model_key].to(device) - return models[model_key] - - -def preload_models( - text_use_gpu=True, - text_use_small=False, - coarse_use_gpu=True, - coarse_use_small=False, - fine_use_gpu=True, - fine_use_small=False, - codec_use_gpu=True, - force_reload=False -): - """Load all the necessary models for the pipeline.""" - if grab_best_device() == "cpu" and ( - text_use_gpu or coarse_use_gpu or fine_use_gpu or codec_use_gpu - ): - logger.warning("No GPU being used. 
Careful, inference might be very slow!") - _ = load_model( - model_type="text", use_gpu=text_use_gpu, use_small=text_use_small, force_reload=force_reload - ) - _ = load_model( - model_type="coarse", - use_gpu=coarse_use_gpu, - use_small=coarse_use_small, - force_reload=force_reload, - ) - _ = load_model( - model_type="fine", use_gpu=fine_use_gpu, use_small=fine_use_small, force_reload=force_reload - ) - _ = load_codec_model(use_gpu=codec_use_gpu, force_reload=force_reload) - - -#### -# Generation Functionality -#### - - -def _tokenize(tokenizer, text): - return tokenizer.encode(text, add_special_tokens=False) - - -def _detokenize(tokenizer, enc_text): - return tokenizer.decode(enc_text) - - -def _normalize_whitespace(text): - return re.sub(r"\s+", " ", text).strip() - - -TEXT_ENCODING_OFFSET = 10_048 -SEMANTIC_PAD_TOKEN = 10_000 -TEXT_PAD_TOKEN = 129_595 -SEMANTIC_INFER_TOKEN = 129_599 - - -def _load_history_prompt(history_prompt_input): - if isinstance(history_prompt_input, str) and history_prompt_input.endswith(".npz"): - history_prompt = np.load(history_prompt_input) - elif isinstance(history_prompt_input, str): - # make sure this works on non-ubuntu - history_prompt_input = os.path.join(*history_prompt_input.split("/")) -# if history_prompt_input not in ALLOWED_PROMPTS: -# raise ValueError("history prompt not found") - history_prompt = np.load( - os.path.join(CUR_PATH, "assets", "prompts", f"{history_prompt_input}.npz") - ) - elif isinstance(history_prompt_input, dict): - assert("semantic_prompt" in history_prompt_input) - assert("coarse_prompt" in history_prompt_input) - assert("fine_prompt" in history_prompt_input) - history_prompt = history_prompt_input - else: - raise ValueError("history prompt format unrecognized") - return history_prompt - - -def generate_text_semantic( - text, - history_prompt=None, - temp=0.7, - top_k=None, - top_p=None, - silent=False, - min_eos_p=0.2, - max_gen_duration_s=None, - allow_early_stop=True, - use_kv_caching=False, -): - """Generate semantic tokens from text.""" - assert isinstance(text, str) - text = _normalize_whitespace(text) - assert len(text.strip()) > 0 - if history_prompt is not None: - history_prompt = _load_history_prompt(history_prompt) - semantic_history = history_prompt["semantic_prompt"] - assert ( - isinstance(semantic_history, np.ndarray) - and len(semantic_history.shape) == 1 - and len(semantic_history) > 0 - and semantic_history.min() >= 0 - and semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1 - ) - else: - semantic_history = None - # load models if not yet exist - global models - global models_devices - if "text" not in models: - preload_models() - model_container = models["text"] - model = model_container["model"] - tokenizer = model_container["tokenizer"] - encoded_text = np.array(_tokenize(tokenizer, text)) + TEXT_ENCODING_OFFSET - if OFFLOAD_CPU: - model.to(models_devices["text"]) - device = next(model.parameters()).device - if len(encoded_text) > 256: - p = round((len(encoded_text) - 256) / len(encoded_text) * 100, 1) - logger.warning(f"warning, text too long, lopping of last {p}%") - encoded_text = encoded_text[:256] - encoded_text = np.pad( - encoded_text, - (0, 256 - len(encoded_text)), - constant_values=TEXT_PAD_TOKEN, - mode="constant", - ) - if semantic_history is not None: - semantic_history = semantic_history.astype(np.int64) - # lop off if history is too long, pad if needed - semantic_history = semantic_history[-256:] - semantic_history = np.pad( - semantic_history, - (0, 256 - len(semantic_history)), - 
constant_values=SEMANTIC_PAD_TOKEN, - mode="constant", - ) - else: - semantic_history = np.array([SEMANTIC_PAD_TOKEN] * 256) - x = torch.from_numpy( - np.hstack([ - encoded_text, semantic_history, np.array([SEMANTIC_INFER_TOKEN]) - ]).astype(np.int64) - )[None] - assert x.shape[1] == 256 + 256 + 1 - with _inference_mode(): - x = x.to(device) - n_tot_steps = 768 - # custom tqdm updates since we don't know when eos will occur - pbar = tqdm.tqdm(disable=silent, total=100) - pbar_state = 0 - tot_generated_duration_s = 0 - kv_cache = None - for n in range(n_tot_steps): - if use_kv_caching and kv_cache is not None: - x_input = x[:, [-1]] - else: - x_input = x - logits, kv_cache = model( - x_input, merge_context=True, use_cache=use_kv_caching, past_kv=kv_cache - ) - relevant_logits = logits[0, 0, :SEMANTIC_VOCAB_SIZE] - if allow_early_stop: - relevant_logits = torch.hstack( - (relevant_logits, logits[0, 0, [SEMANTIC_PAD_TOKEN]]) # eos - ) - if top_p is not None: - # faster to convert to numpy - original_device = relevant_logits.device - relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() - sorted_indices = np.argsort(relevant_logits)[::-1] - sorted_logits = relevant_logits[sorted_indices] - cumulative_probs = np.cumsum(softmax(sorted_logits)) - sorted_indices_to_remove = cumulative_probs > top_p - sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() - sorted_indices_to_remove[0] = False - relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf - relevant_logits = torch.from_numpy(relevant_logits) - relevant_logits = relevant_logits.to(original_device) - if top_k is not None: - v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) - relevant_logits[relevant_logits < v[-1]] = -float("Inf") - probs = F.softmax(relevant_logits / temp, dim=-1) - # multinomial bugged on mps: shuttle to cpu if necessary - inf_device = probs.device - if probs.device.type == "mps": - probs = probs.to("cpu") - item_next = torch.multinomial(probs, num_samples=1) - probs = probs.to(inf_device) - item_next = item_next.to(inf_device) - if allow_early_stop and ( - item_next == SEMANTIC_VOCAB_SIZE - or (min_eos_p is not None and probs[-1] >= min_eos_p) - ): - # eos found, so break - pbar.update(100 - pbar_state) - break - x = torch.cat((x, item_next[None]), dim=1) - tot_generated_duration_s += 1 / SEMANTIC_RATE_HZ - if max_gen_duration_s is not None and tot_generated_duration_s > max_gen_duration_s: - pbar.update(100 - pbar_state) - break - if n == n_tot_steps - 1: - pbar.update(100 - pbar_state) - break - del logits, relevant_logits, probs, item_next - req_pbar_state = np.min([100, int(round(100 * n / n_tot_steps))]) - if req_pbar_state > pbar_state: - pbar.update(req_pbar_state - pbar_state) - pbar_state = req_pbar_state - pbar.close() - out = x.detach().cpu().numpy().squeeze()[256 + 256 + 1 :] - if OFFLOAD_CPU: - model.to("cpu") - assert all(0 <= out) and all(out < SEMANTIC_VOCAB_SIZE) - _clear_cuda_cache() - return out - - -def _flatten_codebooks(arr, offset_size=CODEBOOK_SIZE): - assert len(arr.shape) == 2 - arr = arr.copy() - if offset_size is not None: - for n in range(1, arr.shape[0]): - arr[n, :] += offset_size * n - flat_arr = arr.ravel("F") - return flat_arr - - -COARSE_SEMANTIC_PAD_TOKEN = 12_048 -COARSE_INFER_TOKEN = 12_050 - - -def generate_coarse( - x_semantic, - history_prompt=None, - temp=0.7, - top_k=None, - top_p=None, - silent=False, - max_coarse_history=630, # min 60 (faster), max 630 (more context) - sliding_window_len=60, - 
use_kv_caching=False, -): - """Generate coarse audio codes from semantic tokens.""" -# CF: Uncommented because it breaks swap voice more than once -# assert ( -# isinstance(x_semantic, np.ndarray) -# and len(x_semantic.shape) == 1 -# and len(x_semantic) > 0 -# and x_semantic.min() >= 0 -# and x_semantic.max() <= SEMANTIC_VOCAB_SIZE - 1 -# ) - assert 60 <= max_coarse_history <= 630 - assert max_coarse_history + sliding_window_len <= 1024 - 256 - semantic_to_coarse_ratio = COARSE_RATE_HZ / SEMANTIC_RATE_HZ * N_COARSE_CODEBOOKS - max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) - if history_prompt is not None: - history_prompt = _load_history_prompt(history_prompt) - x_semantic_history = history_prompt["semantic_prompt"] - x_coarse_history = history_prompt["coarse_prompt"] - assert ( - isinstance(x_semantic_history, np.ndarray) - and len(x_semantic_history.shape) == 1 - and len(x_semantic_history) > 0 - and x_semantic_history.min() >= 0 - and x_semantic_history.max() <= SEMANTIC_VOCAB_SIZE - 1 - and isinstance(x_coarse_history, np.ndarray) - and len(x_coarse_history.shape) == 2 - and x_coarse_history.shape[0] == N_COARSE_CODEBOOKS - and x_coarse_history.shape[-1] >= 0 - and x_coarse_history.min() >= 0 - and x_coarse_history.max() <= CODEBOOK_SIZE - 1 - #and ( - # round(x_coarse_history.shape[-1] / len(x_semantic_history), 1) - # == round(semantic_to_coarse_ratio / N_COARSE_CODEBOOKS, 1) - #) - ) - x_coarse_history = _flatten_codebooks(x_coarse_history) + SEMANTIC_VOCAB_SIZE - # trim histories correctly - n_semantic_hist_provided = np.min( - [ - max_semantic_history, - len(x_semantic_history) - len(x_semantic_history) % 2, - int(np.floor(len(x_coarse_history) / semantic_to_coarse_ratio)), - ] - ) - n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) - x_semantic_history = x_semantic_history[-n_semantic_hist_provided:].astype(np.int32) - x_coarse_history = x_coarse_history[-n_coarse_hist_provided:].astype(np.int32) - # TODO: bit of a hack for time alignment (sounds better) - x_coarse_history = x_coarse_history[:-2] - else: - x_semantic_history = np.array([], dtype=np.int32) - x_coarse_history = np.array([], dtype=np.int32) - # load models if not yet exist - global models - global models_devices - if "coarse" not in models: - preload_models() - model = models["coarse"] - if OFFLOAD_CPU: - model.to(models_devices["coarse"]) - device = next(model.parameters()).device - # start loop - n_steps = int( - round( - np.floor(len(x_semantic) * semantic_to_coarse_ratio / N_COARSE_CODEBOOKS) - * N_COARSE_CODEBOOKS - ) - ) - assert n_steps > 0 and n_steps % N_COARSE_CODEBOOKS == 0 - x_semantic = np.hstack([x_semantic_history, x_semantic]).astype(np.int32) - x_coarse = x_coarse_history.astype(np.int32) - base_semantic_idx = len(x_semantic_history) - with _inference_mode(): - x_semantic_in = torch.from_numpy(x_semantic)[None].to(device) - x_coarse_in = torch.from_numpy(x_coarse)[None].to(device) - n_window_steps = int(np.ceil(n_steps / sliding_window_len)) - n_step = 0 - for _ in tqdm.tqdm(range(n_window_steps), total=n_window_steps, disable=silent): - semantic_idx = base_semantic_idx + int(round(n_step / semantic_to_coarse_ratio)) - # pad from right side - x_in = x_semantic_in[:, np.max([0, semantic_idx - max_semantic_history]) :] - x_in = x_in[:, :256] - x_in = F.pad( - x_in, - (0, 256 - x_in.shape[-1]), - "constant", - COARSE_SEMANTIC_PAD_TOKEN, - ) - x_in = torch.hstack( - [ - x_in, - 
torch.tensor([COARSE_INFER_TOKEN])[None].to(device), - x_coarse_in[:, -max_coarse_history:], - ] - ) - kv_cache = None - for _ in range(sliding_window_len): - if n_step >= n_steps: - continue - is_major_step = n_step % N_COARSE_CODEBOOKS == 0 - - if use_kv_caching and kv_cache is not None: - x_input = x_in[:, [-1]] - else: - x_input = x_in - - logits, kv_cache = model(x_input, use_cache=use_kv_caching, past_kv=kv_cache) - logit_start_idx = ( - SEMANTIC_VOCAB_SIZE + (1 - int(is_major_step)) * CODEBOOK_SIZE - ) - logit_end_idx = ( - SEMANTIC_VOCAB_SIZE + (2 - int(is_major_step)) * CODEBOOK_SIZE - ) - relevant_logits = logits[0, 0, logit_start_idx:logit_end_idx] - if top_p is not None: - # faster to convert to numpy - original_device = relevant_logits.device - relevant_logits = relevant_logits.detach().cpu().type(torch.float32).numpy() - sorted_indices = np.argsort(relevant_logits)[::-1] - sorted_logits = relevant_logits[sorted_indices] - cumulative_probs = np.cumsum(softmax(sorted_logits)) - sorted_indices_to_remove = cumulative_probs > top_p - sorted_indices_to_remove[1:] = sorted_indices_to_remove[:-1].copy() - sorted_indices_to_remove[0] = False - relevant_logits[sorted_indices[sorted_indices_to_remove]] = -np.inf - relevant_logits = torch.from_numpy(relevant_logits) - relevant_logits = relevant_logits.to(original_device) - if top_k is not None: - v, _ = torch.topk(relevant_logits, min(top_k, relevant_logits.size(-1))) - relevant_logits[relevant_logits < v[-1]] = -float("Inf") - probs = F.softmax(relevant_logits / temp, dim=-1) - # multinomial bugged on mps: shuttle to cpu if necessary - inf_device = probs.device - if probs.device.type == "mps": - probs = probs.to("cpu") - item_next = torch.multinomial(probs, num_samples=1) - probs = probs.to(inf_device) - item_next = item_next.to(inf_device) - item_next += logit_start_idx - x_coarse_in = torch.cat((x_coarse_in, item_next[None]), dim=1) - x_in = torch.cat((x_in, item_next[None]), dim=1) - del logits, relevant_logits, probs, item_next - n_step += 1 - del x_in - del x_semantic_in - if OFFLOAD_CPU: - model.to("cpu") - gen_coarse_arr = x_coarse_in.detach().cpu().numpy().squeeze()[len(x_coarse_history) :] - del x_coarse_in - assert len(gen_coarse_arr) == n_steps - gen_coarse_audio_arr = gen_coarse_arr.reshape(-1, N_COARSE_CODEBOOKS).T - SEMANTIC_VOCAB_SIZE - for n in range(1, N_COARSE_CODEBOOKS): - gen_coarse_audio_arr[n, :] -= n * CODEBOOK_SIZE - _clear_cuda_cache() - return gen_coarse_audio_arr - - -def generate_fine( - x_coarse_gen, - history_prompt=None, - temp=0.5, - silent=True, -): - """Generate full audio codes from coarse audio codes.""" - assert ( - isinstance(x_coarse_gen, np.ndarray) - and len(x_coarse_gen.shape) == 2 - and 1 <= x_coarse_gen.shape[0] <= N_FINE_CODEBOOKS - 1 - and x_coarse_gen.shape[1] > 0 - and x_coarse_gen.min() >= 0 - and x_coarse_gen.max() <= CODEBOOK_SIZE - 1 - ) - if history_prompt is not None: - history_prompt = _load_history_prompt(history_prompt) - x_fine_history = history_prompt["fine_prompt"] - assert ( - isinstance(x_fine_history, np.ndarray) - and len(x_fine_history.shape) == 2 - and x_fine_history.shape[0] == N_FINE_CODEBOOKS - and x_fine_history.shape[1] >= 0 - and x_fine_history.min() >= 0 - and x_fine_history.max() <= CODEBOOK_SIZE - 1 - ) - else: - x_fine_history = None - n_coarse = x_coarse_gen.shape[0] - # load models if not yet exist - global models - global models_devices - if "fine" not in models: - preload_models() - model = models["fine"] - if OFFLOAD_CPU: - model.to(models_devices["fine"]) 
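
A minimal standalone sketch of the OFFLOAD_CPU pattern used here and in the other generate_* functions, with a toy module standing in for the fine model: weights stay on the CPU between calls and visit the accelerator only for the duration of one stage.

import torch

model = torch.nn.Linear(8, 8)                              # stand-in for the fine model
device = "cuda" if torch.cuda.is_available() else "cpu"

model.to(device)                                           # shuttle in before inference
with torch.inference_mode():
    _ = model(torch.randn(1, 8, device=device))
model.to("cpu")                                            # shuttle back out to free memory
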
- device = next(model.parameters()).device - # make input arr - in_arr = np.vstack( - [ - x_coarse_gen, - np.zeros((N_FINE_CODEBOOKS - n_coarse, x_coarse_gen.shape[1])) - + CODEBOOK_SIZE, # padding - ] - ).astype(np.int32) - # prepend history if available (max 512) - if x_fine_history is not None: - x_fine_history = x_fine_history.astype(np.int32) - in_arr = np.hstack( - [ - x_fine_history[:, -512:].astype(np.int32), - in_arr, - ] - ) - n_history = x_fine_history[:, -512:].shape[1] - else: - n_history = 0 - n_remove_from_end = 0 - # need to pad if too short (since non-causal model) - if in_arr.shape[1] < 1024: - n_remove_from_end = 1024 - in_arr.shape[1] - in_arr = np.hstack( - [ - in_arr, - np.zeros((N_FINE_CODEBOOKS, n_remove_from_end), dtype=np.int32) + CODEBOOK_SIZE, - ] - ) - # we can be lazy about fractional loop and just keep overwriting codebooks - n_loops = np.max([0, int(np.ceil((x_coarse_gen.shape[1] - (1024 - n_history)) / 512))]) + 1 - with _inference_mode(): - in_arr = torch.tensor(in_arr.T).to(device) - for n in tqdm.tqdm(range(n_loops), disable=silent): - start_idx = np.min([n * 512, in_arr.shape[0] - 1024]) - start_fill_idx = np.min([n_history + n * 512, in_arr.shape[0] - 512]) - rel_start_fill_idx = start_fill_idx - start_idx - in_buffer = in_arr[start_idx : start_idx + 1024, :][None] - for nn in range(n_coarse, N_FINE_CODEBOOKS): - logits = model(nn, in_buffer) - if temp is None: - relevant_logits = logits[0, rel_start_fill_idx:, :CODEBOOK_SIZE] - codebook_preds = torch.argmax(relevant_logits, -1) - else: - relevant_logits = logits[0, :, :CODEBOOK_SIZE] / temp - probs = F.softmax(relevant_logits, dim=-1) - # multinomial bugged on mps: shuttle to cpu if necessary - inf_device = probs.device - if probs.device.type == "mps": - probs = probs.to("cpu") - codebook_preds = torch.hstack( - [ - torch.multinomial(probs[nnn], num_samples=1).to(inf_device) - for nnn in range(rel_start_fill_idx, 1024) - ] - ) - in_buffer[0, rel_start_fill_idx:, nn] = codebook_preds - del logits, codebook_preds - # transfer over info into model_in and convert to numpy - for nn in range(n_coarse, N_FINE_CODEBOOKS): - in_arr[ - start_fill_idx : start_fill_idx + (1024 - rel_start_fill_idx), nn - ] = in_buffer[0, rel_start_fill_idx:, nn] - del in_buffer - gen_fine_arr = in_arr.detach().cpu().numpy().squeeze().T - del in_arr - if OFFLOAD_CPU: - model.to("cpu") - gen_fine_arr = gen_fine_arr[:, n_history:] - if n_remove_from_end > 0: - gen_fine_arr = gen_fine_arr[:, :-n_remove_from_end] - assert gen_fine_arr.shape[-1] == x_coarse_gen.shape[-1] - _clear_cuda_cache() - return gen_fine_arr - - -def codec_decode(fine_tokens): - """Turn quantized audio codes into audio array using encodec.""" - # load models if not yet exist - global models - global models_devices - if "codec" not in models: - preload_models() - model = models["codec"] - if OFFLOAD_CPU: - model.to(models_devices["codec"]) - device = next(model.parameters()).device - arr = torch.from_numpy(fine_tokens)[None] - arr = arr.to(device) - arr = arr.transpose(0, 1) - emb = model.quantizer.decode(arr) - out = model.decoder(emb) - audio_arr = out.detach().cpu().numpy().squeeze() - del arr, emb, out - if OFFLOAD_CPU: - model.to("cpu") - return audio_arr diff --git a/spaces/PeepDaSlan9/Bark-Voice-Cloning/util/__init__.py b/spaces/PeepDaSlan9/Bark-Voice-Cloning/util/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Pengyey/bingo-chuchu/src/components/settings.tsx 
b/spaces/Pengyey/bingo-chuchu/src/components/settings.tsx deleted file mode 100644 index 80b8a2d3b252b875f5b6f7dfc2f6e3ad9cdfb22a..0000000000000000000000000000000000000000 --- a/spaces/Pengyey/bingo-chuchu/src/components/settings.tsx +++ /dev/null @@ -1,157 +0,0 @@ -import { useEffect, useState } from 'react' -import { useAtom } from 'jotai' -import { Switch } from '@headlessui/react' -import { toast } from 'react-hot-toast' -import { hashAtom, voiceAtom } from '@/state' -import { - Dialog, - DialogContent, - DialogDescription, - DialogFooter, - DialogHeader, - DialogTitle -} from '@/components/ui/dialog' -import { Button } from './ui/button' -import { Input } from './ui/input' -import { ChunkKeys, parseCookies, extraCurlFromCookie, encodeHeadersToCookie, getCookie, setCookie } from '@/lib/utils' -import { ExternalLink } from './external-link' -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' - - -export function Settings() { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - const [loc, setLoc] = useAtom(hashAtom) - const [curlValue, setCurlValue] = useState(extraCurlFromCookie(parseCookies(document.cookie, ChunkKeys))) - const [imageOnly, setImageOnly] = useState(getCookie('IMAGE_ONLY') !== '0') - const [enableTTS, setEnableTTS] = useAtom(voiceAtom) - - useEffect(() => { - if (isCopied) { - toast.success('复制成功') - } - }, [isCopied]) - - if (loc === 'settings') { - return ( - <Dialog open onOpenChange={() => setLoc('')} modal> - <DialogContent> - <DialogHeader> - <DialogTitle>设置你的用户信息</DialogTitle> - <DialogDescription> - 请使用 Edge 浏览器 - <ExternalLink - href="https://www.bing.com/turing/captcha/challenge" - > - 打开并登录 Bing - </ExternalLink> - ,然后再打开 - <ExternalLink href="https://www.bing.com/turing/captcha/challenge">Challenge 接口</ExternalLink> - 右键 》检查。打开开发者工具,在网络里面找到 Create 接口 》右键复制》复制为 cURL(bash),粘贴到此处,然后保存。 - <div className="h-2" /> - 图文示例: - <ExternalLink href="https://github.com/weaigc/bingo#如何获取%20BING_HEADER">如何获取 BING_HEADER</ExternalLink> - </DialogDescription> - </DialogHeader> - <div className="flex gap-4"> - - </div> - <Input - value={curlValue} - placeholder="在此填写用户信息,格式: curl 'https://www.bing.com/turing/captcha/challenge' ..." - onChange={e => setCurlValue(e.target.value)} - /> - <div className="flex gap-2"> - 身份信息仅用于画图(推荐) - <Switch - checked={imageOnly} - className={`${imageOnly ? 'bg-blue-600' : 'bg-gray-200'} relative inline-flex h-6 w-11 items-center rounded-full`} - onChange={(checked: boolean) => setImageOnly(checked)} - > - <span - className={`${imageOnly ? 
'translate-x-6' : 'translate-x-1'} inline-block h-4 w-4 transform rounded-full bg-white transition`} - /> - </Switch> - </div> - - <Button variant="ghost" className="bg-[#F5F5F5] hover:bg-[#F2F2F2]" onClick={() => copyToClipboard(btoa(curlValue))}> - 转成 BING_HEADER 并复制 - </Button> - - <DialogFooter className="items-center"> - <Button - variant="secondary" - className="bg-[#c7f3ff] hover:bg-[#fdc7ff]" - onClick={() => { - let headerValue = curlValue - if (headerValue) { - try { - headerValue = atob(headerValue) - } catch (e) { } - if (!/^\s*curl ['"]https:\/\/(www|cn)\.bing\.com\/turing\/captcha\/challenge['"]/.test(headerValue)) { - toast.error('格式不正确') - return - } - const maxAge = 86400 * 30 - encodeHeadersToCookie(headerValue).forEach(cookie => document.cookie = `${cookie}; Max-Age=${maxAge}; Path=/; SameSite=None; Secure`) - } else { - [...ChunkKeys, 'BING_COOKIE', 'BING_UA', 'BING_IP'].forEach(key => setCookie(key, '')) - } - setCookie('IMAGE_ONLY', RegExp.$1 === 'cn' || imageOnly ? '1' : '0') - - toast.success('保存成功') - setLoc('') - setTimeout(() => { - location.href = './' - }, 2000) - }} - > - 保存 - </Button> - </DialogFooter> - </DialogContent> - </Dialog> - ) - } else if (loc === 'voice') { - return ( - <Dialog open onOpenChange={() => setLoc('')} modal> - <DialogContent> - <DialogHeader> - <DialogTitle>语音设置</DialogTitle> - <DialogDescription> - 目前仅支持 PC 端 Edge 及 Chrome 浏览器 - </DialogDescription> - </DialogHeader> - - <div className="flex gap-2"> - 启用语音回答 - <Switch - checked={enableTTS} - className={`${enableTTS ? 'bg-blue-600' : 'bg-gray-200'} relative inline-flex h-6 w-11 items-center rounded-full`} - onChange={(checked: boolean) => setEnableTTS(checked)} - > - <span - className={`${enableTTS ? 'translate-x-6' : 'translate-x-1'} inline-block h-4 w-4 transform rounded-full bg-white transition`} - /> - </Switch> - </div> - - <DialogFooter className="items-center"> - <Button - variant="secondary" - onClick={() => { - toast.success('保存成功') - setLoc('') - setTimeout(() => { - location.href = './' - }, 2000) - }} - > - 保存 - </Button> - </DialogFooter> - </DialogContent> - </Dialog> - ) - } - return null -} diff --git a/spaces/PrussianBlue/White-box-Cartoonization/wbc/guided_filter.py b/spaces/PrussianBlue/White-box-Cartoonization/wbc/guided_filter.py deleted file mode 100644 index fd019d145efc7f308cd96de90f4e7b648f6820b4..0000000000000000000000000000000000000000 --- a/spaces/PrussianBlue/White-box-Cartoonization/wbc/guided_filter.py +++ /dev/null @@ -1,87 +0,0 @@ -import tensorflow as tf -import numpy as np - - - - -def tf_box_filter(x, r): - k_size = int(2*r+1) - ch = x.get_shape().as_list()[-1] - weight = 1/(k_size**2) - box_kernel = weight*np.ones((k_size, k_size, ch, 1)) - box_kernel = np.array(box_kernel).astype(np.float32) - output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME') - return output - - - -def guided_filter(x, y, r, eps=1e-2): - - x_shape = tf.shape(x) - #y_shape = tf.shape(y) - - N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r) - - mean_x = tf_box_filter(x, r) / N - mean_y = tf_box_filter(y, r) / N - cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y - var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x - - A = cov_xy / (var_x + eps) - b = mean_y - A * mean_x - - mean_A = tf_box_filter(A, r) / N - mean_b = tf_box_filter(b, r) / N - - output = mean_A * x + mean_b - - return output - - - -def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8): - - #assert lr_x.shape.ndims == 4 and lr_y.shape.ndims == 4 and 
hr_x.shape.ndims == 4 - - lr_x_shape = tf.shape(lr_x) - #lr_y_shape = tf.shape(lr_y) - hr_x_shape = tf.shape(hr_x) - - N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r) - - mean_x = tf_box_filter(lr_x, r) / N - mean_y = tf_box_filter(lr_y, r) / N - cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y - var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x - - A = cov_xy / (var_x + eps) - b = mean_y - A * mean_x - - mean_A = tf.image.resize_images(A, hr_x_shape[1: 3]) - mean_b = tf.image.resize_images(b, hr_x_shape[1: 3]) - - output = mean_A * hr_x + mean_b - - return output - - -if __name__ == '__main__': - import cv2 - from tqdm import tqdm - - input_photo = tf.placeholder(tf.float32, [1, None, None, 3]) - #input_superpixel = tf.placeholder(tf.float32, [16, 256, 256, 3]) - output = guided_filter(input_photo, input_photo, 5, eps=1) - image = cv2.imread('output_figure1/cartoon2.jpg') - image = image/127.5 - 1 - image = np.expand_dims(image, axis=0) - - config = tf.ConfigProto() - config.gpu_options.allow_growth = True - sess = tf.Session(config=config) - sess.run(tf.global_variables_initializer()) - - out = sess.run(output, feed_dict={input_photo: image}) - out = (np.squeeze(out)+1)*127.5 - out = np.clip(out, 0, 255).astype(np.uint8) - cv2.imwrite('output_figure1/cartoon2_filter.jpg', out) diff --git a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/conditional_builder/utils.py b/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/conditional_builder/utils.py deleted file mode 100644 index d0ee175f2e05a80dbc71c22acbecb22dddadbb42..0000000000000000000000000000000000000000 --- a/spaces/Purple11/Grounded-Diffusion/src/taming-transformers/taming/data/conditional_builder/utils.py +++ /dev/null @@ -1,105 +0,0 @@ -import importlib -from typing import List, Any, Tuple, Optional - -from taming.data.helper_types import BoundingBox, Annotation - -# source: seaborn, color palette tab10 -COLOR_PALETTE = [(30, 118, 179), (255, 126, 13), (43, 159, 43), (213, 38, 39), (147, 102, 188), - (139, 85, 74), (226, 118, 193), (126, 126, 126), (187, 188, 33), (22, 189, 206)] -BLACK = (0, 0, 0) -GRAY_75 = (63, 63, 63) -GRAY_50 = (127, 127, 127) -GRAY_25 = (191, 191, 191) -WHITE = (255, 255, 255) -FULL_CROP = (0., 0., 1., 1.) - - -def intersection_area(rectangle1: BoundingBox, rectangle2: BoundingBox) -> float: - """ - Give intersection area of two rectangles. 
- @param rectangle1: (x0, y0, w, h) of first rectangle - @param rectangle2: (x0, y0, w, h) of second rectangle - """ - rectangle1 = rectangle1[0], rectangle1[1], rectangle1[0] + rectangle1[2], rectangle1[1] + rectangle1[3] - rectangle2 = rectangle2[0], rectangle2[1], rectangle2[0] + rectangle2[2], rectangle2[1] + rectangle2[3] - x_overlap = max(0., min(rectangle1[2], rectangle2[2]) - max(rectangle1[0], rectangle2[0])) - y_overlap = max(0., min(rectangle1[3], rectangle2[3]) - max(rectangle1[1], rectangle2[1])) - return x_overlap * y_overlap - - -def horizontally_flip_bbox(bbox: BoundingBox) -> BoundingBox: - return 1 - (bbox[0] + bbox[2]), bbox[1], bbox[2], bbox[3] - - -def absolute_bbox(relative_bbox: BoundingBox, width: int, height: int) -> Tuple[int, int, int, int]: - bbox = relative_bbox - bbox = bbox[0] * width, bbox[1] * height, (bbox[0] + bbox[2]) * width, (bbox[1] + bbox[3]) * height - return int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]) - - -def pad_list(list_: List, pad_element: Any, pad_to_length: int) -> List: - return list_ + [pad_element for _ in range(pad_to_length - len(list_))] - - -def rescale_annotations(annotations: List[Annotation], crop_coordinates: BoundingBox, flip: bool) -> \ - List[Annotation]: - def clamp(x: float): - return max(min(x, 1.), 0.) - - def rescale_bbox(bbox: BoundingBox) -> BoundingBox: - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - if flip: - x0 = 1 - (x0 + w) - return x0, y0, w, h - - return [a._replace(bbox=rescale_bbox(a.bbox)) for a in annotations] - - -def filter_annotations(annotations: List[Annotation], crop_coordinates: BoundingBox) -> List: - return [a for a in annotations if intersection_area(a.bbox, crop_coordinates) > 0.0] - - -def additional_parameters_string(annotation: Annotation, short: bool = True) -> str: - sl = slice(1) if short else slice(None) - string = '' - if not (annotation.is_group_of or annotation.is_occluded or annotation.is_depiction or annotation.is_inside): - return string - if annotation.is_group_of: - string += 'group'[sl] + ',' - if annotation.is_occluded: - string += 'occluded'[sl] + ',' - if annotation.is_depiction: - string += 'depiction'[sl] + ',' - if annotation.is_inside: - string += 'inside'[sl] - return '(' + string.strip(",") + ')' - - -def get_plot_font_size(font_size: Optional[int], figure_size: Tuple[int, int]) -> int: - if font_size is None: - font_size = 10 - if max(figure_size) >= 256: - font_size = 12 - if max(figure_size) >= 512: - font_size = 15 - return font_size - - -def get_circle_size(figure_size: Tuple[int, int]) -> int: - circle_size = 2 - if max(figure_size) >= 256: - circle_size = 3 - if max(figure_size) >= 512: - circle_size = 4 - return circle_size - - -def load_object_from_string(object_string: str) -> Any: - """ - Source: https://stackoverflow.com/a/10773699 - """ - module_name, class_name = object_string.rsplit(".", 1) - return getattr(importlib.import_module(module_name), class_name) diff --git a/spaces/Qiukai/gpt/crazy_functions/test_project/python/dqn/policies.py b/spaces/Qiukai/gpt/crazy_functions/test_project/python/dqn/policies.py deleted file mode 100644 index 4ecf39a5fc04b24ad1b809232b186728366987b6..0000000000000000000000000000000000000000 --- a/spaces/Qiukai/gpt/crazy_functions/test_project/python/dqn/policies.py +++ /dev/null @@ -1,237 +0,0 @@ -from typing import Any, Dict, List, 
Optional, Type - -import gym -import torch as th -from torch import nn - -from stable_baselines3.common.policies import BasePolicy, register_policy -from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp -from stable_baselines3.common.type_aliases import Schedule - - -class QNetwork(BasePolicy): - """ - Action-Value (Q-Value) network for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - features_extractor: nn.Module, - features_dim: int, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - normalize_images: bool = True, - ): - super(QNetwork, self).__init__( - observation_space, - action_space, - features_extractor=features_extractor, - normalize_images=normalize_images, - ) - - if net_arch is None: - net_arch = [64, 64] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.features_extractor = features_extractor - self.features_dim = features_dim - self.normalize_images = normalize_images - action_dim = self.action_space.n # number of actions - q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn) - self.q_net = nn.Sequential(*q_net) - - def forward(self, obs: th.Tensor) -> th.Tensor: - """ - Predict the q-values. - - :param obs: Observation - :return: The estimated Q-Value for each action. - """ - return self.q_net(self.extract_features(obs)) - - def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor: - q_values = self.forward(observation) - # Greedy action - action = q_values.argmax(dim=1).reshape(-1) - return action - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_arch, - features_dim=self.features_dim, - activation_fn=self.activation_fn, - features_extractor=self.features_extractor, - ) - ) - return data - - -class DQNPolicy(BasePolicy): - """ - Policy class with Q-Value Net and target net for DQN - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. - :param features_extractor_kwargs: Keyword arguments - to pass to the features extractor. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(DQNPolicy, self).__init__( - observation_space, - action_space, - features_extractor_class, - features_extractor_kwargs, - optimizer_class=optimizer_class, - optimizer_kwargs=optimizer_kwargs, - ) - - if net_arch is None: - if features_extractor_class == FlattenExtractor: - net_arch = [64, 64] - else: - net_arch = [] - - self.net_arch = net_arch - self.activation_fn = activation_fn - self.normalize_images = normalize_images - - self.net_args = { - "observation_space": self.observation_space, - "action_space": self.action_space, - "net_arch": self.net_arch, - "activation_fn": self.activation_fn, - "normalize_images": normalize_images, - } - - self.q_net, self.q_net_target = None, None - self._build(lr_schedule) - - def _build(self, lr_schedule: Schedule) -> None: - """ - Create the network and the optimizer. - - :param lr_schedule: Learning rate schedule - lr_schedule(1) is the initial learning rate - """ - - self.q_net = self.make_q_net() - self.q_net_target = self.make_q_net() - self.q_net_target.load_state_dict(self.q_net.state_dict()) - - # Setup optimizer with initial learning rate - self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) - - def make_q_net(self) -> QNetwork: - # Make sure we always have separate networks for features extractors etc - net_args = self._update_features_extractor(self.net_args, features_extractor=None) - return QNetwork(**net_args).to(self.device) - - def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self._predict(obs, deterministic=deterministic) - - def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor: - return self.q_net._predict(obs, deterministic=deterministic) - - def _get_constructor_parameters(self) -> Dict[str, Any]: - data = super()._get_constructor_parameters() - - data.update( - dict( - net_arch=self.net_args["net_arch"], - activation_fn=self.net_args["activation_fn"], - lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone - optimizer_class=self.optimizer_class, - optimizer_kwargs=self.optimizer_kwargs, - features_extractor_class=self.features_extractor_class, - features_extractor_kwargs=self.features_extractor_kwargs, - ) - ) - return data - - -MlpPolicy = DQNPolicy - - -class CnnPolicy(DQNPolicy): - """ - Policy class for DQN when using images as input. - - :param observation_space: Observation space - :param action_space: Action space - :param lr_schedule: Learning rate schedule (could be constant) - :param net_arch: The specification of the policy and value networks. - :param activation_fn: Activation function - :param features_extractor_class: Features extractor to use. 
- :param normalize_images: Whether to normalize images or not, - dividing by 255.0 (True by default) - :param optimizer_class: The optimizer to use, - ``th.optim.Adam`` by default - :param optimizer_kwargs: Additional keyword arguments, - excluding the learning rate, to pass to the optimizer - """ - - def __init__( - self, - observation_space: gym.spaces.Space, - action_space: gym.spaces.Space, - lr_schedule: Schedule, - net_arch: Optional[List[int]] = None, - activation_fn: Type[nn.Module] = nn.ReLU, - features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN, - features_extractor_kwargs: Optional[Dict[str, Any]] = None, - normalize_images: bool = True, - optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam, - optimizer_kwargs: Optional[Dict[str, Any]] = None, - ): - super(CnnPolicy, self).__init__( - observation_space, - action_space, - lr_schedule, - net_arch, - activation_fn, - features_extractor_class, - features_extractor_kwargs, - normalize_images, - optimizer_class, - optimizer_kwargs, - ) - - -register_policy("MlpPolicy", MlpPolicy) -register_policy("CnnPolicy", CnnPolicy) diff --git a/spaces/QuoQA-NLP/QuoQaGo/app.py b/spaces/QuoQA-NLP/QuoQaGo/app.py deleted file mode 100644 index 18dbbdc6fa849b2ed538f54b47a15e335b81e611..0000000000000000000000000000000000000000 --- a/spaces/QuoQA-NLP/QuoQaGo/app.py +++ /dev/null @@ -1,72 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import streamlit as st -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM - - -st.set_page_config( - page_title="쿼카고", layout="wide", initial_sidebar_state="expanded" -) - -@st.cache -def load_model(model_name): - model = AutoModelForSeq2SeqLM.from_pretrained(model_name) - return model - -tokenizer = AutoTokenizer.from_pretrained("QuoQA-NLP/KE-T5-Ko2En-Base") -ko2en_model = load_model("QuoQA-NLP/KE-T5-Ko2En-Base") -en2ko_model = load_model("QuoQA-NLP/KE-T5-En2Ko-Base") - - -st.title("🐻 쿼카고 번역기") -st.write("좌측에 번역 모드를 선택하고, CTRL+Enter(CMD+Enter)를 누르세요 🤗") -st.write("Select Translation Mode at the left and press CTRL+Enter(CMD+Enter)🤗") - -translation_list = ["한국어에서 영어 | Korean to English", "영어에서 한국어 | English to Korean"] -translation_mode = st.sidebar.radio("번역 모드를 선택(Translation Mode):", translation_list) - - -default_value = '신한카드 관계자는 "과거 내놓은 상품의 경우 출시 2개월 만에 적금 가입이 4만여 좌에 달할 정도로 인기를 끌었다"면서 "금리 인상에 따라 적금 금리를 더 올려 많은 고객이 몰릴 것으로 예상하고 있다"고 말했다.' 
-src_text = st.text_area( - "번역하고 싶은 문장을 입력하세요:", - default_value, - height=300, - max_chars=200, -) -print(src_text) - - - -if src_text == "": - st.warning("Please **enter text** for translation") -else: - # translate into english sentence - if translation_mode == translation_list[0]: - model = ko2en_model - else: - model = en2ko_model - - translation_result = model.generate( - **tokenizer( - src_text, - return_tensors="pt", - padding="max_length", - truncation=True, - max_length=64, - ), - max_length=64, - num_beams=5, - repetition_penalty=1.3, - no_repeat_ngram_size=3, - num_return_sequences=1, - ) - translation_result = tokenizer.decode( - translation_result[0], - clean_up_tokenization_spaces=True, - skip_special_tokens=True, - ) - - print(f"{src_text} -> {translation_result}") - - st.write(translation_result) - print(translation_result) diff --git a/spaces/RatKing243/Test/README.md b/spaces/RatKing243/Test/README.md deleted file mode 100644 index dbaadbf4c5737fcbc8229efadbc89f06f4b0f9bd..0000000000000000000000000000000000000000 --- a/spaces/RatKing243/Test/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Test -emoji: 📊 -colorFrom: red -colorTo: red -sdk: docker -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/tools/extract.py b/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/tools/extract.py deleted file mode 100644 index b3dea56a14f6c100b2c53978678bab69a656cdeb..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/ASpanFormer/tools/extract.py +++ /dev/null @@ -1,81 +0,0 @@ -import os -import glob -from re import split -from tqdm import tqdm -from multiprocessing import Pool -from functools import partial - -scannet_dir = "/root/data/ScanNet-v2-1.0.0/data/raw" -dump_dir = "/root/data/scannet_dump" -num_process = 32 - - -def extract(seq, scannet_dir, split, dump_dir): - assert split == "train" or split == "test" - if not os.path.exists(os.path.join(dump_dir, split, seq)): - os.mkdir(os.path.join(dump_dir, split, seq)) - cmd = ( - "python reader.py --filename " - + os.path.join( - scannet_dir, - "scans" if split == "train" else "scans_test", - seq, - seq + ".sens", - ) - + " --output_path " - + os.path.join(dump_dir, split, seq) - + " --export_depth_images --export_color_images --export_poses --export_intrinsics" - ) - os.system(cmd) - - -if __name__ == "__main__": - if not os.path.exists(dump_dir): - os.mkdir(dump_dir) - os.mkdir(os.path.join(dump_dir, "train")) - os.mkdir(os.path.join(dump_dir, "test")) - - train_seq_list = [ - seq.split("/")[-1] - for seq in glob.glob(os.path.join(scannet_dir, "scans", "scene*")) - ] - test_seq_list = [ - seq.split("/")[-1] - for seq in glob.glob(os.path.join(scannet_dir, "scans_test", "scene*")) - ] - - extract_train = partial( - extract, scannet_dir=scannet_dir, split="train", dump_dir=dump_dir - ) - extract_test = partial( - extract, scannet_dir=scannet_dir, split="test", dump_dir=dump_dir - ) - - num_train_iter = ( - len(train_seq_list) // num_process - if len(train_seq_list) % num_process == 0 - else len(train_seq_list) // num_process + 1 - ) - num_test_iter = ( - len(test_seq_list) // num_process - if len(test_seq_list) % num_process == 0 - else len(test_seq_list) // num_process + 1 - ) - - pool = Pool(num_process) - for index in tqdm(range(num_train_iter)): - seq_list = train_seq_list[ - index * num_process : min((index + 1) * num_process, 
len(train_seq_list)) - ] - pool.map(extract_train, seq_list) - pool.close() - pool.join() - - pool = Pool(num_process) - for index in tqdm(range(num_test_iter)): - seq_list = test_seq_list[ - index * num_process : min((index + 1) * num_process, len(test_seq_list)) - ] - pool.map(extract_test, seq_list) - pool.close() - pool.join() diff --git a/spaces/Realcat/image-matching-webui/third_party/LightGlue/setup.py b/spaces/Realcat/image-matching-webui/third_party/LightGlue/setup.py deleted file mode 100644 index 2b012e92a208d09e4983317c4eb3c1d8093177e8..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/LightGlue/setup.py +++ /dev/null @@ -1,27 +0,0 @@ -from pathlib import Path -from setuptools import setup - -description = ["LightGlue"] - -with open(str(Path(__file__).parent / "README.md"), "r", encoding="utf-8") as f: - readme = f.read() -with open(str(Path(__file__).parent / "requirements.txt"), "r") as f: - dependencies = f.read().split("\n") - -setup( - name="lightglue", - version="0.0", - packages=["lightglue"], - python_requires=">=3.6", - install_requires=dependencies, - author="Philipp Lindenberger, Paul-Edouard Sarlin", - description=description, - long_description=readme, - long_description_content_type="text/markdown", - url="https://github.com/cvg/LightGlue/", - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - ], -) diff --git a/spaces/RitaParadaRamos/SmallCapDemo/src/utils_generate_retrieved_caps.py b/spaces/RitaParadaRamos/SmallCapDemo/src/utils_generate_retrieved_caps.py deleted file mode 100644 index 805ca584de60342369b888f6e67af82f8ea2a624..0000000000000000000000000000000000000000 --- a/spaces/RitaParadaRamos/SmallCapDemo/src/utils_generate_retrieved_caps.py +++ /dev/null @@ -1,135 +0,0 @@ -from torch.utils.data import Dataset -from PIL import Image -import torch -import json -import h5py -import bisect - -CAPTION_LENGTH = 25 -SIMPLE_PREFIX = "This image shows " - -def prep_strings(text, tokenizer, template=None, retrieved_caps=None, k=None, is_test=False, max_length=None): - - if is_test: - padding = False - truncation = False - else: - padding = True - truncation = True - - if retrieved_caps is not None: - infix = '\n\n'.join(retrieved_caps[:k]) + '.' 
- prefix = template.replace('||', infix) - else: - prefix = SIMPLE_PREFIX - - prefix_ids = tokenizer.encode(prefix) - len_prefix = len(prefix_ids) - - text_ids = tokenizer.encode(text, add_special_tokens=False) - if truncation: - text_ids = text_ids[:CAPTION_LENGTH] - input_ids = prefix_ids + text_ids if not is_test else prefix_ids - - # we ignore the prefix (minus one as the first subtoken in the prefix is not predicted) - label_ids = [-100] * (len_prefix - 1) + text_ids + [tokenizer.eos_token_id] - if padding: - input_ids += [tokenizer.pad_token_id] * (max_length - len(input_ids)) - label_ids += [-100] * (max_length - len(label_ids)) - - if is_test: - return input_ids - else: - return input_ids, label_ids - -def postprocess_preds(pred, tokenizer): - pred = pred.split(SIMPLE_PREFIX)[-1] - pred = pred.replace(tokenizer.pad_token, '') - if pred.startswith(tokenizer.bos_token): - pred = pred[len(tokenizer.bos_token):] - if pred.endswith(tokenizer.eos_token): - pred = pred[:-len(tokenizer.eos_token)] - return pred - -class TrainDataset(Dataset): - def __init__(self, df, features_path, tokenizer, rag=False, template_path=None, k=None, max_caption_length=25): - self.df = df - self.tokenizer = tokenizer - self.features = h5py.File(features_path, 'r') - - if rag: - self.template = open(template_path).read().strip() + ' ' - self.max_target_length = (max_caption_length # target caption - + max_caption_length * k # retrieved captions - + len(tokenizer.encode(self.template)) # template - + len(tokenizer.encode('\n\n')) * (k-1) # separator between captions - ) - assert k is not None - self.k = k - self.rag = rag - - def __len__(self): - return len(self.df) - - def __getitem__(self, idx): - text = self.df['text'][idx] - if self.rag: - caps = self.df['caps'][idx] - decoder_input_ids, labels = prep_strings(text, self.tokenizer, template=self.template, - retrieved_caps=caps, k=self.k, max_length=self.max_target_length) - else: - decoder_input_ids, labels = prep_strings(text, self.tokenizer, max_length=self.max_target_length) - # load precomputed features - encoder_outputs = self.features[self.df['cocoid'][idx]][()] - encoding = {"encoder_outputs": torch.tensor(encoder_outputs), - "decoder_input_ids": torch.tensor(decoder_input_ids), - "labels": torch.tensor(labels)} - - return encoding - - -def load_data_for_training(annot_path, caps_path=None): - annotations = json.load(open(annot_path))['images'] - if caps_path is not None: - retrieved_caps = json.load(open(caps_path)) - data = {'train': [], 'val': []} - - for item in annotations: - file_name = item['filename'].split('_')[-1] - caps = retrieved_caps[str(item['cocoid'])] - - samples = [] - for sentence in item['sentences']: - print("how are the retrieved caps", caps + ' '.join(sentence['tokens'])) - - samples.append({'file_name': file_name, 'cocoid': str(item['cocoid']), 'caps': None, 'text': " ".join(caps) + ' '.join(sentence['tokens'])}) - if item['split'] == 'train' or item['split'] == 'restval': - data['train'] += samples - elif item['split'] == 'val': - data['val'] += samples - return data - - - - - -def load_data_for_inference(annot_path, caps_path=None): - annotations = json.load(open(annot_path))['images'] - if caps_path is not None: - retrieved_caps = json.load(open(caps_path)) - data = {'test': [], 'val': []} - - for item in annotations: - file_name = item['filename'].split('_')[-1] - if caps_path is not None: - caps = retrieved_caps[str(item['cocoid'])] - else: - caps = None - image = {'file_name': file_name, 'caps': caps, 'image_id': 
str(item['cocoid'])} - if item['split'] == 'test': - data['test'].append(image) - elif item['split'] == 'val': - data['val'].append(image) - - return data - diff --git a/spaces/Robert001/UniControl-Demo/README.md b/spaces/Robert001/UniControl-Demo/README.md deleted file mode 100644 index a8457ea3f35dbb3a47b5573aff671c56b57d1f9c..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: UniControl Demo -emoji: 📚 -colorFrom: green -colorTo: blue -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py deleted file mode 100644 index 988d9adf2f289ef223bd1c680a5ae1d3387f0269..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py +++ /dev/null @@ -1,412 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from ..utils import kaiming_init -from .registry import PLUGIN_LAYERS - - -@PLUGIN_LAYERS.register_module() -class GeneralizedAttention(nn.Module): - """GeneralizedAttention module. - - See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' - (https://arxiv.org/abs/1711.07971) for details. - - Args: - in_channels (int): Channels of the input feature map. - spatial_range (int): The spatial range. -1 indicates no spatial range - constraint. Default: -1. - num_heads (int): The head number of empirical_attention module. - Default: 9. - position_embedding_dim (int): The position embedding dimension. - Default: -1. - position_magnitude (int): A multiplier acting on coord difference. - Default: 1. - kv_stride (int): The feature stride acting on key/value feature map. - Default: 2. - q_stride (int): The feature stride acting on query feature map. - Default: 1. - attention_type (str): A binary indicator string for indicating which - items in generalized empirical_attention module are used. - Default: '1111'. - - - '1000' indicates 'query and key content' (appr - appr) item, - - '0100' indicates 'query content and relative position' - (appr - position) item, - - '0010' indicates 'key content only' (bias - appr) item, - - '0001' indicates 'relative position only' (bias - position) item. 
- """ - - _abbr_ = 'gen_attention_block' - - def __init__(self, - in_channels, - spatial_range=-1, - num_heads=9, - position_embedding_dim=-1, - position_magnitude=1, - kv_stride=2, - q_stride=1, - attention_type='1111'): - - super(GeneralizedAttention, self).__init__() - - # hard range means local range for non-local operation - self.position_embedding_dim = ( - position_embedding_dim - if position_embedding_dim > 0 else in_channels) - - self.position_magnitude = position_magnitude - self.num_heads = num_heads - self.in_channels = in_channels - self.spatial_range = spatial_range - self.kv_stride = kv_stride - self.q_stride = q_stride - self.attention_type = [bool(int(_)) for _ in attention_type] - self.qk_embed_dim = in_channels // num_heads - out_c = self.qk_embed_dim * num_heads - - if self.attention_type[0] or self.attention_type[1]: - self.query_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=out_c, - kernel_size=1, - bias=False) - self.query_conv.kaiming_init = True - - if self.attention_type[0] or self.attention_type[2]: - self.key_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=out_c, - kernel_size=1, - bias=False) - self.key_conv.kaiming_init = True - - self.v_dim = in_channels // num_heads - self.value_conv = nn.Conv2d( - in_channels=in_channels, - out_channels=self.v_dim * num_heads, - kernel_size=1, - bias=False) - self.value_conv.kaiming_init = True - - if self.attention_type[1] or self.attention_type[3]: - self.appr_geom_fc_x = nn.Linear( - self.position_embedding_dim // 2, out_c, bias=False) - self.appr_geom_fc_x.kaiming_init = True - - self.appr_geom_fc_y = nn.Linear( - self.position_embedding_dim // 2, out_c, bias=False) - self.appr_geom_fc_y.kaiming_init = True - - if self.attention_type[2]: - stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) - appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv - self.appr_bias = nn.Parameter(appr_bias_value) - - if self.attention_type[3]: - stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) - geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv - self.geom_bias = nn.Parameter(geom_bias_value) - - self.proj_conv = nn.Conv2d( - in_channels=self.v_dim * num_heads, - out_channels=in_channels, - kernel_size=1, - bias=True) - self.proj_conv.kaiming_init = True - self.gamma = nn.Parameter(torch.zeros(1)) - - if self.spatial_range >= 0: - # only works when non local is after 3*3 conv - if in_channels == 256: - max_len = 84 - elif in_channels == 512: - max_len = 42 - - max_len_kv = int((max_len - 1.0) / self.kv_stride + 1) - local_constraint_map = np.ones( - (max_len, max_len, max_len_kv, max_len_kv), dtype=np.int) - for iy in range(max_len): - for ix in range(max_len): - local_constraint_map[ - iy, ix, - max((iy - self.spatial_range) // - self.kv_stride, 0):min((iy + self.spatial_range + - 1) // self.kv_stride + - 1, max_len), - max((ix - self.spatial_range) // - self.kv_stride, 0):min((ix + self.spatial_range + - 1) // self.kv_stride + - 1, max_len)] = 0 - - self.local_constraint_map = nn.Parameter( - torch.from_numpy(local_constraint_map).byte(), - requires_grad=False) - - if self.q_stride > 1: - self.q_downsample = nn.AvgPool2d( - kernel_size=1, stride=self.q_stride) - else: - self.q_downsample = None - - if self.kv_stride > 1: - self.kv_downsample = nn.AvgPool2d( - kernel_size=1, stride=self.kv_stride) - else: - self.kv_downsample = None - - self.init_weights() - - def get_position_embedding(self, - h, - w, - h_kv, - w_kv, - q_stride, - kv_stride, - device, - dtype, - feat_dim, - wave_length=1000): - # the default type 
of Tensor is float32, leading to type mismatch - # in fp16 mode. Cast it to support fp16 mode. - h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype) - h_idxs = h_idxs.view((h, 1)) * q_stride - - w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype) - w_idxs = w_idxs.view((w, 1)) * q_stride - - h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to( - device=device, dtype=dtype) - h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride - - w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to( - device=device, dtype=dtype) - w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride - - # (h, h_kv, 1) - h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) - h_diff *= self.position_magnitude - - # (w, w_kv, 1) - w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) - w_diff *= self.position_magnitude - - feat_range = torch.arange(0, feat_dim / 4).to( - device=device, dtype=dtype) - - dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype) - dim_mat = dim_mat**((4. / feat_dim) * feat_range) - dim_mat = dim_mat.view((1, 1, -1)) - - embedding_x = torch.cat( - ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) - - embedding_y = torch.cat( - ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) - - return embedding_x, embedding_y - - def forward(self, x_input): - num_heads = self.num_heads - - # use empirical_attention - if self.q_downsample is not None: - x_q = self.q_downsample(x_input) - else: - x_q = x_input - n, _, h, w = x_q.shape - - if self.kv_downsample is not None: - x_kv = self.kv_downsample(x_input) - else: - x_kv = x_input - _, _, h_kv, w_kv = x_kv.shape - - if self.attention_type[0] or self.attention_type[1]: - proj_query = self.query_conv(x_q).view( - (n, num_heads, self.qk_embed_dim, h * w)) - proj_query = proj_query.permute(0, 1, 3, 2) - - if self.attention_type[0] or self.attention_type[2]: - proj_key = self.key_conv(x_kv).view( - (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) - - if self.attention_type[1] or self.attention_type[3]: - position_embed_x, position_embed_y = self.get_position_embedding( - h, w, h_kv, w_kv, self.q_stride, self.kv_stride, - x_input.device, x_input.dtype, self.position_embedding_dim) - # (n, num_heads, w, w_kv, dim) - position_feat_x = self.appr_geom_fc_x(position_embed_x).\ - view(1, w, w_kv, num_heads, self.qk_embed_dim).\ - permute(0, 3, 1, 2, 4).\ - repeat(n, 1, 1, 1, 1) - - # (n, num_heads, h, h_kv, dim) - position_feat_y = self.appr_geom_fc_y(position_embed_y).\ - view(1, h, h_kv, num_heads, self.qk_embed_dim).\ - permute(0, 3, 1, 2, 4).\ - repeat(n, 1, 1, 1, 1) - - position_feat_x /= math.sqrt(2) - position_feat_y /= math.sqrt(2) - - # accelerate for saliency only - if (np.sum(self.attention_type) == 1) and self.attention_type[2]: - appr_bias = self.appr_bias.\ - view(1, num_heads, 1, self.qk_embed_dim).\ - repeat(n, 1, 1, 1) - - energy = torch.matmul(appr_bias, proj_key).\ - view(n, num_heads, 1, h_kv * w_kv) - - h = 1 - w = 1 - else: - # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for - if not self.attention_type[0]: - energy = torch.zeros( - n, - num_heads, - h, - w, - h_kv, - w_kv, - dtype=x_input.dtype, - device=x_input.device) - - # attention_type[0]: appr - appr - # attention_type[1]: appr - position - # attention_type[2]: bias - appr - # attention_type[3]: bias - position - if self.attention_type[0] or self.attention_type[2]: - if self.attention_type[0] and self.attention_type[2]: - appr_bias = self.appr_bias.\ - view(1, num_heads, 1, self.qk_embed_dim) - energy = torch.matmul(proj_query + 
appr_bias, proj_key).\ - view(n, num_heads, h, w, h_kv, w_kv) - - elif self.attention_type[0]: - energy = torch.matmul(proj_query, proj_key).\ - view(n, num_heads, h, w, h_kv, w_kv) - - elif self.attention_type[2]: - appr_bias = self.appr_bias.\ - view(1, num_heads, 1, self.qk_embed_dim).\ - repeat(n, 1, 1, 1) - - energy += torch.matmul(appr_bias, proj_key).\ - view(n, num_heads, 1, 1, h_kv, w_kv) - - if self.attention_type[1] or self.attention_type[3]: - if self.attention_type[1] and self.attention_type[3]: - geom_bias = self.geom_bias.\ - view(1, num_heads, 1, self.qk_embed_dim) - - proj_query_reshape = (proj_query + geom_bias).\ - view(n, num_heads, h, w, self.qk_embed_dim) - - energy_x = torch.matmul( - proj_query_reshape.permute(0, 1, 3, 2, 4), - position_feat_x.permute(0, 1, 2, 4, 3)) - energy_x = energy_x.\ - permute(0, 1, 3, 2, 4).unsqueeze(4) - - energy_y = torch.matmul( - proj_query_reshape, - position_feat_y.permute(0, 1, 2, 4, 3)) - energy_y = energy_y.unsqueeze(5) - - energy += energy_x + energy_y - - elif self.attention_type[1]: - proj_query_reshape = proj_query.\ - view(n, num_heads, h, w, self.qk_embed_dim) - proj_query_reshape = proj_query_reshape.\ - permute(0, 1, 3, 2, 4) - position_feat_x_reshape = position_feat_x.\ - permute(0, 1, 2, 4, 3) - position_feat_y_reshape = position_feat_y.\ - permute(0, 1, 2, 4, 3) - - energy_x = torch.matmul(proj_query_reshape, - position_feat_x_reshape) - energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) - - energy_y = torch.matmul(proj_query_reshape, - position_feat_y_reshape) - energy_y = energy_y.unsqueeze(5) - - energy += energy_x + energy_y - - elif self.attention_type[3]: - geom_bias = self.geom_bias.\ - view(1, num_heads, self.qk_embed_dim, 1).\ - repeat(n, 1, 1, 1) - - position_feat_x_reshape = position_feat_x.\ - view(n, num_heads, w*w_kv, self.qk_embed_dim) - - position_feat_y_reshape = position_feat_y.\ - view(n, num_heads, h * h_kv, self.qk_embed_dim) - - energy_x = torch.matmul(position_feat_x_reshape, geom_bias) - energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) - - energy_y = torch.matmul(position_feat_y_reshape, geom_bias) - energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) - - energy += energy_x + energy_y - - energy = energy.view(n, num_heads, h * w, h_kv * w_kv) - - if self.spatial_range >= 0: - cur_local_constraint_map = \ - self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ - contiguous().\ - view(1, 1, h*w, h_kv*w_kv) - - energy = energy.masked_fill_(cur_local_constraint_map, - float('-inf')) - - attention = F.softmax(energy, 3) - - proj_value = self.value_conv(x_kv) - proj_value_reshape = proj_value.\ - view((n, num_heads, self.v_dim, h_kv * w_kv)).\ - permute(0, 1, 3, 2) - - out = torch.matmul(attention, proj_value_reshape).\ - permute(0, 1, 3, 2).\ - contiguous().\ - view(n, self.v_dim * self.num_heads, h, w) - - out = self.proj_conv(out) - - # output is downsampled, upsample back to input size - if self.q_downsample is not None: - out = F.interpolate( - out, - size=x_input.shape[2:], - mode='bilinear', - align_corners=False) - - out = self.gamma * out + x_input - return out - - def init_weights(self): - for m in self.modules(): - if hasattr(m, 'kaiming_init') and m.kaiming_init: - kaiming_init( - m, - mode='fan_in', - nonlinearity='leaky_relu', - bias=0, - distribution='uniform', - a=1) diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/anchor/point_generator.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/anchor/point_generator.py deleted file mode 
100644 index e6fbd988c317992c092c68c827dc4c53223b4a4a..0000000000000000000000000000000000000000 --- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet/core/anchor/point_generator.py +++ /dev/null @@ -1,37 +0,0 @@ -import torch - -from .builder import ANCHOR_GENERATORS - - -@ANCHOR_GENERATORS.register_module() -class PointGenerator(object): - - def _meshgrid(self, x, y, row_major=True): - xx = x.repeat(len(y)) - yy = y.view(-1, 1).repeat(1, len(x)).view(-1) - if row_major: - return xx, yy - else: - return yy, xx - - def grid_points(self, featmap_size, stride=16, device='cuda'): - feat_h, feat_w = featmap_size - shift_x = torch.arange(0., feat_w, device=device) * stride - shift_y = torch.arange(0., feat_h, device=device) * stride - shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) - stride = shift_x.new_full((shift_xx.shape[0], ), stride) - shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) - all_points = shifts.to(device) - return all_points - - def valid_flags(self, featmap_size, valid_size, device='cuda'): - feat_h, feat_w = featmap_size - valid_h, valid_w = valid_size - assert valid_h <= feat_h and valid_w <= feat_w - valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) - valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) - valid_x[:valid_w] = 1 - valid_y[:valid_h] = 1 - valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) - valid = valid_xx & valid_yy - return valid diff --git a/spaces/Rominn/vits-uma-genshin-honkai/commons.py b/spaces/Rominn/vits-uma-genshin-honkai/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/Rominn/vits-uma-genshin-honkai/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm diff --git a/spaces/SAMControlNet/SyntheticDataSAM/README.md b/spaces/SAMControlNet/SyntheticDataSAM/README.md deleted file mode 100644 index bb2a027d421f6ff9b80f5583adeeaa601689549f..0000000000000000000000000000000000000000 --- a/spaces/SAMControlNet/SyntheticDataSAM/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: WildSynth (ControlNet+SAM -emoji: 🦬 -colorFrom: blue -colorTo: green -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/text/__init__.py b/spaces/SQSora/VITS-Umamusume-voice-synthesizer/text/__init__.py deleted file mode 100644 index 4e69c354dd24e3243980236eca962cd5945a92fc..0000000000000000000000000000000000000000 --- a/spaces/SQSora/VITS-Umamusume-voice-synthesizer/text/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -""" from https://github.com/keithito/tacotron """ -from text import cleaners - - -def text_to_sequence(text, symbols, cleaner_names): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - cleaner_names: names of the cleaner functions to run the text through - Returns: - List of integers corresponding to the symbols in the text - ''' - _symbol_to_id = {s: i for i, s in enumerate(symbols)} - - sequence = [] - - clean_text = _clean_text(text, cleaner_names) - for symbol in clean_text: - if symbol not in _symbol_to_id.keys(): - continue - symbol_id = _symbol_to_id[symbol] - sequence += [symbol_id] - return sequence - - -def _clean_text(text, cleaner_names): - for name in cleaner_names: - cleaner = getattr(cleaners, name) - if not cleaner: - raise Exception('Unknown cleaner: %s' % name) - text = cleaner(text) - return text diff --git a/spaces/Sakil/question_answering_app/README.md b/spaces/Sakil/question_answering_app/README.md deleted file mode 100644 index dfd859bb4ac38dd9778c2b42750237a9a3a5ae48..0000000000000000000000000000000000000000 --- a/spaces/Sakil/question_answering_app/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Question_answering_app -emoji: 🐢 -colorFrom: red -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. 
- -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/Sarath2002/YouTube_Video_Summarizer/support.py b/spaces/Sarath2002/YouTube_Video_Summarizer/support.py deleted file mode 100644 index 892dbdcdebd4ef947c1c61b47bba2cd3a8ed82a2..0000000000000000000000000000000000000000 --- a/spaces/Sarath2002/YouTube_Video_Summarizer/support.py +++ /dev/null @@ -1,72 +0,0 @@ -from youtube_transcript_api import YouTubeTranscriptApi -from youtube_transcript_api.formatters import TextFormatter -from transformers import BartForConditionalGeneration, BartTokenizer -from sumy.parsers.plaintext import PlaintextParser -from sumy.nlp.tokenizers import Tokenizer -from sumy.summarizers.text_rank import TextRankSummarizer -import torch - -def get_vidid(url): - if "youtu.be" in url: - url=url.replace("https://youtu.be/","") - - else: - url=url.replace("https://www.youtube.com/watch?v=", '') - - - return url - - -def vid_transcript(video): - - transcript = YouTubeTranscriptApi.get_transcript(video) - formatter = TextFormatter() - text_formatted = formatter.format_transcript(transcript) - with open('plaintext.txt', 'w', encoding='utf-8') as file: - file.write(text_formatted) - - - -def ext_summarizer(path): - - language = "english" - word_limit = 1500 - - with open(path, "r", encoding="utf-8") as file: - text = file.read() - - - summarizer = TextRankSummarizer() - parser = PlaintextParser.from_string(text, Tokenizer(language)) - summary = summarizer(parser.document, word_limit) - - summary_text = " ".join(str(sentence) for sentence in summary) - - return summary_text - - -def abs_summarizer(t, max_length): - - - tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') - model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn') - - text_input_ids = tokenizer.batch_encode_plus([t], return_tensors='pt', max_length=max_length)['input_ids'] - summary_ids = model.generate(text_input_ids, num_beams=10, max_length=max_length, min_length=30) - summary_txt = tokenizer.decode(summary_ids.squeeze(), skip_special_tokens=True) - return summary_txt - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/spaces/Saturdays/mamamIA/app.py b/spaces/Saturdays/mamamIA/app.py deleted file mode 100644 index 6f59145a90784e9bba02ad37493c80172ea56efa..0000000000000000000000000000000000000000 --- a/spaces/Saturdays/mamamIA/app.py +++ /dev/null @@ -1,141 +0,0 @@ -from numpy import dtype -import streamlit as st -import pandas as pd -from sklearn.preprocessing import StandardScaler -import numpy as np -import joblib as jl - - -# VALORES POR DEFECTO QUE INDICAN CELULAS NO CANCEROSAS -# radius_mean 14.12 -# texture_mean 19.28 -# perimeter_mean 91.96 -# area_mean 551,17 -# compactness_mean 0.0092 -# concavity_mean 0.061 -# concave_points_mean 0.033 -# area_se 24.5 -# radius_worst 14.97 -# texture_worst 25.41 -# perimeter_worst 97.6 -# area_worst 686.5 -# smoothness_worst 0.1313 -# compactness_worst 0.20 -# concavity_worst 0.22 -# concave points_worst 0.09 - - -col=['radius_mean', 'texture_mean', 'perimeter_mean', - 'area_mean', 'compactness_mean', 'concavity_mean', - 'concave points_mean', 'area_se', 'radius_worst', 
'texture_worst', - 'perimeter_worst', 'area_worst', 'smoothness_worst', - 'compactness_worst', 'concavity_worst', 'concave points_worst'] - - -modnames=['mlp_final.pkl','svm_final.pkl','lr_final.pkl'] - -#@st.cache -def getScaler(): - # Cargo el dataset para poder normalizar los valores recogidos en el formulario - print ("cargando dataset") - data=pd.read_csv('https://raw.githubusercontent.com/gitmecalvon/mamamIA/main/resources/data/cleaned/train_web.csv',sep=';') - print("dataset cargado") - scaler = StandardScaler() - scaler.fit(data) - return scaler - - -# cargandolos para poder usarlos desde un sidebar si da tiempo -def cargaModelos (indice): - print('Preparando el guardado de Modelos ' ) - modelo=jl.load(modnames[indice]) - return modelo - -def interpreta (prediccion): - respuesta ="Los datos introducidos pronostican que son células de tipo " - if prediccion ==1: - respuesta= respuesta + "Maligno" - else: - respuesta= respuesta + "BENIGNO" - return respuesta - - -def contruyeFormulario(): - - # st.set_page_config(layout="wide") - - st.title("Mama mIA") - st.markdown('<style>body{background-color: Black;}</style>',unsafe_allow_html=True) - html_temp = """ <div style ="background-color:Pink;padding:13px"> - <h1 style ="color:black;text-align:center;">Algoritmo de ayuda a la predicción diagnóstica del Cáncer de mama</h1> - </div>""" - st.markdown(html_temp, unsafe_allow_html = True) - - st.subheader("Por favor introduzca las medidas de la muestra") - form = st.form(key="formulario") - # col1, col2 = form.columns(2) # intento de dos columnas sin recurrir a html - # with col1: - radius_mean = form.number_input( label="Radio Promedio", min_value=0.00000, max_value=20.0,value=13.54, step=0.0001,format="%4f") - texture_mean = form.number_input(label="Textura Promedio", min_value=0.00000, max_value=36.0,value=14.36, step=0.0001,format="%4f") - perimeter_mean = form.number_input(label="Perímertro Promedio", min_value=0.00000, max_value=150.0,value=87.46, step=0.0001,format="%4f") - area_mean = form.number_input(label="Área Promedio", min_value=0.00000, max_value=1600.0,value=566.3, step=0.0001,format="%4f") - compactness_mean = form.number_input(label="Promedio de Compactabilidad", min_value=0.00000, max_value=1.0,value=0.08129, step=0.0001,format="%5f") - concavity_mean = form.number_input(label="Promedio de Concavidad", min_value=0.00000, max_value=1.0,value=0.06664, step=0.0001,format="%5f") - concave_points_mean = form.number_input(label="Puntos Cóncavos promedio", min_value=0.00000, max_value=1.0,value=0.04781, step=0.0001,format="%4f") - area_se = form.number_input(label="Area Error Estandar", min_value=0.00000, max_value=150.0,value=23.56, step=0.0001,format="%4f") - # with col2: - radius_worst = form.number_input(label="Radio worst ", min_value=0.00000, max_value=30.0,value=15.11, step=0.0001,format="%4f") - texture_worst= form.number_input(label="Textura worsk", min_value=0.00000, max_value=70.0,value=19.26, step=0.0001,format="%4f") - perimeter_worst = form.number_input(label="Perimetro worst", min_value=0.00000, max_value=99.70,value=0.0092, step=0.0001,format="%4f") - area_worst = form.number_input(label="Area ", min_value=0.00000, max_value=800.0,value=711.2, step=0.0001,format="%4f") - smoothness_worst = form.number_input(label="Suavidad worst", min_value=0.00000, max_value=1.0,value=0.144, step=0.0001,format="%4f") - compactness_worst = form.number_input(label="Compactabilidad worst", min_value=0.00000, max_value=2.0,value=0.1773, step=0.0001,format="%4f") - concavity_worst = 
form.number_input(label="Concavidad worst", min_value=0.00000, max_value=2.0,value=0.2390, step=0.0001,format="%4f") - concavepoints_worst = form.number_input(label="Puntos cóncavos worst", min_value=0.00000, max_value=2.0,value=0.1288, step=0.0001,format="%4f") - - submit = form.form_submit_button(label="Predicción") - - if submit: - # Escalamos los datos del formulario - scaler=getScaler() - nbnormaliz=scaler.transform ([[radius_mean, texture_mean, perimeter_mean ,area_mean , compactness_mean , concavity_mean , - concave_points_mean , area_se , radius_worst , texture_worst ,perimeter_worst , area_worst , smoothness_worst , - compactness_worst , concavity_worst , concavepoints_worst ]]) - - # Recuperamos el modelo - print ("cargando modelo") - print (modnames[2]) - algoritmo=cargaModelos(2) - - # Realizamos la prediccion - - print ("Preparando la prediccion...") - prediccion=algoritmo.predict (nbnormaliz) - print (prediccion) - st.write ("") - st.write (interpreta (prediccion)) - - -def main(): - - contruyeFormulario() - -if __name__ == '__main__': - main() - - - - - - - - - - - - - - - - - diff --git a/spaces/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net/app.py b/spaces/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net/app.py deleted file mode 100644 index 88dbdabc5ea2a00cedc87e8901a39407e0aa4442..0000000000000000000000000000000000000000 --- a/spaces/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net/app.py +++ /dev/null @@ -1,100 +0,0 @@ -import streamlit as st - -import tensorflow as tf -from PIL import Image -import numpy as np -import cv2 -from huggingface_hub import from_pretrained_keras - - -try: - model=from_pretrained_keras("SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net") -except: - model=tf.keras.models.load_model("dental_xray_seg.h5") - pass - -st.header("Segmentation of Teeth in Panoramic X-ray Image Using UNet") - -examples=["107.png","108.png","109.png"] -link='Check Out Our Github Repo ! 
[link](https://github.com/SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net)' -st.markdown(link,unsafe_allow_html=True) - - -def load_image(image_file): - img = Image.open(image_file) - return img - -def convert_one_channel(img): - #some images have 3 channels , although they are grayscale image - if len(img.shape)>2: - img= cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) - return img - else: - return img - -def convert_rgb(img): - #some images have 3 channels , although they are grayscale image - if len(img.shape)==2: - img= cv2.cvtColor(img,cv2.COLOR_GRAY2RGB) - return img - else: - return img - - -st.subheader("Upload Dental Panoramic X-ray Image Image") -image_file = st.file_uploader("Upload Images", type=["png","jpg","jpeg"]) - - -col1, col2, col3 = st.columns(3) -with col1: - ex=load_image(examples[0]) - st.image(ex,width=200) - if st.button('Example 1'): - image_file=examples[0] - -with col2: - ex1=load_image(examples[1]) - st.image(ex1,width=200) - if st.button('Example 2'): - image_file=examples[1] - - -with col3: - ex2=load_image(examples[2]) - st.image(ex2,width=200) - if st.button('Example 3'): - image_file=examples[2] - - -if image_file is not None: - - img=load_image(image_file) - - st.text("Making A Prediction ....") - st.image(img,width=850) - - img=np.asarray(img) - - img_cv=convert_one_channel(img) - img_cv=cv2.resize(img_cv,(512,512), interpolation=cv2.INTER_LANCZOS4) - img_cv=np.float32(img_cv/255) - - img_cv=np.reshape(img_cv,(1,512,512,1)) - prediction=model.predict(img_cv) - predicted=prediction[0] - predicted = cv2.resize(predicted, (img.shape[1],img.shape[0]), interpolation=cv2.INTER_LANCZOS4) - mask=np.uint8(predicted*255)# - _, mask = cv2.threshold(mask, thresh=0, maxval=255, type=cv2.THRESH_BINARY+cv2.THRESH_OTSU) - kernel =( np.ones((5,5), dtype=np.float32)) - mask=cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=1 ) - mask=cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel,iterations=1 ) - cnts,hieararch=cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) - output = cv2.drawContours(convert_rgb(img), cnts, -1, (255, 0, 0) , 3) - - - if output is not None : - st.subheader("Predicted Image") - st.write(output.shape) - st.image(output,width=850) - - st.text("DONE ! ....") diff --git a/spaces/Shad0ws/Chat-with-Files/embeddings.py b/spaces/Shad0ws/Chat-with-Files/embeddings.py deleted file mode 100644 index d7596d473dd2539e182058296e1f8844c0a37a22..0000000000000000000000000000000000000000 --- a/spaces/Shad0ws/Chat-with-Files/embeddings.py +++ /dev/null @@ -1,115 +0,0 @@ -"""Wrapper around OpenAI embedding models.""" -from typing import Any, Dict, List, Optional - -from pydantic import BaseModel, Extra, root_validator - -from langchain.embeddings.base import Embeddings -from langchain.utils import get_from_dict_or_env - -from tenacity import ( - retry, - retry_if_exception_type, - stop_after_attempt, - wait_exponential, -) -from openai.error import Timeout, APIError, APIConnectionError, RateLimitError - - -class OpenAIEmbeddings(BaseModel, Embeddings): - """Wrapper around OpenAI embedding models. - To use, you should have the ``openai`` python package installed, and the - environment variable ``OPENAI_API_KEY`` set with your API key or pass it - as a named parameter to the constructor. - Example: - .. 
code-block:: python - from langchain.embeddings import OpenAIEmbeddings - openai = OpenAIEmbeddings(openai_api_key="my-api-key") - """ - - client: Any #: :meta private: - document_model_name: str = "text-embedding-ada-002" - query_model_name: str = "text-embedding-ada-002" - openai_api_key: Optional[str] = None - - class Config: - """Configuration for this pydantic object.""" - - extra = Extra.forbid - - # TODO: deprecate this - @root_validator(pre=True, allow_reuse=True) - def get_model_names(cls, values: Dict) -> Dict: - """Get model names from just old model name.""" - if "model_name" in values: - if "document_model_name" in values: - raise ValueError( - "Both `model_name` and `document_model_name` were provided, " - "but only one should be." - ) - if "query_model_name" in values: - raise ValueError( - "Both `model_name` and `query_model_name` were provided, " - "but only one should be." - ) - model_name = values.pop("model_name") - values["document_model_name"] = f"text-search-{model_name}-doc-001" - values["query_model_name"] = f"text-search-{model_name}-query-001" - return values - - @root_validator(allow_reuse=True) - def validate_environment(cls, values: Dict) -> Dict: - """Validate that api key and python package exists in environment.""" - openai_api_key = get_from_dict_or_env( - values, "openai_api_key", "OPENAI_API_KEY" - ) - try: - import openai - - openai.api_key = openai_api_key - values["client"] = openai.Embedding - except ImportError: - raise ValueError( - "Could not import openai python package. " - "Please it install it with `pip install openai`." - ) - return values - - @retry( - reraise=True, - stop=stop_after_attempt(100), - wait=wait_exponential(multiplier=1, min=10, max=60), - retry=( - retry_if_exception_type(Timeout) - | retry_if_exception_type(APIError) - | retry_if_exception_type(APIConnectionError) - | retry_if_exception_type(RateLimitError) - ), - ) - def _embedding_func(self, text: str, *, engine: str) -> List[float]: - """Call out to OpenAI's embedding endpoint with exponential backoff.""" - # replace newlines, which can negatively affect performance. - text = text.replace("\n", " ") - return self.client.create(input=[text], engine=engine)["data"][0]["embedding"] - - def embed_documents(self, texts: List[str]) -> List[List[float]]: - """Call out to OpenAI's embedding endpoint for embedding search docs. - Args: - texts: The list of texts to embed. - Returns: - List of embeddings, one for each text. - """ - responses = [ - self._embedding_func(text, engine=self.document_model_name) - for text in texts - ] - return responses - - def embed_query(self, text: str) -> List[float]: - """Call out to OpenAI's embedding endpoint for embedding query text. - Args: - text: The text to embed. - Returns: - Embeddings for the text. 
- """ - embedding = self._embedding_func(text, engine=self.query_model_name) - return embedding \ No newline at end of file diff --git a/spaces/Shad0ws/imagetomusic/README.md b/spaces/Shad0ws/imagetomusic/README.md deleted file mode 100644 index abcacaf53e12be7ab17decb462f397c10688cfc1..0000000000000000000000000000000000000000 --- a/spaces/Shad0ws/imagetomusic/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Img to Music Video -emoji: ⚡ -colorFrom: red -colorTo: green -sdk: gradio -sdk_version: 3.10.1 -app_file: app.py -pinned: false -license: unknown -duplicated_from: doevent/msk ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SoUmNerd/RemoteMojo/Dockerfile b/spaces/SoUmNerd/RemoteMojo/Dockerfile deleted file mode 100644 index 9c4dda9e42b468a004526e94e94412da31f95c7f..0000000000000000000000000000000000000000 --- a/spaces/SoUmNerd/RemoteMojo/Dockerfile +++ /dev/null @@ -1,12 +0,0 @@ -FROM ubuntu:latest - -RUN apt-get update -RUN apt-get install -y curl && apt-get install -y python3 && apt-get install -y python3-pip -RUN pip install fastapi uvicorn -RUN curl https://get.modular.com | \ -MODULAR_AUTH=mut_e87f7861fb9a4d4aa311afb0491b0398 sh - - -RUN modular install mojo - -COPY . . -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] \ No newline at end of file diff --git a/spaces/Soumen/Text-Summarization-and-NLP-tasks/README.md b/spaces/Soumen/Text-Summarization-and-NLP-tasks/README.md deleted file mode 100644 index a0b6af331bfda7d9b8d861c3c6743f74a5d33aa8..0000000000000000000000000000000000000000 --- a/spaces/Soumen/Text-Summarization-and-NLP-tasks/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Text Summarization And NLP Tasks -emoji: 🏢 -colorFrom: purple -colorTo: red -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -license: bsd ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/SoundreameR/craiyon-exploration/README.md b/spaces/SoundreameR/craiyon-exploration/README.md deleted file mode 100644 index 5342f9537e7fdaff8762e3f8b2c0e23f6886452a..0000000000000000000000000000000000000000 --- a/spaces/SoundreameR/craiyon-exploration/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Craiyon Exploration -emoji: 🏃 -colorFrom: green -colorTo: purple -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/StephanST/WALDOonline/run_local_onnx_largeinput_tiled_process.py b/spaces/StephanST/WALDOonline/run_local_onnx_largeinput_tiled_process.py deleted file mode 100644 index 18772d9639e9d0dd34bceff6cb400fc6cc1e35f2..0000000000000000000000000000000000000000 --- a/spaces/StephanST/WALDOonline/run_local_onnx_largeinput_tiled_process.py +++ /dev/null @@ -1,196 +0,0 @@ -import math -import cv2 -import time -import requests -import random -import numpy as np -import onnxruntime as ort -from PIL import Image -from pathlib import Path -from collections import OrderedDict,namedtuple -import re - - -def get_resolution_from_model_path(model_path): - resolution = re.search(r"(\d+)px", model_path) - if resolution: - return int(resolution.group(1)) - return None - - - - -def letterbox(im, new_shape=(960, 960), color=(114, 114, 114), auto=True, scaleup=True, stride=32): - # Resize and pad image while meeting stride-multiple constraints - shape = im.shape[:2] # current shape [height, width] - if isinstance(new_shape, int): 
- new_shape = (new_shape, new_shape) - - # Scale ratio (new / old) - r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - if not scaleup: # only scale down, do not scale up (for better val mAP) - r = min(r, 1.0) - - # Compute padding - new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - - if auto: # minimum rectangle - dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - - dw /= 2 # divide padding into 2 sides - dh /= 2 - - if shape[::-1] != new_unpad: # resize - im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - return im, r, (dw, dh) - -def split_image(image, tile_size=(960, 960), padding=(0, 0)): - height, width, _ = image.shape - tile_height, tile_width = tile_size - pad_height, pad_width = padding - - # Calculate the number of tiles needed in each dimension - num_tiles_x = math.ceil(width / tile_width) - num_tiles_y = math.ceil(height / tile_height) - - # Pad the image to ensure it's divisible by the tile size - padded_image = cv2.copyMakeBorder( - image, - 0, - tile_height * num_tiles_y - height + pad_height * 2, - 0, - tile_width * num_tiles_x - width + pad_width * 2, - cv2.BORDER_CONSTANT, - value=(114, 114, 114), - ) - - # Split the image into tiles - tiles = [] - for y in range(num_tiles_y): - for x in range(num_tiles_x): - tile = padded_image[ - y * tile_height : (y + 1) * tile_height + pad_height * 2, - x * tile_width : (x + 1) * tile_width + pad_width * 2, - :, - ] - tiles.append(((x, y), tile)) - - return tiles, padded_image.shape[:2] - -def merge_tiles(tiles, output_shape, padding=(0, 0)): - tile_height, tile_width = tiles[0][1].shape[:2] - num_tiles_x = output_shape[1] // (tile_width - 2 * padding[1]) - num_tiles_y = output_shape[0] // (tile_height - 2 * padding[0]) - - merged_image = np.zeros((*output_shape, 3), dtype=np.uint8) - - for (x, y), tile in tiles: - tile_no_padding = tile[padding[0] : -padding[0], padding[1] : -padding[1], :] - merged_image[ - y * (tile_height - 2 * padding[0]) : (y + 1) * (tile_height - 2 * padding[0]), - x * (tile_width - 2 * padding[1]) : (x + 1) * (tile_width - 2 * padding[1]), - :, - ] = tile_no_padding - - return merged_image - - -def process_large_image(image, model): - - - # set cuda = true if you have an NVIDIA GPU - cuda = False - - w = model - - if "ppp" in model: - names = ['solarpanels', 'pool'] - else: - names = ['car', 'van', 'truck', 'building', 'human', 'gastank', 'digger', 'container', 'bus', 'pylon', 'boat', 'bike'] - - - colors = {name:[random.randint(0, 255) for _ in range(3)] for i,name in enumerate(names)} - - img = image - - providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] - session = ort.InferenceSession(w, providers=providers) - - outname = [i.name for i in session.get_outputs()] - outname - - inname = [i.name for i in session.get_inputs()] - inname - - # Load the image and split it into tiles - resolution = get_resolution_from_model_path(model) - if resolution is None: - print("Warning: Model resolution not found in the model path. 
Defaulting to 960px.") - resolution = 960 - tile_size = (resolution, resolution) - padding = (32, 32) - tiles, padded_shape = split_image(image, tile_size=tile_size, padding=padding) - - # Initialize a dictionary to store the count of each category - category_count = {name: 0 for name in names} - - # Process each tile with the ONNX model - processed_tiles = [] - for i, (tile_idx, tile) in enumerate(tiles): - image = tile.copy() - image, ratio, dwdh = letterbox(image, new_shape=tile_size, auto=False) - image = image.transpose((2, 0, 1)) - image = np.expand_dims(image, 0) - image = np.ascontiguousarray(image) - - im = image.astype(np.float32) - im /= 255 - - inp = {inname[0]: im} - outputs = session.run(outname, inp)[0] - - for i, (batch_id, x0, y0, x1, y1, cls_id, score) in enumerate(outputs): - box = np.array([x0, y0, x1, y1]) - box -= np.array(dwdh * 2) - box /= ratio - box = box.round().astype(np.int32).tolist() - cls_id = int(cls_id) - score = round(float(score), 3) - name = names[cls_id] - color = colors[name] - name += ' ' + str(score) - cv2.rectangle(tile, box[:2], box[2:], color, 2) - cv2.putText(tile, name, (box[0], box[1] - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.75, [225, 255, 255], thickness=2) - - # Update the count for the detected category - category_count[name.split()[0]] += 1 - - processed_tiles.append((tile_idx, tile)) - - # Merge the processed tiles back into the original image - merged_image = merge_tiles(processed_tiles, padded_shape, padding=padding) - - # Remove padding from the merged image to get the final output - final_image = merged_image[: img.shape[0], : img.shape[1], :] - - # Convert color space from RGB to BGR - final_image_bgr = cv2.cvtColor(final_image, cv2.COLOR_RGB2BGR) - - # # Save the final image - # cv2.imwrite('./Columbus_out.jpg', final_image) - - outputs_array = [] - # Print the total count of each class - print("Total count of each class:") - for name, count in category_count.items(): - print(f"{name}: {count}") - outputs_array.append(f"{name}: {count}") - - return final_image, str(outputs_array) - - - diff --git a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/utils/samples/__init__.py b/spaces/SuYuanS/AudioCraft_Plus/audiocraft/utils/samples/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/SuYuanS/AudioCraft_Plus/audiocraft/utils/samples/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. diff --git a/spaces/Sudhansu/07GR-NLP-Seq2Seq-AutoQA/app.py b/spaces/Sudhansu/07GR-NLP-Seq2Seq-AutoQA/app.py deleted file mode 100644 index c1cd92499cf1c7d2a91b4dc226bf2d558ff67661..0000000000000000000000000000000000000000 --- a/spaces/Sudhansu/07GR-NLP-Seq2Seq-AutoQA/app.py +++ /dev/null @@ -1,51 +0,0 @@ -import gradio as gr -from qasrl_model_pipeline import QASRL_Pipeline - -models = ["kleinay/qanom-seq2seq-model-baseline", - "kleinay/qanom-seq2seq-model-joint"] -pipelines = {model: QASRL_Pipeline(model) for model in models} - - -description = f"""Using Seq2Seq T5 model which takes a sequence of items and outputs another sequence this model generates Questions and Answers (QA) with focus on Semantic Role Labeling (SRL)""" -title="Seq2Seq T5 Questions and Answers (QA) with Semantic Role Labeling (SRL)" -examples = [[models[0], "In March and April the patient <p> had two falls. 
One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", True, "fall"], - [models[1], "In March and April the patient had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions <p> like anaphylaxis and shortness of breath.", True, "reactions"], - [models[0], "In March and April the patient had two falls. One was related <p> to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", True, "relate"], - [models[1], "In March and April the patient <p> had two falls. One was related to asthma, heart palpitations. The second was due to syncope and post covid vaccination dizziness during exercise. The patient is now getting an EKG. Former EKG had shown that there was a bundle branch block. Patient had some uncontrolled immune system reactions like anaphylaxis and shortness of breath.", False, "fall"]] - -input_sent_box_label = "Insert sentence here. Mark the predicate by adding the token '<p>' before it." -verb_form_inp_placeholder = "e.g. 'decide' for the nominalization 'decision', 'teach' for 'teacher', etc." -links = """<p style='text-align: center'> -<a href='https://www.qasrl.org' target='_blank'>QASRL Website</a> | <a href='https://huggingface.co/kleinay/qanom-seq2seq-model-baseline' target='_blank'>Model Repo at Huggingface Hub</a> -</p>""" -def call(model_name, sentence, is_nominal, verb_form): - predicate_marker="<p>" - if predicate_marker not in sentence: - raise ValueError("You must highlight one word of the sentence as a predicate using preceding '<p>'.") - - if not verb_form: - if is_nominal: - raise ValueError("You should provide the verbal form of the nominalization") - - toks = sentence.split(" ") - pred_idx = toks.index(predicate_marker) - predicate = toks(pred_idx+1) - verb_form=predicate - pipeline = pipelines[model_name] - pipe_out = pipeline([sentence], - predicate_marker=predicate_marker, - predicate_type="nominal" if is_nominal else "verbal", - verb_form=verb_form)[0] - return pipe_out["QAs"], pipe_out["generated_text"] -iface = gr.Interface(fn=call, - inputs=[gr.inputs.Radio(choices=models, default=models[0], label="Model"), - gr.inputs.Textbox(placeholder=input_sent_box_label, label="Sentence", lines=4), - gr.inputs.Checkbox(default=True, label="Is Nominalization?"), - gr.inputs.Textbox(placeholder=verb_form_inp_placeholder, label="Verbal form (for nominalizations)", default='')], - outputs=[gr.outputs.JSON(label="Model Output - QASRL"), gr.outputs.Textbox(label="Raw output sequence")], - title=title, - description=description, - article=links, - examples=examples ) - -iface.launch() \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/numpy_backend.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/numpy_backend.py deleted file mode 100644 
index 30d50cc0174859bde97552042f9154b9e68d538b..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/computation/numpy_backend.py +++ /dev/null @@ -1,275 +0,0 @@ -import warnings -from typing import Any, List, Optional, Tuple - -import numpy as np - -from docarray.computation import AbstractComputationalBackend -from docarray.computation.abstract_numpy_based_backend import AbstractNumpyBasedBackend - - -def _expand_if_single_axis(*matrices: np.ndarray) -> List[np.ndarray]: - """Expands arrays that only have one axis, at dim 0. - This ensures that all outputs can be treated as matrices, not vectors. - - :param matrices: Matrices to be expanded - :return: List of the input matrices, - where single axis matrices are expanded at dim 0. - """ - expanded = [] - for m in matrices: - if len(m.shape) == 1: - expanded.append(np.expand_dims(m, axis=0)) - else: - expanded.append(m) - return expanded - - -def _expand_if_scalar(arr: np.ndarray) -> np.ndarray: - if len(arr.shape) == 0: # avoid scalar output - arr = np.expand_dims(arr, axis=0) - return arr - - -def identity(array: np.ndarray) -> np.ndarray: - return array - - -class NumpyCompBackend(AbstractNumpyBasedBackend): - """ - Computational backend for Numpy. - """ - - _module = np - _cast_output = identity - _get_tensor = identity - - @classmethod - def to_device(cls, tensor: 'np.ndarray', device: str) -> 'np.ndarray': - """Move the tensor to the specified device.""" - raise NotImplementedError('Numpy does not support devices (GPU).') - - @classmethod - def device(cls, tensor: 'np.ndarray') -> Optional[str]: - """Return device on which the tensor is allocated.""" - return None - - @classmethod - def to_numpy(cls, array: 'np.ndarray') -> 'np.ndarray': - return array - - @classmethod - def none_value(cls) -> Any: - """Provide a compatible value that represents None in numpy.""" - return None - - @classmethod - def detach(cls, tensor: 'np.ndarray') -> 'np.ndarray': - """ - Returns the tensor detached from its current graph. - - :param tensor: tensor to be detached - :return: a detached tensor with the same data. - """ - return tensor - - @classmethod - def dtype(cls, tensor: 'np.ndarray') -> np.dtype: - """Get the data type of the tensor.""" - return tensor.dtype - - @classmethod - def minmax_normalize( - cls, - tensor: 'np.ndarray', - t_range: Tuple = (0, 1), - x_range: Optional[Tuple] = None, - eps: float = 1e-7, - ) -> 'np.ndarray': - """ - Normalize values in `tensor` into `t_range`. - - `tensor` can be a 1D array or a 2D array. When `tensor` is a 2D array, then - normalization is row-based. - - !!! note - - - with `t_range=(0, 1)` will normalize the min-value of data to 0, max to 1; - - with `t_range=(1, 0)` will normalize the min-value of data to 1, max value - of the data to 0. - - :param tensor: the data to be normalized - :param t_range: a tuple represents the target range. - :param x_range: a tuple represents tensors range. 
- :param eps: a small jitter to avoid divide by zero - :return: normalized data in `t_range` - """ - a, b = t_range - - min_d = x_range[0] if x_range else np.min(tensor, axis=-1, keepdims=True) - max_d = x_range[1] if x_range else np.max(tensor, axis=-1, keepdims=True) - r = (b - a) * (tensor - min_d) / (max_d - min_d + eps) + a - - return np.clip(r, *((a, b) if a < b else (b, a))) - - class Retrieval(AbstractComputationalBackend.Retrieval[np.ndarray]): - """ - Abstract class for retrieval and ranking functionalities - """ - - @staticmethod - def top_k( - values: 'np.ndarray', - k: int, - descending: bool = False, - device: Optional[str] = None, - ) -> Tuple['np.ndarray', 'np.ndarray']: - """ - Retrieves the top k smallest values in `values`, - and returns them alongside their indices in the input `values`. - Can also be used to retrieve the top k largest values, - by setting the `descending` flag. - - :param values: Torch tensor of values to rank. - Should be of shape (n_queries, n_values_per_query). - Inputs of shape (n_values_per_query,) will be expanded - to (1, n_values_per_query). - :param k: number of values to retrieve - :param descending: retrieve largest values instead of smallest values - :param device: Not supported for this backend - :return: Tuple containing the retrieved values, and their indices. - Both ar of shape (n_queries, k) - """ - if device is not None: - warnings.warn('`device` is not supported for numpy operations') - - if len(values.shape) == 1: - values = np.expand_dims(values, axis=0) - - if descending: - values = -values - - if k >= values.shape[1]: - idx = values.argsort(axis=1)[:, :k] - values = np.take_along_axis(values, idx, axis=1) - else: - idx_ps = values.argpartition(kth=k, axis=1)[:, :k] - values = np.take_along_axis(values, idx_ps, axis=1) - idx_fs = values.argsort(axis=1) - idx = np.take_along_axis(idx_ps, idx_fs, axis=1) - values = np.take_along_axis(values, idx_fs, axis=1) - - if descending: - values = -values - - return values, idx - - class Metrics(AbstractComputationalBackend.Metrics[np.ndarray]): - """ - Abstract base class for metrics (distances and similarities). - """ - - @staticmethod - def cosine_sim( - x_mat: np.ndarray, - y_mat: np.ndarray, - eps: float = 1e-7, - device: Optional[str] = None, - ) -> np.ndarray: - """Pairwise cosine similarities between all vectors in x_mat and y_mat. - - :param x_mat: np.ndarray of shape (n_vectors, n_dim), where n_vectors is - the number of vectors and n_dim is the number of dimensions of each - example. - :param y_mat: np.ndarray of shape (n_vectors, n_dim), where n_vectors is - the number of vectors and n_dim is the number of dimensions of each - example. - :param eps: a small jitter to avoid divde by zero - :param device: Not supported for this backend - :return: np.ndarray of shape (n_vectors, n_vectors) containing all - pairwise cosine distances. - The index [i_x, i_y] contains the cosine distance between - x_mat[i_x] and y_mat[i_y]. - """ - if device is not None: - warnings.warn('`device` is not supported for numpy operations') - - x_mat, y_mat = _expand_if_single_axis(x_mat, y_mat) - - sims = np.clip( - (np.dot(x_mat, y_mat.T) + eps) - / ( - np.outer( - np.linalg.norm(x_mat, axis=1), np.linalg.norm(y_mat, axis=1) - ) - + eps - ), - -1, - 1, - ).squeeze() - return _expand_if_scalar(sims) - - @classmethod - def euclidean_dist( - cls, x_mat: np.ndarray, y_mat: np.ndarray, device: Optional[str] = None - ) -> np.ndarray: - """Pairwise Euclidian distances between all vectors in x_mat and y_mat. 
- - :param x_mat: np.ndarray of shape (n_vectors, n_dim), where n_vectors is - the number of vectors and n_dim is the number of dimensions of each - example. - :param y_mat: np.ndarray of shape (n_vectors, n_dim), where n_vectors is - the number of vectors and n_dim is the number of dimensions of each - example. - :param eps: a small jitter to avoid divde by zero - :param device: Not supported for this backend - :return: np.ndarray of shape (n_vectors, n_vectors) containing all - pairwise euclidian distances. - The index [i_x, i_y] contains the euclidian distance between - x_mat[i_x] and y_mat[i_y]. - """ - if device is not None: - warnings.warn('`device` is not supported for numpy operations') - - x_mat, y_mat = _expand_if_single_axis(x_mat, y_mat) - - return _expand_if_scalar( - np.sqrt(cls.sqeuclidean_dist(x_mat, y_mat)).squeeze() - ) - - @staticmethod - def sqeuclidean_dist( - x_mat: np.ndarray, - y_mat: np.ndarray, - device: Optional[str] = None, - ) -> np.ndarray: - """Pairwise Squared Euclidian distances between all vectors in - x_mat and y_mat. - - :param x_mat: np.ndarray of shape (n_vectors, n_dim), where n_vectors is - the number of vectors and n_dim is the number of dimensions of each - example. - :param y_mat: np.ndarray of shape (n_vectors, n_dim), where n_vectors is - the number of vectors and n_dim is the number of dimensions of each - example. - :param device: Not supported for this backend - :return: np.ndarray of shape (n_vectors, n_vectors) containing all - pairwise Squared Euclidian distances. - The index [i_x, i_y] contains the cosine Squared Euclidian between - x_mat[i_x] and y_mat[i_y]. - """ - eps: float = 1e-7 # avoid problems with numerical inaccuracies - - if device is not None: - warnings.warn('`device` is not supported for numpy operations') - - x_mat, y_mat = _expand_if_single_axis(x_mat, y_mat) - - dists = ( - np.sum(y_mat**2, axis=1) - + np.sum(x_mat**2, axis=1)[:, np.newaxis] - - 2 * np.dot(x_mat, y_mat.T) - ).squeeze() - - # remove numerical artifacts - dists = np.where(np.logical_and(dists < 0, dists > -eps), 0, dists) - return _expand_if_scalar(dists) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/ndarray.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/ndarray.py deleted file mode 100644 index 18e84050a25554eca73918e01c2ba63c182cf25e..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/docarray/typing/tensor/ndarray.py +++ /dev/null @@ -1,202 +0,0 @@ -from typing import TYPE_CHECKING, Any, Generic, List, Tuple, Type, TypeVar, Union, cast - -import numpy as np - -from docarray.typing.proto_register import _register_proto -from docarray.typing.tensor.abstract_tensor import AbstractTensor - -if TYPE_CHECKING: - from pydantic import BaseConfig - from pydantic.fields import ModelField - - from docarray.computation.numpy_backend import NumpyCompBackend - from docarray.proto import NdArrayProto - -from docarray.base_doc.base_node import BaseNode - -T = TypeVar('T', bound='NdArray') -ShapeT = TypeVar('ShapeT') - -tensor_base: type = type(BaseNode) - - -# the mypy error suppression below should not be necessary anymore once the following -# is released in mypy: https://github.com/python/mypy/pull/14135 -class metaNumpy(AbstractTensor.__parametrized_meta__, tensor_base): # type: ignore - pass - - -@_register_proto(proto_type_name='ndarray') -class NdArray(np.ndarray, AbstractTensor, Generic[ShapeT]): - """ - Subclass of `np.ndarray`, 
intended for use in a Document. - This enables (de)serialization from/to protobuf and json, data validation, - and coersion from compatible types like `torch.Tensor`. - - This type can also be used in a parametrized way, specifying the shape of the array. - - --- - - ```python - from docarray import BaseDoc - from docarray.typing import NdArray - import numpy as np - - - class MyDoc(BaseDoc): - arr: NdArray - image_arr: NdArray[3, 224, 224] - square_crop: NdArray[3, 'x', 'x'] - random_image: NdArray[3, ...] # first dimension is fixed, can have arbitrary shape - - - # create a document with tensors - doc = MyDoc( - arr=np.zeros((128,)), - image_arr=np.zeros((3, 224, 224)), - square_crop=np.zeros((3, 64, 64)), - random_image=np.zeros((3, 128, 256)), - ) - assert doc.image_arr.shape == (3, 224, 224) - - # automatic shape conversion - doc = MyDoc( - arr=np.zeros((128,)), - image_arr=np.zeros((224, 224, 3)), # will reshape to (3, 224, 224) - square_crop=np.zeros((3, 128, 128)), - random_image=np.zeros((3, 64, 128)), - ) - assert doc.image_arr.shape == (3, 224, 224) - - # !! The following will raise an error due to shape mismatch !! - from pydantic import ValidationError - - try: - doc = MyDoc( - arr=np.zeros((128,)), - image_arr=np.zeros((224, 224)), # this will fail validation - square_crop=np.zeros((3, 128, 64)), # this will also fail validation - random_image=np.zeros((4, 64, 128)), # this will also fail validation - ) - except ValidationError as e: - pass - ``` - - --- - """ - - __parametrized_meta__ = metaNumpy - - @classmethod - def __get_validators__(cls): - # one or more validators may be yielded which will be called in the - # order to validate the input, each validator will receive as an input - # the value returned from the previous validator - yield cls.validate - - @classmethod - def validate( - cls: Type[T], - value: Union[T, np.ndarray, List[Any], Tuple[Any], Any], - field: 'ModelField', - config: 'BaseConfig', - ) -> T: - if isinstance(value, np.ndarray): - return cls._docarray_from_native(value) - elif isinstance(value, NdArray): - return cast(T, value) - elif isinstance(value, list) or isinstance(value, tuple): - try: - arr_from_list: np.ndarray = np.asarray(value) - return cls._docarray_from_native(arr_from_list) - except Exception: - pass # handled below - else: - try: - arr: np.ndarray = np.ndarray(value) - return cls._docarray_from_native(arr) - except Exception: - pass # handled below - raise ValueError(f'Expected a numpy.ndarray compatible type, got {type(value)}') - - @classmethod - def _docarray_from_native(cls: Type[T], value: np.ndarray) -> T: - if cls.__unparametrizedcls__: # This is not None if the tensor is parametrized - return cast(T, value.view(cls.__unparametrizedcls__)) - return value.view(cls) - - def _docarray_to_json_compatible(self) -> np.ndarray: - """ - Convert `NdArray` into a json compatible object - :return: a representation of the tensor compatible with orjson - """ - return self.unwrap() - - def unwrap(self) -> np.ndarray: - """ - Return the original ndarray without any memory copy. - - The original view rest intact and is still a Document `NdArray` - but the return object is a pure `np.ndarray` but both object share - the same memory layout. 
- - --- - - ```python - from docarray.typing import NdArray - import numpy as np - - t1 = NdArray.validate(np.zeros((3, 224, 224)), None, None) - # here t1 is a docarray NdArray - t2 = t1.unwrap() - # here t2 is a pure np.ndarray but t1 is still a Docarray NdArray - # But both share the same underlying memory - ``` - - --- - - :return: a `numpy.ndarray` - """ - return self.view(np.ndarray) - - @classmethod - def from_protobuf(cls: Type[T], pb_msg: 'NdArrayProto') -> 'T': - """ - Read ndarray from a proto msg - :param pb_msg: - :return: a numpy array - """ - source = pb_msg.dense - if source.buffer: - x = np.frombuffer(bytearray(source.buffer), dtype=source.dtype) - return cls._docarray_from_native(x.reshape(source.shape)) - elif len(source.shape) > 0: - return cls._docarray_from_native(np.zeros(source.shape)) - else: - raise ValueError(f'proto message {pb_msg} cannot be cast to a NdArray') - - def to_protobuf(self) -> 'NdArrayProto': - """ - Transform self into a NdArrayProto protobuf message - """ - from docarray.proto import NdArrayProto - - nd_proto = NdArrayProto() - - nd_proto.dense.buffer = self.tobytes() - nd_proto.dense.ClearField('shape') - nd_proto.dense.shape.extend(list(self.shape)) - nd_proto.dense.dtype = self.dtype.str - - return nd_proto - - @staticmethod - def get_comp_backend() -> 'NumpyCompBackend': - """Return the computational backend of the tensor""" - from docarray.computation.numpy_backend import NumpyCompBackend - - return NumpyCompBackend() - - def __class_getitem__(cls, item: Any, *args, **kwargs): - # see here for mypy bug: https://github.com/python/mypy/issues/14123 - return AbstractTensor.__class_getitem__.__func__(cls, item) # type: ignore diff --git a/spaces/Swan608/Spaceair/app.py b/spaces/Swan608/Spaceair/app.py deleted file mode 100644 index 213184a4e2be5569151d1f5af573676a7a1d58ea..0000000000000000000000000000000000000000 --- a/spaces/Swan608/Spaceair/app.py +++ /dev/null @@ -1,46 +0,0 @@ -import gradio as gr -import numpy as np -import keras -import os - -model = keras.models.load_model("mnist_model.h5") - -def rgb2gray(rgb): - - return [255] - np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140]) - -def number_classifier(target): - - target = rgb2gray(target).flatten() - - inputs = np.array([target]) - - results = model.predict(inputs) - - result_as_dict = {} - - for i in range(10): - - result_as_dict[str(i)] = float(results[0][i]) - - return result_as_dict - -# def add_two_number(a,b): -# return str(a+b) + "입니다." 
- -# app = gr.Interface(fn=add_two_number, inputs=["number", "number"], outputs=["text"]) - -# app.launch() - -examples_list = [] - -for item in os.listdir("examples/"): - examples_list.append("examples/" + item) - -app = gr.Interface(fn=number_classifier, - inputs=[gr.Image(shape=(28, 28))], - outputs=[gr.Label(num_top_classes=3)], - examples=examples_list - ) - -app.launch() \ No newline at end of file diff --git a/spaces/TRaw/dtet/Dockerfile b/spaces/TRaw/dtet/Dockerfile deleted file mode 100644 index fb4d04336ede050357a8846aba48ef5c42f13f88..0000000000000000000000000000000000000000 --- a/spaces/TRaw/dtet/Dockerfile +++ /dev/null @@ -1,121 +0,0 @@ -ARG MODEL_NAME -ARG MODEL_PARAMS -ARG APP_COLOR -ARG APP_NAME - - -FROM node:19 as chatui-builder -ARG MODEL_NAME -ARG MODEL_PARAMS -ARG APP_COLOR -ARG APP_NAME - -WORKDIR /app - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - git gettext && \ - rm -rf /var/lib/apt/lists/* - - -RUN git clone https://github.com/huggingface/chat-ui.git - -WORKDIR /app/chat-ui - - -COPY .env.local.template .env.local.template - -RUN mkdir defaults -ADD defaults /defaults -RUN chmod -R 777 /defaults -RUN --mount=type=secret,id=MONGODB_URL,mode=0444 \ - MODEL_NAME="${MODEL_NAME:="$(cat /defaults/MODEL_NAME)"}" && export MODEL_NAME \ - && MODEL_PARAMS="${MODEL_PARAMS:="$(cat /defaults/MODEL_PARAMS)"}" && export MODEL_PARAMS \ - && APP_COLOR="${APP_COLOR:="$(cat /defaults/APP_COLOR)"}" && export APP_COLOR \ - && APP_NAME="${APP_NAME:="$(cat /defaults/APP_NAME)"}" && export APP_NAME \ - && MONGODB_URL=$(cat /run/secrets/MONGODB_URL > /dev/null | grep '^' || cat /defaults/MONGODB_URL) && export MONGODB_URL && \ - echo "${MONGODB_URL}" && \ - envsubst < ".env.local.template" > ".env.local" \ - && rm .env.local.template - - - -RUN --mount=type=cache,target=/app/.npm \ - npm set cache /app/.npm && \ - npm ci - -RUN npm run build - -FROM ghcr.io/huggingface/text-generation-inference:0.9.4 - -ARG MODEL_NAME -ARG MODEL_PARAMS -ARG APP_COLOR -ARG APP_NAME - -ENV TZ=Europe/Paris \ - PORT=3000 - - - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - gnupg \ - curl \ - gettext && \ - rm -rf /var/lib/apt/lists/* -COPY entrypoint.sh.template entrypoint.sh.template - -RUN mkdir defaults -ADD defaults /defaults -RUN chmod -R 777 /defaults - -RUN --mount=type=secret,id=MONGODB_URL,mode=0444 \ - MODEL_NAME="${MODEL_NAME:="$(cat /defaults/MODEL_NAME)"}" && export MODEL_NAME \ - && MODEL_PARAMS="${MODEL_PARAMS:="$(cat /defaults/MODEL_PARAMS)"}" && export MODEL_PARAMS \ - && APP_COLOR="${APP_COLOR:="$(cat /defaults/APP_COLOR)"}" && export APP_COLOR \ - && APP_NAME="${APP_NAME:="$(cat /defaults/APP_NAME)"}" && export APP_NAME \ - && MONGODB_URL=$(cat /run/secrets/MONGODB_URL > /dev/null | grep '^' || cat /defaults/MONGODB_URL) && export MONGODB_URL && \ - envsubst < "entrypoint.sh.template" > "entrypoint.sh" \ - && rm entrypoint.sh.template - - -RUN curl -fsSL https://pgp.mongodb.com/server-6.0.asc | \ - gpg -o /usr/share/keyrings/mongodb-server-6.0.gpg \ - --dearmor - -RUN echo "deb [ arch=amd64,arm64 signed-by=/usr/share/keyrings/mongodb-server-6.0.gpg ] https://repo.mongodb.org/apt/ubuntu focal/mongodb-org/6.0 multiverse" | tee /etc/apt/sources.list.d/mongodb-org-6.0.list - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - mongodb-org && \ - rm -rf /var/lib/apt/lists/* - -RUN mkdir -p /data/db -RUN chown -R 1000:1000 /data - -RUN 
curl -fsSL https://deb.nodesource.com/setup_19.x | /bin/bash - - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ - nodejs && \ - rm -rf /var/lib/apt/lists/* - -RUN mkdir /app -RUN chown -R 1000:1000 /app - -RUN useradd -m -u 1000 user - -# Switch to the "user" user -USER user - -ENV HOME=/home/user \ - PATH=/home/user/.local/bin:$PATH - -RUN npm config set prefix /home/user/.local -RUN npm install -g pm2 - -COPY --from=chatui-builder --chown=1000 /app/chat-ui/node_modules /app/node_modules -COPY --from=chatui-builder --chown=1000 /app/chat-ui/package.json /app/package.json -COPY --from=chatui-builder --chown=1000 /app/chat-ui/build /app/build - -ENTRYPOINT ["/bin/bash"] -CMD ["entrypoint.sh"] - - diff --git a/spaces/TachibanaYoshino/AnimeGANv3/app.py b/spaces/TachibanaYoshino/AnimeGANv3/app.py deleted file mode 100644 index 677f429041bf7da6a7161135726355e315db4712..0000000000000000000000000000000000000000 --- a/spaces/TachibanaYoshino/AnimeGANv3/app.py +++ /dev/null @@ -1,108 +0,0 @@ -import os -import cv2 -import gradio as gr -import AnimeGANv3_src - - -os.makedirs('output', exist_ok=True) - - -def inference(img_path, Style, if_face=None): - print(img_path, Style, if_face) - try: - img = cv2.imread(img_path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - if Style == "AnimeGANv3_Arcane": - f = "A" - elif Style == "AnimeGANv3_Trump v1.0": - f = "T" - elif Style == "AnimeGANv3_Shinkai": - f = "S" - elif Style == "AnimeGANv3_PortraitSketch": - f = "P" - elif Style == "AnimeGANv3_Hayao": - f = "H" - elif Style == "AnimeGANv3_Disney v1.0": - f = "D" - elif Style == "AnimeGANv3_JP_face v1.0": - f = "J" - else: - f = "U" - - try: - det_face = True if if_face=="Yes" else False - output = AnimeGANv3_src.Convert(img, f, det_face) - save_path = f"output/out.{img_path.rsplit('.')[-1]}" - cv2.imwrite(save_path, output[:, :, ::-1]) - return output, save_path - except RuntimeError as error: - print('Error', error) - except Exception as error: - print('global exception', error) - return None, None - - -title = "AnimeGANv3: To produce your own animation." -description = r"""Official online demo for <a href='https://github.com/TachibanaYoshino/AnimeGANv3' target='_blank'><b>AnimeGANv3</b></a>. If you like what I'm doing you can tip me on <a href='https://www.patreon.com/Asher_Chan' target='_blank'><b>**patreon**</b></a>.<br> -It can be used to turn your photos or videos into anime.<br> -To use it, simply upload your image. It can convert landscape photos to Hayao Miyazaki or Makoto Shinkai style anime, as well as 6 style conversions about human faces.<br> -If AnimeGANv3 is helpful, please help to ⭐ the <a href='https://github.com/TachibanaYoshino/AnimeGANv3' target='_blank'>Github Repo</a> and recommend it to your friends. 😊 - -""" -article = r""" - -[![GitHub Stars](https://img.shields.io/github/stars/TachibanaYoshino/AnimeGANv3?style=social)](https://github.com/TachibanaYoshino/AnimeGANv3) - -### 🔥 Demo -I. 
Video to anime (Hayao Style) -<p style="display: flex;"> - <a href="https://youtu.be/EosubeJmAnE" target="___blank" style="margin-left: 14px;"><img src="https://img.shields.io/static/v1?label=YouTube&message=video 1&color=red"/></a> - <a href="https://youtu.be/5qLUflWb45E" target="___blank" style="margin-left: 14px;"><img src="https://img.shields.io/static/v1?label=YouTube&message=video 2&color=green"/></a> - <a href="https://www.youtube.com/watch?v=iFjiaPlhVm4" target="___blank" style="margin-left: 14px;"><img src="https://img.shields.io/static/v1?label=YouTube&message=video 3&color=pink"/></a> -</p> -II. Video to anime (USA cartoon + Disney style) - <a href="https://youtu.be/vJqQQMRYKh0"><img src="https://img.shields.io/static/v1?label=YouTube&message=AnimeGANv3_Trump style v1.5 &color=gold"/></a> - ----------- - -## License -This repo is made freely available to academic and non-academic entities for non-commercial purposes such as academic research, teaching, scientific publications. Permission is granted to use the AnimeGANv3 given that you agree to my license terms. Regarding the request for commercial use, please contact us via email to help you obtain the authorization letter. - -## Acknowledgement -* Huggingface UI is referenced from @akhaliq/GFPGAN. -* The dataset of AnimeGANv3_JP_face v1.0 is from DCTnet and then manually optimized. - -## Author -Xin Chen -If you have any question, please open an issue on GitHub Repo. - - -<center><img src='https://visitor-badge.glitch.me/badge?page_id=AnimeGANv3_online' alt='visitor badge'></center> -""" -gr.Interface( - inference, [ - gr.inputs.Image(type="filepath", label="Input"), - gr.Dropdown([ - 'AnimeGANv3_Hayao', - 'AnimeGANv3_Shinkai', - 'AnimeGANv3_Arcane', - 'AnimeGANv3_USA', - 'AnimeGANv3_Trump v1.0', - 'AnimeGANv3_Disney v1.0', - 'AnimeGANv3_PortraitSketch', - 'AnimeGANv3_JP_face v1.0', - ], - type="value", - value='AnimeGANv3_Hayao', - label='AnimeGANv3 Style'), - gr.inputs.Radio(['Yes', 'No'], type="value", default='No', label='Extract face'), - ], [ - gr.outputs.Image(type="numpy", label="Output (The whole image)"), - gr.outputs.File(label="Download the output image") - ], - title=title, - description=description, - article=article, - allow_flagging="never", - examples=[['samples/7_out.jpg', 'AnimeGANv3_Arcane', "Yes"], ['samples/15566.jpg', 'AnimeGANv3_USA', "Yes"],['samples/23034.jpg', 'AnimeGANv3_Trump v1.0', "Yes"], ['samples/jp_13.jpg', 'AnimeGANv3_Hayao', "No"], - ['samples/jp_20.jpg', 'AnimeGANv3_Shinkai', "No"], ['samples/Hamabe Minami.jpg', 'AnimeGANv3_Disney v1.0', "Yes"], ['samples/120.jpg', 'AnimeGANv3_JP_face v1.0', "Yes"], ['samples/52014.jpg', 'AnimeGANv3_PortraitSketch', "Yes"]]).launch(enable_queue=True) diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/manifest.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/manifest.py deleted file mode 100644 index ca0fe442d9ca499466df9438df16eca405c5f102..0000000000000000000000000000000000000000 --- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/distlib/manifest.py +++ /dev/null @@ -1,393 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012-2013 Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -""" -Class representing the list of files in a distribution. - -Equivalent to distutils.filelist, but fixes some problems. -""" -import fnmatch -import logging -import os -import re -import sys - -from . 
import DistlibException -from .compat import fsdecode -from .util import convert_path - - -__all__ = ['Manifest'] - -logger = logging.getLogger(__name__) - -# a \ followed by some spaces + EOL -_COLLAPSE_PATTERN = re.compile('\\\\w*\n', re.M) -_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S) - -# -# Due to the different results returned by fnmatch.translate, we need -# to do slightly different processing for Python 2.7 and 3.2 ... this needed -# to be brought in for Python 3.6 onwards. -# -_PYTHON_VERSION = sys.version_info[:2] - -class Manifest(object): - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. - """ - - def __init__(self, base=None): - """ - Initialise an instance. - - :param base: The base directory to explore under. - """ - self.base = os.path.abspath(os.path.normpath(base or os.getcwd())) - self.prefix = self.base + os.sep - self.allfiles = None - self.files = set() - - # - # Public API - # - - def findall(self): - """Find all files under the base and set ``allfiles`` to the absolute - pathnames of files found. - """ - from stat import S_ISREG, S_ISDIR, S_ISLNK - - self.allfiles = allfiles = [] - root = self.base - stack = [root] - pop = stack.pop - push = stack.append - - while stack: - root = pop() - names = os.listdir(root) - - for name in names: - fullname = os.path.join(root, name) - - # Avoid excess stat calls -- just one will do, thank you! - stat = os.stat(fullname) - mode = stat.st_mode - if S_ISREG(mode): - allfiles.append(fsdecode(fullname)) - elif S_ISDIR(mode) and not S_ISLNK(mode): - push(fullname) - - def add(self, item): - """ - Add a file to the manifest. - - :param item: The pathname to add. This can be relative to the base. - """ - if not item.startswith(self.prefix): - item = os.path.join(self.base, item) - self.files.add(os.path.normpath(item)) - - def add_many(self, items): - """ - Add a list of files to the manifest. - - :param items: The pathnames to add. These can be relative to the base. - """ - for item in items: - self.add(item) - - def sorted(self, wantdirs=False): - """ - Return sorted files in directory order - """ - - def add_dir(dirs, d): - dirs.add(d) - logger.debug('add_dir added %s', d) - if d != self.base: - parent, _ = os.path.split(d) - assert parent not in ('', '/') - add_dir(dirs, parent) - - result = set(self.files) # make a copy! - if wantdirs: - dirs = set() - for f in result: - add_dir(dirs, os.path.dirname(f)) - result |= dirs - return [os.path.join(*path_tuple) for path_tuple in - sorted(os.path.split(path) for path in result)] - - def clear(self): - """Clear all collected files.""" - self.files = set() - self.allfiles = [] - - def process_directive(self, directive): - """ - Process a directive which either adds some files from ``allfiles`` to - ``files``, or removes some files from ``files``. - - :param directive: The directive to process. This should be in a format - compatible with distutils ``MANIFEST.in`` files: - - http://docs.python.org/distutils/sourcedist.html#commands - """ - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dirpattern). 
- action, patterns, thedir, dirpattern = self._parse_directive(directive) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. - if action == 'include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=True): - logger.warning('no files found matching %r', pattern) - - elif action == 'exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=True) - #if not found: - # logger.warning('no previously-included files ' - # 'found matching %r', pattern) - - elif action == 'global-include': - for pattern in patterns: - if not self._include_pattern(pattern, anchor=False): - logger.warning('no files found matching %r ' - 'anywhere in distribution', pattern) - - elif action == 'global-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=False) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found anywhere in ' - # 'distribution', pattern) - - elif action == 'recursive-include': - for pattern in patterns: - if not self._include_pattern(pattern, prefix=thedir): - logger.warning('no files found matching %r ' - 'under directory %r', pattern, thedir) - - elif action == 'recursive-exclude': - for pattern in patterns: - found = self._exclude_pattern(pattern, prefix=thedir) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found under directory %r', - # pattern, thedir) - - elif action == 'graft': - if not self._include_pattern(None, prefix=dirpattern): - logger.warning('no directories found matching %r', - dirpattern) - - elif action == 'prune': - if not self._exclude_pattern(None, prefix=dirpattern): - logger.warning('no previously-included directories found ' - 'matching %r', dirpattern) - else: # pragma: no cover - # This should never happen, as it should be caught in - # _parse_template_line - raise DistlibException( - 'invalid action %r' % action) - - # - # Private API - # - - def _parse_directive(self, directive): - """ - Validate a directive. - :param directive: The directive to validate. - :return: A tuple of action, patterns, thedir, dir_patterns - """ - words = directive.split() - if len(words) == 1 and words[0] not in ('include', 'exclude', - 'global-include', - 'global-exclude', - 'recursive-include', - 'recursive-exclude', - 'graft', 'prune'): - # no action given, let's use the default 'include' - words.insert(0, 'include') - - action = words[0] - patterns = thedir = dir_pattern = None - - if action in ('include', 'exclude', - 'global-include', 'global-exclude'): - if len(words) < 2: - raise DistlibException( - '%r expects <pattern1> <pattern2> ...' % action) - - patterns = [convert_path(word) for word in words[1:]] - - elif action in ('recursive-include', 'recursive-exclude'): - if len(words) < 3: - raise DistlibException( - '%r expects <dir> <pattern1> <pattern2> ...' 
% action) - - thedir = convert_path(words[1]) - patterns = [convert_path(word) for word in words[2:]] - - elif action in ('graft', 'prune'): - if len(words) != 2: - raise DistlibException( - '%r expects a single <dir_pattern>' % action) - - dir_pattern = convert_path(words[1]) - - else: - raise DistlibException('unknown action %r' % action) - - return action, patterns, thedir, dir_pattern - - def _include_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Select strings (presumably filenames) from 'self.files' that - match 'pattern', a Unix-style wildcard (glob) pattern. - - Patterns are not quite the same as implemented by the 'fnmatch' - module: '*' and '?' match non-special characters, where "special" - is platform-dependent: slash on Unix; colon, slash, and backslash on - DOS/Windows; and colon on Mac OS. - - If 'anchor' is true (the default), then the pattern match is more - stringent: "*.py" will match "foo.py" but not "foo/bar.py". If - 'anchor' is false, both of these will match. - - If 'prefix' is supplied, then only filenames starting with 'prefix' - (itself a pattern) and ending with 'pattern', with anything in between - them, will match. 'anchor' is ignored in this case. - - If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and - 'pattern' is assumed to be either a string containing a regex or a - regex object -- no translation is done, the regex is just compiled - and used as-is. - - Selected strings will be added to self.files. - - Return True if files are found. - """ - # XXX docstring lying about what the special chars are? - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - - # delayed loading of allfiles list - if self.allfiles is None: - self.findall() - - for name in self.allfiles: - if pattern_re.search(name): - self.files.add(name) - found = True - return found - - def _exclude_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Remove strings (presumably filenames) from 'files' that match - 'pattern'. - - Other parameters are the same as for 'include_pattern()', above. - The list 'self.files' is modified in place. Return True if files are - found. - - This API is public to allow e.g. exclusion of SCM subdirs, e.g. when - packaging source distributions - """ - found = False - pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) - for f in list(self.files): - if pattern_re.search(f): - self.files.remove(f) - found = True - return found - - def _translate_pattern(self, pattern, anchor=True, prefix=None, - is_regex=False): - """Translate a shell-like wildcard pattern to a compiled regular - expression. - - Return the compiled regex. If 'is_regex' true, - then 'pattern' is directly compiled to a regex (if it's a string) - or just returned as-is (assumes it's a regex object). 
- """ - if is_regex: - if isinstance(pattern, str): - return re.compile(pattern) - else: - return pattern - - if _PYTHON_VERSION > (3, 2): - # ditch start and end characters - start, _, end = self._glob_to_re('_').partition('_') - - if pattern: - pattern_re = self._glob_to_re(pattern) - if _PYTHON_VERSION > (3, 2): - assert pattern_re.startswith(start) and pattern_re.endswith(end) - else: - pattern_re = '' - - base = re.escape(os.path.join(self.base, '')) - if prefix is not None: - # ditch end of pattern character - if _PYTHON_VERSION <= (3, 2): - empty_pattern = self._glob_to_re('') - prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)] - else: - prefix_re = self._glob_to_re(prefix) - assert prefix_re.startswith(start) and prefix_re.endswith(end) - prefix_re = prefix_re[len(start): len(prefix_re) - len(end)] - sep = os.sep - if os.sep == '\\': - sep = r'\\' - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + sep.join((prefix_re, - '.*' + pattern_re)) - else: - pattern_re = pattern_re[len(start): len(pattern_re) - len(end)] - pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep, - pattern_re, end) - else: # no prefix -- respect anchor flag - if anchor: - if _PYTHON_VERSION <= (3, 2): - pattern_re = '^' + base + pattern_re - else: - pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):]) - - return re.compile(pattern_re) - - def _glob_to_re(self, pattern): - """Translate a shell-like glob pattern to a regular expression. - - Return a string containing the regex. Differs from - 'fnmatch.translate()' in that '*' does not match "special characters" - (which are platform-specific). - """ - pattern_re = fnmatch.translate(pattern) - - # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which - # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix, - # and by extension they shouldn't match such "special characters" under - # any OS. So change all non-escaped dots in the RE to match any - # character except the special characters (currently: just os.sep). - sep = os.sep - if os.sep == '\\': - # we're using a regex to manipulate a regex, so we need - # to escape the backslash twice - sep = r'\\\\' - escaped = r'\1[^%s]' % sep - pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re) - return pattern_re diff --git a/spaces/Vegecken/sovits4dzl/data_utils.py b/spaces/Vegecken/sovits4dzl/data_utils.py deleted file mode 100644 index bd67adc7d42da7b9ff4ca11e543d8cc9cd34e60b..0000000000000000000000000000000000000000 --- a/spaces/Vegecken/sovits4dzl/data_utils.py +++ /dev/null @@ -1,142 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import modules.commons as commons -import utils -from modules.mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths, hparams): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - f0 = np.load(filename + ".f0.npy") - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - - c = torch.load(filename+ ".soft.pt") - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0]) - - - lmin = min(c.size(-1), spec.size(-1)) - assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length - spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - if spec.shape[1] < 60: - print("skip too short audio:", filename) - return None - if spec.shape[1] > 800: - start = random.randint(0, spec.shape[1]-800) - end = start + 790 - spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end] - audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def __getitem__(self, index): - return self.get_audio(self.audiopaths[index][0]) - - def __len__(self): - return len(self.audiopaths) - - -class TextAudioCollate: - - def __call__(self, batch): - batch = [b for b in batch if b is not None] - - input_lengths, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].shape[1] for x in batch]), - dim=0, descending=True) - - max_c_len = max([x[0].size(1) for x in batch]) - max_wav_len = max([x[3].size(1) for x in batch]) - - lengths = torch.LongTensor(len(batch)) - - c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len) - f0_padded = torch.FloatTensor(len(batch), max_c_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - spkids = torch.LongTensor(len(batch), 1) - uv_padded = torch.FloatTensor(len(batch), max_c_len) - - c_padded.zero_() - spec_padded.zero_() - f0_padded.zero_() - wav_padded.zero_() - uv_padded.zero_() - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - c = row[0] - c_padded[i, :, :c.size(1)] = c - lengths[i] = c.size(1) - - f0 = row[1] - f0_padded[i, :f0.size(0)] = f0 - - spec = row[2] - spec_padded[i, :, :spec.size(1)] = spec - - wav = 
row[3] - wav_padded[i, :, :wav.size(1)] = wav - - spkids[i, 0] = row[4] - - uv = row[5] - uv_padded[i, :uv.size(0)] = uv - - return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded diff --git a/spaces/Vertaix/vendiscore/vendiscore.py b/spaces/Vertaix/vendiscore/vendiscore.py deleted file mode 100644 index 66f52ece421dd61e6e1eced6ee5baf1434d99e54..0000000000000000000000000000000000000000 --- a/spaces/Vertaix/vendiscore/vendiscore.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import evaluate -import datasets -import numpy as np - -from vendi_score import vendi, text_utils - -# TODO: Add BibTeX citation -_CITATION = "" -_DESCRIPTION = """\ -The Vendi Score is a metric for evaluating diversity in machine learning. -The input to metric is a collection of samples and a pairwise similarity function, and the output is a number, which can be interpreted as the effective number of unique elements in the sample. -See the project's README at https://github.com/vertaix/Vendi-Score for more information. -The interactive example calculates the Vendi Score for a set of strings using the n-gram overlap similarity, averaged between n=1 and n=2. -""" - - -_KWARGS_DESCRIPTION = """ -Calculates the Vendi Score given samples and a similarity function. -Args: - samples: an iterable containing n samples to score, an n x n similarity - matrix K, or an n x d feature matrix X. - k: a pairwise similarity function, or a string identifying a predefined - similarity function. - Options: ngram_overlap, text_embeddings. - score_K: if true, samples is an n x n similarity matrix K. - score_X: if true, samples is an n x d feature matrix X. - score_dual: if true, compute diversity score of X @ X.T. - normalize: if true, normalize the similarity scores. - model (optional): if k is "text_embeddings", a model mapping sentences to - embeddings (output should be an object with an attribute called - `pooler_output` or `last_hidden_state`). - tokenizer (optional): if k is "text_embeddings" or "ngram_overlap", a - tokenizer mapping strings to lists. - model_path (optional): if k is "text_embeddings", the name of a model on the - HuggingFace hub. - ns (optional): if k is "ngram_overlap", the values of n to calculate. - batch_size (optional): batch size to use if k is "text_embedding". - device (optional): a string (e.g. "cuda", "cpu") or torch.device identifying - the device to use if k is "text_embedding". -Returns: - VS: The Vendi Score. 
-Examples: - >>> vendiscore = evaluate.load("Vertaix/vendiscore", "text") - >>> samples = ["Look, Jane.", - "See Spot.", - "See Spot run.", - "Run, Spot, run.", - "Jane sees Spot run."] - >>> results = vendiscore.compute(samples, k="ngram_overlap", ns=[1, 2]) - >>> print(results) - {'VS': 3.90657...} -""" - - -def get_features(config_name): - if config_name in ("text", "default"): - return datasets.Features({"samples": datasets.Value("string")}) - # if config_name == "image": - # return datasets.Features({"samples": datasets.Image}) - if config_name in ("K", "X"): - return [ - datasets.Features( - {"samples": datasets.Sequence(datasets.Value("float"))} - ), - datasets.Features( - {"samples": datasets.Sequence(datasets.Value("int32"))} - ), - ] - return [ - datasets.Features({"samples": datasets.Value("float")}), - datasets.Features({"samples": datasets.Value("int32")}), - datasets.Features({"samples": datasets.Array2D}), - ] - - -@evaluate.utils.file_utils.add_start_docstrings( - _DESCRIPTION, _KWARGS_DESCRIPTION -) -class VendiScore(evaluate.Metric): - """TODO: Short description of my evaluation module.""" - - def _info(self): - # TODO: Specifies the evaluate.EvaluationModuleInfo object - return evaluate.MetricInfo( - # This is the description that will appear on the modules page. - module_type="metric", - description=_DESCRIPTION, - citation=_CITATION, - inputs_description=_KWARGS_DESCRIPTION, - features=get_features(self.config_name), - homepage="http://github.com/Vertaix/Vendi-Score", - codebase_urls=["http://github.com/Vertaix/Vendi-Score"], - reference_urls=[], - ) - - def _download_and_prepare(self, dl_manager): - import nltk - - nltk.download("punkt") - - def _compute( - self, - samples, - k="ngram_overlap", - score_K=False, - score_X=False, - score_dual=False, - normalize=False, - model=None, - tokenizer=None, - model_path=None, - ns=[1, 2], - batch_size=16, - device="cpu", - ): - if score_K: - vs = vendi.score_K(np.array(samples), normalize=normalize) - elif score_dual: - vs = vendi.score_dual(np.array(samples), normalize=normalize) - elif score_X: - vs = vendi.score_X(np.array(samples), normalize=normalize) - elif type(k) == str and k == "ngram_overlap": - vs = text_utils.ngram_vendi_score( - samples, ns=ns, tokenizer=tokenizer - ) - elif type(k) == str and k == "text_embeddings": - vs = text_utils.embedding_vendi_score( - samples, - model=model, - tokenizer=tokenizer, - batch_size=batch_size, - device=device, - model_path=model_path, - ) - # elif type(k) == str and k == "pixels": - # vs = image_utils.pixel_vendi_score( - # [Image.fromarray(x) for x in samples] - # ) - # elif type(k) == str and k == "image_embeddings": - # vs = image_utils.embedding_vendi_score( - # [Image.fromarray(x) for x in samples], - # batch_size=batch_size, - # device=device, - # model=model, - # transform=transform, - # ) - else: - vs = vendi.score(samples, k) - return {"VS": vs} diff --git a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/GetGpt.py b/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/GetGpt.py deleted file mode 100644 index 56a121f6ee5f430da7beda3b65abdea64a87c36b..0000000000000000000000000000000000000000 --- a/spaces/VickyKira/NASAGPT/g4f/Provider/Providers/GetGpt.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import json -import uuid -import requests -from Crypto.Cipher import AES -from ...typing import sha256, Dict, get_type_hints - -url = 'https://chat.getgpt.world/' -model = ['gpt-3.5-turbo'] -supports_stream = True -needs_auth = False - -def _create_completion(model: str, messages: 
list, stream: bool, **kwargs): - def encrypt(e): - t = os.urandom(8).hex().encode('utf-8') - n = os.urandom(8).hex().encode('utf-8') - r = e.encode('utf-8') - cipher = AES.new(t, AES.MODE_CBC, n) - ciphertext = cipher.encrypt(pad_data(r)) - return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8') - - def pad_data(data: bytes) -> bytes: - block_size = AES.block_size - padding_size = block_size - len(data) % block_size - padding = bytes([padding_size] * padding_size) - return data + padding - - headers = { - 'Content-Type': 'application/json', - 'Referer': 'https://chat.getgpt.world/', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - } - - data = json.dumps({ - 'messages': messages, - 'frequency_penalty': kwargs.get('frequency_penalty', 0), - 'max_tokens': kwargs.get('max_tokens', 4000), - 'model': 'gpt-3.5-turbo', - 'presence_penalty': kwargs.get('presence_penalty', 0), - 'temperature': kwargs.get('temperature', 1), - 'top_p': kwargs.get('top_p', 1), - 'stream': True, - 'uuid': str(uuid.uuid4()) - }) - - res = requests.post('https://chat.getgpt.world/api/chat/stream', - headers=headers, json={'signature': encrypt(data)}, stream=True) - - for line in res.iter_lines(): - if b'content' in line: - line_json = json.loads(line.decode('utf-8').split('data: ')[1]) - yield (line_json['choices'][0]['delta']['content']) - - -params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \ - '(%s)' % ', '.join( - [f'{name}: {get_type_hints(_create_completion)[name].__name__}' for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]]) diff --git a/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/slicer2.py b/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/slicer2.py deleted file mode 100644 index 5b29ee262aa54045e807be2cffeb41687499ba58..0000000000000000000000000000000000000000 --- a/spaces/VoiceHero69/changer/webui/modules/implementations/rvc/slicer2.py +++ /dev/null @@ -1,260 +0,0 @@ -import numpy as np - - -# This function is obtained from librosa. 
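# get_rms frames the signal into overlapping windows of `frame_length` samples spaced `hop_length` apart
# and returns the per-frame root-mean-square energy; a mono signal of N samples yields roughly
# N // hop_length + 1 RMS values.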
-def get_rms( - y, - frame_length=2048, - hop_length=512, - pad_mode="constant", -): - padding = (int(frame_length // 2), int(frame_length // 2)) - y = np.pad(y, padding, mode=pad_mode) - - axis = -1 - # put our new within-frame axis at the end for now - out_strides = y.strides + tuple([y.strides[axis]]) - # Reduce the shape on the framing axis - x_shape_trimmed = list(y.shape) - x_shape_trimmed[axis] -= frame_length - 1 - out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) - xw = np.lib.stride_tricks.as_strided(y, shape=out_shape, strides=out_strides) - if axis < 0: - target_axis = axis - 1 - else: - target_axis = axis + 1 - xw = np.moveaxis(xw, -1, target_axis) - # Downsample along the target axis - slices = [slice(None)] * xw.ndim - slices[axis] = slice(0, None, hop_length) - x = xw[tuple(slices)] - - # Calculate power - power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) - - return np.sqrt(power) - - -class Slicer: - def __init__( - self, - sr: int, - threshold: float = -40.0, - min_length: int = 5000, - min_interval: int = 300, - hop_size: int = 20, - max_sil_kept: int = 5000, - ): - if not min_length >= min_interval >= hop_size: - raise ValueError( - "The following condition must be satisfied: min_length >= min_interval >= hop_size" - ) - if not max_sil_kept >= hop_size: - raise ValueError( - "The following condition must be satisfied: max_sil_kept >= hop_size" - ) - min_interval = sr * min_interval / 1000 - self.threshold = 10 ** (threshold / 20.0) - self.hop_size = round(sr * hop_size / 1000) - self.win_size = min(round(min_interval), 4 * self.hop_size) - self.min_length = round(sr * min_length / 1000 / self.hop_size) - self.min_interval = round(min_interval / self.hop_size) - self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) - - def _apply_slice(self, waveform, begin, end): - if len(waveform.shape) > 1: - return waveform[ - :, begin * self.hop_size : min(waveform.shape[1], end * self.hop_size) - ] - else: - return waveform[ - begin * self.hop_size : min(waveform.shape[0], end * self.hop_size) - ] - - # @timeit - def slice(self, waveform): - if len(waveform.shape) > 1: - samples = waveform.mean(axis=0) - else: - samples = waveform - if samples.shape[0] <= self.min_length: - return [waveform] - rms_list = get_rms( - y=samples, frame_length=self.win_size, hop_length=self.hop_size - ).squeeze(0) - sil_tags = [] - silence_start = None - clip_start = 0 - for i, rms in enumerate(rms_list): - # Keep looping while frame is silent. - if rms < self.threshold: - # Record start of silent frames. - if silence_start is None: - silence_start = i - continue - # Keep looping while frame is not silent and silence start has not been recorded. - if silence_start is None: - continue - # Clear recorded silence start if interval is not enough or clip is too short - is_leading_silence = silence_start == 0 and i > self.max_sil_kept - need_slice_middle = ( - i - silence_start >= self.min_interval - and i - clip_start >= self.min_length - ) - if not is_leading_silence and not need_slice_middle: - silence_start = None - continue - # Need slicing. Record the range of silent frames to be removed. 
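                # Three cases follow: a short silent run (<= max_sil_kept frames) keeps a single cut point at its
                # quietest frame; a medium run (<= 2 * max_sil_kept) searches both edges and the middle for the
                # quietest positions; a longer run is trimmed so at most max_sil_kept silent frames remain on each side.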
- if i - silence_start <= self.max_sil_kept: - pos = rms_list[silence_start : i + 1].argmin() + silence_start - if silence_start == 0: - sil_tags.append((0, pos)) - else: - sil_tags.append((pos, pos)) - clip_start = pos - elif i - silence_start <= self.max_sil_kept * 2: - pos = rms_list[ - i - self.max_sil_kept : silence_start + self.max_sil_kept + 1 - ].argmin() - pos += i - self.max_sil_kept - pos_l = ( - rms_list[ - silence_start : silence_start + self.max_sil_kept + 1 - ].argmin() - + silence_start - ) - pos_r = ( - rms_list[i - self.max_sil_kept : i + 1].argmin() - + i - - self.max_sil_kept - ) - if silence_start == 0: - sil_tags.append((0, pos_r)) - clip_start = pos_r - else: - sil_tags.append((min(pos_l, pos), max(pos_r, pos))) - clip_start = max(pos_r, pos) - else: - pos_l = ( - rms_list[ - silence_start : silence_start + self.max_sil_kept + 1 - ].argmin() - + silence_start - ) - pos_r = ( - rms_list[i - self.max_sil_kept : i + 1].argmin() - + i - - self.max_sil_kept - ) - if silence_start == 0: - sil_tags.append((0, pos_r)) - else: - sil_tags.append((pos_l, pos_r)) - clip_start = pos_r - silence_start = None - # Deal with trailing silence. - total_frames = rms_list.shape[0] - if ( - silence_start is not None - and total_frames - silence_start >= self.min_interval - ): - silence_end = min(total_frames, silence_start + self.max_sil_kept) - pos = rms_list[silence_start : silence_end + 1].argmin() + silence_start - sil_tags.append((pos, total_frames + 1)) - # Apply and return slices. - if len(sil_tags) == 0: - return [waveform] - else: - chunks = [] - if sil_tags[0][0] > 0: - chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0])) - for i in range(len(sil_tags) - 1): - chunks.append( - self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0]) - ) - if sil_tags[-1][1] < total_frames: - chunks.append( - self._apply_slice(waveform, sil_tags[-1][1], total_frames) - ) - return chunks - - -def main(): - import os.path - from argparse import ArgumentParser - - import librosa - import soundfile - - parser = ArgumentParser() - parser.add_argument("audio", type=str, help="The audio to be sliced") - parser.add_argument( - "--out", type=str, help="Output directory of the sliced audio clips" - ) - parser.add_argument( - "--db_thresh", - type=float, - required=False, - default=-40, - help="The dB threshold for silence detection", - ) - parser.add_argument( - "--min_length", - type=int, - required=False, - default=5000, - help="The minimum milliseconds required for each sliced audio clip", - ) - parser.add_argument( - "--min_interval", - type=int, - required=False, - default=300, - help="The minimum milliseconds for a silence part to be sliced", - ) - parser.add_argument( - "--hop_size", - type=int, - required=False, - default=10, - help="Frame length in milliseconds", - ) - parser.add_argument( - "--max_sil_kept", - type=int, - required=False, - default=500, - help="The maximum silence length kept around the sliced clip, presented in milliseconds", - ) - args = parser.parse_args() - out = args.out - if out is None: - out = os.path.dirname(os.path.abspath(args.audio)) - audio, sr = librosa.load(args.audio, sr=None, mono=False) - slicer = Slicer( - sr=sr, - threshold=args.db_thresh, - min_length=args.min_length, - min_interval=args.min_interval, - hop_size=args.hop_size, - max_sil_kept=args.max_sil_kept, - ) - chunks = slicer.slice(audio) - if not os.path.exists(out): - os.makedirs(out) - for i, chunk in enumerate(chunks): - if len(chunk.shape) > 1: - chunk = chunk.T - soundfile.write( - 
os.path.join( - out, - f"%s_%d.wav" - % (os.path.basename(args.audio).rsplit(".", maxsplit=1)[0], i), - ), - chunk, - sr, - ) - - -if __name__ == "__main__": - main() diff --git a/spaces/Wayben/ChatGPT/assets/custom.js b/spaces/Wayben/ChatGPT/assets/custom.js deleted file mode 100644 index 7b1761043149ff97ca498501c87a0d15db5258ee..0000000000000000000000000000000000000000 --- a/spaces/Wayben/ChatGPT/assets/custom.js +++ /dev/null @@ -1 +0,0 @@ -// custom javascript here \ No newline at end of file diff --git a/spaces/XzJosh/otto-Bert-VITS2/models.py b/spaces/XzJosh/otto-Bert-VITS2/models.py deleted file mode 100644 index d4afe44d883691610c5903e602a3ca245fcb3a5c..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/otto-Bert-VITS2/models.py +++ /dev/null @@ -1,707 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from commons import init_weights, get_padding -from text import symbols, num_tones, num_languages -class DurationDiscriminator(nn.Module): #vits2 - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.dur_proj = nn.Conv1d(1, filter_channels, 1) - - self.pre_out_conv_1 = nn.Conv1d(2*filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_1 = modules.LayerNorm(filter_channels) - self.pre_out_conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.pre_out_norm_2 = modules.LayerNorm(filter_channels) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - self.output_layer = nn.Sequential( - nn.Linear(filter_channels, 1), - nn.Sigmoid() - ) - - def forward_probability(self, x, x_mask, dur, g=None): - dur = self.dur_proj(dur) - x = torch.cat([x, dur], dim=1) - x = self.pre_out_conv_1(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_1(x) - x = self.drop(x) - x = self.pre_out_conv_2(x * x_mask) - x = torch.relu(x) - x = self.pre_out_norm_2(x) - x = self.drop(x) - x = x * x_mask - x = x.transpose(1, 2) - output_prob = self.output_layer(x) - return output_prob - - def forward(self, x, x_mask, dur_r, dur_hat, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - - output_probs = [] - for dur in [dur_r, dur_hat]: - output_prob = self.forward_probability(x, x_mask, dur, g) - output_probs.append(output_prob) - - return output_probs - -class TransformerCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - n_flows=4, - gin_channels=0, - 
share_parameter=False - ): - - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - - self.wn = attentions.FFT(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout, isflow = True, gin_channels = self.gin_channels) if share_parameter else None - - for i in range(n_flows): - self.flows.append( - modules.TransformerCouplingLayer(channels, hidden_channels, kernel_size, n_layers, n_heads, p_dropout, filter_channels, mean_only=True, wn_sharing_parameter=self.wn, gin_channels = self.gin_channels)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) - logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) 
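                # accumulate the log-determinant contributed by each flow; it enters the negative log-likelihood returned below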
- logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=0): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - self.emb = nn.Embedding(len(symbols), hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.tone_emb = nn.Embedding(num_tones, hidden_channels) - nn.init.normal_(self.tone_emb.weight, 0.0, hidden_channels ** -0.5) - self.language_emb = nn.Embedding(num_languages, hidden_channels) - nn.init.normal_(self.language_emb.weight, 0.0, hidden_channels ** -0.5) - self.bert_proj = nn.Conv1d(1024, hidden_channels, 1) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, tone, language, bert, g=None): - x = (self.emb(x)+ self.tone_emb(tone)+ self.language_emb(language)+self.bert_proj(bert).transpose(1,2)) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask, g=g) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels 
= hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, - gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel // (2 ** i), upsample_initial_channel // (2 ** (i + 1)), - k, u, padding=(k - u) // 2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - 
l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - -class ReferenceEncoder(nn.Module): - ''' - inputs --- [N, Ty/r, n_mels*r] mels - outputs --- [N, ref_enc_gru_size] - ''' - - def __init__(self, spec_channels, gin_channels=0): - - super().__init__() - self.spec_channels = spec_channels - ref_enc_filters = [32, 32, 64, 64, 128, 128] - K = len(ref_enc_filters) - filters = [1] + ref_enc_filters - convs = [weight_norm(nn.Conv2d(in_channels=filters[i], - out_channels=filters[i + 1], - kernel_size=(3, 3), - stride=(2, 2), - padding=(1, 1))) for i in range(K)] - self.convs = nn.ModuleList(convs) - # self.wns = nn.ModuleList([weight_norm(num_features=ref_enc_filters[i]) 
for i in range(K)]) - - out_channels = self.calculate_channels(spec_channels, 3, 2, 1, K) - self.gru = nn.GRU(input_size=ref_enc_filters[-1] * out_channels, - hidden_size=256 // 2, - batch_first=True) - self.proj = nn.Linear(128, gin_channels) - - def forward(self, inputs, mask=None): - N = inputs.size(0) - out = inputs.view(N, 1, -1, self.spec_channels) # [N, 1, Ty, n_freqs] - for conv in self.convs: - out = conv(out) - # out = wn(out) - out = F.relu(out) # [N, 128, Ty//2^K, n_mels//2^K] - - out = out.transpose(1, 2) # [N, Ty//2^K, 128, n_mels//2^K] - T = out.size(1) - N = out.size(0) - out = out.contiguous().view(N, T, -1) # [N, Ty//2^K, 128*n_mels//2^K] - - self.gru.flatten_parameters() - memory, out = self.gru(out) # out --- [1, N, 128] - - return self.proj(out.squeeze(0)) - - def calculate_channels(self, L, kernel_size, stride, pad, n_convs): - for i in range(n_convs): - L = (L - kernel_size + 2 * pad) // stride + 1 - return L - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=256, - gin_channels=256, - use_sdp=True, - n_flow_layer = 4, - n_layers_trans_flow = 3, - flow_share_parameter = False, - use_transformer_flow = True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - self.n_layers_trans_flow = n_layers_trans_flow - self.use_spk_conditioned_encoder = kwargs.get("use_spk_conditioned_encoder", True) - self.use_sdp = use_sdp - self.use_noise_scaled_mas = kwargs.get("use_noise_scaled_mas", False) - self.mas_noise_scale_initial = kwargs.get("mas_noise_scale_initial", 0.01) - self.noise_scale_delta = kwargs.get("noise_scale_delta", 2e-6) - self.current_mas_noise_scale = self.mas_noise_scale_initial - if self.use_spk_conditioned_encoder and gin_channels > 0: - self.enc_gin_channels = gin_channels - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - gin_channels=self.enc_gin_channels) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, - upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, - gin_channels=gin_channels) - if use_transformer_flow: - self.flow = TransformerCouplingBlock(inter_channels, hidden_channels, filter_channels, n_heads, n_layers_trans_flow, 5, p_dropout, n_flow_layer, gin_channels=gin_channels,share_parameter= flow_share_parameter) - else: - self.flow = ResidualCouplingBlock(inter_channels, 
hidden_channels, 5, 1, n_flow_layer, gin_channels=gin_channels) - self.sdp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers >= 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - else: - self.ref_enc = ReferenceEncoder(spec_channels, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert): - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), - s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - if self.use_noise_scaled_mas: - epsilon = torch.std(neg_cent) * torch.randn_like(neg_cent) * self.current_mas_noise_scale - neg_cent = neg_cent + epsilon - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - - l_length_sdp = self.sdp(x, x_mask, w, g=g) - l_length_sdp = l_length_sdp / torch.sum(x_mask) - - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging - - l_length = l_length_dp + l_length_sdp - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (x, logw, logw_) - - def infer(self, x, x_lengths, sid, tone, language, bert, noise_scale=.667, length_scale=1, noise_scale_w=0.8, max_len=None, sdp_ratio=0,y=None): - #x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert) - # g = self.gst(y) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = self.ref_enc(y.transpose(1,2)).unsqueeze(-1) - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert,g=g) - logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (sdp_ratio) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, - 2) # [b, t', t], [b, t, d] -> 
[b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:, :, :max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) diff --git a/spaces/YUANAI/DiffspeechResearch/docs/portaspeech.md b/spaces/YUANAI/DiffspeechResearch/docs/portaspeech.md deleted file mode 100644 index 94e8b9b4241a2daae5bbfba660aa2a4a9068360d..0000000000000000000000000000000000000000 --- a/spaces/YUANAI/DiffspeechResearch/docs/portaspeech.md +++ /dev/null @@ -1,61 +0,0 @@ -# Run PortaSpeech - -## Quick Start - -### Install Dependencies - -Install dependencies following [readme.md](../readme.md) - -### Set Config Path and Experiment Name - -#### PortaSpeech (normal) -```bash -export CONFIG_NAME=egs/datasets/audio/lj/ps_flow_nips2021.yaml -export MY_EXP_NAME=ps_normal_exp -``` - -#### PortaSpeech (small) -```bash -export CONFIG_NAME=egs/datasets/audio/lj/ps_flow_small_nips2021.yaml -export MY_EXP_NAME=ps_small_exp -``` - -### Preprocess and binary dataset - -Prepare dataset following [prepare_data.md](./prepare_data.md) - -### Prepare Vocoder - -Prepare vocoder following [prepare_vocoder.md](./prepare_vocoder.md) - -## Training - -```bash -CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config $CONFIG_NAME --exp_name $MY_EXP_NAME --reset -``` - -You can check the training and validation curves open Tensorboard via: - -```bash -tensorboard --logdir checkpoints/$MY_EXP_NAME -``` - -## Inference (Testing) - -```bash -CUDA_VISIBLE_DEVICES=0 python tasks/run.py --config $PS_CONFIG --exp_name $MY_EXP_NAME --infer -``` - -## Citation - -If you find this useful for your research, please use the following. - -``` -@article{ren2021portaspeech, - title={PortaSpeech: Portable and High-Quality Generative Text-to-Speech}, - author={Ren, Yi and Liu, Jinglin and Zhao, Zhou}, - journal={Advances in Neural Information Processing Systems}, - volume={34}, - year={2021} -} -``` diff --git a/spaces/Yabo/ControlVideo/models/RIFE/IFNet_HDv3.py b/spaces/Yabo/ControlVideo/models/RIFE/IFNet_HDv3.py deleted file mode 100644 index d57f0a2f0889fec5d68c52bf99bf2dbd91150381..0000000000000000000000000000000000000000 --- a/spaces/Yabo/ControlVideo/models/RIFE/IFNet_HDv3.py +++ /dev/null @@ -1,130 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from diffusers import ModelMixin - -from .warplayer import warp - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): - return nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, bias=True), - nn.PReLU(out_planes) - ) - -def conv_bn(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1): - return nn.Sequential( - nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, - padding=padding, dilation=dilation, bias=False), - nn.BatchNorm2d(out_planes), - nn.PReLU(out_planes) - ) - -def convert(param): - return { - k.replace("module.", ""): v - for k, v in param.items() - if "module." 
in k - } - -class IFBlock(nn.Module): - def __init__(self, in_planes, c=64): - super(IFBlock, self).__init__() - self.conv0 = nn.Sequential( - conv(in_planes, c//2, 3, 2, 1), - conv(c//2, c, 3, 2, 1), - ) - self.convblock0 = nn.Sequential( - conv(c, c), - conv(c, c) - ) - self.convblock1 = nn.Sequential( - conv(c, c), - conv(c, c) - ) - self.convblock2 = nn.Sequential( - conv(c, c), - conv(c, c) - ) - self.convblock3 = nn.Sequential( - conv(c, c), - conv(c, c) - ) - self.conv1 = nn.Sequential( - nn.ConvTranspose2d(c, c//2, 4, 2, 1), - nn.PReLU(c//2), - nn.ConvTranspose2d(c//2, 4, 4, 2, 1), - ) - self.conv2 = nn.Sequential( - nn.ConvTranspose2d(c, c//2, 4, 2, 1), - nn.PReLU(c//2), - nn.ConvTranspose2d(c//2, 1, 4, 2, 1), - ) - - def forward(self, x, flow, scale=1): - x = F.interpolate(x, scale_factor= 1. / scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) - flow = F.interpolate(flow, scale_factor= 1. / scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) * 1. / scale - feat = self.conv0(torch.cat((x, flow), 1)) - feat = self.convblock0(feat) + feat - feat = self.convblock1(feat) + feat - feat = self.convblock2(feat) + feat - feat = self.convblock3(feat) + feat - flow = self.conv1(feat) - mask = self.conv2(feat) - flow = F.interpolate(flow, scale_factor=scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) * scale - mask = F.interpolate(mask, scale_factor=scale, mode="bilinear", align_corners=False, recompute_scale_factor=False) - return flow, mask - -class IFNet(ModelMixin): - def __init__(self, ckpt_path="checkpoints/flownet.pkl"): - super(IFNet, self).__init__() - self.block0 = IFBlock(7+4, c=90) - self.block1 = IFBlock(7+4, c=90) - self.block2 = IFBlock(7+4, c=90) - self.block_tea = IFBlock(10+4, c=90) - if ckpt_path is not None: - self.load_state_dict(convert(torch.load(ckpt_path, map_location ='cpu'))) - - def inference(self, img0, img1, scale=1.0): - imgs = torch.cat((img0, img1), 1) - scale_list = [4/scale, 2/scale, 1/scale] - flow, mask, merged = self.forward(imgs, scale_list) - return merged[2] - - def forward(self, x, scale_list=[4, 2, 1], training=False): - if training == False: - channel = x.shape[1] // 2 - img0 = x[:, :channel] - img1 = x[:, channel:] - flow_list = [] - merged = [] - mask_list = [] - warped_img0 = img0 - warped_img1 = img1 - flow = (x[:, :4]).detach() * 0 - mask = (x[:, :1]).detach() * 0 - loss_cons = 0 - block = [self.block0, self.block1, self.block2] - for i in range(3): - f0, m0 = block[i](torch.cat((warped_img0[:, :3], warped_img1[:, :3], mask), 1), flow, scale=scale_list[i]) - f1, m1 = block[i](torch.cat((warped_img1[:, :3], warped_img0[:, :3], -mask), 1), torch.cat((flow[:, 2:4], flow[:, :2]), 1), scale=scale_list[i]) - flow = flow + (f0 + torch.cat((f1[:, 2:4], f1[:, :2]), 1)) / 2 - mask = mask + (m0 + (-m1)) / 2 - mask_list.append(mask) - flow_list.append(flow) - warped_img0 = warp(img0, flow[:, :2]) - warped_img1 = warp(img1, flow[:, 2:4]) - merged.append((warped_img0, warped_img1)) - ''' - c0 = self.contextnet(img0, flow[:, :2]) - c1 = self.contextnet(img1, flow[:, 2:4]) - tmp = self.unet(img0, img1, warped_img0, warped_img1, mask, flow, c0, c1) - res = tmp[:, 1:4] * 2 - 1 - ''' - for i in range(3): - mask_list[i] = torch.sigmoid(mask_list[i]) - merged[i] = merged[i][0] * mask_list[i] + merged[i][1] * (1 - mask_list[i]) - # merged[i] = torch.clamp(merged[i] + res, 0, 1) - return flow_list, mask_list[2], merged diff --git 
a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py deleted file mode 100644 index 2242d21b1d9147b61181cd43c59649dbafbdc598..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/stable_diffusion/pipeline_onnx_stable_diffusion_img2img.py +++ /dev/null @@ -1,459 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union - -import numpy as np -import torch - -import PIL -from transformers import CLIPFeatureExtractor, CLIPTokenizer - -from ...configuration_utils import FrozenDict -from ...onnx_utils import ORT_TO_NP_TYPE, OnnxRuntimeModel -from ...pipeline_utils import DiffusionPipeline -from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils import PIL_INTERPOLATION, deprecate, logging -from . import StableDiffusionPipelineOutput - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def preprocess(image): - w, h = image.size - w, h = map(lambda x: x - x % 32, (w, h)) # resize to integer multiple of 32 - image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]) - image = np.array(image).astype(np.float32) / 255.0 - image = image[None].transpose(0, 3, 1, 2) - return 2.0 * image - 1.0 - - -class OnnxStableDiffusionImg2ImgPipeline(DiffusionPipeline): - r""" - Pipeline for text-guided image to image generation using Stable Diffusion. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. - safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. 
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - vae_encoder: OnnxRuntimeModel - vae_decoder: OnnxRuntimeModel - text_encoder: OnnxRuntimeModel - tokenizer: CLIPTokenizer - unet: OnnxRuntimeModel - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] - safety_checker: OnnxRuntimeModel - feature_extractor: CLIPFeatureExtractor - - _optional_components = ["safety_checker", "feature_extractor"] - - def __init__( - self, - vae_encoder: OnnxRuntimeModel, - vae_decoder: OnnxRuntimeModel, - text_encoder: OnnxRuntimeModel, - tokenizer: CLIPTokenizer, - unet: OnnxRuntimeModel, - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - safety_checker: OnnxRuntimeModel, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = True, - ): - super().__init__() - - if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" - f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " - "to update the config accordingly as leaving `steps_offset` might led to incorrect results" - " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," - " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" - " file" - ) - deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["steps_offset"] = 1 - scheduler._internal_dict = FrozenDict(new_config) - - if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: - deprecation_message = ( - f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." - " `clip_sample` should be set to False in the configuration file. Please make sure to update the" - " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" - " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" - " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" - ) - deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) - new_config = dict(scheduler.config) - new_config["clip_sample"] = False - scheduler._internal_dict = FrozenDict(new_config) - - if safety_checker is None and requires_safety_checker: - logger.warning( - f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" - " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" - " results in services or applications open to the public. Both the diffusers team and Hugging Face" - " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" - " it only for use-cases that involve analyzing network behavior or auditing its results. For more" - " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." - ) - - if safety_checker is not None and feature_extractor is None: - raise ValueError( - "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" - " checker. 
If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." - ) - - self.register_modules( - vae_encoder=vae_encoder, - vae_decoder=vae_decoder, - text_encoder=text_encoder, - tokenizer=tokenizer, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_onnx_stable_diffusion.OnnxStableDiffusionPipeline._encode_prompt - def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt): - r""" - Encodes the prompt into text encoder hidden states. - - Args: - prompt (`str` or `list(int)`): - prompt to be encoded - num_images_per_prompt (`int`): - number of images that should be generated per prompt - do_classifier_free_guidance (`bool`): - whether to use classifier free guidance or not - negative_prompt (`str` or `List[str]`): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - """ - batch_size = len(prompt) if isinstance(prompt, list) else 1 - - # get prompt text embeddings - text_inputs = self.tokenizer( - prompt, - padding="max_length", - max_length=self.tokenizer.model_max_length, - truncation=True, - return_tensors="np", - ) - text_input_ids = text_inputs.input_ids - untruncated_ids = self.tokenizer(prompt, padding="max_length", return_tensors="np").input_ids - - if not np.array_equal(text_input_ids, untruncated_ids): - removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) - logger.warning( - "The following part of your input was truncated because CLIP can only handle sequences up to" - f" {self.tokenizer.model_max_length} tokens: {removed_text}" - ) - - text_embeddings = self.text_encoder(input_ids=text_input_ids.astype(np.int32))[0] - text_embeddings = np.repeat(text_embeddings, num_images_per_prompt, axis=0) - - # get unconditional embeddings for classifier free guidance - if do_classifier_free_guidance: - uncond_tokens: List[str] - if negative_prompt is None: - uncond_tokens = [""] * batch_size - elif type(prompt) is not type(negative_prompt): - raise TypeError( - f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" - f" {type(prompt)}." - ) - elif isinstance(negative_prompt, str): - uncond_tokens = [negative_prompt] * batch_size - elif batch_size != len(negative_prompt): - raise ValueError( - f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" - f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" - " the batch size of `prompt`." - ) - else: - uncond_tokens = negative_prompt - - max_length = text_input_ids.shape[-1] - uncond_input = self.tokenizer( - uncond_tokens, - padding="max_length", - max_length=max_length, - truncation=True, - return_tensors="np", - ) - uncond_embeddings = self.text_encoder(input_ids=uncond_input.input_ids.astype(np.int32))[0] - uncond_embeddings = np.repeat(uncond_embeddings, num_images_per_prompt, axis=0) - - # For classifier free guidance, we need to do two forward passes. 
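        # (At sampling time the batch is split back into its two halves and combined as
        # noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond).)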
- # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - text_embeddings = np.concatenate([uncond_embeddings, text_embeddings]) - - return text_embeddings - - def __call__( - self, - prompt: Union[str, List[str]], - image: Union[np.ndarray, PIL.Image.Image], - strength: float = 0.8, - num_inference_steps: Optional[int] = 50, - guidance_scale: Optional[float] = 7.5, - negative_prompt: Optional[Union[str, List[str]]] = None, - num_images_per_prompt: Optional[int] = 1, - eta: Optional[float] = 0.0, - generator: Optional[np.random.RandomState] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, np.ndarray], None]] = None, - callback_steps: Optional[int] = 1, - **kwargs, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`): - The prompt or prompts to guide the image generation. - image (`np.ndarray` or `PIL.Image.Image`): - `Image`, or tensor representing an image batch, that will be used as the starting point for the - process. - strength (`float`, *optional*, defaults to 0.8): - Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` - will be used as a starting point, adding more noise to it the larger the `strength`. The number of - denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will - be maximum and the denoising process will run for the full number of iterations specified in - `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. This parameter will be modulated by `strength`. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored - if `guidance_scale` is less than `1`). - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`np.random.RandomState`, *optional*): - A np.random.RandomState to make generation deterministic. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. 
The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: np.ndarray)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - message = "Please use `image` instead of `init_image`." - init_image = deprecate("init_image", "0.12.0", message, take_from=kwargs) - image = init_image or image - - if isinstance(prompt, str): - batch_size = 1 - elif isinstance(prompt, list): - batch_size = len(prompt) - else: - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if strength < 0 or strength > 1: - raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - if generator is None: - generator = np.random - - # set timesteps - self.scheduler.set_timesteps(num_inference_steps) - - if isinstance(image, PIL.Image.Image): - image = preprocess(image) - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - text_embeddings = self._encode_prompt( - prompt, num_images_per_prompt, do_classifier_free_guidance, negative_prompt - ) - - latents_dtype = text_embeddings.dtype - image = image.astype(latents_dtype) - # encode the init image into latents and scale the latents - init_latents = self.vae_encoder(sample=image)[0] - init_latents = 0.18215 * init_latents - - if isinstance(prompt, str): - prompt = [prompt] - if len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] == 0: - # expand init_latents for batch_size - deprecation_message = ( - f"You have passed {len(prompt)} text prompts (`prompt`), but only {init_latents.shape[0]} initial" - " images (`image`). Initial images are now duplicating to match the number of text prompts. Note" - " that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update" - " your script to pass as many initial images as text prompts to suppress this warning." - ) - deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False) - additional_image_per_prompt = len(prompt) // init_latents.shape[0] - init_latents = np.concatenate([init_latents] * additional_image_per_prompt * num_images_per_prompt, axis=0) - elif len(prompt) > init_latents.shape[0] and len(prompt) % init_latents.shape[0] != 0: - raise ValueError( - f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {len(prompt)} text prompts." 
- ) - else: - init_latents = np.concatenate([init_latents] * num_images_per_prompt, axis=0) - - # get the original timestep using init_timestep - offset = self.scheduler.config.get("steps_offset", 0) - init_timestep = int(num_inference_steps * strength) + offset - init_timestep = min(init_timestep, num_inference_steps) - - timesteps = self.scheduler.timesteps.numpy()[-init_timestep] - timesteps = np.array([timesteps] * batch_size * num_images_per_prompt) - - # add noise to latents using the timesteps - noise = generator.randn(*init_latents.shape).astype(latents_dtype) - init_latents = self.scheduler.add_noise( - torch.from_numpy(init_latents), torch.from_numpy(noise), torch.from_numpy(timesteps) - ) - init_latents = init_latents.numpy() - - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - latents = init_latents - - t_start = max(num_inference_steps - init_timestep + offset, 0) - timesteps = self.scheduler.timesteps[t_start:].numpy() - - timestep_dtype = next( - (input.type for input in self.unet.model.get_inputs() if input.name == "timestep"), "tensor(float)" - ) - timestep_dtype = ORT_TO_NP_TYPE[timestep_dtype] - - for i, t in enumerate(self.progress_bar(timesteps)): - # expand the latents if we are doing classifier free guidance - latent_model_input = np.concatenate([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(torch.from_numpy(latent_model_input), t) - latent_model_input = latent_model_input.cpu().numpy() - - # predict the noise residual - timestep = np.array([t], dtype=timestep_dtype) - noise_pred = self.unet( - sample=latent_model_input, timestep=timestep, encoder_hidden_states=text_embeddings - )[0] - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - scheduler_output = self.scheduler.step( - torch.from_numpy(noise_pred), t, torch.from_numpy(latents), **extra_step_kwargs - ) - latents = scheduler_output.prev_sample.numpy() - - # call the callback, if provided - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - latents = 1 / 0.18215 * latents - # image = self.vae_decoder(latent_sample=latents)[0] - # it seems likes there is a strange result for using half-precision vae decoder if batchsize>1 - image = np.concatenate( - [self.vae_decoder(latent_sample=latents[i : i + 1])[0] for i in range(latents.shape[0])] - ) - - image = np.clip(image / 2 + 0.5, 0, 1) - image = image.transpose((0, 2, 3, 1)) - - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor( - self.numpy_to_pil(image), return_tensors="np" - ).pixel_values.astype(image.dtype) - # safety_checker does not support batched inputs yet - images, has_nsfw_concept = [], [] - for i in range(image.shape[0]): - image_i, has_nsfw_concept_i = self.safety_checker( - clip_input=safety_checker_input[i : i + 1], images=image[i : i + 1] - ) - images.append(image_i) - 
has_nsfw_concept.append(has_nsfw_concept_i[0]) - image = np.concatenate(images) - else: - has_nsfw_concept = None - - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/YuAnthony/Audio-Caption/data_handling/.ipynb_checkpoints/collate_fn_test-checkpoint.py b/spaces/YuAnthony/Audio-Caption/data_handling/.ipynb_checkpoints/collate_fn_test-checkpoint.py deleted file mode 100644 index b0891eeadde635953497663f310214e48878612f..0000000000000000000000000000000000000000 --- a/spaces/YuAnthony/Audio-Caption/data_handling/.ipynb_checkpoints/collate_fn_test-checkpoint.py +++ /dev/null @@ -1,30 +0,0 @@ -from torch import cat as pt_cat, zeros as pt_zeros, from_numpy, Tensor -def clotho_collate_fn_test(batch, nb_t_steps, input_pad_at): - if type(nb_t_steps) == str: - truncate_fn = max if nb_t_steps.lower() == 'max' else min - in_t_steps = truncate_fn([i[0].shape[0] for i in batch]) - else: - in_t_steps = nb_t_steps - - in_dim = batch[0][0].shape[-1] - - input_tensor = [] - - for in_b, filename in batch: - if in_t_steps >= in_b.shape[0]: - padding = pt_zeros(in_t_steps - in_b.shape[0], in_dim).float() - data = [from_numpy(in_b).float()] - if input_pad_at.lower() == 'start': - data.insert(0, padding) - else: - data.append(padding) - tmp_in: Tensor = pt_cat(data) - else: - tmp_in: Tensor = from_numpy(in_b[:in_t_steps, :]).float() - input_tensor.append(tmp_in.unsqueeze_(0)) - - input_tensor = pt_cat(input_tensor) - - filename = [i[1] for i in batch] - - return input_tensor, filename \ No newline at end of file diff --git a/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/references/prepare-datahub.md b/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/references/prepare-datahub.md deleted file mode 100644 index b9b7f477f0ceea58721426a15de890d8d3edce50..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/api/tutorials/references/prepare-datahub.md +++ /dev/null @@ -1,39 +0,0 @@ -# Preparing Your Local DataHub Environment - -## Deploy DataHub Quickstart - -You'll need a local instance of DataHub running for this tutorial: -- Follow the [DataHub Quickstart Guide](/docs/quickstart.md) to get one up and running. -```shell -python3 -m pip install --upgrade pip wheel setuptools -python3 -m pip install --upgrade acryl-datahub -``` -If you can see datahub version like this, you're good to go. -```shell -$ datahub version -DataHub CLI version: 0.10.0.1 -Python version: 3.9.6 (default, Jun 16 2022, 21:38:53) -[Clang 13.0.0 (clang-1300.0.27.3)] -``` - -Run datahub quickstart. This will deploy local datahub server to http://localhost:9002 -```shell -datahub docker quickstart -``` -After logging in with the default credential(`username: datahub / password: datahub`), you can see DataHub ready for you. - -![datahub-main-ui](../../../imgs/apis/tutorials/datahub-main-ui.png) - -Please refer to [DataHub Quickstart Guide](/docs/quickstart.md) for more information. - -## Ingest Sample Data -We will use sample data provided with datahub quickstart. -If you already have data on your datahub, you might skip this part. - -```shell -datahub docker ingest-sample-data -``` -This will ingest various entities like datasets, terms and tags to your local DataHub. -![datahub-main-ui](../../../imgs/apis/tutorials/sample-ingestion.png) - -Now you're ready to start! 
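Before moving on, it can be useful to confirm that the quickstart instance is reachable programmatically as well as through the UI. The snippet below is an optional, illustrative check only: it assumes the `acryl-datahub` package's REST emitter API (`DatahubRestEmitter` and its `test_connection` method) and the default quickstart GMS address `http://localhost:8080`; verify both against the CLI/SDK version you installed above.

```python
from datahub.emitter.rest_emitter import DatahubRestEmitter

# Default GMS endpoint for the local quickstart deployment
# (the web UI itself is served separately on http://localhost:9002).
emitter = DatahubRestEmitter(gms_server="http://localhost:8080")

# Raises an exception if the server cannot be reached or is incompatible.
emitter.test_connection()
print("Local DataHub instance is reachable.")
```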
\ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/furthest_point_sample.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/furthest_point_sample.py deleted file mode 100644 index 374b7a878f1972c183941af28ba1df216ac1a60f..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmcv/ops/furthest_point_sample.py +++ /dev/null @@ -1,83 +0,0 @@ -import torch -from torch.autograd import Function - -from ..utils import ext_loader - -ext_module = ext_loader.load_ext('_ext', [ - 'furthest_point_sampling_forward', - 'furthest_point_sampling_with_dist_forward' -]) - - -class FurthestPointSampling(Function): - """Uses iterative furthest point sampling to select a set of features whose - corresponding points have the furthest distance.""" - - @staticmethod - def forward(ctx, points_xyz: torch.Tensor, - num_points: int) -> torch.Tensor: - """ - Args: - points_xyz (Tensor): (B, N, 3) where N > num_points. - num_points (int): Number of points in the sampled set. - - Returns: - Tensor: (B, num_points) indices of the sampled points. - """ - assert points_xyz.is_contiguous() - - B, N = points_xyz.size()[:2] - output = torch.cuda.IntTensor(B, num_points) - temp = torch.cuda.FloatTensor(B, N).fill_(1e10) - - ext_module.furthest_point_sampling_forward( - points_xyz, - temp, - output, - b=B, - n=N, - m=num_points, - ) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(output) - return output - - @staticmethod - def backward(xyz, a=None): - return None, None - - -class FurthestPointSamplingWithDist(Function): - """Uses iterative furthest point sampling to select a set of features whose - corresponding points have the furthest distance.""" - - @staticmethod - def forward(ctx, points_dist: torch.Tensor, - num_points: int) -> torch.Tensor: - """ - Args: - points_dist (Tensor): (B, N, N) Distance between each point pair. - num_points (int): Number of points in the sampled set. - - Returns: - Tensor: (B, num_points) indices of the sampled points. 
- """ - assert points_dist.is_contiguous() - - B, N, _ = points_dist.size() - output = points_dist.new_zeros([B, num_points], dtype=torch.int32) - temp = points_dist.new_zeros([B, N]).fill_(1e10) - - ext_module.furthest_point_sampling_with_dist_forward( - points_dist, temp, output, b=B, n=N, m=num_points) - if torch.__version__ != 'parrots': - ctx.mark_non_differentiable(output) - return output - - @staticmethod - def backward(xyz, a=None): - return None, None - - -furthest_point_sample = FurthestPointSampling.apply -furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply diff --git a/spaces/abrar-lohia/text-2-character-anim/VQTrans/checkpoints/train_vq.py b/spaces/abrar-lohia/text-2-character-anim/VQTrans/checkpoints/train_vq.py deleted file mode 100644 index d89b9930ba1262747542df3d5b2f03f8fab1b04a..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/VQTrans/checkpoints/train_vq.py +++ /dev/null @@ -1,171 +0,0 @@ -import os -import json - -import torch -import torch.optim as optim -from torch.utils.tensorboard import SummaryWriter - -import models.vqvae as vqvae -import utils.losses as losses -import options.option_vq as option_vq -import utils.utils_model as utils_model -from dataset import dataset_VQ, dataset_TM_eval -import utils.eval_trans as eval_trans -from options.get_eval_option import get_opt -from models.evaluator_wrapper import EvaluatorModelWrapper -import warnings -warnings.filterwarnings('ignore') -from utils.word_vectorizer import WordVectorizer - -def update_lr_warm_up(optimizer, nb_iter, warm_up_iter, lr): - - current_lr = lr * (nb_iter + 1) / (warm_up_iter + 1) - for param_group in optimizer.param_groups: - param_group["lr"] = current_lr - - return optimizer, current_lr - -##### ---- Exp dirs ---- ##### -args = option_vq.get_args_parser() -torch.manual_seed(args.seed) - -args.out_dir = os.path.join(args.out_dir, f'{args.exp_name}') -os.makedirs(args.out_dir, exist_ok = True) - -##### ---- Logger ---- ##### -logger = utils_model.get_logger(args.out_dir) -writer = SummaryWriter(args.out_dir) -logger.info(json.dumps(vars(args), indent=4, sort_keys=True)) - - - -w_vectorizer = WordVectorizer('./glove', 'our_vab') - -if args.dataname == 'kit' : - dataset_opt_path = 'checkpoints/kit/Comp_v6_KLD005/opt.txt' - args.nb_joints = 21 - -else : - dataset_opt_path = 'checkpoints/t2m/Comp_v6_KLD005/opt.txt' - args.nb_joints = 22 - -logger.info(f'Training on {args.dataname}, motions are with {args.nb_joints} joints') - -wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) -eval_wrapper = EvaluatorModelWrapper(wrapper_opt) - - -##### ---- Dataloader ---- ##### -train_loader = dataset_VQ.DATALoader(args.dataname, - args.batch_size, - window_size=args.window_size, - unit_length=2**args.down_t) - -train_loader_iter = dataset_VQ.cycle(train_loader) - -val_loader = dataset_TM_eval.DATALoader(args.dataname, False, - 32, - w_vectorizer, - unit_length=2**args.down_t) - -##### ---- Network ---- ##### -net = vqvae.HumanVQVAE(args, ## use args to define different parameters in different quantizers - args.nb_code, - args.code_dim, - args.output_emb_width, - args.down_t, - args.stride_t, - args.width, - args.depth, - args.dilation_growth_rate, - args.vq_act, - args.vq_norm) - - -if args.resume_pth : - logger.info('loading checkpoint from {}'.format(args.resume_pth)) - ckpt = torch.load(args.resume_pth, map_location='cpu') - net.load_state_dict(ckpt['net'], strict=True) -net.train() -net.cuda() - -##### ---- Optimizer & Scheduler ---- ##### 
-optimizer = optim.AdamW(net.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.weight_decay) -scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_scheduler, gamma=args.gamma) - - -Loss = losses.ReConsLoss(args.recons_loss, args.nb_joints) - -##### ------ warm-up ------- ##### -avg_recons, avg_perplexity, avg_commit = 0., 0., 0. - -for nb_iter in range(1, args.warm_up_iter): - - optimizer, current_lr = update_lr_warm_up(optimizer, nb_iter, args.warm_up_iter, args.lr) - - gt_motion = next(train_loader_iter) - gt_motion = gt_motion.cuda().float() # (bs, 64, dim) - - pred_motion, loss_commit, perplexity = net(gt_motion) - loss_motion = Loss(pred_motion, gt_motion) - loss_vel = Loss.forward_vel(pred_motion, gt_motion) - - loss = loss_motion + args.commit * loss_commit + args.loss_vel * loss_vel - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - avg_recons += loss_motion.item() - avg_perplexity += perplexity.item() - avg_commit += loss_commit.item() - - if nb_iter % args.print_iter == 0 : - avg_recons /= args.print_iter - avg_perplexity /= args.print_iter - avg_commit /= args.print_iter - - logger.info(f"Warmup. Iter {nb_iter} : lr {current_lr:.5f} \t Commit. {avg_commit:.5f} \t PPL. {avg_perplexity:.2f} \t Recons. {avg_recons:.5f}") - - avg_recons, avg_perplexity, avg_commit = 0., 0., 0. - -##### ---- Training ---- ##### -avg_recons, avg_perplexity, avg_commit = 0., 0., 0. -best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger = eval_trans.evaluation_vqvae(args.out_dir, val_loader, net, logger, writer, 0, best_fid=1000, best_iter=0, best_div=100, best_top1=0, best_top2=0, best_top3=0, best_matching=100, eval_wrapper=eval_wrapper) - -for nb_iter in range(1, args.total_iter + 1): - - gt_motion = next(train_loader_iter) - gt_motion = gt_motion.cuda().float() # bs, nb_joints, joints_dim, seq_len - - pred_motion, loss_commit, perplexity = net(gt_motion) - loss_motion = Loss(pred_motion, gt_motion) - loss_vel = Loss.forward_vel(pred_motion, gt_motion) - - loss = loss_motion + args.commit * loss_commit + args.loss_vel * loss_vel - - optimizer.zero_grad() - loss.backward() - optimizer.step() - scheduler.step() - - avg_recons += loss_motion.item() - avg_perplexity += perplexity.item() - avg_commit += loss_commit.item() - - if nb_iter % args.print_iter == 0 : - avg_recons /= args.print_iter - avg_perplexity /= args.print_iter - avg_commit /= args.print_iter - - writer.add_scalar('./Train/L1', avg_recons, nb_iter) - writer.add_scalar('./Train/PPL', avg_perplexity, nb_iter) - writer.add_scalar('./Train/Commit', avg_commit, nb_iter) - - logger.info(f"Train. Iter {nb_iter} : \t Commit. {avg_commit:.5f} \t PPL. {avg_perplexity:.2f} \t Recons. 
{avg_recons:.5f}") - - avg_recons, avg_perplexity, avg_commit = 0., 0., 0., - - if nb_iter % args.eval_iter==0 : - best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger = eval_trans.evaluation_vqvae(args.out_dir, val_loader, net, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, eval_wrapper=eval_wrapper) - \ No newline at end of file diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/clock.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/clock.py deleted file mode 100644 index f05cf948d2c517f34a356c6568e3951fb91a06b9..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/clock.py +++ /dev/null @@ -1,642 +0,0 @@ -"""Precise framerate calculation function scheduling. - -The :py:mod:`~pyglet.clock` module allows you to schedule functions -to run periodically, or for one-shot future execution. pyglet's default -event loop (:py:func:`~pyglet.app.run`) keeps an internal instance of -a :py:class:`~pyglet.clock.Clock`, which is ticked automatically. - -..note:: Some internal modules will schedule items on the clock. If you - are using a custom event loop, always remember to `tick` the clock! - -Scheduling -========== - -You can schedule a function to be called every time the clock is ticked:: - - def callback(dt): - print(f"{dt} seconds since last callback") - - clock.schedule(callback) - -The `schedule_interval` method causes a function to be called every "n" -seconds:: - - clock.schedule_interval(callback, 0.5) # called twice a second - -The `schedule_once` method causes a function to be called once "n" seconds -in the future:: - - clock.schedule_once(callback, 5) # called in 5 seconds - -All the `schedule` methods will pass on any additional args or keyword args -you specify to the callback function:: - - def move(dt, velocity, sprite): - sprite.position += dt * velocity - - clock.schedule(move, velocity=5.0, sprite=alien) - -You can cancel a function scheduled with any of these methods using -`unschedule`:: - - clock.unschedule(move) - -Using multiple clocks -===================== - -The clock functions are all relayed to an instance of -:py:class:`~pyglet.clock.Clock` which is initialised with the module. You can -get this instance to use directly:: - - clk = pyglet.clock.get_default() - -You can also replace the default clock with your own: - - myclk = pyglet.clock.Clock() - pyglet.clock.set_default(myclk) - -Each clock maintains its own set of scheduled functions and frequency -measurement. Each clock must be "ticked" separately. - -Multiple and derived clocks potentially allow you to separate "game-time" and -"wall-time", or to synchronise your clock to an audio or video stream instead -of the system clock. 
-""" - -import time as _time - -from typing import Callable -from heapq import heappop as _heappop -from heapq import heappush as _heappush -from heapq import heappushpop as _heappushpop -from operator import attrgetter as _attrgetter -from collections import deque as _deque - - -class _ScheduledItem: - __slots__ = ['func', 'args', 'kwargs'] - - def __init__(self, func, args, kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - -class _ScheduledIntervalItem: - __slots__ = ['func', 'interval', 'last_ts', 'next_ts', 'args', 'kwargs'] - - def __init__(self, func, interval, last_ts, next_ts, args, kwargs): - self.func = func - self.interval = interval - self.last_ts = last_ts - self.next_ts = next_ts - self.args = args - self.kwargs = kwargs - - def __lt__(self, other): - try: - return self.next_ts < other.next_ts - except AttributeError: - return self.next_ts < other - - -class Clock: - """Class for calculating and limiting framerate. - - It is also used for calling scheduled functions. - """ - # List of functions to call every tick. - _schedule_items = None - - # List of schedule interval items kept in sort order. - _schedule_interval_items = None - - # If True, a sleep(0) is inserted on every tick. - _force_sleep = False - - def __init__(self, time_function=_time.perf_counter): - """Initialise a Clock, with optional custom time function. - - You can provide a custom time function to return the elapsed - time of the application, in seconds. Defaults to time.perf_counter, - but can be replaced to allow for easy time dilation effects or game - pausing. - """ - self.time = time_function - self.next_ts = self.time() - self.last_ts = None - - # Used by self.get_frequency to show update frequency - self.times = _deque() - self.cumulative_time = 0 - self.window_size = 60 - - self._schedule_items = [] - self._schedule_interval_items = [] - self._current_interval_item = None - - @staticmethod - def sleep(microseconds): - _time.sleep(microseconds * 1e-6) - - def update_time(self): - """Get the elapsed time since the last call to `update_time`. - - This updates the clock's internal measure of time and returns - the difference since the last update (or since the clock was created). - - .. versionadded:: 1.2 - - :rtype: float - :return: The number of seconds since the last `update_time`, or 0 - if this was the first time it was called. - """ - ts = self.time() - if self.last_ts is None: - delta_t = 0 - else: - delta_t = ts - self.last_ts - self.times.appendleft(delta_t) - if len(self.times) > self.window_size: - self.cumulative_time -= self.times.pop() - self.cumulative_time += delta_t - self.last_ts = ts - - return delta_t - - def call_scheduled_functions(self, dt): - """Call scheduled functions that elapsed on the last `update_time`. - - .. versionadded:: 1.2 - - :Parameters: - dt : float - The elapsed time since the last update to pass to each - scheduled function. This is *not* used to calculate which - functions have elapsed. - - :rtype: bool - :return: True if any functions were called, otherwise False. 
- """ - now = self.last_ts - result = False # flag indicates if any function was called - - # handle items scheduled for every tick - if self._schedule_items: - result = True - # duplicate list in case event unschedules itself - for item in list(self._schedule_items): - item.func(dt, *item.args, **item.kwargs) - - # check the next scheduled item that is not called each tick - # if it is scheduled in the future, then exit - interval_items = self._schedule_interval_items - try: - if interval_items[0].next_ts > now: - return result - - # raised when the interval_items list is empty - except IndexError: - return result - - # NOTE: there is no special handling required to manage things - # that are scheduled during this loop, due to the heap - self._current_interval_item = item = None - get_soft_next_ts = self._get_soft_next_ts - while interval_items: - - # the scheduler will hold onto a reference to an item in - # case it needs to be rescheduled. it is more efficient - # to push and pop the heap at once rather than two operations - if item is None: - item = _heappop(interval_items) - else: - item = _heappushpop(interval_items, item) - - # a scheduled function may try to unschedule itself, - # so we need to keep a reference to the current - # item no longer on heap to be able to check - self._current_interval_item = item - - # if next item is scheduled in the future then break - if item.next_ts > now: - break - - # execute the callback - try: - item.func(now - item.last_ts, *item.args, **item.kwargs) - except ReferenceError: - pass # weakly-referenced object no longer exists. - - if item.interval: - - # Try to keep timing regular, even if overslept this time; - # but don't schedule in the past (which could lead to - # infinitely-worsening error). - item.next_ts = item.last_ts + item.interval - item.last_ts = now - - # test the schedule for the next execution - if item.next_ts <= now: - # the scheduled time of this item has already - # passed, so it must be rescheduled - if now - item.next_ts < 0.05: - # missed execution time by 'reasonable' amount, so - # reschedule at normal interval - item.next_ts = now + item.interval - else: - # missed by significant amount, now many events have - # likely missed execution. do a soft re-schedule to - # avoid lumping many events together. - # in this case, the next dt will not be accurate - item.next_ts = get_soft_next_ts(now, item.interval) - item.last_ts = item.next_ts - item.interval - else: - # not an interval, so this item will not be rescheduled - self._current_interval_item = item = None - - if item is not None: - _heappush(interval_items, item) - - return True - - def tick(self, poll=False): - """Signify that one frame has passed. - - This will call any scheduled functions that have elapsed. - - :Parameters: - `poll` : bool - If True, the function will call any scheduled functions - but will not sleep or busy-wait for any reason. Recommended - for advanced applications managing their own sleep timers - only. - - Since pyglet 1.1. - - :rtype: float - :return: The number of seconds since the last "tick", or 0 if this was - the first frame. - """ - if not poll and self._force_sleep: - self.sleep(0) - - delta_t = self.update_time() - self.call_scheduled_functions(delta_t) - return delta_t - - def get_sleep_time(self, sleep_idle): - """Get the time until the next item is scheduled. 
- - Applications can choose to continue receiving updates at the - maximum framerate during idle time (when no functions are scheduled), - or they can sleep through their idle time and allow the CPU to - switch to other processes or run in low-power mode. - - If `sleep_idle` is ``True`` the latter behaviour is selected, and - ``None`` will be returned if there are no scheduled items. - - Otherwise, if `sleep_idle` is ``False``, or if any scheduled items - exist, a value of 0 is returned. - - :Parameters: - `sleep_idle` : bool - If True, the application intends to sleep through its idle - time; otherwise it will continue ticking at the maximum - frame rate allowed. - - :rtype: float - :return: Time until the next scheduled event in seconds, or ``None`` - if there is no event scheduled. - - .. versionadded:: 1.1 - """ - if self._schedule_items or not sleep_idle: - return 0.0 - - if self._schedule_interval_items: - return max(self._schedule_interval_items[0].next_ts - self.time(), 0.0) - - return None - - def get_frequency(self): - """Get the average clock update frequency of recent history. - - The result is the average of a sliding window of the last "n" updates, - where "n" is some number designed to cover approximately 1 second. - This is **not** the Window redraw rate. - - :rtype: float - :return: The measured updates per second. - """ - if not self.cumulative_time: - return 0 - return len(self.times) / self.cumulative_time - - def _get_nearest_ts(self): - """Get the nearest timestamp. - - Schedule from now, unless now is sufficiently close to last_ts, in - which case use last_ts. This clusters together scheduled items that - probably want to be scheduled together. The old (pre 1.1.1) - behaviour was to always use self.last_ts, and not look at ts. The - new behaviour is needed because clock ticks can now be quite - irregular, and span several seconds. - """ - last_ts = self.last_ts or self.next_ts - ts = self.time() - if ts - last_ts > 0.2: - return ts - return last_ts - - def _get_soft_next_ts(self, last_ts, interval): - - def taken(ts, e): - """Check if `ts` has already got an item scheduled nearby.""" - # TODO this function is slow and called very often. - # Optimise it, maybe? - for item in self._schedule_interval_items: - if abs(item.next_ts - ts) <= e: - return True - elif item.next_ts > ts + e: - return False - - return False - - # sorted list is required to produce expected results - # taken() will iterate through the heap, expecting it to be sorted - # and will not always catch the smallest value, so sort here. - # do not remove the sort key...it is faster than relaying comparisons - # NOTE: do not rewrite as popping from heap, as that is super slow! - self._schedule_interval_items.sort(key=_attrgetter('next_ts')) - - # Binary division over interval: - # - # 0 interval - # |--------------------------| - # 5 3 6 2 7 4 8 1 Order of search - # - # i.e., first scheduled at interval, - # then at interval/2 - # then at interval/4 - # then at interval*3/4 - # then at ... - # - # Schedule is hopefully then evenly distributed for any interval, - # and any number of scheduled functions. 
- - next_ts = last_ts + interval - if not taken(next_ts, interval / 4): - return next_ts - - dt = interval - divs = 1 - while True: - next_ts = last_ts - for i in range(divs - 1): - next_ts += dt - if not taken(next_ts, dt / 4): - return next_ts - dt /= 2 - divs *= 2 - - # Avoid infinite loop in pathological case - if divs > 16: - return next_ts - - def schedule(self, func, *args, **kwargs): - """Schedule a function to be called every frame. - - The function should have a prototype that includes ``dt`` as the - first argument, which gives the elapsed time, in seconds, since the - last clock tick. Any additional arguments given to this function - are passed on to the callback:: - - def callback(dt, *args, **kwargs): - pass - - :Parameters: - `func` : callable - The function to call each frame. - """ - item = _ScheduledItem(func, args, kwargs) - self._schedule_items.append(item) - - def schedule_once(self, func, delay, *args, **kwargs): - """Schedule a function to be called once after `delay` seconds. - - The callback function prototype is the same as for `schedule`. - - :Parameters: - `func` : callable - The function to call when the timer lapses. - `delay` : float - The number of seconds to wait before the timer lapses. - """ - last_ts = self._get_nearest_ts() - next_ts = last_ts + delay - item = _ScheduledIntervalItem(func, 0, last_ts, next_ts, args, kwargs) - _heappush(self._schedule_interval_items, item) - - def schedule_interval(self, func, interval, *args, **kwargs): - """Schedule a function to be called every `interval` seconds. - - Specifying an interval of 0 prevents the function from being - called again (see `schedule` to call a function as often as possible). - - The callback function prototype is the same as for `schedule`. - - :Parameters: - `func` : callable - The function to call when the timer lapses. - `interval` : float - The number of seconds to wait between each call. - - """ - last_ts = self._get_nearest_ts() - next_ts = last_ts + interval - item = _ScheduledIntervalItem(func, interval, last_ts, next_ts, args, kwargs) - _heappush(self._schedule_interval_items, item) - - def schedule_interval_soft(self, func, interval, *args, **kwargs): - """Schedule a function to be called every ``interval`` seconds. - - This method is similar to `schedule_interval`, except that the - clock will move the interval out of phase with other scheduled - functions in order to distribute CPU load more evenly. - - This is useful for functions that need to be called regularly, - but not relative to the initial start time. :py:mod:`pyglet.media` - does this for scheduling audio buffer updates, which need to occur - regularly -- if all audio updates are scheduled at the same time - (for example, mixing several tracks of a music score, or playing - multiple videos back simultaneously), the resulting load on the - CPU is excessive for those intervals but idle outside. Using - the soft interval scheduling, the load is more evenly distributed. - - Soft interval scheduling can also be used as an easy way to schedule - graphics animations out of phase; for example, multiple flags - waving in the wind. - - .. versionadded:: 1.1 - - :Parameters: - `func` : callable - The function to call when the timer lapses. - `interval` : float - The number of seconds to wait between each call. 
- - """ - next_ts = self._get_soft_next_ts(self._get_nearest_ts(), interval) - last_ts = next_ts - interval - item = _ScheduledIntervalItem(func, interval, last_ts, next_ts, args, kwargs) - _heappush(self._schedule_interval_items, item) - - def unschedule(self, func): - """Remove a function from the schedule. - - If the function appears in the schedule more than once, all occurrences - are removed. If the function was not scheduled, no error is raised. - - :Parameters: - `func` : callable - The function to remove from the schedule. - - """ - # clever remove item without disturbing the heap: - # 1. set function to an empty lambda -- original function is not called - # 2. set interval to 0 -- item will be removed from heap eventually - valid_items = set(item for item in self._schedule_interval_items if item.func == func) - - if self._current_interval_item: - if self._current_interval_item.func == func: - valid_items.add(self._current_interval_item) - - for item in valid_items: - item.interval = 0 - item.func = lambda x, *args, **kwargs: x - - self._schedule_items = [i for i in self._schedule_items if i.func != func] - - -# Default clock. -_default = Clock() - - -def set_default(default) -> None: - """Set the default clock to use for all module-level functions. - - By default, an instance of :py:class:`~pyglet.clock.Clock` is used. - """ - global _default - _default = default - - -def get_default(): - """Get the pyglet default Clock. - - Return the :py:class:`~pyglet.clock.Clock` instance that is used by all - module-level clock functions. - """ - return _default - - -def tick(poll: bool = False) -> float: - """Signify that one frame has passed on the default clock. - - This will call any scheduled functions that have elapsed, - and return the elapsed seconds since the last tick. The - return value will be 0.0 if this is the first tick. - - :Parameters: - `poll` : bool - If True, the function will call any scheduled functions - but will not sleep or busy-wait for any reason. Recommended - for advanced applications managing their own sleep timers - only. - - Since pyglet 1.1. - """ - return _default.tick(poll) - - -def get_sleep_time(sleep_idle: bool) -> float: - """Get the time until the next item is scheduled on the default clock. - - Returns the time until the next scheduled event in seconds, or - ``None`` if there is no event scheduled. - - See `Clock.get_sleep_time` for details. - - :Parameters: - `sleep_idle` : bool - If True, the application intends to sleep through its idle - time; otherwise it will continue ticking at the maximum - frame rate allowed. - """ - return _default.get_sleep_time(sleep_idle) - - -def get_frequency() -> float: - """Get the average clock update frequency. - - The result is the sliding average of the last "n" updates, - where "n" is some number designed to cover approximately 1 - second. This is the internal clock update rate, **not** the - Window redraw rate. Platform events, such as moving the - mouse rapidly, will cause the clock to refresh more often. - """ - return _default.get_frequency() - - -def schedule(func: Callable, *args, **kwargs) -> None: - """Schedule 'func' to be called every frame on the default clock. - - The arguments passed to func are ``dt``, followed by any ``*args`` and - ``**kwargs`` given here. - """ - _default.schedule(func, *args, **kwargs) - - -def schedule_interval(func: Callable, interval: float, *args, **kwargs) -> None: - """Schedule ``func`` on the default clock every ``interval`` seconds. 
- - The arguments passed to ``func`` are ``dt`` (time since last function - call), followed by any ``*args`` and ``**kwargs`` given here. - """ - _default.schedule_interval(func, interval, *args, **kwargs) - - -def schedule_interval_soft(func: Callable, interval: float, *args, **kwargs) -> None: - """Schedule ``func`` on the default clock every interval seconds. - - The clock will move the interval out of phase with other scheduled - functions in order to distribute CPU load more evenly. - - The arguments passed to ``func`` are ``dt`` (time since last function - call), followed by any ``*args`` and ``**kwargs`` given here. - - :see: `Clock.schedule_interval_soft` - """ - _default.schedule_interval_soft(func, interval, *args, **kwargs) - - -def schedule_once(func: Callable, delay: float, *args, **kwargs) -> None: - """Schedule ``func`` to be called once after ``delay`` seconds. - - This function uses the default clock. ``delay`` can be a float. The - arguments passed to ``func`` are ``dt`` (time since last function call), - followed by any ``*args`` and ``**kwargs`` given here. - - If no default clock is set, the func is queued and will be scheduled - on the default clock as soon as it is created. - """ - _default.schedule_once(func, delay, *args, **kwargs) - - -def unschedule(func: Callable) -> None: - """Remove ``func`` from the default clock's schedule. - - No error is raised if the ``func`` was never scheduled. - """ - _default.unschedule(func) diff --git a/spaces/adlozano1/gibberish_detector/README.md b/spaces/adlozano1/gibberish_detector/README.md deleted file mode 100644 index 16f0d17e7c828732da33ebae276878b71f929f7c..0000000000000000000000000000000000000000 --- a/spaces/adlozano1/gibberish_detector/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Gibberish_detector -emoji: 🔥 -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 2.8.12 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference - -A gibberish detection program based on https://github.com/rrenaud/Gibberish-Detector deployed in Gradio. 
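For context on the pattern this Space describes (a scoring function wrapped in a Gradio `Interface`), a minimal sketch could look like the following. The `is_gibberish` function and its placeholder score are hypothetical stand-ins for the Markov-chain scorer from the linked Gibberish-Detector repository, not the Space's actual code.

```python
import gradio as gr

def is_gibberish(text: str) -> dict:
    # Hypothetical stand-in: a real implementation would load the trained
    # character-transition matrix from the Gibberish-Detector project and
    # compare the input's average log-probability against a learned threshold.
    score = 0.5  # placeholder confidence that the text is gibberish
    return {"gibberish": score, "not gibberish": 1.0 - score}

demo = gr.Interface(fn=is_gibberish, inputs="text", outputs="label")
demo.launch()
```

Returning a dict of label-to-confidence values lets the `label` output component render ranked confidences directly.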
diff --git a/spaces/ahmedxeno/brain_tumor_vs_normal_classification/app.py b/spaces/ahmedxeno/brain_tumor_vs_normal_classification/app.py deleted file mode 100644 index 78b051e415575baef57f2e4986fefbf4d01389fa..0000000000000000000000000000000000000000 --- a/spaces/ahmedxeno/brain_tumor_vs_normal_classification/app.py +++ /dev/null @@ -1,36 +0,0 @@ - -import gradio as gr -import tensorflow as tf -import tensorflow.keras -import matplotlib.pyplot as plt -import cv2 -import tensorflow_io as tfio -import numpy as np - -loaded_model = tf.keras.models.load_model( 'brain1.h5') - -def take_img(img): - - resize = tf.image.resize(img, (128,128)) - gray = tfio.experimental.color.bgr_to_rgb(resize) - yhat = loaded_model.predict(np.expand_dims(gray/255, 0)) - label_names = { - "1": "Tumor", - "2": "Normal"} - classes_x=np.argmax(yhat,axis=1) - a = classes_x[0] - input_value = a + 1 - input_str = str(input_value) - predicted_label = label_names[input_str] - tumor = yhat[0][0] - tumor = str(tumor) - normal = yhat[0][1] - normal = str(normal) - return {'Tumour': tumor, 'Normal':normal} - - - -image = gr.inputs.Image(shape=(128,128)) - -label = gr.outputs.Label('ok') -gr.Interface(fn=take_img, inputs=image, outputs="label",interpretation='default').launch(debug='True') diff --git a/spaces/akdeniz27/contract-understanding-atticus-dataset-demo/README.md b/spaces/akdeniz27/contract-understanding-atticus-dataset-demo/README.md deleted file mode 100644 index 03003fe5d80504b497d7a88ee29bf8deaf3c6322..0000000000000000000000000000000000000000 --- a/spaces/akdeniz27/contract-understanding-atticus-dataset-demo/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Contract Understanding Atticus Dataset (CUAD) Demo -emoji: 💻 -colorFrom: red -colorTo: purple -sdk: streamlit -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/grid_sample_gradfix.py b/spaces/akhaliq/stylegan3_clip/torch_utils/ops/grid_sample_gradfix.py deleted file mode 100644 index 269ffe81b04a8b11b4a8dea1913ae876b0ac4d30..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/torch_utils/ops/grid_sample_gradfix.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -"""Custom replacement for `torch.nn.functional.grid_sample` that -supports arbitrarily high order gradients between the input and output. -Only works on 2D images and assumes -`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`.""" - -import torch - -# pylint: disable=redefined-builtin -# pylint: disable=arguments-differ -# pylint: disable=protected-access - -#---------------------------------------------------------------------------- - -enabled = False # Enable the custom op by setting this to true. - -#---------------------------------------------------------------------------- - -def grid_sample(input, grid): - if _should_use_custom_op(): - return _GridSample2dForward.apply(input, grid) - return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - -#---------------------------------------------------------------------------- - -def _should_use_custom_op(): - return enabled - -#---------------------------------------------------------------------------- - -class _GridSample2dForward(torch.autograd.Function): - @staticmethod - def forward(ctx, input, grid): - assert input.ndim == 4 - assert grid.ndim == 4 - output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False) - ctx.save_for_backward(input, grid) - return output - - @staticmethod - def backward(ctx, grad_output): - input, grid = ctx.saved_tensors - grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid) - return grad_input, grad_grid - -#---------------------------------------------------------------------------- - -class _GridSample2dBackward(torch.autograd.Function): - @staticmethod - def forward(ctx, grad_output, input, grid): - op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward') - grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False) - ctx.save_for_backward(grid) - return grad_input, grad_grid - - @staticmethod - def backward(ctx, grad2_grad_input, grad2_grad_grid): - _ = grad2_grad_grid # unused - grid, = ctx.saved_tensors - grad2_grad_output = None - grad2_input = None - grad2_grid = None - - if ctx.needs_input_grad[0]: - grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid) - - assert not ctx.needs_input_grad[2] - return grad2_grad_output, grad2_input, grad2_grid - -#---------------------------------------------------------------------------- diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py deleted file mode 100644 index a1c99a8cb301f222feb1845be4e80d9b1f9d2622..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/cli/parser.py +++ /dev/null @@ -1,292 +0,0 @@ -"""Base option parser setup""" - -import logging -import optparse -import shutil -import sys -import textwrap -from contextlib import suppress -from typing import Any, Dict, Iterator, List, Tuple - -from pip._internal.cli.status_codes import UNKNOWN_ERROR -from pip._internal.configuration import Configuration, ConfigurationError -from pip._internal.utils.misc import redact_auth_from_url, strtobool - -logger = logging.getLogger(__name__) - - -class PrettyHelpFormatter(optparse.IndentedHelpFormatter): - """A prettier/less verbose help formatter for optparse.""" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - # help position must be 
aligned with __init__.parseopts.description - kwargs["max_help_position"] = 30 - kwargs["indent_increment"] = 1 - kwargs["width"] = shutil.get_terminal_size()[0] - 2 - super().__init__(*args, **kwargs) - - def format_option_strings(self, option: optparse.Option) -> str: - return self._format_option_strings(option) - - def _format_option_strings( - self, option: optparse.Option, mvarfmt: str = " <{}>", optsep: str = ", " - ) -> str: - """ - Return a comma-separated list of option strings and metavars. - - :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') - :param mvarfmt: metavar format string - :param optsep: separator - """ - opts = [] - - if option._short_opts: - opts.append(option._short_opts[0]) - if option._long_opts: - opts.append(option._long_opts[0]) - if len(opts) > 1: - opts.insert(1, optsep) - - if option.takes_value(): - assert option.dest is not None - metavar = option.metavar or option.dest.lower() - opts.append(mvarfmt.format(metavar.lower())) - - return "".join(opts) - - def format_heading(self, heading: str) -> str: - if heading == "Options": - return "" - return heading + ":\n" - - def format_usage(self, usage: str) -> str: - """ - Ensure there is only one newline between usage and the first heading - if there is no description. - """ - msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " ")) - return msg - - def format_description(self, description: str) -> str: - # leave full control over description to us - if description: - if hasattr(self.parser, "main"): - label = "Commands" - else: - label = "Description" - # some doc strings have initial newlines, some don't - description = description.lstrip("\n") - # some doc strings have final newlines and spaces, some don't - description = description.rstrip() - # dedent, then reindent - description = self.indent_lines(textwrap.dedent(description), " ") - description = f"{label}:\n{description}\n" - return description - else: - return "" - - def format_epilog(self, epilog: str) -> str: - # leave full control over epilog to us - if epilog: - return epilog - else: - return "" - - def indent_lines(self, text: str, indent: str) -> str: - new_lines = [indent + line for line in text.split("\n")] - return "\n".join(new_lines) - - -class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): - """Custom help formatter for use in ConfigOptionParser. - - This is updates the defaults before expanding them, allowing - them to show up correctly in the help listing. 
- - Also redact auth from url type options - """ - - def expand_default(self, option: optparse.Option) -> str: - default_values = None - if self.parser is not None: - assert isinstance(self.parser, ConfigOptionParser) - self.parser._update_defaults(self.parser.defaults) - assert option.dest is not None - default_values = self.parser.defaults.get(option.dest) - help_text = super().expand_default(option) - - if default_values and option.metavar == "URL": - if isinstance(default_values, str): - default_values = [default_values] - - # If its not a list, we should abort and just return the help text - if not isinstance(default_values, list): - default_values = [] - - for val in default_values: - help_text = help_text.replace(val, redact_auth_from_url(val)) - - return help_text - - -class CustomOptionParser(optparse.OptionParser): - def insert_option_group( - self, idx: int, *args: Any, **kwargs: Any - ) -> optparse.OptionGroup: - """Insert an OptionGroup at a given position.""" - group = self.add_option_group(*args, **kwargs) - - self.option_groups.pop() - self.option_groups.insert(idx, group) - - return group - - @property - def option_list_all(self) -> List[optparse.Option]: - """Get a list of all options, including those in option groups.""" - res = self.option_list[:] - for i in self.option_groups: - res.extend(i.option_list) - - return res - - -class ConfigOptionParser(CustomOptionParser): - """Custom option parser which updates its defaults by checking the - configuration files and environmental variables""" - - def __init__( - self, - *args: Any, - name: str, - isolated: bool = False, - **kwargs: Any, - ) -> None: - self.name = name - self.config = Configuration(isolated) - - assert self.name - super().__init__(*args, **kwargs) - - def check_default(self, option: optparse.Option, key: str, val: Any) -> Any: - try: - return option.check_value(key, val) - except optparse.OptionValueError as exc: - print(f"An error occurred during configuration: {exc}") - sys.exit(3) - - def _get_ordered_configuration_items(self) -> Iterator[Tuple[str, Any]]: - # Configuration gives keys in an unordered manner. Order them. - override_order = ["global", self.name, ":env:"] - - # Pool the options into different groups - section_items: Dict[str, List[Tuple[str, Any]]] = { - name: [] for name in override_order - } - for section_key, val in self.config.items(): - # ignore empty values - if not val: - logger.debug( - "Ignoring configuration key '%s' as it's value is empty.", - section_key, - ) - continue - - section, key = section_key.split(".", 1) - if section in override_order: - section_items[section].append((key, val)) - - # Yield each group in their override order - for section in override_order: - for key, val in section_items[section]: - yield key, val - - def _update_defaults(self, defaults: Dict[str, Any]) -> Dict[str, Any]: - """Updates the given defaults with values from the config files and - the environ. Does a little special handling for certain types of - options (lists).""" - - # Accumulate complex default state. - self.values = optparse.Values(self.defaults) - late_eval = set() - # Then set the options with those values - for key, val in self._get_ordered_configuration_items(): - # '--' because configuration supports only long names - option = self.get_option("--" + key) - - # Ignore options not present in this parser. E.g. non-globals put - # in [global] by users that want them to apply to all applicable - # commands. 
- if option is None: - continue - - assert option.dest is not None - - if option.action in ("store_true", "store_false"): - try: - val = strtobool(val) - except ValueError: - self.error( - "{} is not a valid value for {} option, " # noqa - "please specify a boolean value like yes/no, " - "true/false or 1/0 instead.".format(val, key) - ) - elif option.action == "count": - with suppress(ValueError): - val = strtobool(val) - with suppress(ValueError): - val = int(val) - if not isinstance(val, int) or val < 0: - self.error( - "{} is not a valid value for {} option, " # noqa - "please instead specify either a non-negative integer " - "or a boolean value like yes/no or false/true " - "which is equivalent to 1/0.".format(val, key) - ) - elif option.action == "append": - val = val.split() - val = [self.check_default(option, key, v) for v in val] - elif option.action == "callback": - assert option.callback is not None - late_eval.add(option.dest) - opt_str = option.get_opt_string() - val = option.convert_value(opt_str, val) - # From take_action - args = option.callback_args or () - kwargs = option.callback_kwargs or {} - option.callback(option, opt_str, val, self, *args, **kwargs) - else: - val = self.check_default(option, key, val) - - defaults[option.dest] = val - - for key in late_eval: - defaults[key] = getattr(self.values, key) - self.values = None - return defaults - - def get_default_values(self) -> optparse.Values: - """Overriding to make updating the defaults after instantiation of - the option parser possible, _update_defaults() does the dirty work.""" - if not self.process_default_values: - # Old, pre-Optik 1.5 behaviour. - return optparse.Values(self.defaults) - - # Load the configuration, or error out in case of an error - try: - self.config.load() - except ConfigurationError as err: - self.exit(UNKNOWN_ERROR, str(err)) - - defaults = self._update_defaults(self.defaults.copy()) # ours - for option in self._get_all_options(): - assert option.dest is not None - default = defaults.get(option.dest) - if isinstance(default, str): - opt_str = option.get_opt_string() - defaults[option.dest] = option.check_value(opt_str, default) - return optparse.Values(defaults) - - def error(self, msg: str) -> None: - self.print_usage(sys.stderr) - self.exit(UNKNOWN_ERROR, f"{msg}\n") diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/build/wheel.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/build/wheel.py deleted file mode 100644 index b0d2fc9eadb9349c0b8e69b58351648f3e54dfb5..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_internal/operations/build/wheel.py +++ /dev/null @@ -1,37 +0,0 @@ -import logging -import os -from typing import Optional - -from pip._vendor.pep517.wrappers import Pep517HookCaller - -from pip._internal.utils.subprocess import runner_with_spinner_message - -logger = logging.getLogger(__name__) - - -def build_wheel_pep517( - name: str, - backend: Pep517HookCaller, - metadata_directory: str, - tempd: str, -) -> Optional[str]: - """Build one InstallRequirement using the PEP 517 build process. - - Returns path to wheel if successfully built. Otherwise, returns None. 
- """ - assert metadata_directory is not None - try: - logger.debug("Destination directory: %s", tempd) - - runner = runner_with_spinner_message( - f"Building wheel for {name} (pyproject.toml)" - ) - with backend.subprocess_runner(runner): - wheel_name = backend.build_wheel( - tempd, - metadata_directory=metadata_directory, - ) - except Exception: - logger.error("Failed building wheel for %s", name) - return None - return os.path.join(tempd, wheel_name) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py deleted file mode 100644 index 9e29623bdc54a7c6d11bcc167d71bb44cc9be39d..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/chardet/sjisprober.py +++ /dev/null @@ -1,92 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is mozilla.org code. -# -# The Initial Developer of the Original Code is -# Netscape Communications Corporation. -# Portions created by the Initial Developer are Copyright (C) 1998 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from .mbcharsetprober import MultiByteCharSetProber -from .codingstatemachine import CodingStateMachine -from .chardistribution import SJISDistributionAnalysis -from .jpcntx import SJISContextAnalysis -from .mbcssm import SJIS_SM_MODEL -from .enums import ProbingState, MachineState - - -class SJISProber(MultiByteCharSetProber): - def __init__(self): - super(SJISProber, self).__init__() - self.coding_sm = CodingStateMachine(SJIS_SM_MODEL) - self.distribution_analyzer = SJISDistributionAnalysis() - self.context_analyzer = SJISContextAnalysis() - self.reset() - - def reset(self): - super(SJISProber, self).reset() - self.context_analyzer.reset() - - @property - def charset_name(self): - return self.context_analyzer.charset_name - - @property - def language(self): - return "Japanese" - - def feed(self, byte_str): - for i in range(len(byte_str)): - coding_state = self.coding_sm.next_state(byte_str[i]) - if coding_state == MachineState.ERROR: - self.logger.debug('%s %s prober hit error at byte %s', - self.charset_name, self.language, i) - self._state = ProbingState.NOT_ME - break - elif coding_state == MachineState.ITS_ME: - self._state = ProbingState.FOUND_IT - break - elif coding_state == MachineState.START: - char_len = self.coding_sm.get_current_charlen() - if i == 0: - self._last_char[1] = byte_str[0] - self.context_analyzer.feed(self._last_char[2 - char_len:], - char_len) - self.distribution_analyzer.feed(self._last_char, char_len) - else: - self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3 - - char_len], char_len) - self.distribution_analyzer.feed(byte_str[i - 1:i + 1], - char_len) - - self._last_char[0] = byte_str[-1] - - if self.state == ProbingState.DETECTING: - if (self.context_analyzer.got_enough_data() and - (self.get_confidence() > self.SHORTCUT_THRESHOLD)): - self._state = ProbingState.FOUND_IT - - return self.state - - def get_confidence(self): - context_conf = self.context_analyzer.get_confidence() - distrib_conf = self.distribution_analyzer.get_confidence() - return max(context_conf, distrib_conf) diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_loop.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_loop.py deleted file mode 100644 index 01c6cafbe53f1fcb12f7b382b2b35e2fd2c69933..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/rich/_loop.py +++ /dev/null @@ -1,43 +0,0 @@ -from typing import Iterable, Tuple, TypeVar - -T = TypeVar("T") - - -def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: - """Iterate and generate a tuple with a flag for first value.""" - iter_values = iter(values) - try: - value = next(iter_values) - except StopIteration: - return - yield True, value - for value in iter_values: - yield False, value - - -def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]: - """Iterate and generate a tuple with a flag for last value.""" - iter_values = iter(values) - try: - previous_value = next(iter_values) - except StopIteration: - return - for value in iter_values: - yield False, previous_value - previous_value = value - yield True, previous_value - - -def loop_first_last(values: Iterable[T]) -> 
Iterable[Tuple[bool, bool, T]]: - """Iterate and generate a tuple with a flag for first and last value.""" - iter_values = iter(values) - try: - previous_value = next(iter_values) - except StopIteration: - return - first = True - for value in iter_values: - yield first, False, previous_value - first = False - previous_value = value - yield first, True, previous_value diff --git a/spaces/ali-ghamdan/deoldify/fastai/vision/models/unet.py b/spaces/ali-ghamdan/deoldify/fastai/vision/models/unet.py deleted file mode 100644 index 06ed75c4c10890086e07da775d50e690e91f1d88..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/deoldify/fastai/vision/models/unet.py +++ /dev/null @@ -1,78 +0,0 @@ -from ...torch_core import * -from ...layers import * -from ...callbacks.hooks import * - -__all__ = ['DynamicUnet', 'UnetBlock'] - -def _get_sfs_idxs(sizes:Sizes) -> List[int]: - "Get the indexes of the layers where the size of the activation changes." - feature_szs = [size[-1] for size in sizes] - sfs_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_szs[1:]))[0]) - if feature_szs[0] != feature_szs[1]: sfs_idxs = [0] + sfs_idxs - return sfs_idxs - -class UnetBlock(Module): - "A quasi-UNet block, using `PixelShuffle_ICNR upsampling`." - def __init__(self, up_in_c:int, x_in_c:int, hook:Hook, final_div:bool=True, blur:bool=False, leaky:float=None, - self_attention:bool=False, **kwargs): - self.hook = hook - self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, leaky=leaky, **kwargs) - self.bn = batchnorm_2d(x_in_c) - ni = up_in_c//2 + x_in_c - nf = ni if final_div else ni//2 - self.conv1 = conv_layer(ni, nf, leaky=leaky, **kwargs) - self.conv2 = conv_layer(nf, nf, leaky=leaky, self_attention=self_attention, **kwargs) - self.relu = relu(leaky=leaky) - - def forward(self, up_in:Tensor) -> Tensor: - s = self.hook.stored - up_out = self.shuf(up_in) - ssh = s.shape[-2:] - if ssh != up_out.shape[-2:]: - up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest') - cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1)) - return self.conv2(self.conv1(cat_x)) - - -class DynamicUnet(SequentialEx): - "Create a U-Net from a given architecture." 
- def __init__(self, encoder:nn.Module, n_classes:int, img_size:Tuple[int,int]=(256,256), blur:bool=False, blur_final=True, self_attention:bool=False, - y_range:Optional[Tuple[float,float]]=None, - last_cross:bool=True, bottle:bool=False, **kwargs): - imsize = img_size - sfs_szs = model_sizes(encoder, size=imsize) - sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs))) - self.sfs = hook_outputs([encoder[i] for i in sfs_idxs]) - x = dummy_eval(encoder, imsize).detach() - - ni = sfs_szs[-1][1] - middle_conv = nn.Sequential(conv_layer(ni, ni*2, **kwargs), - conv_layer(ni*2, ni, **kwargs)).eval() - x = middle_conv(x) - layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv] - - for i,idx in enumerate(sfs_idxs): - not_final = i!=len(sfs_idxs)-1 - up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1]) - do_blur = blur and (not_final or blur_final) - sa = self_attention and (i==len(sfs_idxs)-3) - unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i], final_div=not_final, blur=do_blur, self_attention=sa, - **kwargs).eval() - layers.append(unet_block) - x = unet_block(x) - - ni = x.shape[1] - if imsize != sfs_szs[0][-2:]: layers.append(PixelShuffle_ICNR(ni, **kwargs)) - x = PixelShuffle_ICNR(ni)(x) - if imsize != x.shape[-2:]: layers.append(Lambda(lambda x: F.interpolate(x, imsize, mode='nearest'))) - if last_cross: - layers.append(MergeLayer(dense=True)) - ni += in_channels(encoder) - layers.append(res_block(ni, bottle=bottle, **kwargs)) - layers += [conv_layer(ni, n_classes, ks=1, use_activ=False, **kwargs)] - if y_range is not None: layers.append(SigmoidRange(*y_range)) - super().__init__(*layers) - - def __del__(self): - if hasattr(self, "sfs"): self.sfs.remove() - diff --git a/spaces/allknowingroger/Image-Models-Test200/README.md b/spaces/allknowingroger/Image-Models-Test200/README.md deleted file mode 100644 index f91e4b31ab345f987b425de029c057bfb69d9e1b..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test200/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test ---- - -<!--Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference--> \ No newline at end of file diff --git a/spaces/allknowingroger/Image-Models-Test71/README.md b/spaces/allknowingroger/Image-Models-Test71/README.md deleted file mode 100644 index 89b5280627d2e22d3886b1ec804526e771816274..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/Image-Models-Test71/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: More Image Models -emoji: 😻 -colorFrom: red -colorTo: gray -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: true -duplicated_from: allknowingroger/Image-Models-Test70 ---- - -<!--Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference--> \ No newline at end of file diff --git a/spaces/alvanlii/domain-expansion/expansion_utils/closed_form_factrorization.py b/spaces/alvanlii/domain-expansion/expansion_utils/closed_form_factrorization.py deleted file mode 100644 index a3b017cc523cf7cbd9ff55536dc7f7ecd8a41b03..0000000000000000000000000000000000000000 --- a/spaces/alvanlii/domain-expansion/expansion_utils/closed_form_factrorization.py +++ /dev/null @@ -1,53 +0,0 @@ -# Based on a script from https://github.com/rosinality/stylegan2-pytorch - -# 
==========================================================================================
-#
-# Adobe’s modifications are Copyright 2023 Adobe Research. All rights reserved.
-# Adobe’s modifications are licensed under the Adobe Research License. To view a copy of the license, visit
-# LICENSE.md.
-#
-# ==========================================================================================
-
-
-import argparse
-import numpy as np
-import torch
-from pathlib import Path
-
-import dnnlib
-
-import legacy
-
-
-def factorize(G):
-    modulate = {
-        k: v
-        for k, v in G.named_parameters()
-        if ('b4' in k or "torgb" not in k) and ("affine" in k and "weight" in k)
-    }
-
-    weight_mat = []
-    for k, v in modulate.items():
-        weight_mat.append(v)
-
-    W = torch.cat(weight_mat, 0)
-    eigvec = torch.svd(W).V
-
-    return eigvec
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="Extract factor/eigenvectors of latent spaces using closed form factorization"
-    )
-
-    parser.add_argument("--out", type=str, required=True, help="path to output file")
-    parser.add_argument("ckpt", type=str, help="name of the model checkpoint")
-
-    args = parser.parse_args()
-    device = 'cuda'
-    with dnnlib.util.open_url(args.ckpt) as f:
-        G = legacy.load_network_pkl(f)['G_ema'].to(device)
-
-    eigvec = factorize(G)
-    torch.save(eigvec, args.out)
diff --git a/spaces/amankishore/sjc/sd1/ldm/modules/encoders/modules_bak.py b/spaces/amankishore/sjc/sd1/ldm/modules/encoders/modules_bak.py
deleted file mode 100644
index 418fc52d6012a9e4acf6f2ba19ce4d038eb45be2..0000000000000000000000000000000000000000
--- a/spaces/amankishore/sjc/sd1/ldm/modules/encoders/modules_bak.py
+++ /dev/null
@@ -1,510 +0,0 @@
-import torch
-import torch.nn as nn
-from functools import partial
-import clip
-from einops import rearrange, repeat
-from transformers import CLIPTokenizer, CLIPTextModel
-import kornia
-
-from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
-
-def _expand_mask(mask, dtype, tgt_len = None):
-    """
-    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) - -def _build_causal_attention_mask(bsz, seq_len, dtype): - # lazily create causal attention mask, with full attention between the vision tokens - # pytorch uses additive attention mask; fill with -inf - mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype) - mask.fill_(torch.tensor(torch.finfo(dtype).min)) - mask.triu_(1) # zero out the lower diagonal - mask = mask.unsqueeze(1) # expand mask - return mask - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - - def forward(self, batch, key=None): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - c = self.embedding(c) - return c - - -class TransformerEmbedder(AbstractEncoder): - """Some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): - super().__init__() - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer)) - - def forward(self, tokens): - tokens = tokens.to(self.device) # meh - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, x): - return self(x) - - -class BERTTokenizer(AbstractEncoder): - """ Uses a pretrained BERT tokenizer by huggingface. 
Vocab size: 30522 (?)""" - def __init__(self, device="cuda", vq_interface=True, max_length=77): - super().__init__() - from transformers import BertTokenizerFast # TODO: add to reuquirements - self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") - self.device = device - self.vq_interface = vq_interface - self.max_length = max_length - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - return tokens - - @torch.no_grad() - def encode(self, text): - tokens = self(text) - if not self.vq_interface: - return tokens - return None, None, [None, None, tokens] - - def decode(self, text): - return text - - -class BERTEmbedder(AbstractEncoder): - """Uses the BERT tokenizr model and add some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, - device="cuda",use_tokenizer=True, embedding_dropout=0.0): - super().__init__() - self.use_tknz_fn = use_tokenizer - if self.use_tknz_fn: - self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer), - emb_dropout=embedding_dropout) - - def forward(self, text, embedding_manager=None): - if self.use_tknz_fn: - tokens = self.tknz_fn(text)#.to(self.device) - else: - tokens = text - z = self.transformer(tokens, return_embeddings=True, embedding_manager=embedding_manager) - return z - - def encode(self, text, **kwargs): - # output of length 77 - return self(text, **kwargs) - -class SpatialRescaler(nn.Module): - def __init__(self, - n_stages=1, - method='bilinear', - multiplier=0.5, - in_channels=3, - out_channels=None, - bias=False): - super().__init__() - self.n_stages = n_stages - assert self.n_stages >= 0 - assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] - self.multiplier = multiplier - self.interpolator = partial(torch.nn.functional.interpolate, mode=method) - self.remap_output = out_channels is not None - if self.remap_output: - print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') - self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) - - def forward(self,x): - for stage in range(self.n_stages): - x = self.interpolator(x, scale_factor=self.multiplier) - - - if self.remap_output: - x = self.channel_mapper(x) - return x - - def encode(self, x): - return self(x) - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from Hugging Face)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): - super().__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - self.freeze() - - def embedding_forward( - self, - input_ids = None, - position_ids = None, - inputs_embeds = None, - embedding_manager = None, - ) -> torch.Tensor: - - seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] - - if position_ids is None: - position_ids = self.position_ids[:, :seq_length] - - if inputs_embeds is None: - inputs_embeds = self.token_embedding(input_ids) - - if embedding_manager is not None: - inputs_embeds = 
embedding_manager(input_ids, inputs_embeds) - - - position_embeddings = self.position_embedding(position_ids) - embeddings = inputs_embeds + position_embeddings - - return embeddings - - self.transformer.text_model.embeddings.forward = embedding_forward.__get__(self.transformer.text_model.embeddings) - - def encoder_forward( - self, - inputs_embeds, - attention_mask = None, - causal_attention_mask = None, - output_attentions = None, - output_hidden_states = None, - return_dict = None, - ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - causal_attention_mask, - output_attentions=output_attentions, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - return hidden_states - - # if not return_dict: - # return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - # return BaseModelOutput( - # last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - # ) - - self.transformer.text_model.encoder.forward = encoder_forward.__get__(self.transformer.text_model.encoder) - - - def text_encoder_forward( - self, - input_ids = None, - attention_mask = None, - position_ids = None, - output_attentions = None, - output_hidden_states = None, - return_dict = None, - embedding_manager = None, - ): - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is None: - raise ValueError("You have to specify either input_ids") - - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) - - hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids, embedding_manager=embedding_manager) - - bsz, seq_len = input_shape - # CLIP's text model uses causal mask, prepare it here. 
- # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 - causal_attention_mask = _build_causal_attention_mask(bsz, seq_len, hidden_states.dtype).to( - hidden_states.device - ) - - # expand attention_mask - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - attention_mask = _expand_mask(attention_mask, hidden_states.dtype) - - last_hidden_state = self.encoder( - inputs_embeds=hidden_states, - attention_mask=attention_mask, - causal_attention_mask=causal_attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - # last_hidden_state = encoder_outputs[0] - last_hidden_state = self.final_layer_norm(last_hidden_state) - - # text_embeds.shape = [batch_size, sequence_length, transformer.width] - # take features from the eot embedding (eot_token is the highest number in each sequence) - # pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] - - # if not return_dict: - # return (last_hidden_state, pooled_output) + encoder_outputs[1:] - - return last_hidden_state - - self.transformer.text_model.forward = text_encoder_forward.__get__(self.transformer.text_model) - - def transformer_forward( - self, - input_ids = None, - attention_mask = None, - position_ids = None, - output_attentions = None, - output_hidden_states = None, - return_dict = None, - embedding_manager = None, - ): - return self.text_model( - input_ids=input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - embedding_manager = embedding_manager - ) - - self.transformer.forward = transformer_forward.__get__(self.transformer) - - - # def update_embedding_func(self, embedding_manager): - # text_model = self.transformer.text_model - # # text_model.old_embeddings = text_model.embeddings - - # # def new_embeddings( - # # input_ids = None, - # # position_ids = None, - # # inputs_embeds = None, - # # ) -> torch.Tensor: - - # # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] - - # # if position_ids is None: - # # position_ids = text_model.old_embeddings.position_ids[:, :seq_length] - - # # if inputs_embeds is None: - # # inputs_embeds = text_model.old_embeddings.token_embedding(input_ids) - - - # # inputs_embeds = embedding_manager(input_ids, inputs_embeds) - - # # position_embeddings = text_model.old_embeddings.position_embedding(position_ids) - # # embeddings = inputs_embeds + position_embeddings - - # # return embeddings - - # # del text_model.embeddings - # # text_model.embeddings = new_embeddings - - # # class NewEmbeddings(torch.nn.Module): - - # # def __init__(self, orig_embedder): - # # super().__init__() - # # self.orig_embedder = orig_embedder - - # # def forward( - # # self, - # # input_ids = None, - # # position_ids = None, - # # inputs_embeds = None, - # # ) -> torch.Tensor: - - # # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] - - # # if position_ids is None: - # # position_ids = self.orig_embedder.position_ids[:, :seq_length] - - # # if inputs_embeds is None: - # # inputs_embeds = self.orig_embedder.token_embedding(input_ids) - - # # inputs_embeds = embedding_manager(input_ids, inputs_embeds) - - # # position_embeddings = self.orig_embedder.position_embedding(position_ids) - # # embeddings = inputs_embeds + 
position_embeddings - - # # return embeddings - - # # # self.new_embeddings = - # # # text_model.embeddings = new_embeddings.__call__.__get__(text_model) - # # text_model.embeddings = NewEmbeddings(text_model.embeddings) - - # class NewEmbeddings(torch.nn.Module): - - # def __init__(self, orig_embedder, embedding_manager): - # super().__init__() - # self.embedding_manager = embedding_manager - # self.orig_embedder = orig_embedder - - # def forward( - # self, - # input_ids = None, - # position_ids = None, - # inputs_embeds = None, - # ) -> torch.Tensor: - - # seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] - - # if position_ids is None: - # position_ids = self.orig_embedder.position_ids[:, :seq_length] - - # if inputs_embeds is None: - # inputs_embeds = self.orig_embedder.token_embedding(input_ids) - - # # init_embeds = inputs_embeds.clone() - # inputs_embeds = self.embedding_manager(input_ids, inputs_embeds) - - # # print(inputs_embeds - init_embeds) - # # print((inputs_embeds - init_embeds).max()) - # # exit(0) - - # position_embeddings = self.orig_embedder.position_embedding(position_ids) - # embeddings = inputs_embeds + position_embeddings - - # return embeddings - - # # self.new_embeddings = - # # text_model.embeddings = new_embeddings.__call__.__get__(text_model) - # text_model.embeddings = NewEmbeddings(text_model.embeddings, embedding_manager) - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text, **kwargs): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - z = self.transformer(input_ids=tokens, **kwargs) - - return z - - def encode(self, text, **kwargs): - return self(text, **kwargs) - - -class FrozenCLIPTextEmbedder(nn.Module): - """ - Uses the CLIP transformer encoder for text. - """ - def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): - super().__init__() - self.model, _ = clip.load(version, jit=False, device="cpu") - self.device = device - self.max_length = max_length - self.n_repeat = n_repeat - self.normalize = normalize - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = clip.tokenize(text).to(self.device) - z = self.model.encode_text(tokens) - if self.normalize: - z = z / torch.linalg.norm(z, dim=1, keepdim=True) - return z - - def encode(self, text): - z = self(text) - if z.ndim==2: - z = z[:, None, :] - z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) - return z - - -class FrozenClipImageEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = kornia.geometry.resize(x, (224, 224), - interpolation='bicubic',align_corners=True, - antialias=self.antialias) - x = (x + 1.) / 2. 
- # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - return self.model.encode_image(self.preprocess(x)) - - -if __name__ == "__main__": - from ldm.util import count_params - model = FrozenCLIPEmbedder() - count_params(model, verbose=True) \ No newline at end of file diff --git a/spaces/anaclaudia13ct/insect_detection/utils/general.py b/spaces/anaclaudia13ct/insect_detection/utils/general.py deleted file mode 100644 index 99a96576c3fdda77710f42776a3b87f42ec78fd4..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/utils/general.py +++ /dev/null @@ -1,1140 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -General utils -""" - -import contextlib -import glob -import inspect -import logging -import logging.config -import math -import os -import platform -import random -import re -import signal -import sys -import time -import urllib -from copy import deepcopy -from datetime import datetime -from itertools import repeat -from multiprocessing.pool import ThreadPool -from pathlib import Path -from subprocess import check_output -from tarfile import is_tarfile -from typing import Optional -from zipfile import ZipFile, is_zipfile - -import cv2 -import IPython -import numpy as np -import pandas as pd -import pkg_resources as pkg -import torch -import torchvision -import yaml - -from utils import TryExcept, emojis -from utils.downloads import gsutil_getsize -from utils.metrics import box_iou, fitness - -FILE = Path(__file__).resolve() -ROOT = FILE.parents[1] # YOLOv5 root directory -RANK = int(os.getenv('RANK', -1)) - -# Settings -NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads -DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory -AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode -VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode -TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format -FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf - -torch.set_printoptions(linewidth=320, precision=5, profile='long') -np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 -pd.options.display.max_columns = 10 -cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) -os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads -os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) - - -def is_ascii(s=''): - # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) - s = str(s) # convert list, tuple, None, etc. to str - return len(s.encode().decode('ascii', 'ignore')) == len(s) - - -def is_chinese(s='人工智能'): - # Is string composed of any Chinese characters? - return bool(re.search('[\u4e00-\u9fff]', str(s))) - - -def is_colab(): - # Is environment a Google Colab instance? - return 'google.colab' in sys.modules - - -def is_notebook(): - # Is environment a Jupyter notebook? Verified on Colab, Jupyterlab, Kaggle, Paperspace - ipython_type = str(type(IPython.get_ipython())) - return 'colab' in ipython_type or 'zmqshell' in ipython_type - - -def is_kaggle(): - # Is environment a Kaggle Notebook? 
- return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' - - -def is_docker() -> bool: - """Check if the process runs inside a docker container.""" - if Path("/.dockerenv").exists(): - return True - try: # check if docker is in control groups - with open("/proc/self/cgroup") as file: - return any("docker" in line for line in file) - except OSError: - return False - - -def is_writeable(dir, test=False): - # Return True if directory has write permissions, test opening a file with write permissions if test=True - if not test: - return os.access(dir, os.W_OK) # possible issues on Windows - file = Path(dir) / 'tmp.txt' - try: - with open(file, 'w'): # open file with write permissions - pass - file.unlink() # remove file - return True - except OSError: - return False - - -LOGGING_NAME = "yolov5" - - -def set_logging(name=LOGGING_NAME, verbose=True): - # sets up logging for the given name - rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings - level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR - logging.config.dictConfig({ - "version": 1, - "disable_existing_loggers": False, - "formatters": { - name: { - "format": "%(message)s"}}, - "handlers": { - name: { - "class": "logging.StreamHandler", - "formatter": name, - "level": level,}}, - "loggers": { - name: { - "level": level, - "handlers": [name], - "propagate": False,}}}) - - -set_logging(LOGGING_NAME) # run before defining LOGGER -LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) -if platform.system() == 'Windows': - for fn in LOGGER.info, LOGGER.warning: - setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging - - -def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): - # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. - env = os.getenv(env_var) - if env: - path = Path(env) # use environment variable - else: - cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs - path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir - path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable - path.mkdir(exist_ok=True) # make if required - return path - - -CONFIG_DIR = user_config_dir() # Ultralytics settings dir - - -class Profile(contextlib.ContextDecorator): - # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager - def __init__(self, t=0.0): - self.t = t - self.cuda = torch.cuda.is_available() - - def __enter__(self): - self.start = self.time() - return self - - def __exit__(self, type, value, traceback): - self.dt = self.time() - self.start # delta-time - self.t += self.dt # accumulate dt - - def time(self): - if self.cuda: - torch.cuda.synchronize() - return time.time() - - -class Timeout(contextlib.ContextDecorator): - # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager - def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): - self.seconds = int(seconds) - self.timeout_message = timeout_msg - self.suppress = bool(suppress_timeout_errors) - - def _timeout_handler(self, signum, frame): - raise TimeoutError(self.timeout_message) - - def __enter__(self): - if platform.system() != 'Windows': # not supported on Windows - signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM - signal.alarm(self.seconds) # start countdown for SIGALRM to be raised - - def __exit__(self, exc_type, exc_val, exc_tb): - if platform.system() != 'Windows': - signal.alarm(0) # Cancel SIGALRM if it's scheduled - if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError - return True - - -class WorkingDirectory(contextlib.ContextDecorator): - # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager - def __init__(self, new_dir): - self.dir = new_dir # new dir - self.cwd = Path.cwd().resolve() # current dir - - def __enter__(self): - os.chdir(self.dir) - - def __exit__(self, exc_type, exc_val, exc_tb): - os.chdir(self.cwd) - - -def methods(instance): - # Get class/instance methods - return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith("__")] - - -def print_args(args: Optional[dict] = None, show_file=True, show_func=False): - # Print function arguments (optional args dict) - x = inspect.currentframe().f_back # previous frame - file, _, func, _, _ = inspect.getframeinfo(x) - if args is None: # get args automatically - args, _, _, frm = inspect.getargvalues(x) - args = {k: v for k, v in frm.items() if k in args} - try: - file = Path(file).resolve().relative_to(ROOT).with_suffix('') - except ValueError: - file = Path(file).stem - s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') - LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) - - -def init_seeds(seed=0, deterministic=False): - # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html - random.seed(seed) - np.random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe - # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 - if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 - torch.use_deterministic_algorithms(True) - torch.backends.cudnn.deterministic = True - os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' - os.environ['PYTHONHASHSEED'] = str(seed) - - -def intersect_dicts(da, db, exclude=()): - # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values - return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} - - -def get_default_args(func): - # Get func() default arguments - signature = inspect.signature(func) - return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} - - -def get_latest_run(search_dir='.'): - # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) - last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) - return max(last_list, key=os.path.getctime) if last_list else '' - - -def file_age(path=__file__): - # Return days since last file update - dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta - return dt.days # + dt.seconds / 86400 # fractional days - - -def file_date(path=__file__): - # Return human-readable file modification date, i.e. '2021-3-26' - t = datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' - - -def file_size(path): - # Return file/dir size (MB) - mb = 1 << 20 # bytes to MiB (1024 ** 2) - path = Path(path) - if path.is_file(): - return path.stat().st_size / mb - elif path.is_dir(): - return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb - else: - return 0.0 - - -def check_online(): - # Check internet connectivity - import socket - - def run_once(): - # Check once - try: - socket.create_connection(("1.1.1.1", 443), 5) # check host accessibility - return True - except OSError: - return False - - return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues - - -def git_describe(path=ROOT): # path must be a directory - # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - try: - assert (Path(path) / '.git').is_dir() - return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] - except Exception: - return '' - - -@TryExcept() -@WorkingDirectory(ROOT) -def check_git_status(repo='ultralytics/yolov5', branch='master'): - # YOLOv5 status check, recommend 'git pull' if code is out of date - url = f'https://github.com/{repo}' - msg = f', for updates see {url}' - s = colorstr('github: ') # string - assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg - assert check_online(), s + 'skipping check (offline)' + msg - - splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) - matches = [repo in s for s in splits] - if any(matches): - remote = splits[matches.index(True) - 1] - else: - remote = 'ultralytics' - check_output(f'git remote add {remote} {url}', shell=True) - check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch - local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out - n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind - if n > 0: - pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' - s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use `{pull}` or `git clone {url}` to update." - else: - s += f'up to date with {url} ✅' - LOGGER.info(s) - - -@WorkingDirectory(ROOT) -def check_git_info(path='.'): - # YOLOv5 git info check, return {remote, branch, commit} - check_requirements('gitpython') - import git - try: - repo = git.Repo(path) - remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' - commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' - try: - branch = repo.active_branch.name # i.e. 'main' - except TypeError: # not on any branch - branch = None # i.e. 
'detached HEAD' state - return {'remote': remote, 'branch': branch, 'commit': commit} - except git.exc.InvalidGitRepositoryError: # path is not a git dir - return {'remote': None, 'branch': None, 'commit': None} - - -def check_python(minimum='3.7.0'): - # Check current python version vs. required python version - check_version(platform.python_version(), minimum, name='Python ', hard=True) - - -def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): - # Check version vs. required version - current, minimum = (pkg.parse_version(x) for x in (current, minimum)) - result = (current == minimum) if pinned else (current >= minimum) # bool - s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string - if hard: - assert result, emojis(s) # assert min requirements met - if verbose and not result: - LOGGER.warning(s) - return result - - -@TryExcept() -def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): - # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) - prefix = colorstr('red', 'bold', 'requirements:') - check_python() # check python version - if isinstance(requirements, Path): # requirements.txt file - file = requirements.resolve() - assert file.exists(), f"{prefix} {file} not found, check failed." - with file.open() as f: - requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] - elif isinstance(requirements, str): - requirements = [requirements] - - s = '' - n = 0 - for r in requirements: - try: - pkg.require(r) - except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met - s += f'"{r}" ' - n += 1 - - if s and install and AUTOINSTALL: # check environment variable - LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") - try: - # assert check_online(), "AutoUpdate skipped (offline)" - LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) - source = file if 'file' in locals() else requirements - s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ - f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" - LOGGER.info(s) - except Exception as e: - LOGGER.warning(f'{prefix} ❌ {e}') - - -def check_img_size(imgsz, s=32, floor=0): - # Verify image size is a multiple of stride s in each dimension - if isinstance(imgsz, int): # integer i.e. img_size=640 - new_size = max(make_divisible(imgsz, int(s)), floor) - else: # list i.e. 
img_size=[640, 480] - imgsz = list(imgsz) # convert to list if tuple - new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] - if new_size != imgsz: - LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') - return new_size - - -def check_imshow(warn=False): - # Check if environment supports image displays - try: - assert not is_notebook() - assert not is_docker() - cv2.imshow('test', np.zeros((1, 1, 3))) - cv2.waitKey(1) - cv2.destroyAllWindows() - cv2.waitKey(1) - return True - except Exception as e: - if warn: - LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') - return False - - -def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): - # Check file(s) for acceptable suffix - if file and suffix: - if isinstance(suffix, str): - suffix = [suffix] - for f in file if isinstance(file, (list, tuple)) else [file]: - s = Path(f).suffix.lower() # file suffix - if len(s): - assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}" - - -def check_yaml(file, suffix=('.yaml', '.yml')): - # Search/download YAML file (if necessary) and return path, checking suffix - return check_file(file, suffix) - - -def check_file(file, suffix=''): - # Search/download file (if necessary) and return path - check_suffix(file, suffix) # optional - file = str(file) # convert to str() - if os.path.isfile(file) or not file: # exists - return file - elif file.startswith(('http:/', 'https:/')): # download - url = file # warning: Pathlib turns :// -> :/ - file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth - if os.path.isfile(file): - LOGGER.info(f'Found {url} locally at {file}') # file already exists - else: - LOGGER.info(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, file) - assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check - return file - elif file.startswith('clearml://'): # ClearML Dataset ID - assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." 
- return file - else: # search - files = [] - for d in 'data', 'models', 'utils': # search directories - files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file - assert len(files), f'File not found: {file}' # assert file was found - assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique - return files[0] # return file - - -def check_font(font=FONT, progress=False): - # Download font to CONFIG_DIR if necessary - font = Path(font) - file = CONFIG_DIR / font.name - if not font.exists() and not file.exists(): - url = f'https://ultralytics.com/assets/{font.name}' - LOGGER.info(f'Downloading {url} to {file}...') - torch.hub.download_url_to_file(url, str(file), progress=progress) - - -def check_dataset(data, autodownload=True): - # Download, check and/or unzip dataset if not found locally - - # Download (optional) - extract_dir = '' - if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): - download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) - data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) - extract_dir, autodownload = data.parent, False - - # Read yaml (optional) - if isinstance(data, (str, Path)): - data = yaml_load(data) # dictionary - - # Checks - for k in 'train', 'val', 'names': - assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") - if isinstance(data['names'], (list, tuple)): # old array format - data['names'] = dict(enumerate(data['names'])) # convert to dict - assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' - data['nc'] = len(data['names']) - - # Resolve paths - path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
- if not path.is_absolute(): - path = (ROOT / path).resolve() - data['path'] = path # download scripts - for k in 'train', 'val', 'test': - if data.get(k): # prepend path - if isinstance(data[k], str): - x = (path / data[k]).resolve() - if not x.exists() and data[k].startswith('../'): - x = (path / data[k][3:]).resolve() - data[k] = str(x) - else: - data[k] = [str((path / x).resolve()) for x in data[k]] - - # Parse yaml - train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) - if val: - val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path - if not all(x.exists() for x in val): - LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) - if not s or not autodownload: - raise Exception('Dataset not found ❌') - t = time.time() - if s.startswith('http') and s.endswith('.zip'): # URL - f = Path(s).name # filename - LOGGER.info(f'Downloading {s} to {f}...') - torch.hub.download_url_to_file(s, f) - Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root - unzip_file(f, path=DATASETS_DIR) # unzip - Path(f).unlink() # remove zip - r = None # success - elif s.startswith('bash '): # bash script - LOGGER.info(f'Running {s} ...') - r = os.system(s) - else: # python script - r = exec(s, {'yaml': data}) # return None - dt = f'({round(time.time() - t, 1)}s)' - s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f"failure {dt} ❌" - LOGGER.info(f"Dataset download {s}") - check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts - return data # dictionary - - -def check_amp(model): - # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation - from models.common import AutoShape, DetectMultiBackend - - def amp_allclose(model, im): - # All close FP32 vs AMP results - m = AutoShape(model, verbose=False) # model - a = m(im).xywhn[0] # FP32 inference - m.amp = True - b = m(im).xywhn[0] # AMP inference - return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance - - prefix = colorstr('AMP: ') - device = next(model.parameters()).device # get model device - if device.type in ('cpu', 'mps'): - return False # AMP only used on CUDA devices - f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check - im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) - try: - assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) - LOGGER.info(f'{prefix}checks passed ✅') - return True - except Exception: - help_url = 'https://github.com/ultralytics/yolov5/issues/7908' - LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. 
See {help_url}') - return False - - -def yaml_load(file='data.yaml'): - # Single-line safe yaml loading - with open(file, errors='ignore') as f: - return yaml.safe_load(f) - - -def yaml_save(file='data.yaml', data={}): - # Single-line safe yaml saving - with open(file, 'w') as f: - yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) - - -def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): - # Unzip a *.zip file to path/, excluding files containing strings in exclude list - if path is None: - path = Path(file).parent # default path - with ZipFile(file) as zipObj: - for f in zipObj.namelist(): # list all archived filenames in the zip - if all(x not in f for x in exclude): - zipObj.extract(f, path=path) - - -def url2file(url): - # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt - url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ - return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth - - -def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): - # Multithreaded file download and unzip function, used in data.yaml for autodownload - def download_one(url, dir): - # Download 1 file - success = True - if os.path.isfile(url): - f = Path(url) # filename - else: # does not exist - f = dir / Path(url).name - LOGGER.info(f'Downloading {url} to {f}...') - for i in range(retry + 1): - if curl: - s = 'sS' if threads > 1 else '' # silent - r = os.system( - f'curl -# -{s}L "{url}" -o "{f}" --retry 9 -C -') # curl download with retry, continue - success = r == 0 - else: - torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download - success = f.is_file() - if success: - break - elif i < retry: - LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') - else: - LOGGER.warning(f'❌ Failed to download {url}...') - - if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): - LOGGER.info(f'Unzipping {f}...') - if is_zipfile(f): - unzip_file(f, dir) # unzip - elif is_tarfile(f): - os.system(f'tar xf {f} --directory {f.parent}') # unzip - elif f.suffix == '.gz': - os.system(f'tar xfz {f} --directory {f.parent}') # unzip - if delete: - f.unlink() # remove zip - - dir = Path(dir) - dir.mkdir(parents=True, exist_ok=True) # make directory - if threads > 1: - pool = ThreadPool(threads) - pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded - pool.close() - pool.join() - else: - for u in [url] if isinstance(url, (str, Path)) else url: - download_one(u, dir) - - -def make_divisible(x, divisor): - # Returns nearest x divisible by divisor - if isinstance(divisor, torch.Tensor): - divisor = int(divisor.max()) # to int - return math.ceil(x / divisor) * divisor - - -def clean_str(s): - # Cleans a string by replacing special characters with underscore _ - return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) - - -def one_cycle(y1=0.0, y2=1.0, steps=100): - # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf - return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 - - -def colorstr(*input): - # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') - *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string - colors = { - 'black': '\033[30m', # basic colors - 'red': '\033[31m', - 'green': '\033[32m', - 'yellow': '\033[33m', - 'blue': '\033[34m', - 'magenta': '\033[35m', - 'cyan': '\033[36m', - 'white': '\033[37m', - 'bright_black': '\033[90m', # bright colors - 'bright_red': '\033[91m', - 'bright_green': '\033[92m', - 'bright_yellow': '\033[93m', - 'bright_blue': '\033[94m', - 'bright_magenta': '\033[95m', - 'bright_cyan': '\033[96m', - 'bright_white': '\033[97m', - 'end': '\033[0m', # misc - 'bold': '\033[1m', - 'underline': '\033[4m'} - return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] - - -def labels_to_class_weights(labels, nc=80): - # Get class weights (inverse frequency) from training labels - if labels[0] is None: # no labels loaded - return torch.Tensor() - - labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO - classes = labels[:, 0].astype(int) # labels = [class xywh] - weights = np.bincount(classes, minlength=nc) # occurrences per class - - # Prepend gridpoint count (for uCE training) - # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image - # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start - - weights[weights == 0] = 1 # replace empty bins with 1 - weights = 1 / weights # number of targets per class - weights /= weights.sum() # normalize - return torch.from_numpy(weights).float() - - -def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): - # Produces image weights based on class_weights and image contents - # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample - class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) - return (class_weights.reshape(1, nc) * class_counts).sum(1) - - -def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) - # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ - # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') - # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') - # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco - # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet - return [ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, - 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] - - -def xyxy2xywh(x): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - -def xywh2xyxy(x): - # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x - y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y - y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x - y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y - return y - - 
-def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): - # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x - y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y - y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x - y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y - return y - - -def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): - # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right - if clip: - clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center - y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center - y[:, 2] = (x[:, 2] - x[:, 0]) / w # width - y[:, 3] = (x[:, 3] - x[:, 1]) / h # height - return y - - -def xyn2xy(x, w=640, h=640, padw=0, padh=0): - # Convert normalized segments into pixel segments, shape (n,2) - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = w * x[:, 0] + padw # top left x - y[:, 1] = h * x[:, 1] + padh # top left y - return y - - -def segment2box(segment, width=640, height=640): - # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) - x, y = segment.T # segment xy - inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) - x, y, = x[inside], y[inside] - return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy - - -def segments2boxes(segments): - # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) - boxes = [] - for s in segments: - x, y = s.T # segment xy - boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy - return xyxy2xywh(np.array(boxes)) # cls, xywh - - -def resample_segments(segments, n=1000): - # Up-sample an (n,2) segment - for i, s in enumerate(segments): - s = np.concatenate((s, s[0:1, :]), axis=0) - x = np.linspace(0, len(s) - 1, n) - xp = np.arange(len(s)) - segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy - return segments - - -def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): - # Rescale boxes (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - boxes[:, [0, 2]] -= pad[0] # x padding - boxes[:, [1, 3]] -= pad[1] # y padding - boxes[:, :4] /= gain - clip_boxes(boxes, img0_shape) - return boxes - - -def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): - # Rescale coords (xyxy) from img1_shape to img0_shape - if ratio_pad is None: # calculate from img0_shape - gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new - pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding - else: - gain = ratio_pad[0][0] - pad = ratio_pad[1] - - segments[:, 0] -= pad[0] # x padding - segments[:, 1] -= pad[1] # y padding - segments /= gain - clip_segments(segments, img0_shape) - if normalize: - segments[:, 0] /= img0_shape[1] # width - segments[:, 1] /= img0_shape[0] # 
height - return segments - - -def clip_boxes(boxes, shape): - # Clip boxes (xyxy) to image shape (height, width) - if isinstance(boxes, torch.Tensor): # faster individually - boxes[:, 0].clamp_(0, shape[1]) # x1 - boxes[:, 1].clamp_(0, shape[0]) # y1 - boxes[:, 2].clamp_(0, shape[1]) # x2 - boxes[:, 3].clamp_(0, shape[0]) # y2 - else: # np.array (faster grouped) - boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 - boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 - - -def clip_segments(segments, shape): - # Clip segments (xy1,xy2,...) to image shape (height, width) - if isinstance(segments, torch.Tensor): # faster individually - segments[:, 0].clamp_(0, shape[1]) # x - segments[:, 1].clamp_(0, shape[0]) # y - else: # np.array (faster grouped) - segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x - segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y - - -def non_max_suppression( - prediction, - conf_thres=0.25, - iou_thres=0.45, - classes=None, - agnostic=False, - multi_label=False, - labels=(), - max_det=300, - nm=0, # number of masks -): - """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections - - Returns: - list of detections, on (n,6) tensor per image [xyxy, conf, cls] - """ - - if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) - prediction = prediction[0] # select only inference output - - device = prediction.device - mps = 'mps' in device.type # Apple MPS - if mps: # MPS not fully supported yet, convert tensors to CPU before NMS - prediction = prediction.cpu() - bs = prediction.shape[0] # batch size - nc = prediction.shape[2] - nm - 5 # number of classes - xc = prediction[..., 4] > conf_thres # candidates - - # Checks - assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' - assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' - - # Settings - # min_wh = 2 # (pixels) minimum box width and height - max_wh = 7680 # (pixels) maximum box width and height - max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() - time_limit = 0.5 + 0.05 * bs # seconds to quit after - redundant = True # require redundant detections - multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) - merge = False # use merge-NMS - - t = time.time() - mi = 5 + nc # mask start index - output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs - for xi, x in enumerate(prediction): # image index, image inference - # Apply constraints - # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height - x = x[xc[xi]] # confidence - - # Cat apriori labels if autolabelling - if labels and len(labels[xi]): - lb = labels[xi] - v = torch.zeros((len(lb), nc + nm + 5), device=x.device) - v[:, :4] = lb[:, 1:5] # box - v[:, 4] = 1.0 # conf - v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls - x = torch.cat((x, v), 0) - - # If none remain process next image - if not x.shape[0]: - continue - - # Compute conf - x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf - - # Box/Mask - box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) - mask = x[:, mi:] # zero columns if no masks - - # Detections matrix nx6 (xyxy, conf, cls) - if multi_label: - i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T - x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) - else: # best class only - conf, j = x[:, 5:mi].max(1, 
keepdim=True) - x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] - - # Filter by class - if classes is not None: - x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] - - # Apply finite constraint - # if not torch.isfinite(x).all(): - # x = x[torch.isfinite(x).all(1)] - - # Check shape - n = x.shape[0] # number of boxes - if not n: # no boxes - continue - elif n > max_nms: # excess boxes - x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence - else: - x = x[x[:, 4].argsort(descending=True)] # sort by confidence - - # Batched NMS - c = x[:, 5:6] * (0 if agnostic else max_wh) # classes - boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores - i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS - if i.shape[0] > max_det: # limit detections - i = i[:max_det] - if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) - # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) - iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix - weights = iou * scores[None] # box weights - x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes - if redundant: - i = i[iou.sum(1) > 1] # require redundancy - - output[xi] = x[i] - if mps: - output[xi] = output[xi].to(device) - if (time.time() - t) > time_limit: - LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') - break # time limit exceeded - - return output - - -def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() - # Strip optimizer from 'f' to finalize training, optionally save as 's' - x = torch.load(f, map_location=torch.device('cpu')) - if x.get('ema'): - x['model'] = x['ema'] # replace model with ema - for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys - x[k] = None - x['epoch'] = -1 - x['model'].half() # to FP16 - for p in x['model'].parameters(): - p.requires_grad = False - torch.save(x, s or f) - mb = os.path.getsize(s or f) / 1E6 # filesize - LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") - - -def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): - evolve_csv = save_dir / 'evolve.csv' - evolve_yaml = save_dir / 'hyp_evolve.yaml' - keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps] - keys = tuple(x.strip() for x in keys) - vals = results + tuple(hyp.values()) - n = len(keys) - - # Download (optional) - if bucket: - url = f'gs://{bucket}/evolve.csv' - if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): - os.system(f'gsutil cp {url} {save_dir}') # download evolve.csv if larger than local - - # Log to evolve.csv - s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header - with open(evolve_csv, 'a') as f: - f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') - - # Save yaml - with open(evolve_yaml, 'w') as f: - data = pd.read_csv(evolve_csv, skipinitialspace=True) - data = data.rename(columns=lambda x: x.strip()) # strip keys - i = np.argmax(fitness(data.values[:, :4])) # - generations = len(data) - f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + - f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + - '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') - yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) - - # Print to screen - LOGGER.info(prefix + f'{generations} generations 
finished, current result:\n' + prefix + - ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' - for x in vals) + '\n\n') - - if bucket: - os.system(f'gsutil cp {evolve_csv} {evolve_yaml} gs://{bucket}') # upload - - -def apply_classifier(x, model, img, im0): - # Apply a second stage classifier to YOLO outputs - # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() - im0 = [im0] if isinstance(im0, np.ndarray) else im0 - for i, d in enumerate(x): # per image - if d is not None and len(d): - d = d.clone() - - # Reshape and pad cutouts - b = xyxy2xywh(d[:, :4]) # boxes - b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square - b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad - d[:, :4] = xywh2xyxy(b).long() - - # Rescale boxes from img_size to im0 size - scale_boxes(img.shape[2:], d[:, :4], im0[i].shape) - - # Classes - pred_cls1 = d[:, 5].long() - ims = [] - for a in d: - cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] - im = cv2.resize(cutout, (224, 224)) # BGR - - im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 - im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 - im /= 255 # 0 - 255 to 0.0 - 1.0 - ims.append(im) - - pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction - x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections - - return x - - -def increment_path(path, exist_ok=False, sep='', mkdir=False): - # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. - path = Path(path) # os-agnostic - if path.exists() and not exist_ok: - path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') - - # Method 1 - for n in range(2, 9999): - p = f'{path}{sep}{n}{suffix}' # increment path - if not os.path.exists(p): # - break - path = Path(p) - - # Method 2 (deprecated) - # dirs = glob.glob(f"{path}{sep}*") # similar paths - # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] - # i = [int(m.groups()[0]) for m in matches if m] # indices - # n = max(i) + 1 if i else 2 # increment number - # path = Path(f"{path}{sep}{n}{suffix}") # increment path - - if mkdir: - path.mkdir(parents=True, exist_ok=True) # make directory - - return path - - -# OpenCV Chinese-friendly functions ------------------------------------------------------------------------------------ -imshow_ = cv2.imshow # copy to avoid recursion errors - - -def imread(path, flags=cv2.IMREAD_COLOR): - return cv2.imdecode(np.fromfile(path, np.uint8), flags) - - -def imwrite(path, im): - try: - cv2.imencode(Path(path).suffix, im)[1].tofile(path) - return True - except Exception: - return False - - -def imshow(path, im): - imshow_(path.encode('unicode_escape').decode(), im) - - -cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine - -# Variables ------------------------------------------------------------------------------------------------------------ diff --git a/spaces/aodianyun/stable-diffusion-webui/modules/sd_models_config.py b/spaces/aodianyun/stable-diffusion-webui/modules/sd_models_config.py deleted file mode 100644 index 222793d451b3659f7954c208260af71840b475a2..0000000000000000000000000000000000000000 --- a/spaces/aodianyun/stable-diffusion-webui/modules/sd_models_config.py +++ /dev/null @@ -1,112 +0,0 @@ -import re -import os - -import torch - -from modules import shared, paths, sd_disable_initialization - -sd_configs_path = shared.sd_configs_path -sd_repo_configs_path 
= os.path.join(paths.paths['Stable Diffusion'], "configs", "stable-diffusion") - - -config_default = shared.sd_default_config -config_sd2 = os.path.join(sd_repo_configs_path, "v2-inference.yaml") -config_sd2v = os.path.join(sd_repo_configs_path, "v2-inference-v.yaml") -config_sd2_inpainting = os.path.join(sd_repo_configs_path, "v2-inpainting-inference.yaml") -config_depth_model = os.path.join(sd_repo_configs_path, "v2-midas-inference.yaml") -config_inpainting = os.path.join(sd_configs_path, "v1-inpainting-inference.yaml") -config_instruct_pix2pix = os.path.join(sd_configs_path, "instruct-pix2pix.yaml") -config_alt_diffusion = os.path.join(sd_configs_path, "alt-diffusion-inference.yaml") - - -def is_using_v_parameterization_for_sd2(state_dict): - """ - Detects whether unet in state_dict is using v-parameterization. Returns True if it is. You're welcome. - """ - - import ldm.modules.diffusionmodules.openaimodel - from modules import devices - - device = devices.cpu - - with sd_disable_initialization.DisableInitialization(): - unet = ldm.modules.diffusionmodules.openaimodel.UNetModel( - use_checkpoint=True, - use_fp16=False, - image_size=32, - in_channels=4, - out_channels=4, - model_channels=320, - attention_resolutions=[4, 2, 1], - num_res_blocks=2, - channel_mult=[1, 2, 4, 4], - num_head_channels=64, - use_spatial_transformer=True, - use_linear_in_transformer=True, - transformer_depth=1, - context_dim=1024, - legacy=False - ) - unet.eval() - - with torch.no_grad(): - unet_sd = {k.replace("model.diffusion_model.", ""): v for k, v in state_dict.items() if "model.diffusion_model." in k} - unet.load_state_dict(unet_sd, strict=True) - unet.to(device=device, dtype=torch.float) - - test_cond = torch.ones((1, 2, 1024), device=device) * 0.5 - x_test = torch.ones((1, 4, 8, 8), device=device) * 0.5 - - out = (unet(x_test, torch.asarray([999], device=device), context=test_cond) - x_test).mean().item() - - return out < -1 - - -def guess_model_config_from_state_dict(sd, filename): - sd2_cond_proj_weight = sd.get('cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight', None) - diffusion_model_input = sd.get('model.diffusion_model.input_blocks.0.0.weight', None) - - if sd.get('depth_model.model.pretrained.act_postprocess3.0.project.0.bias', None) is not None: - return config_depth_model - - if sd2_cond_proj_weight is not None and sd2_cond_proj_weight.shape[1] == 1024: - if diffusion_model_input.shape[1] == 9: - return config_sd2_inpainting - elif is_using_v_parameterization_for_sd2(sd): - return config_sd2v - else: - return config_sd2 - - if diffusion_model_input is not None: - if diffusion_model_input.shape[1] == 9: - return config_inpainting - if diffusion_model_input.shape[1] == 8: - return config_instruct_pix2pix - - if sd.get('cond_stage_model.roberta.embeddings.word_embeddings.weight', None) is not None: - return config_alt_diffusion - - return config_default - - -def find_checkpoint_config(state_dict, info): - if info is None: - return guess_model_config_from_state_dict(state_dict, "") - - config = find_checkpoint_config_near_filename(info) - if config is not None: - return config - - return guess_model_config_from_state_dict(state_dict, info.filename) - - -def find_checkpoint_config_near_filename(info): - if info is None: - return None - - config = os.path.splitext(info.filename)[0] + ".yaml" - if os.path.exists(config): - return config - - return None - diff --git a/spaces/arch-123/bingo/src/lib/hooks/use-enter-submit.tsx b/spaces/arch-123/bingo/src/lib/hooks/use-enter-submit.tsx 
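The `guess_model_config_from_state_dict` logic in the deleted `sd_models_config.py` above never reads any metadata: it dispatches purely on tensor shapes found in the checkpoint's state dict (a 1024-wide text-encoder projection marks an SD2 model, a 9-channel UNet input marks inpainting, an 8-channel input marks instruct-pix2pix). A minimal sketch of that shape-based dispatch, using dummy tensors and illustrative labels rather than the repository's real YAML paths:

```python
import torch

# Illustrative labels only; the real module returns paths to concrete .yaml config files.
CONFIGS = {
    "sd1": "v1-inference",
    "sd2": "v2-inference",
    "inpainting": "v1-inpainting-inference",
    "pix2pix": "instruct-pix2pix",
}

def guess_config(sd: dict) -> str:
    """Pick a config label purely from tensor shapes, mirroring the heuristic above."""
    proj = sd.get("cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight")
    unet_in = sd.get("model.diffusion_model.input_blocks.0.0.weight")
    if proj is not None and proj.shape[1] == 1024:  # SD2-style text encoder width
        return CONFIGS["sd2"]
    if unet_in is not None:
        if unet_in.shape[1] == 9:   # 4 latent + 4 masked-image + 1 mask channels -> inpainting
            return CONFIGS["inpainting"]
        if unet_in.shape[1] == 8:   # 4 latent + 4 image-conditioning channels -> instruct-pix2pix
            return CONFIGS["pix2pix"]
    return CONFIGS["sd1"]

# Dummy state dict shaped like a v1 inpainting checkpoint.
fake_sd = {"model.diffusion_model.input_blocks.0.0.weight": torch.zeros(320, 9, 3, 3)}
print(guess_config(fake_sd))  # -> v1-inpainting-inference
```

The appeal of the approach is that it works on bare checkpoint files with no sidecar config, at the cost of hard-coding a few well-known layer names.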
deleted file mode 100644 index d66b2d3253baff164235d4ca791aae6d84721835..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/src/lib/hooks/use-enter-submit.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import { useRef, type RefObject } from 'react' - -export function useEnterSubmit(): { - formRef: RefObject<HTMLFormElement> - onKeyDown: (event: React.KeyboardEvent<HTMLTextAreaElement>) => void -} { - const formRef = useRef<HTMLFormElement>(null) - - const handleKeyDown = ( - event: React.KeyboardEvent<HTMLTextAreaElement> - ): void => { - if ( - event.key === 'Enter' && - !event.shiftKey && - !event.nativeEvent.isComposing - ) { - formRef.current?.requestSubmit() - event.preventDefault() - } - } - - return { formRef, onKeyDown: handleKeyDown } -} diff --git a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_melgan_generator.py b/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_melgan_generator.py deleted file mode 100644 index f4958de427ece20296adbcec54441455de997518..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/tests/vocoder_tests/test_vocoder_melgan_generator.py +++ /dev/null @@ -1,14 +0,0 @@ -import numpy as np -import torch - -from TTS.vocoder.models.melgan_generator import MelganGenerator - - -def test_melgan_generator(): - model = MelganGenerator() - print(model) - dummy_input = torch.rand((4, 80, 64)) - output = model(dummy_input) - assert np.all(output.shape == (4, 1, 64 * 256)) - output = model.inference(dummy_input) - assert np.all(output.shape == (4, 1, (64 + 4) * 256)) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_flag.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_flag.py deleted file mode 100644 index 124f137166209878b645bdfd59aa20e8b21e8e2d..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/absl/flags/_flag.py +++ /dev/null @@ -1,488 +0,0 @@ -# Copyright 2017 The Abseil Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Contains Flag class - information about single command-line flag. - -Do NOT import this module directly. Import the flags package and use the -aliases defined at the package level instead. -""" - -from collections import abc -import copy -import functools - -from absl.flags import _argument_parser -from absl.flags import _exceptions -from absl.flags import _helpers - - -@functools.total_ordering -class Flag(object): - """Information about a command-line flag. - - Attributes: - name: the name for this flag - default: the default value for this flag - default_unparsed: the unparsed default value for this flag. 
- default_as_str: default value as repr'd string, e.g., "'true'" - (or None) - value: the most recent parsed value of this flag set by :meth:`parse` - help: a help string or None if no help is available - short_name: the single letter alias for this flag (or None) - boolean: if 'true', this flag does not accept arguments - present: true if this flag was parsed from command line flags - parser: an :class:`~absl.flags.ArgumentParser` object - serializer: an ArgumentSerializer object - allow_override: the flag may be redefined without raising an error, - and newly defined flag overrides the old one. - allow_override_cpp: use the flag from C++ if available the flag - definition is replaced by the C++ flag after init - allow_hide_cpp: use the Python flag despite having a C++ flag with - the same name (ignore the C++ flag) - using_default_value: the flag value has not been set by user - allow_overwrite: the flag may be parsed more than once without - raising an error, the last set value will be used - allow_using_method_names: whether this flag can be defined even if - it has a name that conflicts with a FlagValues method. - validators: list of the flag validators. - - The only public method of a ``Flag`` object is :meth:`parse`, but it is - typically only called by a :class:`~absl.flags.FlagValues` object. The - :meth:`parse` method is a thin wrapper around the - :meth:`ArgumentParser.parse()<absl.flags.ArgumentParser.parse>` method. The - parsed value is saved in ``.value``, and the ``.present`` attribute is - updated. If this flag was already present, an Error is raised. - - :meth:`parse` is also called during ``__init__`` to parse the default value - and initialize the ``.value`` attribute. This enables other python modules to - safely use flags even if the ``__main__`` module neglects to parse the - command line arguments. The ``.present`` attribute is cleared after - ``__init__`` parsing. If the default value is set to ``None``, then the - ``__init__`` parsing step is skipped and the ``.value`` attribute is - initialized to None. - - Note: The default value is also presented to the user in the help - string, so it is important that it be a legal value for this flag. 
- """ - - def __init__(self, parser, serializer, name, default, help_string, - short_name=None, boolean=False, allow_override=False, - allow_override_cpp=False, allow_hide_cpp=False, - allow_overwrite=True, allow_using_method_names=False): - self.name = name - - if not help_string: - help_string = '(no help available)' - - self.help = help_string - self.short_name = short_name - self.boolean = boolean - self.present = 0 - self.parser = parser - self.serializer = serializer - self.allow_override = allow_override - self.allow_override_cpp = allow_override_cpp - self.allow_hide_cpp = allow_hide_cpp - self.allow_overwrite = allow_overwrite - self.allow_using_method_names = allow_using_method_names - - self.using_default_value = True - self._value = None - self.validators = [] - if self.allow_hide_cpp and self.allow_override_cpp: - raise _exceptions.Error( - "Can't have both allow_hide_cpp (means use Python flag) and " - 'allow_override_cpp (means use C++ flag after InitGoogle)') - - self._set_default(default) - - @property - def value(self): - return self._value - - @value.setter - def value(self, value): - self._value = value - - def __hash__(self): - return hash(id(self)) - - def __eq__(self, other): - return self is other - - def __lt__(self, other): - if isinstance(other, Flag): - return id(self) < id(other) - return NotImplemented - - def __bool__(self): - raise TypeError('A Flag instance would always be True. ' - 'Did you mean to test the `.value` attribute?') - - def __getstate__(self): - raise TypeError("can't pickle Flag objects") - - def __copy__(self): - raise TypeError('%s does not support shallow copies. ' - 'Use copy.deepcopy instead.' % type(self).__name__) - - def __deepcopy__(self, memo): - result = object.__new__(type(self)) - result.__dict__ = copy.deepcopy(self.__dict__, memo) - return result - - def _get_parsed_value_as_string(self, value): - """Returns parsed flag value as string.""" - if value is None: - return None - if self.serializer: - return repr(self.serializer.serialize(value)) - if self.boolean: - if value: - return repr('true') - else: - return repr('false') - return repr(str(value)) - - def parse(self, argument): - """Parses string and sets flag value. - - Args: - argument: str or the correct flag value type, argument to be parsed. - """ - if self.present and not self.allow_overwrite: - raise _exceptions.IllegalFlagValueError( - 'flag --%s=%s: already defined as %s' % ( - self.name, argument, self.value)) - self.value = self._parse(argument) - self.present += 1 - - def _parse(self, argument): - """Internal parse function. - - It returns the parsed value, and does not modify class states. - - Args: - argument: str or the correct flag value type, argument to be parsed. - - Returns: - The parsed value. - """ - try: - return self.parser.parse(argument) - except (TypeError, ValueError) as e: # Recast as IllegalFlagValueError. 
- raise _exceptions.IllegalFlagValueError( - 'flag --%s=%s: %s' % (self.name, argument, e)) - - def unparse(self): - self.value = self.default - self.using_default_value = True - self.present = 0 - - def serialize(self): - """Serializes the flag.""" - return self._serialize(self.value) - - def _serialize(self, value): - """Internal serialize function.""" - if value is None: - return '' - if self.boolean: - if value: - return '--%s' % self.name - else: - return '--no%s' % self.name - else: - if not self.serializer: - raise _exceptions.Error( - 'Serializer not present for flag %s' % self.name) - return '--%s=%s' % (self.name, self.serializer.serialize(value)) - - def _set_default(self, value): - """Changes the default value (and current value too) for this Flag.""" - self.default_unparsed = value - if value is None: - self.default = None - else: - self.default = self._parse_from_default(value) - self.default_as_str = self._get_parsed_value_as_string(self.default) - if self.using_default_value: - self.value = self.default - - # This is split out so that aliases can skip regular parsing of the default - # value. - def _parse_from_default(self, value): - return self._parse(value) - - def flag_type(self): - """Returns a str that describes the type of the flag. - - NOTE: we use strings, and not the types.*Type constants because - our flags can have more exotic types, e.g., 'comma separated list - of strings', 'whitespace separated list of strings', etc. - """ - return self.parser.flag_type() - - def _create_xml_dom_element(self, doc, module_name, is_key=False): - """Returns an XML element that contains this flag's information. - - This is information that is relevant to all flags (e.g., name, - meaning, etc.). If you defined a flag that has some other pieces of - info, then please override _ExtraXMLInfo. - - Please do NOT override this method. - - Args: - doc: minidom.Document, the DOM document it should create nodes from. - module_name: str,, the name of the module that defines this flag. - is_key: boolean, True iff this flag is key for main module. - - Returns: - A minidom.Element instance. - """ - element = doc.createElement('flag') - if is_key: - element.appendChild(_helpers.create_xml_dom_element(doc, 'key', 'yes')) - element.appendChild(_helpers.create_xml_dom_element( - doc, 'file', module_name)) - # Adds flag features that are relevant for all flags. - element.appendChild(_helpers.create_xml_dom_element(doc, 'name', self.name)) - if self.short_name: - element.appendChild(_helpers.create_xml_dom_element( - doc, 'short_name', self.short_name)) - if self.help: - element.appendChild(_helpers.create_xml_dom_element( - doc, 'meaning', self.help)) - # The default flag value can either be represented as a string like on the - # command line, or as a Python object. We serialize this value in the - # latter case in order to remain consistent. - if self.serializer and not isinstance(self.default, str): - if self.default is not None: - default_serialized = self.serializer.serialize(self.default) - else: - default_serialized = '' - else: - default_serialized = self.default - element.appendChild(_helpers.create_xml_dom_element( - doc, 'default', default_serialized)) - value_serialized = self._serialize_value_for_xml(self.value) - element.appendChild(_helpers.create_xml_dom_element( - doc, 'current', value_serialized)) - element.appendChild(_helpers.create_xml_dom_element( - doc, 'type', self.flag_type())) - # Adds extra flag features this flag may have. 
- for e in self._extra_xml_dom_elements(doc): - element.appendChild(e) - return element - - def _serialize_value_for_xml(self, value): - """Returns the serialized value, for use in an XML help text.""" - return value - - def _extra_xml_dom_elements(self, doc): - """Returns extra info about this flag in XML. - - "Extra" means "not already included by _create_xml_dom_element above." - - Args: - doc: minidom.Document, the DOM document it should create nodes from. - - Returns: - A list of minidom.Element. - """ - # Usually, the parser knows the extra details about the flag, so - # we just forward the call to it. - return self.parser._custom_xml_dom_elements(doc) # pylint: disable=protected-access - - -class BooleanFlag(Flag): - """Basic boolean flag. - - Boolean flags do not take any arguments, and their value is either - ``True`` (1) or ``False`` (0). The false value is specified on the command - line by prepending the word ``'no'`` to either the long or the short flag - name. - - For example, if a Boolean flag was created whose long name was - ``'update'`` and whose short name was ``'x'``, then this flag could be - explicitly unset through either ``--noupdate`` or ``--nox``. - """ - - def __init__(self, name, default, help, short_name=None, **args): # pylint: disable=redefined-builtin - p = _argument_parser.BooleanParser() - super(BooleanFlag, self).__init__( - p, None, name, default, help, short_name, 1, **args) - - -class EnumFlag(Flag): - """Basic enum flag; its value can be any string from list of enum_values.""" - - def __init__(self, name, default, help, enum_values, # pylint: disable=redefined-builtin - short_name=None, case_sensitive=True, **args): - p = _argument_parser.EnumParser(enum_values, case_sensitive) - g = _argument_parser.ArgumentSerializer() - super(EnumFlag, self).__init__( - p, g, name, default, help, short_name, **args) - self.help = '<%s>: %s' % ('|'.join(enum_values), self.help) - - def _extra_xml_dom_elements(self, doc): - elements = [] - for enum_value in self.parser.enum_values: - elements.append(_helpers.create_xml_dom_element( - doc, 'enum_value', enum_value)) - return elements - - -class EnumClassFlag(Flag): - """Basic enum flag; its value is an enum class's member.""" - - def __init__( - self, - name, - default, - help, # pylint: disable=redefined-builtin - enum_class, - short_name=None, - case_sensitive=False, - **args): - p = _argument_parser.EnumClassParser( - enum_class, case_sensitive=case_sensitive) - g = _argument_parser.EnumClassSerializer(lowercase=not case_sensitive) - super(EnumClassFlag, self).__init__( - p, g, name, default, help, short_name, **args) - self.help = '<%s>: %s' % ('|'.join(p.member_names), self.help) - - def _extra_xml_dom_elements(self, doc): - elements = [] - for enum_value in self.parser.enum_class.__members__.keys(): - elements.append(_helpers.create_xml_dom_element( - doc, 'enum_value', enum_value)) - return elements - - -class MultiFlag(Flag): - """A flag that can appear multiple time on the command-line. - - The value of such a flag is a list that contains the individual values - from all the appearances of that flag on the command-line. - - See the __doc__ for Flag for most behavior of this class. Only - differences in behavior are described here: - - * The default value may be either a single value or an iterable of values. - A single value is transformed into a single-item list of that value. 
- - * The value of the flag is always a list, even if the option was - only supplied once, and even if the default value is a single - value - """ - - def __init__(self, *args, **kwargs): - super(MultiFlag, self).__init__(*args, **kwargs) - self.help += ';\n repeat this option to specify a list of values' - - def parse(self, arguments): - """Parses one or more arguments with the installed parser. - - Args: - arguments: a single argument or a list of arguments (typically a - list of default values); a single argument is converted - internally into a list containing one item. - """ - new_values = self._parse(arguments) - if self.present: - self.value.extend(new_values) - else: - self.value = new_values - self.present += len(new_values) - - def _parse(self, arguments): - if (isinstance(arguments, abc.Iterable) and - not isinstance(arguments, str)): - arguments = list(arguments) - - if not isinstance(arguments, list): - # Default value may be a list of values. Most other arguments - # will not be, so convert them into a single-item list to make - # processing simpler below. - arguments = [arguments] - - return [super(MultiFlag, self)._parse(item) for item in arguments] - - def _serialize(self, value): - """See base class.""" - if not self.serializer: - raise _exceptions.Error( - 'Serializer not present for flag %s' % self.name) - if value is None: - return '' - - serialized_items = [ - super(MultiFlag, self)._serialize(value_item) for value_item in value - ] - - return '\n'.join(serialized_items) - - def flag_type(self): - """See base class.""" - return 'multi ' + self.parser.flag_type() - - def _extra_xml_dom_elements(self, doc): - elements = [] - if hasattr(self.parser, 'enum_values'): - for enum_value in self.parser.enum_values: - elements.append(_helpers.create_xml_dom_element( - doc, 'enum_value', enum_value)) - return elements - - -class MultiEnumClassFlag(MultiFlag): - """A multi_enum_class flag. - - See the __doc__ for MultiFlag for most behaviors of this class. In addition, - this class knows how to handle enum.Enum instances as values for this flag - type. 
- """ - - def __init__(self, - name, - default, - help_string, - enum_class, - case_sensitive=False, - **args): - p = _argument_parser.EnumClassParser( - enum_class, case_sensitive=case_sensitive) - g = _argument_parser.EnumClassListSerializer( - list_sep=',', lowercase=not case_sensitive) - super(MultiEnumClassFlag, self).__init__( - p, g, name, default, help_string, **args) - self.help = ( - '<%s>: %s;\n repeat this option to specify a list of values' % - ('|'.join(p.member_names), help_string or '(no help available)')) - - def _extra_xml_dom_elements(self, doc): - elements = [] - for enum_value in self.parser.enum_class.__members__.keys(): - elements.append(_helpers.create_xml_dom_element( - doc, 'enum_value', enum_value)) - return elements - - def _serialize_value_for_xml(self, value): - """See base class.""" - if value is not None: - value_serialized = self.serializer.serialize(value) - else: - value_serialized = '' - return value_serialized diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/nested_dictionary_dataset.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/nested_dictionary_dataset.py deleted file mode 100644 index 52e74abddacc923c5e29b0a0c41d7efc85482d3b..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/data/nested_dictionary_dataset.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from collections import OrderedDict - -import torch -from torch.utils.data.dataloader import default_collate - -from . import FairseqDataset - - -def _flatten(dico, prefix=None): - """Flatten a nested dictionary.""" - new_dico = OrderedDict() - if isinstance(dico, dict): - prefix = prefix + "." if prefix is not None else "" - for k, v in dico.items(): - if v is None: - continue - new_dico.update(_flatten(v, prefix + k)) - elif isinstance(dico, list): - for i, v in enumerate(dico): - new_dico.update(_flatten(v, prefix + ".[" + str(i) + "]")) - else: - new_dico = OrderedDict({prefix: dico}) - return new_dico - - -def _unflatten(dico): - """Unflatten a flattened dictionary into a nested dictionary.""" - new_dico = OrderedDict() - for full_k, v in dico.items(): - full_k = full_k.split(".") - node = new_dico - for k in full_k[:-1]: - if k.startswith("[") and k.endswith("]"): - k = int(k[1:-1]) - if k not in node: - node[k] = OrderedDict() - node = node[k] - node[full_k[-1]] = v - return new_dico - - -class NestedDictionaryDataset(FairseqDataset): - def __init__(self, defn, sizes=None): - super().__init__() - self.defn = _flatten(defn) - self.sizes = [sizes] if not isinstance(sizes, (list, tuple)) else sizes - - first = None - for v in self.defn.values(): - if not isinstance( - v, - ( - FairseqDataset, - torch.utils.data.Dataset, - ), - ): - raise ValueError("Expected Dataset but found: {}".format(v.__class__)) - first = first or v - if len(v) > 0: - assert len(v) == len(first), "dataset lengths must match" - - self._len = len(first) - - def __getitem__(self, index): - return OrderedDict((k, ds[index]) for k, ds in self.defn.items()) - - def __len__(self): - return self._len - - def collater(self, samples): - """Merge a list of samples to form a mini-batch. 
- - Args: - samples (List[dict]): samples to collate - - Returns: - dict: a mini-batch suitable for forwarding with a Model - """ - if len(samples) == 0: - return {} - sample = OrderedDict() - for k, ds in self.defn.items(): - try: - sample[k] = ds.collater([s[k] for s in samples]) - except NotImplementedError: - sample[k] = default_collate([s[k] for s in samples]) - return _unflatten(sample) - - def num_tokens(self, index): - """Return the number of tokens in a sample. This value is used to - enforce ``--max-tokens`` during batching.""" - return max(s[index] for s in self.sizes) - - def size(self, index): - """Return an example's size as a float or tuple. This value is used when - filtering a dataset with ``--max-positions``.""" - if len(self.sizes) == 1: - return self.sizes[0][index] - else: - return (s[index] for s in self.sizes) - - @property - def supports_prefetch(self): - """Whether this dataset supports prefetching.""" - return any(ds.supports_prefetch for ds in self.defn.values()) - - def prefetch(self, indices): - """Prefetch the data required for this epoch.""" - for ds in self.defn.values(): - if getattr(ds, "supports_prefetch", False): - ds.prefetch(indices) - - @property - def can_reuse_epoch_itr_across_epochs(self): - return all(ds.can_reuse_epoch_itr_across_epochs for ds in self.defn.values()) - - def set_epoch(self, epoch): - super().set_epoch(epoch) - for ds in self.defn.values(): - ds.set_epoch(epoch) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/examples/MMPT/scripts/video_feature_extractor/model.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/examples/MMPT/scripts/video_feature_extractor/model.py deleted file mode 100644 index ac266e844c86246bbfce02b9e6a2999353661df9..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/fairseq/examples/MMPT/scripts/video_feature_extractor/model.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Howto100M authors and Facebook, Inc. All Rights Reserved - -import torch as th - -from torch import nn - - -class GlobalAvgPool(nn.Module): - def __init__(self): - super(GlobalAvgPool, self).__init__() - - def forward(self, x): - return th.mean(x, dim=[-2, -1]) - - -def get_model(args): - assert args.type in ['2d', '3d', 'vmz', 's3d', 'vae'] - if args.type == '2d': - print('Loading 2D-ResNet-152 ...') - import torchvision.models as models - model = models.resnet152(pretrained=True) - model = nn.Sequential(*list(model.children())[:-2], GlobalAvgPool()) - model = model.cuda() - elif args.type == 'vmz': - print('Loading VMZ ...') - from vmz34 import r2plus1d_34 - model = r2plus1d_34(pretrained_path=args.vmz_model_path, pretrained_num_classes=487) - model = model.cuda() - elif args.type == 's3d': - # we use one copy of s3d instead of dup another one for feature extraction. 
- from mmpt.processors.models.s3dg import S3D - model = S3D('pretrained_models/s3d_dict.npy', 512) - model.load_state_dict(th.load('pretrained_models/s3d_howto100m.pth')) - model = model.cuda() - - elif args.type == '3d': - print('Loading 3D-ResneXt-101 ...') - from videocnn.models import resnext - model = resnext.resnet101( - num_classes=400, - shortcut_type='B', - cardinality=32, - sample_size=112, - sample_duration=16, - last_fc=False) - model = model.cuda() - model_data = th.load(args.resnext101_model_path) - model.load_state_dict(model_data) - elif args.type == 'vae': - from openaivae import OpenAIParallelDiscreteVAE - model = OpenAIParallelDiscreteVAE() - model = model.cuda() - else: - raise ValueError("model not supported yet.") - - model.eval() - print('loaded') - return model diff --git a/spaces/ashercn97/AsherTesting/modules/relative_imports.py b/spaces/ashercn97/AsherTesting/modules/relative_imports.py deleted file mode 100644 index 3c0eb56b77c6cb6b38fdbdeebabe9ad3b8d91b97..0000000000000000000000000000000000000000 --- a/spaces/ashercn97/AsherTesting/modules/relative_imports.py +++ /dev/null @@ -1,13 +0,0 @@ -import sys -from pathlib import Path - - -class RelativeImport: - def __init__(self, path): - self.import_path = Path(path) - - def __enter__(self): - sys.path.insert(0, str(self.import_path)) - - def __exit__(self, exc_type, exc_value, traceback): - sys.path.remove(str(self.import_path)) diff --git "a/spaces/ashhadahsan/summarizer-space/pages/1_\360\237\223\210_predict.py" "b/spaces/ashhadahsan/summarizer-space/pages/1_\360\237\223\210_predict.py" deleted file mode 100644 index 3065be26fb735dbaa66fed82a4a0491a54c913d4..0000000000000000000000000000000000000000 --- "a/spaces/ashhadahsan/summarizer-space/pages/1_\360\237\223\210_predict.py" +++ /dev/null @@ -1,560 +0,0 @@ -import streamlit as st -import pandas as pd -from transformers import BertTokenizer, TFBertForSequenceClassification -from transformers import TextClassificationPipeline -from transformers import pipeline -from stqdm import stqdm -from simplet5 import SimpleT5 -from transformers import AutoTokenizer, AutoModelForSeq2SeqLM -from transformers import BertTokenizer, TFBertForSequenceClassification -import logging -from datasets import load_dataset -import gc -from typing import List -from collections import OrderedDict -from datetime import datetime - -tokenizer_kwargs = dict(max_length=128, truncation=True, padding=True) - - -flan_t5_kwargs = dict(repetition_penalty=1.2) -SLEEP = 2 - - -date = datetime.now().strftime(r"%Y-%m-%d") - - -def clean_memory(obj: TextClassificationPipeline): - del obj - gc.collect() - - -@st.cache_data -def get_all_cats(): - data = load_dataset("ashhadahsan/amazon_theme") - data = data["train"].to_pandas() - labels = [x for x in list(set(data.iloc[:, 1].values.tolist())) if x != "Unknown"] - del data - return labels - - -@st.cache_data -def get_all_subcats(): - data = load_dataset("ashhadahsan/amazon_subtheme") - data = data["train"].to_pandas() - labels = [x for x in list(set(data.iloc[:, 1].values.tolist())) if x != "Unknown"] - del data - return labels - - -@st.cache_resource -def load_zero_shot_classification_large(): - classifier_zero = pipeline( - "zero-shot-classification", - model="facebook/bart-large-mnli", - ) - return classifier_zero - - -def assign_label_zeroshot(zero, to: str, old: List): - assigned = zero(to, old) - assigned_dict = dict(zip(assigned["labels"], assigned["scores"])) - od = OrderedDict(sorted(assigned_dict.items(), key=lambda x: x[1], reverse=True)) - 
print(list(od.keys())[0]) - print(type(list(od.keys())[0])) - - return list(od.keys())[0] - - -def assign_labels_flant5(pipe, what: str, to: str, old: List): - old = ", ".join(old) - - return pipe( - f"""'Generate a new one word {what} to this summary of the text of a review - {to} for context - already assigned {what} are , {themes} - theme:""" - )[0]["generated_text"] - - -@st.cache_resource -def load_t5() -> (AutoModelForSeq2SeqLM, AutoTokenizer): - model = AutoModelForSeq2SeqLM.from_pretrained( - "t5-base", - ) - - tokenizer = AutoTokenizer.from_pretrained( - pretrained_model_name_or_path="t5-base", - ) - return model, tokenizer - - -@st.cache_resource -def load_flan_t5_large(): - return pipeline( - task="text2text-generation", - model="google/flan-t5-large", - model_kwargs=flan_t5_kwargs, - ) - - -@st.cache_resource -def summarizationModel(): - return pipeline( - task="summarization", - model="my_awesome_sum/", - ) - - -@st.cache_resource -def convert_df(df: pd.DataFrame): - return df.to_csv(index=False).encode("utf-8") - - -def load_one_line_summarizer(model): - return model.load_model( - "t5", - "snrspeaks/t5-one-line-summary", - ) - - -@st.cache_resource -def classify_theme() -> TextClassificationPipeline: - tokenizer = BertTokenizer.from_pretrained( - "ashhadahsan/amazon-theme-bert-base-finetuned", - ) - model = TFBertForSequenceClassification.from_pretrained( - "ashhadahsan/amazon-theme-bert-base-finetuned", - ) - pipeline = TextClassificationPipeline( - model=model, - tokenizer=tokenizer, - **tokenizer_kwargs, - ) - return pipeline - - -@st.cache_resource -def classify_sub_theme() -> TextClassificationPipeline: - tokenizer = BertTokenizer.from_pretrained( - "ashhadahsan/amazon-subtheme-bert-base-finetuned", - ) - model = TFBertForSequenceClassification.from_pretrained( - "ashhadahsan/amazon-subtheme-bert-base-finetuned", - ) - pipeline = TextClassificationPipeline( - model=model, tokenizer=tokenizer, **tokenizer_kwargs - ) - return pipeline - - -st.set_page_config(layout="wide", page_title="Amazon Review | Summarizer") -st.title(body="Amazon Review Summarizer") - -uploaded_file = st.file_uploader(label="Choose a file", type=["xlsx", "xls", "csv"]) - - -summarizer_option = st.selectbox( - label="Select Summarizer", - options=("Custom trained on the dataset", "t5-base", "t5-one-line-summary"), -) -col1, col2, col3 = st.columns(spec=[1, 1, 1]) - -with col1: - summary_yes = st.checkbox(label="Summrization", value=False) - -with col2: - classification = st.checkbox(label="Classify Category", value=True) - -with col3: - sub_theme = st.checkbox(label="Sub theme classification", value=True) - -treshold = st.slider( - label="Model Confidence value", - min_value=0.1, - max_value=0.8, - step=0.1, - value=0.6, - help="If the model has a confidence score below this number , then a new label is assigned (0.6) means 60 percent and so on", -) - -ps = st.empty() - -if st.button("Process", type="primary"): - themes = get_all_cats() - subthemes = get_all_subcats() - - oneline = SimpleT5() - load_one_line_summarizer(model=oneline) - zeroline = load_zero_shot_classification_large() - bot = load_flan_t5_large() - - cancel_button = st.empty() - cancel_button2 = st.empty() - cancel_button3 = st.empty() - if uploaded_file is not None: - if uploaded_file.name.split(".")[-1] in ["xls", "xlsx"]: - df = pd.read_excel(io=uploaded_file, engine="openpyxl") - if uploaded_file.name.split(".")[-1] in [".csv"]: - df = pd.read_csv(filepath_or_buffer=uploaded_file) - columns = df.columns.values.tolist() - columns = 
[x.lower() for x in columns] - df.columns = columns - print(summarizer_option) - outputdf = pd.DataFrame() - try: - text = df["text"].values.tolist() - outputdf["text"] = text - if summarizer_option == "Custom trained on the dataset": - if summary_yes: - model = summarizationModel() - - progress_text = "Summarization in progress. Please wait." - summary = [] - - for x in stqdm(iterable=range(len(text))): - if cancel_button.button("Cancel", key=x): - del model - break - try: - summary.append( - model( - f"summarize: {text[x]}", - max_length=50, - early_stopping=True, - )[0]["summary_text"] - ) - except: - pass - outputdf["summary"] = summary - del model - if classification: - themePipe = classify_theme() - classes = [] - classesUnlabel = [] - classesUnlabelZero = [] - for x in stqdm( - iterable=text, - desc="Assigning Themes ...", - total=len(text), - colour="#BF1A1A", - ): - output = themePipe(x)[0]["label"] - classes.append(output) - score = round(number=themePipe(x)[0]["score"], ndigits=2) - if score <= treshold: - onelineoutput = oneline.predict(source_text=x)[0] - - print("hit") - classesUnlabel.append( - assign_labels_flant5( - bot, - what="theme", - to=onelineoutput, - old=themes, - ) - ) - classesUnlabelZero.append( - assign_label_zeroshot( - zero=zeroline, to=onelineoutput, old=themes - ) - ) - - else: - classesUnlabel.append("") - classesUnlabelZero.append("") - - outputdf["Review Theme"] = classes - outputdf["Review Theme-issue-new"] = classesUnlabel - outputdf["Review SubTheme-issue-zero"] = classesUnlabelZero - clean_memory(themePipe) - if sub_theme: - subThemePipe = classify_sub_theme() - classes = [] - classesUnlabel = [] - classesUnlabelZero = [] - for x in stqdm( - iterable=text, - desc="Assigning Subthemes ...", - total=len(text), - colour="green", - ): - output = subThemePipe(x)[0]["label"] - classes.append(output) - score = round(subThemePipe(x)[0]["score"], 2) - if score <= treshold: - onelineoutput = oneline.predict(x)[0] - - print("hit") - classesUnlabel.append( - assign_labels_flant5( - bot, - what="subtheme", - to=onelineoutput, - old=subthemes, - ) - ) - classesUnlabelZero.append( - assign_label_zeroshot( - zero=zeroline, - to=onelineoutput, - old=subthemes, - ) - ) - - else: - classesUnlabel.append("") - classesUnlabelZero.append("") - - outputdf["Review SubTheme"] = classes - outputdf["Review SubTheme-issue-new"] = classesUnlabel - outputdf["Review SubTheme-issue-zero"] = classesUnlabelZero - - clean_memory(subThemePipe) - - csv = convert_df(outputdf) - st.download_button( - label="Download output as CSV", - data=csv, - file_name=f"{summarizer_option}_{date}_df.csv", - mime="text/csv", - use_container_width=True, - ) - if summarizer_option == "t5-base": - if summary_yes: - model, tokenizer = load_t5() - summary = [] - for x in stqdm(range(len(text))): - if cancel_button2.button("Cancel", key=x): - del model, tokenizer - break - tokens_input = tokenizer.encode( - "summarize: " + text[x], - return_tensors="pt", - max_length=tokenizer.model_max_length, - truncation=True, - ) - summary_ids = model.generate( - tokens_input, - min_length=80, - max_length=150, - length_penalty=20, - num_beams=2, - ) - summary_gen = tokenizer.decode( - summary_ids[0], skip_special_tokens=True - ) - summary.append(summary_gen) - del model, tokenizer - outputdf["summary"] = summary - - if classification: - themePipe = classify_theme() - classes = [] - classesUnlabel = [] - classesUnlabelZero = [] - for x in stqdm( - text, desc="Assigning Themes ...", total=len(text), colour="red" - ): - 
output = themePipe(x)[0]["label"] - classes.append(output) - score = round(themePipe(x)[0]["score"], 2) - if score <= treshold: - onelineoutput = oneline.predict(x)[0] - - print("hit") - - classesUnlabel.append( - assign_labels_flant5( - bot, - what="theme", - to=onelineoutput, - old=themes, - ) - ) - classesUnlabelZero.append( - assign_label_zeroshot( - zero=zeroline, to=onelineoutput, old=themes - ) - ) - - else: - classesUnlabel.append("") - classesUnlabelZero.append("") - outputdf["Review Theme"] = classes - outputdf["Review Theme-issue-new"] = classesUnlabel - outputdf["Review SubTheme-issue-zero"] = classesUnlabelZero - clean_memory(themePipe) - - if sub_theme: - subThemePipe = classify_sub_theme() - classes = [] - classesUnlabelZero = [] - - for x in stqdm( - text, - desc="Assigning Subthemes ...", - total=len(text), - colour="green", - ): - output = subThemePipe(x)[0]["label"] - classes.append(output) - score = round(subThemePipe(x)[0]["score"], 2) - if score <= treshold: - onelineoutput = oneline.predict(x)[0] - - print("hit") - classesUnlabel.append( - assign_labels_flant5( - bot, - what="subtheme", - to=onelineoutput, - old=subthemes, - ) - ) - classesUnlabelZero.append( - assign_label_zeroshot( - zero=zeroline, - to=onelineoutput, - old=subthemes, - ) - ) - - else: - classesUnlabel.append("") - classesUnlabelZero.append("") - - outputdf["Review SubTheme"] = classes - outputdf["Review SubTheme-issue-new"] = classesUnlabel - outputdf["Review SubTheme-issue-zero"] = classesUnlabelZero - - clean_memory(subThemePipe) - - csv = convert_df(outputdf) - st.download_button( - label="Download output as CSV", - data=csv, - file_name=f"{summarizer_option}_{date}_df.csv", - mime="text/csv", - use_container_width=True, - ) - - if summarizer_option == "t5-one-line-summary": - if summary_yes: - model = SimpleT5() - load_one_line_summarizer(model=model) - - summary = [] - for x in stqdm(iterable=range(len(text))): - if cancel_button3.button(label="Cancel", key=x): - del model - break - try: - summary.append(model.predict(source_text=text[x])[0]) - except: - pass - outputdf["summary"] = summary - del model - - if classification: - themePipe = classify_theme() - classes = [] - classesUnlabel = [] - classesUnlabelZero = [] - for x in stqdm( - iterable=text, - desc="Assigning Themes ...", - total=len(text), - colour="red", - ): - output = themePipe(x)[0]["label"] - classes.append(output) - score = round(number=themePipe(x)[0]["score"], ndigits=2) - if score <= treshold: - onelineoutput = oneline.predict(x)[0] - - print("hit") - classesUnlabel.append( - assign_labels_flant5( - bot, - what="theme", - to=onelineoutput, - old=themes, - ) - ) - classesUnlabelZero.append( - assign_label_zeroshot( - zero=zeroline, to=onelineoutput, old=themes - ) - ) - - else: - classesUnlabel.append("") - classesUnlabelZero.append("") - outputdf["Review Theme"] = classes - outputdf["Review Theme-issue-new"] = classesUnlabel - outputdf["Review SubTheme-issue-zero"] = classesUnlabelZero - - if sub_theme: - subThemePipe = classify_sub_theme() - classes = [] - classesUnlabelZero = [] - - for x in stqdm( - iterable=text, - desc="Assigning Subthemes ...", - total=len(text), - colour="green", - ): - output = subThemePipe(x)[0]["label"] - classes.append(output) - score = round(subThemePipe(x)[0]["score"], 2) - if score <= treshold: - print("hit") - onelineoutput = oneline.predict(source_text=x)[0] - - classesUnlabel.append( - assign_labels_flant5( - bot, - what="subtheme", - to=onelineoutput, - old=subthemes, - ) - ) - 
classesUnlabelZero.append( - assign_label_zeroshot( - zero=zeroline, - to=onelineoutput, - old=subthemes, - ) - ) - - else: - classesUnlabel.append("") - classesUnlabelZero.append("") - - outputdf["Review SubTheme"] = classes - outputdf["Review SubTheme-issue-new"] = classesUnlabel - outputdf["Review SubTheme-issue-zero"] = classesUnlabelZero - - clean_memory(subThemePipe) - - csv = convert_df(outputdf) - st.download_button( - label="Download output as CSV", - data=csv, - file_name=f"{summarizer_option}_{date}_df.csv", - mime="text/csv", - use_container_width=True, - ) - - except KeyError as e: - st.error( - body="Please Make sure that your data must have a column named text", - icon="🚨", - ) - st.info(body="Text column must have amazon reviews", icon="ℹ️") - st.exception(e) - - except BaseException as e: - logging.exception(msg="An exception was occurred") diff --git a/spaces/attention-refocusing/Attention-refocusing/dataset/tsv_dataset.py b/spaces/attention-refocusing/Attention-refocusing/dataset/tsv_dataset.py deleted file mode 100644 index dc2db59faf1254970b35d2fc8dec78afde4f6918..0000000000000000000000000000000000000000 --- a/spaces/attention-refocusing/Attention-refocusing/dataset/tsv_dataset.py +++ /dev/null @@ -1,326 +0,0 @@ -from tkinter.messagebox import NO -import torch -import json -from collections import defaultdict -from PIL import Image, ImageDraw -from copy import deepcopy -import os -import torchvision.transforms as transforms -import torchvision -from .base_dataset import BaseDataset, check_filenames_in_zipdata, recalculate_box_and_verify_if_valid -from io import BytesIO -import random - -from .tsv import TSVFile - -from io import BytesIO -import base64 -from PIL import Image -import numpy as np - - -def decode_base64_to_pillow(image_b64): - return Image.open(BytesIO(base64.b64decode(image_b64))).convert('RGB') - -def decode_tensor_from_string(arr_str, use_tensor=True): - arr = np.frombuffer(base64.b64decode(arr_str), dtype='float32') - if use_tensor: - arr = torch.from_numpy(arr) - return arr - -def decode_item(item): - item = json.loads(item) - item['image'] = decode_base64_to_pillow(item['image']) - - for anno in item['annos']: - anno['image_embedding_before'] = decode_tensor_from_string(anno['image_embedding_before']) - anno['text_embedding_before'] = decode_tensor_from_string(anno['text_embedding_before']) - anno['image_embedding_after'] = decode_tensor_from_string(anno['image_embedding_after']) - anno['text_embedding_after'] = decode_tensor_from_string(anno['text_embedding_after']) - return item - -def check_unique(images, fields): - for field in fields: - temp_list = [] - for img_info in images: - temp_list.append(img_info[field]) - assert len(set(temp_list)) == len(temp_list), field - -def clean_data(data): - for data_info in data: - data_info.pop("original_img_id", None) - data_info.pop("original_id", None) - data_info.pop("sentence_id", None) # sentence id for each image (multiple sentences for one image) - data_info.pop("dataset_name", None) - data_info.pop("data_source", None) - data_info["data_id"] = data_info.pop("id") - - -def clean_annotations(annotations): - for anno_info in annotations: - anno_info.pop("iscrowd", None) # I have checked that all 0 for flickr, vg, coco - anno_info.pop("category_id", None) # I have checked that all 1 for flickr vg. 
This is not always 1 for coco, but I do not think we need this annotation - anno_info.pop("area", None) - # anno_info.pop("id", None) - anno_info["data_id"] = anno_info.pop("image_id") - - -def draw_box(img, boxes): - draw = ImageDraw.Draw(img) - for box in boxes: - draw.rectangle([box[0], box[1], box[2], box[3]], outline ="red", width=2) # x0 y0 x1 y1 - return img - - -def xyhw2xyxy(box): - x0, y0, w, h = box - return [ x0, y0, x0+w, y0+h ] - - -def make_a_sentence(obj_names, clean=False): - - if clean: - obj_names = [ name[:-6] if ("-other" in name) else name for name in obj_names] - - caption = "" - tokens_positive = [] - for obj_name in obj_names: - start_len = len(caption) - caption += obj_name - end_len = len(caption) - caption += ", " - tokens_positive.append( - [[start_len, end_len]] # in real caption, positive tokens can be disjoint, thus using list of list - ) - caption = caption[:-2] # remove last ", " - - return caption #, tokens_positive - - -def mask_for_random_drop_text_or_image_feature(masks, random_drop_embedding): - """ - input masks tell how many valid grounding tokens for this image - e.g., 1,1,1,1,0,0,0,0,0,0... - - If random_drop_embedding=both. we will random drop either image or - text feature for each token, - but we always make sure there is at least one feature used. - In other words, the following masks are not valid - (because for the second obj, no feature at all): - image: 1,0,1,1,0,0,0,0,0 - text: 1,0,0,0,0,0,0,0,0 - - if random_drop_embedding=image. we will random drop image feature - and always keep the text one. - - """ - N = masks.shape[0] - - if random_drop_embedding=='both': - temp_mask = torch.ones(2,N) - for i in range(N): - if random.uniform(0, 1) < 0.5: # else keep both features - idx = random.sample([0,1], 1)[0] # randomly choose to drop image or text feature - temp_mask[idx,i] = 0 - image_masks = temp_mask[0]*masks - text_masks = temp_mask[1]*masks - - if random_drop_embedding=='image': - image_masks = masks*(torch.rand(N)>0.5)*1 - text_masks = masks - - return image_masks, text_masks - - - - - -def project(x, projection_matrix): - """ - x (Batch*768) should be the penultimate feature of CLIP (before projection) - projection_matrix (768*768) is the CLIP projection matrix, which should be weight.data of Linear layer - defined in CLIP (out_dim, in_dim), thus we need to apply transpose below. - this function will return the CLIP feature (without normalziation) - """ - return x@torch.transpose(projection_matrix, 0, 1) - - -def inv_project(y, projection_matrix): - """ - y (Batch*768) should be the CLIP feature (after projection) - projection_matrix (768*768) is the CLIP projection matrix, which should be weight.data of Linear layer - defined in CLIP (out_dim, in_dim). - this function will return the CLIP penultimate feature. - - Note: to make sure getting the correct penultimate feature, the input y should not be normalized. - If it is normalized, then the result will be scaled by CLIP feature norm, which is unknown. 
- """ - return y@torch.transpose(torch.linalg.inv(projection_matrix), 0, 1) - - - - -class TSVDataset(BaseDataset): - def __init__(self, - tsv_path, - which_embedder='clip', - which_layer=['after','after'], # text and image - prob_use_caption=1, - random_drop_embedding='none', - image_size=256, - min_box_size=0.01, - max_boxes_per_data=8, - max_images=None, # set as 30K used to eval - random_crop = False, - random_flip = True, - ): - image_root = "a placeholder path as we are using tsv here" - super().__init__(image_root, random_crop, random_flip, image_size) - self.tsv_path = tsv_path - self.which_embedder = which_embedder - self.prob_use_caption = prob_use_caption - self.random_drop_embedding = random_drop_embedding - self.min_box_size = min_box_size - self.max_boxes_per_data = max_boxes_per_data - self.max_images = max_images - - assert which_layer in [ ['after','after'], ['before','after_renorm'], ['before','after_reproject'] ] - assert random_drop_embedding in ['none', 'both', 'image'] - self.which_layer_text = which_layer[0] - self.which_layer_image = which_layer[1] - - #self.projection_matrix = torch.load(os.path.join(os.path.dirname(__file__), 'projection_matrix') ) - self.projection_matrix = torch.load('projection_matrix.pth') - - # Load tsv data - self.tsv_file = TSVFile(self.tsv_path) - - - # Load preprocessed name embedding - if which_embedder == 'bert': - self.embedding_len = 1280 - elif which_embedder == 'clip': - self.embedding_len = 768 - else: - assert False - - def total_images(self): - return len(self) - - def get_item_from_tsv(self, index): - _, item = self.tsv_file[index] - item = decode_item(item) - return item - - - def mapping(self, image_embedding): - if self.which_layer_image == 'after': - # both use CLIP aligned feature - return image_embedding - elif self.which_layer_image == 'after_renorm': - # text use before, but image use after projection but normalize to 28.7 - return image_embedding*28.7 - elif self.which_layer_image == 'after_reproject': - image_embedding = project( image_embedding.unsqueeze(0), self.projection_matrix.T ) - image_embedding = image_embedding.squeeze(0) - image_embedding = image_embedding / image_embedding.norm() - image_embedding = image_embedding * 28.7 - return image_embedding - - - - def __getitem__(self, index): - if self.max_boxes_per_data > 99: - assert False, "Are you sure setting such large number of boxes?" 
- - raw_item = self.get_item_from_tsv(index) - is_det = raw_item.get('is_det', False) # if it is from detection (such as o365), then we will make a caption - - out = {} - - # -------------------- id and image ------------------- # - out['id'] = raw_item['data_id'] - image = raw_item['image'] - image_tensor, trans_info = self.transform_image(image) - out["image"] = image_tensor - - - - # -------------------- grounding token ------------------- # - annos = raw_item['annos'] - - areas = [] - all_boxes = [] - all_masks = [] - all_text_embeddings = [] - all_image_embeddings = [] - if is_det: - all_category_names = [] - - text_embedding_name = 'text_embedding_before' if self.which_layer_text == 'before' else 'text_embedding_after' - image_embedding_name = 'image_embedding_after' - - for anno in annos: - x, y, w, h = anno['bbox'] - valid, (x0, y0, x1, y1) = recalculate_box_and_verify_if_valid(x, y, w, h, trans_info, self.image_size, self.min_box_size) - - if valid: - areas.append( (x1-x0)*(y1-y0) ) - all_boxes.append( torch.tensor([x0,y0,x1,y1]) / self.image_size ) # scale to 0-1 - all_masks.append(1) - all_text_embeddings.append(anno[text_embedding_name]) - all_image_embeddings.append( self.mapping(anno[image_embedding_name]) ) - if is_det: - all_category_names.append(anno["category_name"]) - - - wanted_idxs = torch.tensor(areas).sort(descending=True)[1] - wanted_idxs = wanted_idxs[0:self.max_boxes_per_data] - - boxes = torch.zeros(self.max_boxes_per_data, 4) - masks = torch.zeros(self.max_boxes_per_data) - text_embeddings = torch.zeros(self.max_boxes_per_data, self.embedding_len) - image_embeddings = torch.zeros(self.max_boxes_per_data, self.embedding_len) - if is_det: - category_names = [] - for i, idx in enumerate(wanted_idxs): - boxes[i] = all_boxes[idx] - masks[i] = all_masks[idx] - text_embeddings[i] = all_text_embeddings[idx] - image_embeddings[i] = all_image_embeddings[idx] - if is_det: - category_names.append(all_category_names[idx]) - - if self.random_drop_embedding != 'none': - image_masks, text_masks = mask_for_random_drop_text_or_image_feature(masks, self.random_drop_embedding) - else: - image_masks = masks - text_masks = masks - - - out["boxes"] = boxes - out["masks"] = masks - out["image_masks"] = image_masks - out["text_masks"] = text_masks - out["text_embeddings"] = text_embeddings - out["image_embeddings"] = image_embeddings - - - - # -------------------- caption ------------------- # - if random.uniform(0, 1) < self.prob_use_caption: - if is_det: - out["caption"] = make_a_sentence(category_names) - else: - out["caption"] = raw_item["caption"] - else: - out["caption"] = "" - - return out - - - - def __len__(self): - return len(self.tsv_file) - - diff --git a/spaces/avirathtibrewala/YTToText/README.md b/spaces/avirathtibrewala/YTToText/README.md deleted file mode 100644 index e34dd85173150d561a6e143487387a16558e5a62..0000000000000000000000000000000000000000 --- a/spaces/avirathtibrewala/YTToText/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: YTToText -emoji: 🚀 -colorFrom: indigo -colorTo: blue -sdk: gradio -sdk_version: 3.13.0 -app_file: app.py -pinned: false -license: unknown ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/awacke1/ArtStyleLineDrawing/app.py b/spaces/awacke1/ArtStyleLineDrawing/app.py deleted file mode 100644 index 2fa07247e19bb4ec8f1317443122703951315286..0000000000000000000000000000000000000000 --- a/spaces/awacke1/ArtStyleLineDrawing/app.py +++ /dev/null @@ -1,115 +0,0 @@ 
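The `project` / `inv_project` helpers in the TSVDataset code above are exact inverses of one another, provided the CLIP projection matrix is invertible and the projected feature is not re-normalized in between (the point the `inv_project` docstring stresses). A minimal sanity-check sketch, using a random matrix as a stand-in for the real weight the dataset loads from `projection_matrix.pth`:

```python
import torch

# Stand-in for the CLIP projection weight (out_dim x in_dim); the dataset above loads
# the real one from 'projection_matrix.pth'. float64 keeps the matrix inverse accurate.
W = torch.randn(768, 768, dtype=torch.float64)

def project(x, projection_matrix):
    # penultimate CLIP feature -> projected CLIP feature
    return x @ projection_matrix.T

def inv_project(y, projection_matrix):
    # projected CLIP feature -> penultimate feature; y must NOT be normalized first
    return y @ torch.linalg.inv(projection_matrix).T

x = torch.randn(4, 768, dtype=torch.float64)   # a batch of penultimate features
x_rec = inv_project(project(x, W), W)

print((x - x_rec).abs().max())  # close to zero: the round trip recovers x up to numerical error
```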
-import numpy as np -import torch -import torch.nn as nn -import gradio as gr -from PIL import Image -import torchvision.transforms as transforms -norm_layer = nn.InstanceNorm2d - -class ResidualBlock(nn.Module): - def __init__(self, in_features): - super(ResidualBlock, self).__init__() - conv_block = [ nn.ReflectionPad2d(1), - nn.Conv2d(in_features, in_features, 3), - norm_layer(in_features), - nn.ReLU(inplace=True), - nn.ReflectionPad2d(1), - nn.Conv2d(in_features, in_features, 3), - norm_layer(in_features) - ] - self.conv_block = nn.Sequential(*conv_block) - def forward(self, x): - return x + self.conv_block(x) - - -class Generator(nn.Module): - def __init__(self, input_nc, output_nc, n_residual_blocks=9, sigmoid=True): - super(Generator, self).__init__() - model0 = [ nn.ReflectionPad2d(3), - nn.Conv2d(input_nc, 64, 7), - norm_layer(64), - nn.ReLU(inplace=True) ] - self.model0 = nn.Sequential(*model0) - model1 = [] - in_features = 64 - out_features = in_features*2 - for _ in range(2): - model1 += [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), - norm_layer(out_features), - nn.ReLU(inplace=True) ] - in_features = out_features - out_features = in_features*2 - self.model1 = nn.Sequential(*model1) - model2 = [] - for _ in range(n_residual_blocks): - model2 += [ResidualBlock(in_features)] - self.model2 = nn.Sequential(*model2) - model3 = [] - out_features = in_features//2 - for _ in range(2): - model3 += [ nn.ConvTranspose2d(in_features, out_features, 3, stride=2, padding=1, output_padding=1), - norm_layer(out_features), - nn.ReLU(inplace=True) ] - in_features = out_features - out_features = in_features//2 - self.model3 = nn.Sequential(*model3) - model4 = [ nn.ReflectionPad2d(3), - nn.Conv2d(64, output_nc, 7)] - if sigmoid: - model4 += [nn.Sigmoid()] - self.model4 = nn.Sequential(*model4) - - def forward(self, x, cond=None): - out = self.model0(x) - out = self.model1(out) - out = self.model2(out) - out = self.model3(out) - out = self.model4(out) - return out - -model1 = Generator(3, 1, 3) -model1.load_state_dict(torch.load('model.pth', map_location=torch.device('cpu'))) -model1.eval() - -model2 = Generator(3, 1, 3) -model2.load_state_dict(torch.load('model2.pth', map_location=torch.device('cpu'))) -model2.eval() - -def predict(input_img, ver): - input_img = Image.open(input_img) - transform = transforms.Compose([transforms.Resize(256, Image.BICUBIC), transforms.ToTensor()]) - input_img = transform(input_img) - input_img = torch.unsqueeze(input_img, 0) - - drawing = 0 - with torch.no_grad(): - if ver == 'Simple Lines': - drawing = model2(input_img)[0].detach() - else: - drawing = model1(input_img)[0].detach() - - drawing = transforms.ToPILImage()(drawing) - return drawing - -title="Art Style Line Drawings - Complex and Simple Portraits and Landscapes" -description="Art Style Line Drawings 🦀🦁🦂🦃🦄🦅🦆🦇🦈🦉🦊🦋🦌🦍🦎🦏 🦐🦑🦒🦓🦔🦕🦖🦗🦘🦙🦚🦛🦜🦝🦞🦟🦠🦡🦢🦣🦤🦥🦦🦧🦨🦩🦪🦫🦬🦭🦮" -# article = "<p style='text-align: center'></p>" -examples=[ -['QSHYNkOyhArcsgDrSFqq_15.625x.jpg', 'Simple Lines'], -['Xenomporh-art-scale-6_00x-gigapixel.png', 'Simple Lines'], -['Alien Chairs-art-scale-6_00x-gigapixel.png', 'Complex Lines'], -['Brain Coral B-gigapixel-art-scale-6_00x.jpg', 'Simple Lines'], -['Brain Coral-gigapixel-art-scale-6_00x.jpg', 'Complex Lines'], -['Dark Ritual Wisp Loop-art-scale-6_00x-gigapixel.png', 'Simple Lines'], -['Dungeons and Dragons Cartoon-art-scale-6_00x-gigapixel.png', 'Complex Lines'], -['Fantasy Art 2-art-scale-6_00x-gigapixel.png', 'Simple Lines'] -] - - -iface = gr.Interface(predict, 
[gr.inputs.Image(type='filepath'), - gr.inputs.Radio(['Complex Lines','Simple Lines'], type="value", default='Simple Lines', label='version')], - gr.outputs.Image(type="pil"), title=title,description=description,examples=examples) - -#iface.launch() -iface.launch() - diff --git a/spaces/awacke1/Embedding-Iframe-HTML5-to-Gradio/style.css b/spaces/awacke1/Embedding-Iframe-HTML5-to-Gradio/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Embedding-Iframe-HTML5-to-Gradio/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/awacke1/Positive.Reframing.Organization.Culture/README.md b/spaces/awacke1/Positive.Reframing.Organization.Culture/README.md deleted file mode 100644 index 7d5f611de3d72bda0d790b492bbd78ac10836c8d..0000000000000000000000000000000000000000 --- a/spaces/awacke1/Positive.Reframing.Organization.Culture/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Positive.Reframing.Organization.Culture -emoji: 💻 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false -duplicated_from: dominguesm/positive-reframing-en ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/baixing/hackathon_test/README.md b/spaces/baixing/hackathon_test/README.md deleted file mode 100644 index 2abf8756d2642d8eff00b253e9a35085e8180ee5..0000000000000000000000000000000000000000 --- a/spaces/baixing/hackathon_test/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Hackathon Test -emoji: 🌖 -colorFrom: indigo -colorTo: red -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false -license: cc-by-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/geometries/LightningStrike.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/geometries/LightningStrike.js deleted file mode 100644 index 79ef155cc606ad8c944a25f5ca3d6fe6cedeb42e..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/geometries/LightningStrike.js +++ /dev/null @@ -1,1005 +0,0 @@ -/** - * @author yomboprime https://github.com/yomboprime - * - * @fileoverview LightningStrike object for creating lightning strikes and voltaic arcs. - * - * - * Usage - * - * var myRay = new THREE.LightningStrike( paramsObject ); - * var myRayMesh = new THREE.Mesh( myRay, myMaterial ); - * scene.add( myRayMesh ); - * ... - * myRay.update( currentTime ); - * - * The "currentTime" can vary its rate, go forwards, backwards or even jump, but it cannot be negative. - * - * You should normally leave the ray position to (0, 0, 0). You should control it by changing the sourceOffset and destOffset parameters. - * - * - * LightningStrike parameters - * - * The paramsObject can contain any of the following parameters. 
- * - * Legend: - * 'LightningStrike' (also called 'ray'): An independent voltaic arc with its ramifications and defined with a set of parameters. - * 'Subray': A ramification of the ray. It is not a LightningStrike object. - * 'Segment': A linear segment piece of a subray. - * 'Leaf segment': A ray segment which cannot be smaller. - * - * - * The following parameters can be changed any time and if they vary smoothly, the ray form will also change smoothly: - * - * @param {Vector3} sourceOffset The point where the ray starts. - * - * @param {Vector3} destOffset The point where the ray ends. - * - * @param {double} timeScale The rate at wich the ray form changes in time. Default: 1 - * - * @param {double} roughness From 0 to 1. The higher the value, the more wrinkled is the ray. Default: 0.9 - * - * @param {double} straightness From 0 to 1. The higher the value, the more straight will be a subray path. Default: 0.7 - * - * @param {Vector3} up0 Ray 'up' direction at the ray starting point. Must be normalized. It should be perpendicular to the ray forward direction but it doesn't matter much. - * - * @param {Vector3} up1 Like the up0 parameter but at the end of the ray. Must be normalized. - * - * @param {double} radius0 Radius of the main ray trunk at the start point. Default: 1 - * - * @param {double} radius1 Radius of the main ray trunk at the end point. Default: 1 - * - * @param {double} radius0Factor The radius0 of a subray is this factor times the radius0 of its parent subray. Default: 0.5 - * - * @param {double} radius1Factor The radius1 of a subray is this factor times the radius1 of its parent subray. Default: 0.2 - * - * @param {minRadius} Minimum value a subray radius0 or radius1 can get. Default: 0.1 - * - * - * The following parameters should not be changed after lightning creation. They can be changed but the ray will change its form abruptly: - * - * @param {boolean} isEternal If true the ray never extinguishes. Otherwise its life is controlled by the 'birthTime' and 'deathTime' parameters. Default: true if any of those two parameters is undefined. - * - * @param {double} birthTime The time at which the ray starts its life and begins propagating. Only if isEternal is false. Default: None. - * - * @param {double} deathTime The time at which the ray ends vanishing and its life. Only if isEternal is false. Default: None. - * - * @param {double} propagationTimeFactor From 0 to 1. Lifetime factor at which the ray ends propagating and enters the steady phase. For example, 0.1 means it is propagating 1/10 of its lifetime. Default: 0.1 - * - * @param {double} vanishingTimeFactor From 0 to 1. Lifetime factor at which the ray ends the steady phase and begins vanishing. For example, 0.9 means it is vanishing 1/10 of its lifetime. Default: 0.9 - * - * @param {double} subrayPeriod Subrays cycle periodically. This is their time period. Default: 4 - * - * @param {double} subrayDutyCycle From 0 to 1. This is the fraction of time a subray is active. Default: 0.6 - * - * - * These parameters cannot change after lightning creation: - * - * @param {integer} maxIterations: Greater than 0. The number of ray's leaf segments is 2**maxIterations. Default: 9 - * - * @param {boolean} isStatic Set to true only for rays which won't change over time and are not attached to moving objects (Rare case). It is used to set the vertex buffers non-dynamic. You can omit calling update() for these rays. - * - * @param {integer} ramification Greater than 0. Maximum number of child subrays a subray can have. 
Default: 5 - * - * @param {integer} maxSubrayRecursion Greater than 0. Maximum level of recursion (subray descendant generations). Default: 3 - * - * @param {double} recursionProbability From 0 to 1. The lower the value, the less chance each new generation of subrays has to generate new subrays. Default: 0.6 - * - * @param {boolean} generateUVs If true, the ray geometry will have uv coordinates generated. u runs along the ray, and v across its perimeter. Default: false. - * - * @param {Object} randomGenerator Set here your random number generator which will seed the SimplexNoise and other decisions during ray tree creation. - * It can be used to generate repeatable rays. For that, set also the noiseSeed parameter, and each ray created with that generator and seed pair will be identical in time. - * The randomGenerator parameter should be an object with a random() function similar to Math.random, but seedable. - * It must have also a getSeed() method, which returns the current seed, and a setSeed( seed ) method, which accepts as seed a fractional number from 0 to 1, as well as any other number. - * The default value is an internal generator for some uses and Math.random for others (It is non-repeatable even if noiseSeed is supplied) - * - * @param {double} noiseSeed Seed used to make repeatable rays (see the randomGenerator) - * - * @param {function} onDecideSubrayCreation Set this to change the callback which decides subray creation. You can look at the default callback in the code (createDefaultSubrayCreationCallbacks)for more info. - * - * @param {function} onSubrayCreation This is another callback, more simple than the previous one. It can be used to adapt the form of subrays or other parameters once a subray has been created and initialized. It is used in the examples to adapt subrays to a sphere or to a plane. 
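 *
 * A concrete paramsObject with illustrative values (not the defaults listed above) might look like:
 *
 *    var strike = new THREE.LightningStrike( {
 *        sourceOffset: new THREE.Vector3( 0, 100, 0 ),
 *        destOffset: new THREE.Vector3( 0, 0, 0 ),
 *        radius0: 1,
 *        radius1: 0.5,
 *        minRadius: 0.1,
 *        maxIterations: 7,
 *        isEternal: true,
 *        ramification: 5
 *    } );
 *    scene.add( new THREE.Mesh( strike, new THREE.MeshBasicMaterial( { color: 0xffffff } ) ) );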
- * - * -*/ - -THREE.LightningStrike = function ( rayParameters ) { - - THREE.BufferGeometry.call( this ); - - this.type = 'LightningStrike'; - - // Set parameters, and set undefined parameters to default values - rayParameters = rayParameters || {}; - this.init( THREE.LightningStrike.copyParameters( rayParameters, rayParameters ) ); - - // Creates and populates the mesh - this.createMesh(); - -}; - -THREE.LightningStrike.prototype = Object.create( THREE.BufferGeometry.prototype ); - -THREE.LightningStrike.prototype.constructor = THREE.LightningStrike; - -THREE.LightningStrike.prototype.isLightningStrike = true; - -// Ray states -THREE.LightningStrike.RAY_INITIALIZED = 0; -THREE.LightningStrike.RAY_UNBORN = 1; -THREE.LightningStrike.RAY_PROPAGATING = 2; -THREE.LightningStrike.RAY_STEADY = 3; -THREE.LightningStrike.RAY_VANISHING = 4; -THREE.LightningStrike.RAY_EXTINGUISHED = 5; - -THREE.LightningStrike.COS30DEG = Math.cos( 30 * Math.PI / 180 ); -THREE.LightningStrike.SIN30DEG = Math.sin( 30 * Math.PI / 180 ); - -THREE.LightningStrike.createRandomGenerator = function () { - - var numSeeds = 2053; - var seeds = []; - - for ( var i = 0; i < numSeeds; i++ ) { - - seeds.push( Math.random() ); - - } - - var generator = { - - currentSeed: 0, - - random: function () { - - var value = seeds[ generator.currentSeed ]; - - generator.currentSeed = ( generator.currentSeed + 1 ) % numSeeds; - - return value; - - }, - - getSeed: function () { - - return generator.currentSeed / numSeeds; - - }, - - setSeed: function ( seed ) { - - generator.currentSeed = Math.floor( seed * numSeeds ) % numSeeds; - - } - - }; - - return generator; - -}; - -THREE.LightningStrike.copyParameters = function ( dest, source) { - - source = source || {}; - dest = dest || {}; - - var vecCopy = function( v ) { - - if ( source === dest ) { - - return v; - - } - else { - - return v.clone(); - - } - - } - - dest.sourceOffset = source.sourceOffset !== undefined ? vecCopy( source.sourceOffset ) : new THREE.Vector3( 0, 100, 0 ), - dest.destOffset = source.destOffset !== undefined ? vecCopy( source.destOffset ) : new THREE.Vector3( 0, 0, 0 ), - - dest.timeScale = source.timeScale !== undefined ? source.timeScale : 1, - dest.roughness = source.roughness !== undefined ? source.roughness : 0.9, - dest.straightness = source.straightness !== undefined ? source.straightness : 0.7, - - dest.up0 = source.up0 !== undefined ? vecCopy( source.up0 ) : new THREE.Vector3( 0, 0, 1 ); - dest.up1 = source.up1 !== undefined ? vecCopy( source.up1 ) : new THREE.Vector3( 0, 0, 1 ), - dest.radius0 = source.radius0 !== undefined ? source.radius0 : 1, - dest.radius1 = source.radius1 !== undefined ? source.radius1 : 1, - dest.radius0Factor = source.radius0Factor !== undefined ? source.radius0Factor : 0.5, - dest.radius1Factor = source.radius1Factor !== undefined ? source.radius1Factor : 0.2, - dest.minRadius = source.minRadius !== undefined ? source.minRadius : 0.2, - - // These parameters should not be changed after lightning creation. They can be changed but the ray will change its form abruptly: - - dest.isEternal = source.isEternal !== undefined ? source.isEternal : ( source.birthTime === undefined || source.deathTime === undefined ), - dest.birthTime = source.birthTime, - dest.deathTime = source.deathTime, - dest.propagationTimeFactor = source.propagationTimeFactor !== undefined ? source.propagationTimeFactor : 0.1, - dest.vanishingTimeFactor = source.vanishingTimeFactor !== undefined ? 
source.vanishingTimeFactor : 0.9, - dest.subrayPeriod = source.subrayPeriod !== undefined ? source.subrayPeriod : 4, - dest.subrayDutyCycle = source.subrayDutyCycle !== undefined ? source.subrayDutyCycle : 0.6; - - // These parameters cannot change after lightning creation: - - dest.maxIterations = source.maxIterations !== undefined ? source.maxIterations : 9; - dest.isStatic = source.isStatic !== undefined ? source.isStatic : false; - dest.ramification = source.ramification !== undefined ? source.ramification : 5; - dest.maxSubrayRecursion = source.maxSubrayRecursion !== undefined ? source.maxSubrayRecursion : 3; - dest.recursionProbability = source.recursionProbability !== undefined ? source.recursionProbability : 0.6; - dest.generateUVs = source.generateUVs !== undefined ? source.generateUVs : false; - dest.randomGenerator = source.randomGenerator, - dest.noiseSeed = source.noiseSeed, - dest.onDecideSubrayCreation = source.onDecideSubrayCreation, - dest.onSubrayCreation = source.onSubrayCreation; - - return dest; - -}; - -THREE.LightningStrike.prototype.update = function ( time ) { - - if ( this.isStatic ) { - return; - } - - if ( this.rayParameters.isEternal || ( this.rayParameters.birthTime <= time && time <= this.rayParameters.deathTime ) ) { - - this.updateMesh( time ); - - if ( time < this.subrays[ 0 ].endPropagationTime ) { - - this.state = THREE.LightningStrike.RAY_PROPAGATING; - - } - else if ( time > this.subrays[ 0 ].beginVanishingTime ) { - - this.state = THREE.LightningStrike.RAY_VANISHING; - - } - else { - - this.state = THREE.LightningStrike.RAY_STEADY; - - } - - this.visible = true; - - } - else { - - this.visible = false; - - if ( time < this.rayParameters.birthTime ) { - - this.state = THREE.LightningStrike.RAY_UNBORN; - - } - else { - - this.state = THREE.LightningStrike.RAY_EXTINGUISHED; - - } - - } - -}; - -THREE.LightningStrike.prototype.init = function ( rayParameters ) { - - // Init all the state from the parameters - - this.rayParameters = rayParameters; - - // These parameters cannot change after lightning creation: - - this.maxIterations = rayParameters.maxIterations !== undefined ? Math.floor( rayParameters.maxIterations ) : 9; - rayParameters.maxIterations = this.maxIterations; - this.isStatic = rayParameters.isStatic !== undefined ? rayParameters.isStatic : false; - rayParameters.isStatic = this.isStatic; - this.ramification = rayParameters.ramification !== undefined ? Math.floor( rayParameters.ramification ) : 5; - rayParameters.ramification = this.ramification; - this.maxSubrayRecursion = rayParameters.maxSubrayRecursion !== undefined ? Math.floor( rayParameters.maxSubrayRecursion ) : 3; - rayParameters.maxSubrayRecursion = this.maxSubrayRecursion; - this.recursionProbability = rayParameters.recursionProbability !== undefined ? rayParameters.recursionProbability : 0.6; - rayParameters.recursionProbability = this.recursionProbability; - this.generateUVs = rayParameters.generateUVs !== undefined ? 
rayParameters.generateUVs : false; - rayParameters.generateUVs = this.generateUVs; - - // Random generator - if ( rayParameters.randomGenerator !== undefined ) { - - this.randomGenerator = rayParameters.randomGenerator; - this.seedGenerator = rayParameters.randomGenerator; - - if ( rayParameters.noiseSeed !== undefined ) { - - this.seedGenerator.setSeed( rayParameters.noiseSeed ); - - } - - } - else { - - this.randomGenerator = THREE.LightningStrike.createRandomGenerator(); - this.seedGenerator = Math; - - } - - // Ray creation callbacks - if ( rayParameters.onDecideSubrayCreation !== undefined ) { - - this.onDecideSubrayCreation = rayParameters.onDecideSubrayCreation; - - } - else { - - this.createDefaultSubrayCreationCallbacks(); - - if ( rayParameters.onSubrayCreation !== undefined ) { - - this.onSubrayCreation = rayParameters.onSubrayCreation; - - } - - } - - // Internal state - - this.state = THREE.LightningStrike.RAY_INITIALIZED; - - this.maxSubrays = Math.ceil( 1 + Math.pow( this.ramification, Math.max( 0, this.maxSubrayRecursion - 1 ) ) ); - rayParameters.maxSubrays = this.maxSubrays; - - this.maxRaySegments = 2 * ( 1 << this.maxIterations ); - - this.subrays = []; - - for ( var i = 0; i < this.maxSubrays; i++ ) { - - this.subrays.push( this.createSubray() ); - - } - - this.raySegments = []; - - for ( var i = 0; i < this.maxRaySegments; i++ ) { - - this.raySegments.push( this.createSegment() ); - - } - - this.time = 0; - this.timeFraction = 0; - this.currentSegmentCallback = null; - this.currentCreateTriangleVertices = this.generateUVs ? this.createTriangleVerticesWithUVs : this.createTriangleVerticesWithoutUVs; - this.numSubrays = 0; - this.currentSubray = null; - this.currentSegmentIndex = 0; - this.isInitialSegment = false; - this.subrayProbability = 0; - - this.currentVertex = 0; - this.currentIndex = 0; - this.currentCoordinate = 0; - this.currentUVCoordinate = 0; - this.vertices = null; - this.uvs = null; - this.indices = null; - this.positionAttribute = null; - this.uvsAttribute = null; - - this.simplexX = new SimplexNoise( this.seedGenerator ); - this.simplexY = new SimplexNoise( this.seedGenerator ); - this.simplexZ = new SimplexNoise( this.seedGenerator ); - - // Temp vectors - this.forwards = new THREE.Vector3(); - this.forwardsFill = new THREE.Vector3(); - this.side = new THREE.Vector3(); - this.down = new THREE.Vector3(); - this.middlePos = new THREE.Vector3(); - this.middleLinPos = new THREE.Vector3(); - this.newPos = new THREE.Vector3(); - this.vPos = new THREE.Vector3(); - this.cross1 = new THREE.Vector3(); - -}; - -THREE.LightningStrike.prototype.createMesh = function () { - - var maxDrawableSegmentsPerSubRay = 1 << this.maxIterations; - - var maxVerts = 3 * ( maxDrawableSegmentsPerSubRay + 1 ) * this.maxSubrays; - var maxIndices = 18 * maxDrawableSegmentsPerSubRay * this.maxSubrays; - - this.vertices = new Float32Array( maxVerts * 3 ); - this.indices = new Uint32Array( maxIndices ); - if ( this.generateUVs ) { - this.uvs = new Float32Array( maxVerts * 2 ); - } - - // Populate the mesh - this.fillMesh( 0 ); - - this.setIndex( new THREE.Uint32BufferAttribute( this.indices, 1 ) ); - - this.positionAttribute = new THREE.Float32BufferAttribute( this.vertices, 3 ); - this.addAttribute( 'position', this.positionAttribute ); - - if ( this.generateUVs ) {1 - this.uvsAttribute = new THREE.Float32BufferAttribute( new Float32Array( this.uvs ), 2 ); - this.addAttribute( 'uv', this.uvsAttribute ); - } - - if ( ! 
this.isStatic ) { - this.index.dynamic = true; - this.positionAttribute.dynamic = true; - if ( this.generateUVs ) { - this.uvsAttribute.dynamic = true; - } - } - - // Store buffers for later modification - this.vertices = this.positionAttribute.array; - this.indices = this.index.array; - if ( this.generateUVs ) { - this.uvs = this.uvsAttribute.array; - } - -}; - -THREE.LightningStrike.prototype.updateMesh = function ( time ) { - - this.fillMesh( time ); - - this.drawRange.count = this.currentIndex; - - this.index.needsUpdate = true; - - this.positionAttribute.needsUpdate = true; - - if ( this.generateUVs ) { - this.uvsAttribute.needsUpdate = true; - } - -}; - -THREE.LightningStrike.prototype.fillMesh = function ( time ) { - - var scope = this; - - this.currentVertex = 0; - this.currentIndex = 0; - this.currentCoordinate = 0; - this.currentUVCoordinate = 0; - - this.fractalRay( time, function fillVertices ( segment ) { - - var subray = scope.currentSubray; - - if ( time < subray.birthTime ) {//&& ( ! this.rayParameters.isEternal || scope.currentSubray.recursion > 0 ) ) { - - return; - - } - else if ( this.rayParameters.isEternal && scope.currentSubray.recursion == 0 ) { - - // Eternal rays don't propagate nor vanish, but its subrays do - - scope.createPrism( segment ); - - scope.onDecideSubrayCreation( segment, scope ); - - } - else if ( time < subray.endPropagationTime ) { - - if ( scope.timeFraction >= segment.fraction0 * subray.propagationTimeFactor ) { - - // Ray propagation has arrived to this segment - - scope.createPrism( segment ); - - scope.onDecideSubrayCreation( segment, scope ); - - } - - } - else if ( time < subray.beginVanishingTime ) { - - // Ray is steady (nor propagating nor vanishing) - - scope.createPrism( segment ); - - scope.onDecideSubrayCreation( segment, scope ); - - } - else { - - if ( scope.timeFraction <= subray.vanishingTimeFactor + segment.fraction1 * ( 1 - subray.vanishingTimeFactor ) ) { - - // Segment has not yet vanished - - scope.createPrism( segment ); - - } - - scope.onDecideSubrayCreation( segment, scope ); - - } - - } ); - -}; - -THREE.LightningStrike.prototype.addNewSubray = function ( rayParameters ) { - - return this.subrays[ this.numSubrays++ ]; - -}; - -THREE.LightningStrike.prototype.initSubray = function ( subray, rayParameters ) { - - subray.pos0.copy( rayParameters.sourceOffset ); - subray.pos1.copy( rayParameters.destOffset ); - subray.up0.copy( rayParameters.up0 ); - subray.up1.copy( rayParameters.up1 ); - subray.radius0 = rayParameters.radius0; - subray.radius1 = rayParameters.radius1; - subray.birthTime = rayParameters.birthTime; - subray.deathTime = rayParameters.deathTime; - subray.timeScale = rayParameters.timeScale; - subray.roughness = rayParameters.roughness; - subray.straightness = rayParameters.straightness; - subray.propagationTimeFactor = rayParameters.propagationTimeFactor; - subray.vanishingTimeFactor = rayParameters.vanishingTimeFactor; - - subray.maxIterations = this.maxIterations; - subray.seed = rayParameters.noiseSeed !== undefined ? 
rayParameters.noiseSeed : 0; - subray.recursion = 0; - -}; - -THREE.LightningStrike.prototype.fractalRay = function ( time, segmentCallback ) { - - this.time = time; - this.currentSegmentCallback = segmentCallback; - this.numSubrays = 0; - - // Add the top level subray - this.initSubray( this.addNewSubray(), this.rayParameters ); - - // Process all subrays that are being generated until consuming all of them - for ( var subrayIndex = 0; subrayIndex < this.numSubrays; subrayIndex++ ) { - - var subray = this.subrays[ subrayIndex ]; - this.currentSubray = subray; - - this.randomGenerator.setSeed( subray.seed ); - - subray.endPropagationTime = THREE.Math.lerp( subray.birthTime, subray.deathTime, subray.propagationTimeFactor ); - subray.beginVanishingTime = THREE.Math.lerp( subray.deathTime, subray.birthTime, 1 - subray.vanishingTimeFactor ); - - var random1 = this.randomGenerator.random; - subray.linPos0.set( random1(), random1(), random1() ).multiplyScalar( 1000 ); - subray.linPos1.set( random1(), random1(), random1() ).multiplyScalar( 1000 ); - - this.timeFraction = ( time - subray.birthTime ) / ( subray.deathTime - subray.birthTime ); - - this.currentSegmentIndex = 0; - this.isInitialSegment = true; - - var segment = this.getNewSegment(); - segment.iteration = 0; - segment.pos0.copy( subray.pos0 ); - segment.pos1.copy( subray.pos1 ); - segment.linPos0.copy( subray.linPos0 ); - segment.linPos1.copy( subray.linPos1 ); - segment.up0.copy( subray.up0 ); - segment.up1.copy( subray.up1 ); - segment.radius0 = subray.radius0; - segment.radius1 = subray.radius1; - segment.fraction0 = 0; - segment.fraction1 = 1; - segment.positionVariationFactor = 1 - subray.straightness; - - this.subrayProbability = this.ramification * Math.pow( this.recursionProbability, subray.recursion ) / ( 1 << subray.maxIterations ); - - this.fractalRayRecursive( segment ); - - } - - this.currentSegmentCallback = null; - this.currentSubray = null; - -}; - -THREE.LightningStrike.prototype.fractalRayRecursive = function ( segment ) { - - // Leave recursion condition - if ( segment.iteration >= this.currentSubray.maxIterations ) { - - this.currentSegmentCallback( segment ); - - return; - - } - - // Interpolation - this.forwards.subVectors( segment.pos1, segment.pos0 ); - var lForwards = this.forwards.length(); - - if ( lForwards < 0.000001) { - this.forwards.set( 0, 0, 0.01 ); - lForwards = this.forwards.length(); - } - - var middleRadius = ( segment.radius0 + segment.radius1 ) * 0.5; - var middleFraction = ( segment.fraction0 + segment.fraction1 ) * 0.5; - - var timeDimension = this.time * this.currentSubray.timeScale * Math.pow( 2, segment.iteration ); - - this.middlePos.lerpVectors( segment.pos0, segment.pos1, 0.5 ); - this.middleLinPos.lerpVectors( segment.linPos0, segment.linPos1, 0.5 ); - var p = this.middleLinPos; - - // Noise - this.newPos.set( this.simplexX.noise4d( p.x, p.y, p.z, timeDimension ), - this.simplexY.noise4d( p.x, p.y, p.z, timeDimension ), - this.simplexZ.noise4d( p.x, p.y, p.z, timeDimension ) ); - - this.newPos.multiplyScalar( segment.positionVariationFactor * lForwards ); - this.newPos.add( this.middlePos ); - - // Recursion - - var newSegment1 = this.getNewSegment(); - newSegment1.pos0.copy( segment.pos0 ); - newSegment1.pos1.copy( this.newPos ); - newSegment1.linPos0.copy( segment.linPos0 ); - newSegment1.linPos1.copy( this.middleLinPos ); - newSegment1.up0.copy( segment.up0 ); - newSegment1.up1.copy( segment.up1 ); - newSegment1.radius0 = segment.radius0; - newSegment1.radius1 = middleRadius; - 
newSegment1.fraction0 = segment.fraction0; - newSegment1.fraction1 = middleFraction; - newSegment1.positionVariationFactor = segment.positionVariationFactor * this.currentSubray.roughness; - newSegment1.iteration = segment.iteration + 1; - - var newSegment2 = this.getNewSegment(); - newSegment2.pos0.copy( this.newPos ); - newSegment2.pos1.copy( segment.pos1 ); - newSegment2.linPos0.copy( this.middleLinPos ); - newSegment2.linPos1.copy( segment.linPos1 ); - this.cross1.crossVectors( segment.up0, this.forwards.normalize() ); - newSegment2.up0.crossVectors( this.forwards, this.cross1 ).normalize(); - newSegment2.up1.copy( segment.up1 ); - newSegment2.radius0 = middleRadius; - newSegment2.radius1 = segment.radius1; - newSegment2.fraction0 = middleFraction; - newSegment2.fraction1 = segment.fraction1; - newSegment2.positionVariationFactor = segment.positionVariationFactor * this.currentSubray.roughness; - newSegment2.iteration = segment.iteration + 1; - - this.fractalRayRecursive( newSegment1 ); - - this.fractalRayRecursive( newSegment2 ); - -}; - -THREE.LightningStrike.prototype.createPrism = function ( segment ) { - - // Creates one triangular prism and its vertices at the segment - - this.forwardsFill.subVectors( segment.pos1, segment.pos0 ).normalize(); - - if ( this.isInitialSegment ) { - - this.currentCreateTriangleVertices( segment.pos0, segment.up0, this.forwardsFill, segment.radius0, 0 ); - - this.isInitialSegment = false; - - } - - this.currentCreateTriangleVertices( segment.pos1, segment.up0, this.forwardsFill, segment.radius1, segment.fraction1 ); - - this.createPrismFaces(); - -}; - -THREE.LightningStrike.prototype.createTriangleVerticesWithoutUVs = function ( pos, up, forwards, radius ) { - - // Create an equilateral triangle (only vertices) - - this.side.crossVectors( up, forwards ).multiplyScalar( radius * THREE.LightningStrike.COS30DEG ); - this.down.copy( up ).multiplyScalar( - radius * THREE.LightningStrike.SIN30DEG ); - - var p = this.vPos; - var v = this.vertices; - - p.copy( pos ).sub( this.side ).add( this.down ); - - v[ this.currentCoordinate++ ] = p.x; - v[ this.currentCoordinate++ ] = p.y; - v[ this.currentCoordinate++ ] = p.z; - - p.copy( pos ).add( this.side ).add( this.down ); - - v[ this.currentCoordinate++ ] = p.x; - v[ this.currentCoordinate++ ] = p.y; - v[ this.currentCoordinate++ ] = p.z; - - p.copy( up ).multiplyScalar( radius ).add( pos ); - - v[ this.currentCoordinate++ ] = p.x; - v[ this.currentCoordinate++ ] = p.y; - v[ this.currentCoordinate++ ] = p.z; - - this.currentVertex += 3; - -}; - -THREE.LightningStrike.prototype.createTriangleVerticesWithUVs = function ( pos, up, forwards, radius, u ) { - - // Create an equilateral triangle (only vertices) - - this.side.crossVectors( up, forwards ).multiplyScalar( radius * THREE.LightningStrike.COS30DEG ); - this.down.copy( up ).multiplyScalar( - radius * THREE.LightningStrike.SIN30DEG ); - - var p = this.vPos; - var v = this.vertices; - var uv = this.uvs; - - p.copy( pos ).sub( this.side ).add( this.down ); - - v[ this.currentCoordinate++ ] = p.x; - v[ this.currentCoordinate++ ] = p.y; - v[ this.currentCoordinate++ ] = p.z; - - uv[ this.currentUVCoordinate++ ] = u; - uv[ this.currentUVCoordinate++ ] = 0; - - p.copy( pos ).add( this.side ).add( this.down ); - - v[ this.currentCoordinate++ ] = p.x; - v[ this.currentCoordinate++ ] = p.y; - v[ this.currentCoordinate++ ] = p.z; - - uv[ this.currentUVCoordinate++ ] = u; - uv[ this.currentUVCoordinate++ ] = 0.5; - - p.copy( up ).multiplyScalar( radius ).add( pos ); - - 
v[ this.currentCoordinate++ ] = p.x; - v[ this.currentCoordinate++ ] = p.y; - v[ this.currentCoordinate++ ] = p.z; - - uv[ this.currentUVCoordinate++ ] = u; - uv[ this.currentUVCoordinate++ ] = 1; - - this.currentVertex += 3; - -}; - -THREE.LightningStrike.prototype.createPrismFaces = function ( vertex, index ) { - - var indices = this.indices; - var vertex = this.currentVertex - 6; - - indices[ this.currentIndex++ ] = vertex + 1; - indices[ this.currentIndex++ ] = vertex + 2; - indices[ this.currentIndex++ ] = vertex + 5; - indices[ this.currentIndex++ ] = vertex + 1; - indices[ this.currentIndex++ ] = vertex + 5; - indices[ this.currentIndex++ ] = vertex + 4; - indices[ this.currentIndex++ ] = vertex + 0; - indices[ this.currentIndex++ ] = vertex + 1; - indices[ this.currentIndex++ ] = vertex + 4; - indices[ this.currentIndex++ ] = vertex + 0; - indices[ this.currentIndex++ ] = vertex + 4; - indices[ this.currentIndex++ ] = vertex + 3; - indices[ this.currentIndex++ ] = vertex + 2; - indices[ this.currentIndex++ ] = vertex + 0; - indices[ this.currentIndex++ ] = vertex + 3; - indices[ this.currentIndex++ ] = vertex + 2; - indices[ this.currentIndex++ ] = vertex + 3; - indices[ this.currentIndex++ ] = vertex + 5; - -}; - -THREE.LightningStrike.prototype.createDefaultSubrayCreationCallbacks = function () { - - var random1 = this.randomGenerator.random; - - this.onDecideSubrayCreation = function ( segment, lightningStrike ) { - - // Decide subrays creation at parent (sub)ray segment - - var subray = lightningStrike.currentSubray; - - var period = lightningStrike.rayParameters.subrayPeriod; - var dutyCycle = lightningStrike.rayParameters.subrayDutyCycle; - - var phase0 = ( lightningStrike.rayParameters.isEternal && subray.recursion == 0 ) ? - random1() * period : THREE.Math.lerp( subray.birthTime, subray.endPropagationTime, segment.fraction0 ) - random1() * period; - - var phase = lightningStrike.time - phase0; - var currentCycle = Math.floor( phase / period ); - - var childSubraySeed = random1() * ( currentCycle + 1 ); - - var isActive = phase % period <= dutyCycle * period; - - probability = lightningStrike.subrayProbability; - var probability = 0; - if ( isActive ) { - probability = lightningStrike.subrayProbability; - // Distribution test: probability *= segment.fraction0 > 0.5 && segment.fraction0 < 0.9 ? 1 / 0.4 : 0; - } - - if ( subray.recursion < lightningStrike.maxSubrayRecursion && lightningStrike.numSubrays < lightningStrike.maxSubrays && random1() < probability ) { - - var childSubray = lightningStrike.addNewSubray(); - - var parentSeed = lightningStrike.randomGenerator.getSeed(); - childSubray.seed = childSubraySeed; - lightningStrike.randomGenerator.setSeed( childSubraySeed ); - - childSubray.recursion = subray.recursion + 1; - childSubray.maxIterations = Math.max( 1, subray.maxIterations - 1 ); - - childSubray.linPos0.set( random1(), random1(), random1() ).multiplyScalar( 1000 ); - childSubray.linPos1.set( random1(), random1(), random1() ).multiplyScalar( 1000 );; - childSubray.up0.copy( subray.up0 ); - childSubray.up1.copy( subray.up1 ); - childSubray.radius0 = segment.radius0 * lightningStrike.rayParameters.radius0Factor; - childSubray.radius1 = Math.min( lightningStrike.rayParameters.minRadius, segment.radius1 * lightningStrike.rayParameters.radius1Factor ); - - childSubray.birthTime = phase0 + ( currentCycle ) * period; - childSubray.deathTime = childSubray.birthTime + period * dutyCycle; - - if ( ! 
lightningStrike.rayParameters.isEternal && subray.recursion == 0 ) { - - childSubray.birthTime = Math.max( childSubray.birthTime, subray.birthTime ); - childSubray.deathTime = Math.min( childSubray.deathTime, subray.deathTime ); - - } - - childSubray.timeScale = subray.timeScale * 2; - childSubray.roughness = subray.roughness; - childSubray.straightness = subray.straightness; - childSubray.propagationTimeFactor = subray.propagationTimeFactor; - childSubray.vanishingTimeFactor = subray.vanishingTimeFactor; - - lightningStrike.onSubrayCreation( segment, subray, childSubray, lightningStrike ); - - lightningStrike.randomGenerator.setSeed( parentSeed ); - - } - - }; - - var vec1Pos = new THREE.Vector3(); - var vec2Forward = new THREE.Vector3(); - var vec3Side = new THREE.Vector3(); - var vec4Up = new THREE.Vector3(); - - this.onSubrayCreation = function ( segment, parentSubray, childSubray, lightningStrike ) { - - // Decide childSubray origin and destination positions (pos0 and pos1) and possibly other properties of childSubray - - // Just use the default cone position generator - lightningStrike.subrayCylinderPosition( segment, parentSubray, childSubray, 0.5, 0.6, 0.2 ); - - }; - - this.subrayConePosition = function ( segment, parentSubray, childSubray, heightFactor, sideWidthFactor, minSideWidthFactor ) { - - // Sets childSubray pos0 and pos1 in a cone - - childSubray.pos0.copy( segment.pos0 ); - - vec1Pos.subVectors( parentSubray.pos1, parentSubray.pos0 ); - vec2Forward.copy( vec1Pos ).normalize(); - vec1Pos.multiplyScalar( segment.fraction0 + ( 1 - segment.fraction0 ) * ( random1() * heightFactor ) ); - var length = vec1Pos.length(); - vec3Side.crossVectors( parentSubray.up0, vec2Forward ); - var angle = 2 * Math.PI * random1(); - vec3Side.multiplyScalar( Math.cos ( angle ) ); - vec4Up.copy( parentSubray.up0 ).multiplyScalar( Math.sin ( angle ) ); - - childSubray.pos1.copy( vec3Side ).add( vec4Up ).multiplyScalar( length * sideWidthFactor * ( minSideWidthFactor + random1() * ( 1 - minSideWidthFactor ) ) ).add( vec1Pos ).add( parentSubray.pos0 ); - - } - - this.subrayCylinderPosition = function ( segment, parentSubray, childSubray, heightFactor, sideWidthFactor, minSideWidthFactor ) { - - // Sets childSubray pos0 and pos1 in a cylinder - - childSubray.pos0.copy( segment.pos0 ); - - vec1Pos.subVectors( parentSubray.pos1, parentSubray.pos0 ); - vec2Forward.copy( vec1Pos ).normalize(); - vec1Pos.multiplyScalar( segment.fraction0 + ( 1 - segment.fraction0 ) * ( ( 2 * random1() - 1 ) * heightFactor ) ); - var length = vec1Pos.length(); - vec3Side.crossVectors( parentSubray.up0, vec2Forward ); - var angle = 2 * Math.PI * random1(); - vec3Side.multiplyScalar( Math.cos ( angle ) ); - vec4Up.copy( parentSubray.up0 ).multiplyScalar( Math.sin ( angle ) ); - - childSubray.pos1.copy( vec3Side ).add( vec4Up ).multiplyScalar( length * sideWidthFactor * ( minSideWidthFactor + random1() * ( 1 - minSideWidthFactor ) ) ).add( vec1Pos ).add( parentSubray.pos0 ); - - } - -}; - -THREE.LightningStrike.prototype.createSubray = function () { - - return { - - seed: 0, - maxIterations: 0, - recursion: 0, - pos0: new THREE.Vector3(), - pos1: new THREE.Vector3(), - linPos0: new THREE.Vector3(), - linPos1: new THREE.Vector3(), - up0: new THREE.Vector3(), - up1: new THREE.Vector3(), - radius0: 0, - radius1: 0, - birthTime: 0, - deathTime: 0, - timeScale: 0, - roughness: 0, - straightness: 0, - propagationTimeFactor: 0, - vanishingTimeFactor: 0, - endPropagationTime: 0, - beginVanishingTime: 0 - - }; - -}; - 
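// Pooling note: createSubray() and createSegment() are only invoked from init(), which preallocates
// this.subrays (maxSubrays entries) and this.raySegments (maxRaySegments entries) once. Each
// update()/fractalRay() pass then recycles those pooled objects through addNewSubray() and
// getNewSegment() instead of allocating new ones, so the per-frame rebuild avoids creating
// new subray/segment objects.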
-THREE.LightningStrike.prototype.createSegment = function () { - - return { - iteration: 0, - pos0: new THREE.Vector3(), - pos1: new THREE.Vector3(), - linPos0: new THREE.Vector3(), - linPos1: new THREE.Vector3(), - up0: new THREE.Vector3(), - up1: new THREE.Vector3(), - radius0: 0, - radius1: 0, - fraction0: 0, - fraction1: 0, - positionVariationFactor: 0 - } - -}; - -THREE.LightningStrike.prototype.getNewSegment = function () { - - return this.raySegments[ this.currentSegmentIndex++ ]; - -}; - -THREE.LightningStrike.prototype.copy = function ( source ) { - - BufferGeometry.prototype.copy.call( this, source ); - - this.init( THREE.LightningStrike.copyParameters( {}, source.rayParameters ) ); - - return this; - -}; - -THREE.LightningStrike.prototype.clone = function () { - - return new this.constructor( THREE.LightningStrike.copyParameters( {}, this.rayParameters ) ); - -}; diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/VerticalTiltShiftShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/VerticalTiltShiftShader.js deleted file mode 100644 index ad8ff70c9024ee27aa7436702143751491e49927..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/VerticalTiltShiftShader.js +++ /dev/null @@ -1,65 +0,0 @@ -/** - * @author alteredq / http://alteredqualia.com/ - * - * Simple fake tilt-shift effect, modulating two pass Gaussian blur (see above) by vertical position - * - * - 9 samples per pass - * - standard deviation 2.7 - * - "h" and "v" parameters should be set to "1 / width" and "1 / height" - * - "r" parameter control where "focused" horizontal line lies - */ - -THREE.VerticalTiltShiftShader = { - - uniforms: { - - "tDiffuse": { value: null }, - "v": { value: 1.0 / 512.0 }, - "r": { value: 0.35 } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - "vUv = uv;", - "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - fragmentShader: [ - - "uniform sampler2D tDiffuse;", - "uniform float v;", - "uniform float r;", - - "varying vec2 vUv;", - - "void main() {", - - "vec4 sum = vec4( 0.0 );", - - "float vv = v * abs( r - vUv.y );", - - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 4.0 * vv ) ) * 0.051;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 3.0 * vv ) ) * 0.0918;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 2.0 * vv ) ) * 0.12245;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y - 1.0 * vv ) ) * 0.1531;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y ) ) * 0.1633;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 1.0 * vv ) ) * 0.1531;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 2.0 * vv ) ) * 0.12245;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 3.0 * vv ) ) * 0.0918;", - "sum += texture2D( tDiffuse, vec2( vUv.x, vUv.y + 4.0 * vv ) ) * 0.051;", - - "gl_FragColor = sum;", - - "}" - - ].join( "\n" ) - -}; diff --git a/spaces/benthecoder/news-summarizer/app.py b/spaces/benthecoder/news-summarizer/app.py deleted file mode 100644 index 085addefff091b068b27c38ece1c4cd97b45f27b..0000000000000000000000000000000000000000 --- a/spaces/benthecoder/news-summarizer/app.py +++ /dev/null @@ -1,47 +0,0 @@ -from newspaper import Article -from newspaper import Config -import nltk -nltk.download('punkt') - -from transformers import pipeline -import gradio as gr -from gradio.mix import Parallel, Series - - -def extract_article_text(url): - USER_AGENT = 'Mozilla/5.0 
(Macintosh; Intel Mac OS X 10.15; rv:78.0) Gecko/20100101 Firefox/78.0' - config = Config() - config.browser_user_agent = USER_AGENT - config.request_timeout = 10 - - article = Article(url, config=config) - article.download() - article.parse() - text = article.text - return text - -extractor = gr.Interface(extract_article_text, 'text', 'text') -summarizer = gr.Interface.load("huggingface/facebook/bart-large-cnn") - -sample_url = [['https://www.technologyreview.com/2021/07/22/1029973/deepmind-alphafold-protein-folding-biology-disease-drugs-proteome/'], - ['https://www.technologyreview.com/2021/07/21/1029860/disability-rights-employment-discrimination-ai-hiring/'], - ['https://www.technologyreview.com/2021/07/09/1028140/ai-voice-actors-sound-human/']] - -desc = ''' - Let Hugging Face models summarize articles for you. - Note: Shorter articles generate faster summaries. - This summarizer uses bart-large-cnn model by Facebook - ''' - -iface = Series(extractor, summarizer, - inputs = gr.inputs.Textbox( - lines = 2, - label = 'URL' - ), - outputs = 'text', - title = 'News Summarizer', - theme = 'huggingface', - description = desc, - examples=sample_url) - -iface.launch() \ No newline at end of file diff --git a/spaces/bhasker412/IDD-YOLO-Tracking/utils/autoanchor.py b/spaces/bhasker412/IDD-YOLO-Tracking/utils/autoanchor.py deleted file mode 100644 index f491032e53ab43cd81d966d127bd92f9b414b9fe..0000000000000000000000000000000000000000 --- a/spaces/bhasker412/IDD-YOLO-Tracking/utils/autoanchor.py +++ /dev/null @@ -1,160 +0,0 @@ -# Auto-anchor utils - -import numpy as np -import torch -import yaml -from scipy.cluster.vq import kmeans -from tqdm import tqdm - -from utils.general import colorstr - - -def check_anchor_order(m): - # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary - a = m.anchor_grid.prod(-1).view(-1) # anchor area - da = a[-1] - a[0] # delta a - ds = m.stride[-1] - m.stride[0] # delta s - if da.sign() != ds.sign(): # same order - print('Reversing anchor order') - m.anchors[:] = m.anchors.flip(0) - m.anchor_grid[:] = m.anchor_grid.flip(0) - - -def check_anchors(dataset, model, thr=4.0, imgsz=640): - # Check anchor fit to data, recompute if necessary - prefix = colorstr('autoanchor: ') - print(f'\n{prefix}Analyzing anchors... ', end='') - m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() - shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) - scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale - wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh - - def metric(k): # compute metric - r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric - best = x.max(1)[0] # best_x - aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold - bpr = (best > 1. / thr).float().mean() # best possible recall - return bpr, aat - - anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors - bpr, aat = metric(anchors) - print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') - if bpr < 0.98: # threshold to recompute - print('. 
Attempting to improve anchors, please wait...') - na = m.anchor_grid.numel() // 2 # number of anchors - try: - anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) - except Exception as e: - print(f'{prefix}ERROR: {e}') - new_bpr = metric(anchors)[0] - if new_bpr > bpr: # replace anchors - anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) - m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference - check_anchor_order(m) - m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss - print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') - else: - print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') - print('') # newline - - -def kmean_anchors(path='./data/coco.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): - """ Creates kmeans-evolved anchors from training dataset - - Arguments: - path: path to dataset *.yaml, or a loaded dataset - n: number of anchors - img_size: image size used for training - thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 - gen: generations to evolve anchors using genetic algorithm - verbose: print all results - - Return: - k: kmeans evolved anchors - - Usage: - from utils.autoanchor import *; _ = kmean_anchors() - """ - thr = 1. / thr - prefix = colorstr('autoanchor: ') - - def metric(k, wh): # compute metrics - r = wh[:, None] / k[None] - x = torch.min(r, 1. / r).min(2)[0] # ratio metric - # x = wh_iou(wh, torch.tensor(k)) # iou metric - return x, x.max(1)[0] # x, best_x - - def anchor_fitness(k): # mutation fitness - _, best = metric(torch.tensor(k, dtype=torch.float32), wh) - return (best * (best > thr).float()).mean() # fitness - - def print_results(k): - k = k[np.argsort(k.prod(1))] # sort small to large - x, best = metric(k, wh0) - bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr - print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') - print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' - f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') - for i, x in enumerate(k): - print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg - return k - - if isinstance(path, str): # *.yaml file - with open(path) as f: - data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict - from utils.datasets import LoadImagesAndLabels - dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) - else: - dataset = path # dataset - - # Get label wh - shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) - wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh - - # Filter - i = (wh0 < 3.0).any(1).sum() - if i: - print(f'{prefix}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') - wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels - # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 - - # Kmeans calculation - print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') - s = wh.std(0) # sigmas for whitening - k, dist = kmeans(wh / s, n, iter=30) # points, mean distance - assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') - k *= s - wh = torch.tensor(wh, dtype=torch.float32) # filtered - wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered - k = print_results(k) - - # Plot - # k, d = [None] * 20, [None] * 20 - # for i in tqdm(range(1, 21)): - # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance - # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) - # ax = ax.ravel() - # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') - # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh - # ax[0].hist(wh[wh[:, 0]<100, 0],400) - # ax[1].hist(wh[wh[:, 1]<100, 1],400) - # fig.savefig('wh.png', dpi=200) - - # Evolve - npr = np.random - f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma - pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar - for _ in pbar: - v = np.ones(sh) - while (v == 1).all(): # mutate until a change occurs (prevent duplicates) - v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) - kg = (k.copy() * v).clip(min=2.0) - fg = anchor_fitness(kg) - if fg > f: - f, k = fg, kg.copy() - pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' - if verbose: - print_results(k) - - return print_results(k) diff --git a/spaces/bioriAsaeru/text-to-voice/Authentec Fingerprint Driver W7 64bit W7wbf64 Exe [CRACKED].md b/spaces/bioriAsaeru/text-to-voice/Authentec Fingerprint Driver W7 64bit W7wbf64 Exe [CRACKED].md deleted file mode 100644 index 5f2e1adb5d4f5b5d2fed9e1af480799f25262e47..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Authentec Fingerprint Driver W7 64bit W7wbf64 Exe [CRACKED].md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Authentec Fingerprint Driver W7 64bit W7wbf64 Exe</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://urloso.com/2uySbf">https://urloso.com/2uySbf</a></b></p><br /><br /> -<br /> - d5da3c52bf<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/blmdsydm/faster-whisper-webui/src/vad.py b/spaces/blmdsydm/faster-whisper-webui/src/vad.py deleted file mode 100644 index e68ee7391e93f539a05d548601f2d87168bb1282..0000000000000000000000000000000000000000 --- a/spaces/blmdsydm/faster-whisper-webui/src/vad.py +++ /dev/null @@ -1,568 +0,0 @@ -from abc import ABC, abstractmethod -from collections import Counter, deque -import time - -from typing import Any, Deque, Iterator, List, Dict - -from pprint import pprint -from src.hooks.progressListener import ProgressListener -from src.hooks.subTaskProgressListener import SubTaskProgressListener -from src.hooks.whisperProgressHook import create_progress_listener_handle -from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache - -from src.segments import merge_timestamps -from src.whisper.abstractWhisperContainer import AbstractWhisperCallback - -# Workaround for https://github.com/tensorflow/tensorflow/issues/48797 -try: - import tensorflow as tf -except ModuleNotFoundError: - # Error handling - pass - -import torch - -import ffmpeg -import 
numpy as np - -from src.utils import format_timestamp -from enum import Enum - -class NonSpeechStrategy(Enum): - """ - Ignore non-speech frames segments. - """ - SKIP = 1 - """ - Just treat non-speech segments as speech. - """ - CREATE_SEGMENT = 2 - """ - Expand speech segments into subsequent non-speech segments. - """ - EXPAND_SEGMENT = 3 - -# Defaults for Silero -SPEECH_TRESHOLD = 0.3 - -# Minimum size of segments to process -MIN_SEGMENT_DURATION = 1 - -# The maximum time for texts from old segments to be used in the next segment -MAX_PROMPT_WINDOW = 0 # seconds (0 = disabled) -PROMPT_NO_SPEECH_PROB = 0.1 # Do not pass the text from segments with a no speech probability higher than this - -VAD_MAX_PROCESSING_CHUNK = 60 * 60 # 60 minutes of audio - -class TranscriptionConfig(ABC): - def __init__(self, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP, - segment_padding_left: float = None, segment_padding_right = None, max_silent_period: float = None, - max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index = -1): - self.non_speech_strategy = non_speech_strategy - self.segment_padding_left = segment_padding_left - self.segment_padding_right = segment_padding_right - self.max_silent_period = max_silent_period - self.max_merge_size = max_merge_size - self.max_prompt_window = max_prompt_window - self.initial_segment_index = initial_segment_index - -class PeriodicTranscriptionConfig(TranscriptionConfig): - def __init__(self, periodic_duration: float, non_speech_strategy: NonSpeechStrategy = NonSpeechStrategy.SKIP, - segment_padding_left: float = None, segment_padding_right = None, max_silent_period: float = None, - max_merge_size: float = None, max_prompt_window: float = None, initial_segment_index = -1): - super().__init__(non_speech_strategy, segment_padding_left, segment_padding_right, max_silent_period, max_merge_size, max_prompt_window, initial_segment_index) - self.periodic_duration = periodic_duration - -class AbstractTranscription(ABC): - def __init__(self, sampling_rate: int = 16000): - self.sampling_rate = sampling_rate - - def get_audio_segment(self, str, start_time: str = None, duration: str = None): - return load_audio(str, self.sampling_rate, start_time, duration) - - def is_transcribe_timestamps_fast(self): - """ - Determine if get_transcribe_timestamps is fast enough to not need parallelization. - """ - return False - - @abstractmethod - def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float): - """ - Get the start and end timestamps of the sections that should be transcribed by this VAD method. - - Parameters - ---------- - audio: str - The audio file. - config: TranscriptionConfig - The transcription configuration. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. - """ - return - - def get_merged_timestamps(self, timestamps: List[Dict[str, Any]], config: TranscriptionConfig, total_duration: float): - """ - Get the start and end timestamps of the sections that should be transcribed by this VAD method, - after merging the given segments using the specified configuration. - - Parameters - ---------- - audio: str - The audio file. - config: TranscriptionConfig - The transcription configuration. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. 
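
        The merged segments are then post-processed according to config.non_speech_strategy:
        SKIP leaves the silent gaps untranscribed, CREATE_SEGMENT emits separate segments for
        the silent gaps (their size governed by max_merge_size), and EXPAND_SEGMENT stretches
        each speech segment forward to cover the silence after it.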
- """ - merged = merge_timestamps(timestamps, config.max_silent_period, config.max_merge_size, - config.segment_padding_left, config.segment_padding_right) - - if config.non_speech_strategy != NonSpeechStrategy.SKIP: - # Expand segments to include the gaps between them - if (config.non_speech_strategy == NonSpeechStrategy.CREATE_SEGMENT): - # When we have a prompt window, we create speech segments betwen each segment if we exceed the merge size - merged = self.fill_gaps(merged, total_duration=total_duration, max_expand_size=config.max_merge_size) - elif config.non_speech_strategy == NonSpeechStrategy.EXPAND_SEGMENT: - # With no prompt window, it is better to just expand the segments (this effectively passes the prompt to the next segment) - merged = self.expand_gaps(merged, total_duration=total_duration) - else: - raise Exception("Unknown non-speech strategy: " + str(config.non_speech_strategy)) - - print("Transcribing non-speech:") - pprint(merged) - return merged - - def transcribe(self, audio: str, whisperCallable: AbstractWhisperCallback, config: TranscriptionConfig, - progressListener: ProgressListener = None): - """ - Transcribe the given audo file. - - Parameters - ---------- - audio: str - The audio file. - whisperCallable: WhisperCallback - A callback object to call to transcribe each segment. - - Returns - ------- - A list of start and end timestamps, in fractional seconds. - """ - - try: - max_audio_duration = self.get_audio_duration(audio, config) - timestamp_segments = self.get_transcribe_timestamps(audio, config, 0, max_audio_duration) - - # Get speech timestamps from full audio file - merged = self.get_merged_timestamps(timestamp_segments, config, max_audio_duration) - - # A deque of transcribed segments that is passed to the next segment as a prompt - prompt_window = deque() - - print("Processing timestamps:") - pprint(merged) - - result = { - 'text': "", - 'segments': [], - 'language': "" - } - languageCounter = Counter() - detected_language = None - - segment_index = config.initial_segment_index - - # Calculate progress - progress_start_offset = merged[0]['start'] if len(merged) > 0 else 0 - progress_total_duration = sum([segment['end'] - segment['start'] for segment in merged]) - - # For each time segment, run whisper - for segment in merged: - segment_index += 1 - segment_start = segment['start'] - segment_end = segment['end'] - segment_expand_amount = segment.get('expand_amount', 0) - segment_gap = segment.get('gap', False) - - segment_duration = segment_end - segment_start - - if segment_duration < MIN_SEGMENT_DURATION: - continue - - # Audio to run on Whisper - segment_audio = self.get_audio_segment(audio, start_time = str(segment_start), duration = str(segment_duration)) - # Previous segments to use as a prompt - segment_prompt = ' '.join([segment['text'] for segment in prompt_window]) if len(prompt_window) > 0 else None - - # Detected language - detected_language = languageCounter.most_common(1)[0][0] if len(languageCounter) > 0 else None - - print("Running whisper from ", format_timestamp(segment_start), " to ", format_timestamp(segment_end), ", duration: ", - segment_duration, "expanded: ", segment_expand_amount, "prompt: ", segment_prompt, "language: ", detected_language) - - perf_start_time = time.perf_counter() - - scaled_progress_listener = SubTaskProgressListener(progressListener, base_task_total=progress_total_duration, - sub_task_start=segment_start - progress_start_offset, sub_task_total=segment_duration) - segment_result = 
whisperCallable.invoke(segment_audio, segment_index, segment_prompt, detected_language, progress_listener=scaled_progress_listener) - - perf_end_time = time.perf_counter() - print("Whisper took {} seconds".format(perf_end_time - perf_start_time)) - - adjusted_segments = self.adjust_timestamp(segment_result["segments"], adjust_seconds=segment_start, max_source_time=segment_duration) - - # Propagate expand amount to the segments - if (segment_expand_amount > 0): - segment_without_expansion = segment_duration - segment_expand_amount - - for adjusted_segment in adjusted_segments: - adjusted_segment_end = adjusted_segment['end'] - - # Add expand amount if the segment got expanded - if (adjusted_segment_end > segment_without_expansion): - adjusted_segment["expand_amount"] = adjusted_segment_end - segment_without_expansion - - # Append to output - result['text'] += segment_result['text'] - result['segments'].extend(adjusted_segments) - - # Increment detected language - if not segment_gap: - languageCounter[segment_result['language']] += 1 - - # Update prompt window - self.__update_prompt_window(prompt_window, adjusted_segments, segment_end, segment_gap, config) - - if detected_language is not None: - result['language'] = detected_language - finally: - # Notify progress listener that we are done - if progressListener is not None: - progressListener.on_finished() - return result - - def get_audio_duration(self, audio: str, config: TranscriptionConfig): - return get_audio_duration(audio) - - def __update_prompt_window(self, prompt_window: Deque, adjusted_segments: List, segment_end: float, segment_gap: bool, config: TranscriptionConfig): - if (config.max_prompt_window is not None and config.max_prompt_window > 0): - # Add segments to the current prompt window (unless it is a speech gap) - if not segment_gap: - for segment in adjusted_segments: - if segment.get('no_speech_prob', 0) <= PROMPT_NO_SPEECH_PROB: - prompt_window.append(segment) - - while (len(prompt_window) > 0): - first_end_time = prompt_window[0].get('end', 0) - # Time expanded in the segments should be discounted from the prompt window - first_expand_time = prompt_window[0].get('expand_amount', 0) - - if (first_end_time - first_expand_time < segment_end - config.max_prompt_window): - prompt_window.popleft() - else: - break - - def include_gaps(self, segments: Iterator[dict], min_gap_length: float, total_duration: float): - result = [] - last_end_time = 0 - - for segment in segments: - segment_start = float(segment['start']) - segment_end = float(segment['end']) - - if (last_end_time != segment_start): - delta = segment_start - last_end_time - - if (min_gap_length is None or delta >= min_gap_length): - result.append( { 'start': last_end_time, 'end': segment_start, 'gap': True } ) - - last_end_time = segment_end - result.append(segment) - - # Also include total duration if specified - if (total_duration is not None and last_end_time < total_duration): - delta = total_duration - segment_start - - if (min_gap_length is None or delta >= min_gap_length): - result.append( { 'start': last_end_time, 'end': total_duration, 'gap': True } ) - - return result - - # Expand the end time of each segment to the start of the next segment - def expand_gaps(self, segments: List[Dict[str, Any]], total_duration: float): - result = [] - - if len(segments) == 0: - return result - - # Add gap at the beginning if needed - if (segments[0]['start'] > 0): - result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } ) - - for i in range(len(segments) - 
1): - current_segment = segments[i] - next_segment = segments[i + 1] - - delta = next_segment['start'] - current_segment['end'] - - # Expand if the gap actually exists - if (delta >= 0): - current_segment = current_segment.copy() - current_segment['expand_amount'] = delta - current_segment['end'] = next_segment['start'] - - result.append(current_segment) - - # Add last segment - last_segment = segments[-1] - result.append(last_segment) - - # Also include total duration if specified - if (total_duration is not None): - last_segment = result[-1] - - if (last_segment['end'] < total_duration): - last_segment = last_segment.copy() - last_segment['end'] = total_duration - result[-1] = last_segment - - return result - - def fill_gaps(self, segments: List[Dict[str, Any]], total_duration: float, max_expand_size: float = None): - result = [] - - if len(segments) == 0: - return result - - # Add gap at the beginning if needed - if (segments[0]['start'] > 0): - result.append({ 'start': 0, 'end': segments[0]['start'], 'gap': True } ) - - for i in range(len(segments) - 1): - expanded = False - current_segment = segments[i] - next_segment = segments[i + 1] - - delta = next_segment['start'] - current_segment['end'] - - if (max_expand_size is not None and delta <= max_expand_size): - # Just expand the current segment - current_segment = current_segment.copy() - current_segment['expand_amount'] = delta - current_segment['end'] = next_segment['start'] - expanded = True - - result.append(current_segment) - - # Add a gap to the next segment if needed - if (delta >= 0 and not expanded): - result.append({ 'start': current_segment['end'], 'end': next_segment['start'], 'gap': True } ) - - # Add last segment - last_segment = segments[-1] - result.append(last_segment) - - # Also include total duration if specified - if (total_duration is not None): - last_segment = result[-1] - - delta = total_duration - last_segment['end'] - - if (delta > 0): - if (max_expand_size is not None and delta <= max_expand_size): - # Expand the last segment - last_segment = last_segment.copy() - last_segment['expand_amount'] = delta - last_segment['end'] = total_duration - result[-1] = last_segment - else: - result.append({ 'start': last_segment['end'], 'end': total_duration, 'gap': True } ) - - return result - - def adjust_timestamp(self, segments: Iterator[dict], adjust_seconds: float, max_source_time: float = None): - result = [] - - for segment in segments: - segment_start = float(segment['start']) - segment_end = float(segment['end']) - - # Filter segments? 
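            # Segments that begin after max_source_time are dropped entirely, and segments
            # that merely run past it are clipped to it, so the adjusted timestamps never
            # point outside the audio chunk that was actually transcribed.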
- if (max_source_time is not None): - if (segment_start > max_source_time): - continue - segment_end = min(max_source_time, segment_end) - - new_segment = segment.copy() - - # Add to start and end - new_segment['start'] = segment_start + adjust_seconds - new_segment['end'] = segment_end + adjust_seconds - - # Handle words - if ('words' in new_segment): - for word in new_segment['words']: - # Adjust start and end - word['start'] = word['start'] + adjust_seconds - word['end'] = word['end'] + adjust_seconds - - result.append(new_segment) - return result - - def multiply_timestamps(self, timestamps: List[Dict[str, Any]], factor: float): - result = [] - - for entry in timestamps: - start = entry['start'] - end = entry['end'] - - result.append({ - 'start': start * factor, - 'end': end * factor - }) - return result - - -class VadSileroTranscription(AbstractTranscription): - def __init__(self, sampling_rate: int = 16000, cache: ModelCache = None): - super().__init__(sampling_rate=sampling_rate) - self.model = None - self.cache = cache - self._initialize_model() - - def _initialize_model(self): - if (self.cache is not None): - model_key = "VadSileroTranscription" - self.model, self.get_speech_timestamps = self.cache.get(model_key, self._create_model) - print("Loaded Silerio model from cache.") - else: - self.model, self.get_speech_timestamps = self._create_model() - print("Created Silerio model") - - def _create_model(self): - model, utils = torch.hub.load(repo_or_dir='snakers4/silero-vad', model='silero_vad') - - # Silero does not benefit from multi-threading - torch.set_num_threads(1) # JIT - (get_speech_timestamps, _, _, _, _) = utils - - return model, get_speech_timestamps - - def get_transcribe_timestamps(self, audio: str, config: TranscriptionConfig, start_time: float, end_time: float): - result = [] - - print("Getting timestamps from audio file: {}, start: {}, duration: {}".format(audio, start_time, end_time)) - perf_start_time = time.perf_counter() - - # Divide procesisng of audio into chunks - chunk_start = start_time - - while (chunk_start < end_time): - chunk_duration = min(end_time - chunk_start, VAD_MAX_PROCESSING_CHUNK) - - print("Processing VAD in chunk from {} to {}".format(format_timestamp(chunk_start), format_timestamp(chunk_start + chunk_duration))) - wav = self.get_audio_segment(audio, str(chunk_start), str(chunk_duration)) - - sample_timestamps = self.get_speech_timestamps(wav, self.model, sampling_rate=self.sampling_rate, threshold=SPEECH_TRESHOLD) - seconds_timestamps = self.multiply_timestamps(sample_timestamps, factor=1 / self.sampling_rate) - adjusted = self.adjust_timestamp(seconds_timestamps, adjust_seconds=chunk_start, max_source_time=chunk_start + chunk_duration) - - #pprint(adjusted) - - result.extend(adjusted) - chunk_start += chunk_duration - - perf_end_time = time.perf_counter() - print("VAD processing took {} seconds".format(perf_end_time - perf_start_time)) - - return result - - def __getstate__(self): - # We only need the sampling rate - return { 'sampling_rate': self.sampling_rate } - - def __setstate__(self, state): - self.sampling_rate = state['sampling_rate'] - self.model = None - # Use the global cache - self.cache = GLOBAL_MODEL_CACHE - self._initialize_model() - -# A very simple VAD that just marks every N seconds as speech -class VadPeriodicTranscription(AbstractTranscription): - def __init__(self, sampling_rate: int = 16000): - super().__init__(sampling_rate=sampling_rate) - - def is_transcribe_timestamps_fast(self): - # This is a very fast VAD - no 
need to parallelize it - return True - - def get_transcribe_timestamps(self, audio: str, config: PeriodicTranscriptionConfig, start_time: float, end_time: float): - result = [] - - # Generate a timestamp every N seconds - start_timestamp = start_time - - while (start_timestamp < end_time): - end_timestamp = min(start_timestamp + config.periodic_duration, end_time) - segment_duration = end_timestamp - start_timestamp - - # Minimum duration is 1 second - if (segment_duration >= 1): - result.append( { 'start': start_timestamp, 'end': end_timestamp } ) - - start_timestamp = end_timestamp - - return result - -def get_audio_duration(file: str): - return float(ffmpeg.probe(file)["format"]["duration"]) - -def load_audio(file: str, sample_rate: int = 16000, - start_time: str = None, duration: str = None): - """ - Open an audio file and read as mono waveform, resampling as necessary - - Parameters - ---------- - file: str - The audio file to open - - sr: int - The sample rate to resample the audio if necessary - - start_time: str - The start time, using the standard FFMPEG time duration syntax, or None to disable. - - duration: str - The duration, using the standard FFMPEG time duration syntax, or None to disable. - - Returns - ------- - A NumPy array containing the audio waveform, in float32 dtype. - """ - try: - inputArgs = {'threads': 0} - - if (start_time is not None): - inputArgs['ss'] = start_time - if (duration is not None): - inputArgs['t'] = duration - - # This launches a subprocess to decode audio while down-mixing and resampling as necessary. - # Requires the ffmpeg CLI and `ffmpeg-python` package to be installed. - out, _ = ( - ffmpeg.input(file, **inputArgs) - .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=sample_rate) - .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True) - ) - except ffmpeg.Error as e: - raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") - - return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 \ No newline at end of file diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/.github/SECURITY.md b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/.github/SECURITY.md deleted file mode 100644 index aa3e8409da6b525245454ad0360642cbaead5569..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/.github/SECURITY.md +++ /dev/null @@ -1,7 +0,0 @@ -# Security Policy - -We aim to make YOLOv5 🚀 as secure as possible! If you find potential vulnerabilities or have any concerns please let us know so we can investigate and take corrective action if needed. - -### Reporting a Vulnerability - -To report vulnerabilities please email us at hello@ultralytics.com or visit https://ultralytics.com/contact. Thank you! diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/GETTING_STARTED.md b/spaces/carlosalonso/Detection-video/carpeta_deteccion/GETTING_STARTED.md deleted file mode 100644 index 404b0c8f467264d1adf61e8274e5f864e24018e8..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/GETTING_STARTED.md +++ /dev/null @@ -1,79 +0,0 @@ -## Getting Started with Detectron2 - -This document provides a brief intro of the usage of builtin command-line tools in detectron2. 
- -For a tutorial that involves actual coding with the API, -see our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -which covers how to run inference with an -existing model, and how to train a builtin model on a custom dataset. - - -### Inference Demo with Pre-trained Models - -1. Pick a model and its config file from - [model zoo](MODEL_ZOO.md), - for example, `mask_rcnn_R_50_FPN_3x.yaml`. -2. We provide `demo.py` that is able to demo builtin configs. Run it with: -``` -cd demo/ -python demo.py --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml \ - --input input1.jpg input2.jpg \ - [--other-options] - --opts MODEL.WEIGHTS detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl -``` -The configs are made for training, therefore we need to specify `MODEL.WEIGHTS` to a model from model zoo for evaluation. -This command will run the inference and show visualizations in an OpenCV window. - -For details of the command line arguments, see `demo.py -h` or look at its source code -to understand its behavior. Some common arguments are: -* To run __on your webcam__, replace `--input files` with `--webcam`. -* To run __on a video__, replace `--input files` with `--video-input video.mp4`. -* To run __on cpu__, add `MODEL.DEVICE cpu` after `--opts`. -* To save outputs to a directory (for images) or a file (for webcam or video), use `--output`. - - -### Training & Evaluation in Command Line - -We provide two scripts in "tools/plain_train_net.py" and "tools/train_net.py", -that are made to train all the configs provided in detectron2. You may want to -use it as a reference to write your own training script. - -Compared to "train_net.py", "plain_train_net.py" supports fewer default -features. It also includes fewer abstraction, therefore is easier to add custom -logic. - -To train a model with "train_net.py", first -setup the corresponding datasets following -[datasets/README.md](./datasets/README.md), -then run: -``` -cd tools/ -./train_net.py --num-gpus 8 \ - --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml -``` - -The configs are made for 8-GPU training. -To train on 1 GPU, you may need to [change some parameters](https://arxiv.org/abs/1706.02677), e.g.: -``` -./train_net.py \ - --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ - --num-gpus 1 SOLVER.IMS_PER_BATCH 2 SOLVER.BASE_LR 0.0025 -``` - -To evaluate a model's performance, use -``` -./train_net.py \ - --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \ - --eval-only MODEL.WEIGHTS /path/to/checkpoint_file -``` -For more options, see `./train_net.py -h`. - -### Use Detectron2 APIs in Your Code - -See our [Colab Notebook](https://colab.research.google.com/drive/16jcaJoc6bCFAQ96jDe2HwtXj7BMD_-m5) -to learn how to use detectron2 APIs to: -1. run inference with an existing model -2. train a builtin model on a custom dataset - -See [detectron2/projects](https://github.com/facebookresearch/detectron2/tree/main/projects) -for more ways to build your project on detectron2. 
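For a quick look at item 1 without opening the notebook, a minimal inference sketch through the Python API might look like the following. It assumes detectron2 is installed and a local test image `input1.jpg` exists, and it reuses the same config/weights pair as the `demo.py` command above.

```
import cv2
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

# Same config/weights pair as the demo.py command above
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # keep only confident detections
# cfg.MODEL.DEVICE = "cpu"                   # uncomment to run without a GPU

predictor = DefaultPredictor(cfg)
image = cv2.imread("input1.jpg")             # BGR array, as DefaultPredictor expects
outputs = predictor(image)
print(outputs["instances"].pred_classes)
print(outputs["instances"].pred_boxes)
```

For training and evaluation, the `train_net.py` workflows above remain the recommended entry points.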
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/checkpoint/c2_model_loading.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/checkpoint/c2_model_loading.py deleted file mode 100644 index 8c8d181bd7200bd3fd38446e743f8f16780d6e76..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/checkpoint/c2_model_loading.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import copy -import logging -import re -from typing import Dict, List -import torch -from tabulate import tabulate - - -def convert_basic_c2_names(original_keys): - """ - Apply some basic name conversion to names in C2 weights. - It only deals with typical backbone models. - - Args: - original_keys (list[str]): - Returns: - list[str]: The same number of strings matching those in original_keys. - """ - layer_keys = copy.deepcopy(original_keys) - layer_keys = [ - {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys - ] # some hard-coded mappings - - layer_keys = [k.replace("_", ".") for k in layer_keys] - layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] - layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] - # Uniform both bn and gn names to "norm" - layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] - layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] - layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] - layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] - - # stem - layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] - # to avoid mis-matching with "conv1" in other components (e.g. 
detection head) - layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] - - # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) - # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] - # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] - # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] - # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] - - # blocks - layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] - layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] - layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] - layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] - - # DensePose substitutions - layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] - layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] - layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] - layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] - layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] - return layer_keys - - -def convert_c2_detectron_names(weights): - """ - Map Caffe2 Detectron weight names to Detectron2 names. - - Args: - weights (dict): name -> tensor - - Returns: - dict: detectron2 names -> tensor - dict: detectron2 names -> C2 names - """ - logger = logging.getLogger(__name__) - logger.info("Renaming Caffe2 weights ......") - original_keys = sorted(weights.keys()) - layer_keys = copy.deepcopy(original_keys) - - layer_keys = convert_basic_c2_names(layer_keys) - - # -------------------------------------------------------------------------- - # RPN hidden representation conv - # -------------------------------------------------------------------------- - # FPN case - # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then - # shared for all other levels, hence the appearance of "fpn2" - layer_keys = [ - k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys - ] - # Non-FPN case - layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] - - # -------------------------------------------------------------------------- - # RPN box transformation conv - # -------------------------------------------------------------------------- - # FPN case (see note above about "fpn2") - layer_keys = [ - k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") - for k in layer_keys - ] - layer_keys = [ - k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") - for k in layer_keys - ] - # Non-FPN case - layer_keys = [ - k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys - ] - layer_keys = [ - k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") - for k in layer_keys - ] - - # -------------------------------------------------------------------------- - # Fast R-CNN box head - # -------------------------------------------------------------------------- - layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] - layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] - layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] - layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] - # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s - layer_keys = 
[re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] - - # -------------------------------------------------------------------------- - # FPN lateral and output convolutions - # -------------------------------------------------------------------------- - def fpn_map(name): - """ - Look for keys with the following patterns: - 1) Starts with "fpn.inner." - Example: "fpn.inner.res2.2.sum.lateral.weight" - Meaning: These are lateral pathway convolutions - 2) Starts with "fpn.res" - Example: "fpn.res2.2.sum.weight" - Meaning: These are FPN output convolutions - """ - splits = name.split(".") - norm = ".norm" if "norm" in splits else "" - if name.startswith("fpn.inner."): - # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] - stage = int(splits[2][len("res") :]) - return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) - elif name.startswith("fpn.res"): - # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] - stage = int(splits[1][len("res") :]) - return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) - return name - - layer_keys = [fpn_map(k) for k in layer_keys] - - # -------------------------------------------------------------------------- - # Mask R-CNN mask head - # -------------------------------------------------------------------------- - # roi_heads.StandardROIHeads case - layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] - layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] - layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] - # roi_heads.Res5ROIHeads case - layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] - - # -------------------------------------------------------------------------- - # Keypoint R-CNN head - # -------------------------------------------------------------------------- - # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" - layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] - layer_keys = [ - k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys - ] - layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] - - # -------------------------------------------------------------------------- - # Done with replacements - # -------------------------------------------------------------------------- - assert len(set(layer_keys)) == len(layer_keys) - assert len(original_keys) == len(layer_keys) - - new_weights = {} - new_keys_to_original_keys = {} - for orig, renamed in zip(original_keys, layer_keys): - new_keys_to_original_keys[renamed] = orig - if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): - # remove the meaningless prediction weight for background class - new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 - new_weights[renamed] = weights[orig][new_start_idx:] - logger.info( - "Remove prediction weight for background class in {}. 
The shape changes from " - "{} to {}.".format( - renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) - ) - ) - elif renamed.startswith("cls_score."): - # move weights of bg class from original index 0 to last index - logger.info( - "Move classification weights for background class in {} from index 0 to " - "index {}.".format(renamed, weights[orig].shape[0] - 1) - ) - new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) - else: - new_weights[renamed] = weights[orig] - - return new_weights, new_keys_to_original_keys - - -# Note the current matching is not symmetric. -# it assumes model_state_dict will have longer names. -def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): - """ - Match names between the two state-dict, and returns a new chkpt_state_dict with names - converted to match model_state_dict with heuristics. The returned dict can be later - loaded with fvcore checkpointer. - If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 - model and will be renamed at first. - - Strategy: suppose that the models that we will create will have prefixes appended - to each of its keys, for example due to an extra level of nesting that the original - pre-trained weights from ImageNet won't contain. For example, model.state_dict() - might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains - res2.conv1.weight. We thus want to match both parameters together. - For that, we look for each model weight, look among all loaded keys if there is one - that is a suffix of the current weight name, and use it if that's the case. - If multiple matches exist, take the one with longest size - of the corresponding name. For example, for the same model as before, the pretrained - weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, - we want to match backbone[0].body.conv1.weight to conv1.weight, and - backbone[0].body.res2.conv1.weight to res2.conv1.weight. - """ - model_keys = sorted(model_state_dict.keys()) - if c2_conversion: - ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) - # original_keys: the name in the original dict (before renaming) - else: - original_keys = {x: x for x in ckpt_state_dict.keys()} - ckpt_keys = sorted(ckpt_state_dict.keys()) - - def match(a, b): - # Matched ckpt_key should be a complete (starts with '.') suffix. - # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, - # but matches whatever_conv1 or mesh_head.whatever_conv1. - return a == b or a.endswith("." 
+ b) - - # get a matrix of string matches, where each (i, j) entry correspond to the size of the - # ckpt_key string, if it matches - match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] - match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) - # use the matched one with longest size in case of multiple matches - max_match_size, idxs = match_matrix.max(1) - # remove indices that correspond to no-match - idxs[max_match_size == 0] = -1 - - logger = logging.getLogger(__name__) - # matched_pairs (matched checkpoint key --> matched model key) - matched_keys = {} - result_state_dict = {} - for idx_model, idx_ckpt in enumerate(idxs.tolist()): - if idx_ckpt == -1: - continue - key_model = model_keys[idx_model] - key_ckpt = ckpt_keys[idx_ckpt] - value_ckpt = ckpt_state_dict[key_ckpt] - shape_in_model = model_state_dict[key_model].shape - - if shape_in_model != value_ckpt.shape: - logger.warning( - "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( - key_ckpt, value_ckpt.shape, key_model, shape_in_model - ) - ) - logger.warning( - "{} will not be loaded. Please double check and see if this is desired.".format( - key_ckpt - ) - ) - continue - - assert key_model not in result_state_dict - result_state_dict[key_model] = value_ckpt - if key_ckpt in matched_keys: # already added to matched_keys - logger.error( - "Ambiguity found for {} in checkpoint!" - "It matches at least two keys in the model ({} and {}).".format( - key_ckpt, key_model, matched_keys[key_ckpt] - ) - ) - raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") - - matched_keys[key_ckpt] = key_model - - # logging: - matched_model_keys = sorted(matched_keys.values()) - if len(matched_model_keys) == 0: - logger.warning("No weights in checkpoint matched with model.") - return ckpt_state_dict - common_prefix = _longest_common_prefix(matched_model_keys) - rev_matched_keys = {v: k for k, v in matched_keys.items()} - original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} - - model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) - table = [] - memo = set() - for key_model in matched_model_keys: - if key_model in memo: - continue - if key_model in model_key_groups: - group = model_key_groups[key_model] - memo |= set(group) - shapes = [tuple(model_state_dict[k].shape) for k in group] - table.append( - ( - _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", - _group_str([original_keys[k] for k in group]), - " ".join([str(x).replace(" ", "") for x in shapes]), - ) - ) - else: - key_checkpoint = original_keys[key_model] - shape = str(tuple(model_state_dict[key_model].shape)) - table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) - table_str = tabulate( - table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] - ) - logger.info( - "Following weights matched with " - + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") - + ":\n" - + table_str - ) - - unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] - for k in unmatched_ckpt_keys: - result_state_dict[k] = ckpt_state_dict[k] - return result_state_dict - - -def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): - """ - Params in the same submodule are grouped together. 
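The suffix-matching heuristic described in the `align_and_update_state_dicts` docstring can be demonstrated in isolation. The sketch below is not the function itself (which also performs Caffe2 renaming, shape checks, ambiguity detection and logging); it only illustrates the core rule, using the same example keys as the docstring: a checkpoint key matches a model key when it equals it or is a `.`-delimited suffix of it, and the longest such match wins.

```python
def match_by_longest_suffix(model_keys, ckpt_keys):
    """Toy version of the matching rule: keep, for every model key, the longest
    checkpoint key that equals it or is a '.'-delimited suffix of it."""
    mapping = {}
    for mk in model_keys:
        candidates = [ck for ck in ckpt_keys if mk == ck or mk.endswith("." + ck)]
        if candidates:
            mapping[mk] = max(candidates, key=len)
    return mapping

print(match_by_longest_suffix(
    ["backbone[0].body.res2.conv1.weight", "backbone[0].body.conv1.weight"],
    ["res2.conv1.weight", "conv1.weight"]))
# {'backbone[0].body.res2.conv1.weight': 'res2.conv1.weight',
#  'backbone[0].body.conv1.weight': 'conv1.weight'}
```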
- - Args: - keys: names of all parameters - original_names: mapping from parameter name to their name in the checkpoint - - Returns: - dict[name -> all other names in the same group] - """ - - def _submodule_name(key): - pos = key.rfind(".") - if pos < 0: - return None - prefix = key[: pos + 1] - return prefix - - all_submodules = [_submodule_name(k) for k in keys] - all_submodules = [x for x in all_submodules if x] - all_submodules = sorted(all_submodules, key=len) - - ret = {} - for prefix in all_submodules: - group = [k for k in keys if k.startswith(prefix)] - if len(group) <= 1: - continue - original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) - if len(original_name_lcp) == 0: - # don't group weights if original names don't share prefix - continue - - for k in group: - if k in ret: - continue - ret[k] = group - return ret - - -def _longest_common_prefix(names: List[str]) -> str: - """ - ["abc.zfg", "abc.zef"] -> "abc." - """ - names = [n.split(".") for n in names] - m1, m2 = min(names), max(names) - ret = [a for a, b in zip(m1, m2) if a == b] - ret = ".".join(ret) + "." if len(ret) else "" - return ret - - -def _longest_common_prefix_str(names: List[str]) -> str: - m1, m2 = min(names), max(names) - lcp = [a for a, b in zip(m1, m2) if a == b] - lcp = "".join(lcp) - return lcp - - -def _group_str(names: List[str]) -> str: - """ - Turn "common1", "common2", "common3" into "common{1,2,3}" - """ - lcp = _longest_common_prefix_str(names) - rest = [x[len(lcp) :] for x in names] - rest = "{" + ",".join(rest) + "}" - ret = lcp + rest - - # add some simplification for BN specifically - ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") - ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") - return ret diff --git a/spaces/cat630/ChuanhuChatGPT/utils.py b/spaces/cat630/ChuanhuChatGPT/utils.py deleted file mode 100644 index 2225759bfd19a0b8913608a611bb9cc03c31ebcd..0000000000000000000000000000000000000000 --- a/spaces/cat630/ChuanhuChatGPT/utils.py +++ /dev/null @@ -1,319 +0,0 @@ -"""Contains all of the components that can be used with Gradio Interface / Blocks. -Along with the docs for each component, you can find the names of example demos that use -each component. These demos are located in the `demo` directory.""" - -from __future__ import annotations -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type -import json -import gradio as gr -# import openai -import os -import traceback -import requests -# import markdown -import csv -import mdtex2html -from pypinyin import lazy_pinyin - -if TYPE_CHECKING: - from typing import TypedDict - - class DataframeData(TypedDict): - headers: List[str] - data: List[List[str | int | bool]] - -initial_prompt = "You are a helpful assistant." -API_URL = "https://api.openai.com/v1/chat/completions" -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -def postprocess( - self, y: List[Tuple[str | None, str | None]] - ) -> List[Tuple[str | None, str | None]]: - """ - Parameters: - y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format. - Returns: - List of tuples representing the message and response. Each message and response will be a string of HTML. 
- """ - if y is None: - return [] - for i, (message, response) in enumerate(y): - y[i] = ( - # None if message is None else markdown.markdown(message), - # None if response is None else markdown.markdown(response), - None if message is None else mdtex2html.convert((message)), - None if response is None else mdtex2html.convert(response), - ) - return y - -def parse_text(text): - lines = text.split("\n") - lines = [line for line in lines if line != ""] - count = 0 - firstline = False - for i, line in enumerate(lines): - if "```" in line: - count += 1 - items = line.split('`') - if count % 2 == 1: - lines[i] = f'<pre><code class="language-{items[-1]}">' - else: - lines[i] = f'<br></code></pre>' - else: - if i > 0: - if count % 2 == 1: - line = line.replace("`", "\`") - line = line.replace("<", "<") - line = line.replace(">", ">") - line = line.replace(" ", " ") - line = line.replace("*", "*") - line = line.replace("_", "_") - line = line.replace("-", "-") - line = line.replace(".", ".") - line = line.replace("!", "!") - line = line.replace("(", "(") - line = line.replace(")", ")") - line = line.replace("$", "$") - lines[i] = "<br>"+line - text = "".join(lines) - return text - -def predict(inputs, top_p, temperature, openai_api_key, chatbot=[], history=[], system_prompt=initial_prompt, retry=False, summary=False, retry_on_crash = False, stream = True): # repetition_penalty, top_k - - if "猫娘" in inputs: - chatbot.append((inputs, '喵~主人请点击<a href="https://www.bilibili.com/video/BV1GJ411x7h7/">这个链接</a>查看刺激内容哦~')) - yield chatbot, history, "status: 喵~" - print(f"########## 有人上钩了: {inputs} ##########") - return - print(f"====== 收到问题: {inputs} =======") - - if retry_on_crash: - retry = True - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}" - } - - chat_counter = len(history) // 2 - - print(f"chat_counter - {chat_counter}") - - messages = [] - if chat_counter: - for index in range(0, 2*chat_counter, 2): - temp1 = {} - temp1["role"] = "user" - temp1["content"] = history[index] - temp2 = {} - temp2["role"] = "assistant" - temp2["content"] = history[index+1] - if temp1["content"] != "": - if temp2["content"] != "" or retry: - messages.append(temp1) - messages.append(temp2) - else: - messages[-1]['content'] = temp2['content'] - if retry and chat_counter: - if retry_on_crash: - messages = messages[-6:] - messages.pop() - elif summary: - history = [*[i["content"] for i in messages[-2:]], "我们刚刚聊了什么?"] - messages.append(compose_user( - "请帮我总结一下上述对话的内容,实现减少字数的同时,保证对话的质量。在总结中不要加入这一句话。")) - else: - temp3 = {} - temp3["role"] = "user" - temp3["content"] = inputs - messages.append(temp3) - chat_counter += 1 - messages = [compose_system(system_prompt), *messages] - # messages - payload = { - "model": "gpt-3.5-turbo", - "messages": messages, # [{"role": "user", "content": f"{inputs}"}], - "temperature": temperature, # 1.0, - "top_p": top_p, # 1.0, - "n": 1, - "stream": stream, - "presence_penalty": 0, - "frequency_penalty": 0, - } - - if not summary: - history.append(inputs) - else: - print("精简中...") - - print(f"payload: {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - try: - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - except: - history.append("") - chatbot.append((inputs, "")) - yield history, chatbot, f"获取请求失败,请检查网络连接。" - return - - token_counter = 0 - partial_words = "" - - counter = 0 - if stream: - chatbot.append((parse_text(history[-1]), "")) - for chunk in 
response.iter_lines(): - if counter == 0: - counter += 1 - continue - counter += 1 - # check whether each line is non-empty - if chunk: - # decode each line as response data is in bytes - try: - if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0: - chunkjson = json.loads(chunk.decode()[6:]) - status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}" - yield chatbot, history, status_text - break - except Exception as e: - if not retry_on_crash: - print("正在尝试使用缩短的context重新生成……") - chatbot.pop() - history.append("") - yield next(predict(inputs, top_p, temperature, openai_api_key, chatbot, history, system_prompt, retry, summary=False, retry_on_crash=True, stream=False)) - else: - msg = "☹️发生了错误:生成失败,请检查网络" - print(msg) - history.append(inputs, "") - chatbot.append(inputs, msg) - yield chatbot, history, "status: ERROR" - break - chunkjson = json.loads(chunk.decode()[6:]) - status_text = f"id: {chunkjson['id']}, finish_reason: {chunkjson['choices'][0]['finish_reason']}" - partial_words = partial_words + \ - json.loads(chunk.decode()[6:])[ - 'choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chatbot[-1] = (parse_text(history[-2]), parse_text(history[-1])) - token_counter += 1 - yield chatbot, history, status_text - else: - try: - responsejson = json.loads(response.text) - content = responsejson["choices"][0]["message"]["content"] - history.append(content) - chatbot.append((parse_text(history[-2]), parse_text(content))) - status_text = "精简完成" - except: - chatbot.append((parse_text(history[-1]), "☹️发生了错误,请检查网络连接或者稍后再试。")) - status_text = "status: ERROR" - yield chatbot, history, status_text - - - -def delete_last_conversation(chatbot, history): - try: - if "☹️发生了错误" in chatbot[-1][1]: - chatbot.pop() - print(history) - return chatbot, history - history.pop() - history.pop() - chatbot.pop() - print(history) - return chatbot, history - except: - return chatbot, history - -def save_chat_history(filename, system, history, chatbot): - if filename == "": - return - if not filename.endswith(".json"): - filename += ".json" - os.makedirs(HISTORY_DIR, exist_ok=True) - json_s = {"system": system, "history": history, "chatbot": chatbot} - print(json_s) - with open(os.path.join(HISTORY_DIR, filename), "w") as f: - json.dump(json_s, f) - - -def load_chat_history(filename, system, history, chatbot): - try: - print("Loading from history...") - with open(os.path.join(HISTORY_DIR, filename), "r") as f: - json_s = json.load(f) - print(json_s) - return filename, json_s["system"], json_s["history"], json_s["chatbot"] - except FileNotFoundError: - print("File not found.") - return filename, system, history, chatbot - -def sorted_by_pinyin(list): - return sorted(list, key=lambda char: lazy_pinyin(char)[0][0]) - -def get_file_names(dir, plain=False, filetypes=[".json"]): - # find all json files in the current directory and return their names - files = [] - try: - for type in filetypes: - files += [f for f in os.listdir(dir) if f.endswith(type)] - except FileNotFoundError: - files = [] - files = sorted_by_pinyin(files) - if files == []: - files = [""] - if plain: - return files - else: - return gr.Dropdown.update(choices=files) - -def get_history_names(plain=False): - return get_file_names(HISTORY_DIR, plain) - -def load_template(filename, mode=0): - lines = [] - print("Loading template...") - if filename.endswith(".json"): - with open(os.path.join(TEMPLATES_DIR, filename), "r", 
encoding="utf8") as f: - lines = json.load(f) - lines = [[i["act"], i["prompt"]] for i in lines] - else: - with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as csvfile: - reader = csv.reader(csvfile) - lines = list(reader) - lines = lines[1:] - if mode == 1: - return sorted_by_pinyin([row[0] for row in lines]) - elif mode == 2: - return {row[0]:row[1] for row in lines} - else: - choices = sorted_by_pinyin([row[0] for row in lines]) - return {row[0]:row[1] for row in lines}, gr.Dropdown.update(choices=choices, value=choices[0]) - -def get_template_names(plain=False): - return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", "json"]) - -def get_template_content(templates, selection, original_system_prompt): - try: - return templates[selection] - except: - return original_system_prompt - -def reset_state(): - return [], [] - -def compose_system(system_prompt): - return {"role": "system", "content": system_prompt} - - -def compose_user(user_input): - return {"role": "user", "content": user_input} - - -def reset_textbox(): - return gr.update(value='') diff --git a/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/japanese.py b/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/japanese.py deleted file mode 100644 index 375e4d50872d5c68ee57ca17470a2ca425425eba..0000000000000000000000000000000000000000 --- a/spaces/cccccch/VITS-fast-fine-tuning-DingZhen/text/japanese.py +++ /dev/null @@ -1,153 +0,0 @@ -import re -from unidecode import unidecode -import pyopenjtalk - - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - -# List of (romaji, ipa) pairs for marks: -_romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('ts', 'ʦ'), - ('u', 'ɯ'), - ('j', 'ʥ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (romaji, ipa2) pairs for marks: -_romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('u', 'ɯ'), - ('ʧ', 'tʃ'), - ('j', 'dʑ'), - ('y', 'j'), - ('ni', 'n^i'), - ('nj', 'n^'), - ('hi', 'çi'), - ('hj', 'ç'), - ('f', 'ɸ'), - ('I', 'i*'), - ('U', 'ɯ*'), - ('r', 'ɾ') -]] - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_romaji_with_accent(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = '' - for i, sentence in 
enumerate(sentences): - if re.match(_japanese_characters, sentence): - if text != '': - text += ' ' - labels = pyopenjtalk.extract_fullcontext(sentence) - for n, label in enumerate(labels): - phoneme = re.search(r'\-([^\+]*)\+', label).group(1) - if phoneme not in ['sil', 'pau']: - text += phoneme.replace('ch', 'ʧ').replace('sh', - 'ʃ').replace('cl', 'Q') - else: - continue - # n_moras = int(re.search(r'/F:(\d+)_', label).group(1)) - a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1)) - a2 = int(re.search(r"\+(\d+)\+", label).group(1)) - a3 = int(re.search(r"\+(\d+)/", label).group(1)) - if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']: - a2_next = -1 - else: - a2_next = int( - re.search(r"\+(\d+)\+", labels[n + 1]).group(1)) - # Accent phrase boundary - if a3 == 1 and a2_next == 1: - text += ' ' - # Falling - elif a1 == 0 and a2_next == a2 + 1: - text += '↓' - # Rising - elif a2 == 1 and a2_next == 2: - text += '↑' - if i < len(marks): - text += unidecode(marks[i]).replace(' ', '') - return text - - -def get_real_sokuon(text): - for regex, replacement in _real_sokuon: - text = re.sub(regex, replacement, text) - return text - - -def get_real_hatsuon(text): - for regex, replacement in _real_hatsuon: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = re.sub( - r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa2(text): - text = japanese_to_romaji_with_accent(text).replace('...', '…') - text = get_real_sokuon(text) - text = get_real_hatsuon(text) - for regex, replacement in _romaji_to_ipa2: - text = re.sub(regex, replacement, text) - return text - - -def japanese_to_ipa3(text): - text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace( - 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a') - text = re.sub( - r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text) - text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text) - return text diff --git a/spaces/ccolas/TastyPiano/src/music/pipeline/audio2piano_solo_prob.py b/spaces/ccolas/TastyPiano/src/music/pipeline/audio2piano_solo_prob.py deleted file mode 100644 index 7fd9f2854d83baa8648c0ace88e65d66bd2f0f98..0000000000000000000000000000000000000000 --- a/spaces/ccolas/TastyPiano/src/music/pipeline/audio2piano_solo_prob.py +++ /dev/null @@ -1,47 +0,0 @@ -import numpy as np -import librosa -import sys -sys.path.append('../../../data/') -from src.music.utilities.processing_models import piano_detection_model -from src.music.config import CHKPT_PATH_PIANO_EVAL - -PIANO_SOLO_DETECTOR = piano_detection_model.PianoSoloDetector(CHKPT_PATH_PIANO_EVAL) -exclude_playlist_folders = ['synth_audio_recorded', 'from_url'] - -def clean_start_and_end_blanks(probs): - if len(probs) > 20: - # clean up to 10s in each direction - n_zeros_start = 0 - for i in range(10): - if probs[i] <= 0.001: - n_zeros_start += 1 - else: - break - n_zeros_end = 0 - for i in range(10): - if probs[-(i + 1)] <= 0.001: - n_zeros_end += 1 - else: - break - if n_zeros_end == 0: - return probs[n_zeros_start:] - else: - return probs[n_zeros_start:-n_zeros_end] - else: - return probs - -def calculate_piano_solo_prob(audio_path, verbose=False): - """Calculate the piano solo probability of all downloaded mp3s, and append - the 
probability to the meta csv file. Code from https://github.com/bytedance/GiantMIDI-Piano - """ - try: - error_msg = 'Error in audio loading?' - (audio, _) = librosa.core.load(audio_path, sr=piano_detection_model.SR, mono=True) - error_msg += ' Nope. Error in solo prediction?' - probs = PIANO_SOLO_DETECTOR.predict(audio) - # probs = clean_start_and_end_blanks(probs) # remove blanks at start and end (<=10s each way). If not piano, the rest of the song will be enough to tell. - piano_solo_prob = np.mean(probs) - error_msg += ' Nope. ' - return piano_solo_prob, '' - except: - return None, error_msg + 'Yes.' diff --git a/spaces/chaocai/superbot/stream_output.py b/spaces/chaocai/superbot/stream_output.py deleted file mode 100644 index 2950a3b4018199862576e1490cc400328c5d3d19..0000000000000000000000000000000000000000 --- a/spaces/chaocai/superbot/stream_output.py +++ /dev/null @@ -1,111 +0,0 @@ -"""Callback Handler streams to stdout on new llm token.""" -from abc import abstractmethod -import sys -from typing import Any, Dict, List, Optional - -from langchain.callbacks.base import BaseCallbackHandler -from langchain.schema import AgentAction, AgentFinish, LLMResult -from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler -DEFAULT_ANSWER_PREFIX_TOKENS = ["Final", "Answer", ":"] -INTER_MID_PREFIX_TOKENS = ["Thought", ":" ] - -class MaxbotStreamCallbackHandler(BaseCallbackHandler): - def append_to_last_tokens(self, token: str) -> None: - self.last_tokens.append(token) - self.last_tokens_stripped.append(token.strip()) - if len(self.last_tokens) > len(self.answer_prefix_tokens): - self.last_tokens.pop(0) - self.last_tokens_stripped.pop(0) - - def check_if_answer_reached(self) -> bool: - if self.strip_tokens: - return self.last_tokens_stripped == self.answer_prefix_tokens_stripped or\ - self.last_tokens_stripped[1:] == self.intermid_prefix_tokens_stripped - else: - return self.last_tokens == self.answer_prefix_tokens or\ - self.last_tokens[1:] == self.intermid_prefix_tokens - - def __init__( - self, - *, - answer_prefix_tokens: Optional[List[str]] = None, - strip_tokens: bool = True, - stream_prefix: bool = False - ) -> None: - """Instantiate FinalStreamingStdOutCallbackHandler. - - Args: - answer_prefix_tokens: Token sequence that prefixes the answer. - Default is ["Final", "Answer", ":"] - strip_tokens: Ignore white spaces and new lines when comparing - answer_prefix_tokens to last tokens? (to determine if answer has been - reached) - stream_prefix: Should answer prefix itself also be streamed? 
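The rolling token window kept by `append_to_last_tokens` and checked in `check_if_answer_reached` is the heart of this handler: nothing is forwarded until the last few tokens spell out an answer prefix. The sketch below shows that matching logic on a fake token stream, independent of LangChain and covering only the `Final Answer:` prefix; the function name and token values are purely illustrative.

```python
ANSWER_PREFIX = ["Final", "Answer", ":"]

def stream_final_answer(tokens):
    """Yield only the tokens that come after the answer prefix has been seen."""
    window = [""] * len(ANSWER_PREFIX)
    answer_reached = False
    for token in tokens:
        if answer_reached:
            yield token
            continue
        window = (window + [token.strip()])[-len(ANSWER_PREFIX):]  # rolling window of last tokens
        if window == ANSWER_PREFIX:
            answer_reached = True  # prefix found; stream everything from the next token on

fake_llm_tokens = ["Thought", ":", " I", " know", ".", "Final", " Answer", ":", " 42", "!"]
print("".join(stream_final_answer(fake_llm_tokens)))  # -> " 42!"
```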
- """ - super().__init__() - if answer_prefix_tokens is None: - self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS - self.intermid_prefix_tokens = INTER_MID_PREFIX_TOKENS - else: - self.answer_prefix_tokens = answer_prefix_tokens - self.intermid_prefix_tokens = INTER_MID_PREFIX_TOKENS - if strip_tokens: - self.answer_prefix_tokens_stripped = [ - token.strip() for token in self.answer_prefix_tokens - ] - self.intermid_prefix_tokens_stripped = [ - token.strip() for token in self.intermid_prefix_tokens - ] - else: - self.answer_prefix_tokens_stripped = self.answer_prefix_tokens - self.intermid_prefix_tokens_stripped = self.intermid_prefix_tokens - self.last_tokens = [""] * len(self.answer_prefix_tokens) - self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens) - self.strip_tokens = strip_tokens - self.stream_prefix = stream_prefix - self.answer_reached = False - - def on_llm_start( - self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any - ) -> None: - """Run when LLM starts running.""" - self.answer_reached = False - - def on_llm_new_token(self, token: str, **kwargs: Any) -> None: - """Run on new LLM token. Only available when streaming is enabled.""" - - # Remember the last n tokens, where n = len(answer_prefix_tokens) - self.append_to_last_tokens(token) - - # Check if the last n tokens match the answer_prefix_tokens list ... - if self.check_if_answer_reached(): - self.answer_reached = True - if self.stream_prefix: - for t in self.last_tokens: - self.handle_incoming_token(t) - return - - # ... if yes, then print tokens from now on - if self.answer_reached: - self.handle_incoming_token(token) - - def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: - """Run when LLM ends running.""" - if self.answer_reached: - self.handle_converastion_end() - - def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None: - """Run on agent end.""" - print("\nagent end.") - - def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None: - """Run when chain ends running.""" - print("\nchain end.") - - @abstractmethod - def handle_incoming_token(self, token: str) -> None: - pass - - @abstractmethod - def handle_converastion_end(self) -> None: - pass \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/tensorflow/token-classification/README.md b/spaces/chendl/compositional_test/transformers/examples/tensorflow/token-classification/README.md deleted file mode 100644 index 0e5ec84528f8f20631e878cb8b10d4fba0377f08..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/tensorflow/token-classification/README.md +++ /dev/null @@ -1,47 +0,0 @@ -<!--- -Copyright 2021 The HuggingFace Team. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ---> - -# Token classification - -Fine-tuning the library models for token classification task such as Named Entity Recognition (NER), Parts-of-speech -tagging (POS) or phrase extraction (CHUNKS). 
The main script `run_ner.py` leverages the [🤗 Datasets](https://github.com/huggingface/datasets) library. You can easily -customize it to your needs if you need extra processing on your datasets. - -It will either run on a datasets hosted on our [hub](https://huggingface.co/datasets) or with your own text files for -training and validation, you might just need to add some tweaks in the data preprocessing. - -The following example fine-tunes BERT on CoNLL-2003: - -```bash -python run_ner.py \ - --model_name_or_path bert-base-uncased \ - --dataset_name conll2003 \ - --output_dir /tmp/test-ner -``` - -To run on your own training and validation files, use the following command: - -```bash -python run_ner.py \ - --model_name_or_path bert-base-uncased \ - --train_file path_to_train_file \ - --validation_file path_to_validation_file \ - --output_dir /tmp/test-ner -``` - -**Note:** This script only works with models that have a fast tokenizer (backed by the [🤗 Tokenizers](https://github.com/huggingface/tokenizers) library) as it -uses special features of those tokenizers. You can check if your favorite model has a fast tokenizer in -[this table](https://huggingface.co/transformers/index.html#supported-frameworks). diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/TiffImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/TiffImagePlugin.py deleted file mode 100644 index d5148828506b36c72bac626b2032ebf129a62678..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/TiffImagePlugin.py +++ /dev/null @@ -1,2163 +0,0 @@ -# -# The Python Imaging Library. -# $Id$ -# -# TIFF file handling -# -# TIFF is a flexible, if somewhat aged, image file format originally -# defined by Aldus. Although TIFF supports a wide variety of pixel -# layouts and compression methods, the name doesn't really stand for -# "thousands of incompatible file formats," it just feels that way. -# -# To read TIFF data from a stream, the stream must be seekable. For -# progressive decoding, make sure to use TIFF files where the tag -# directory is placed first in the file. -# -# History: -# 1995-09-01 fl Created -# 1996-05-04 fl Handle JPEGTABLES tag -# 1996-05-18 fl Fixed COLORMAP support -# 1997-01-05 fl Fixed PREDICTOR support -# 1997-08-27 fl Added support for rational tags (from Perry Stoll) -# 1998-01-10 fl Fixed seek/tell (from Jan Blom) -# 1998-07-15 fl Use private names for internal variables -# 1999-06-13 fl Rewritten for PIL 1.0 (1.0) -# 2000-10-11 fl Additional fixes for Python 2.0 (1.1) -# 2001-04-17 fl Fixed rewind support (seek to frame 0) (1.2) -# 2001-05-12 fl Added write support for more tags (from Greg Couch) (1.3) -# 2001-12-18 fl Added workaround for broken Matrox library -# 2002-01-18 fl Don't mess up if photometric tag is missing (D. Alan Stewart) -# 2003-05-19 fl Check FILLORDER tag -# 2003-09-26 fl Added RGBa support -# 2004-02-24 fl Added DPI support; fixed rational write support -# 2005-02-07 fl Added workaround for broken Corel Draw 10 files -# 2006-01-09 fl Added support for float/double tags (from Russell Nelson) -# -# Copyright (c) 1997-2006 by Secret Labs AB. All rights reserved. -# Copyright (c) 1995-1997 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. 
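As the header comment notes, the TIFF reader needs a seekable stream. When the data only exists in memory (for example, fetched over the network), wrapping it in `io.BytesIO` is enough; a small sketch, with an illustrative file name:

```python
import io
from PIL import Image

# Read raw TIFF bytes from somewhere, then hand Pillow a seekable buffer.
with open("scan.tiff", "rb") as f:          # "scan.tiff" is a placeholder path
    data = f.read()

with Image.open(io.BytesIO(data)) as im:
    im.load()                               # force decoding while the buffer is alive
    print(im.format, im.size, im.mode)      # e.g. TIFF (2550, 3300) L
```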
-# -import io -import itertools -import logging -import math -import os -import struct -import warnings -from collections.abc import MutableMapping -from fractions import Fraction -from numbers import Number, Rational - -from . import ExifTags, Image, ImageFile, ImageOps, ImagePalette, TiffTags -from ._binary import i16be as i16 -from ._binary import i32be as i32 -from ._binary import o8 -from .TiffTags import TYPES - -logger = logging.getLogger(__name__) - -# Set these to true to force use of libtiff for reading or writing. -READ_LIBTIFF = False -WRITE_LIBTIFF = False -IFD_LEGACY_API = True -STRIP_SIZE = 65536 - -II = b"II" # little-endian (Intel style) -MM = b"MM" # big-endian (Motorola style) - -# -# -------------------------------------------------------------------- -# Read TIFF files - -# a few tag names, just to make the code below a bit more readable -IMAGEWIDTH = 256 -IMAGELENGTH = 257 -BITSPERSAMPLE = 258 -COMPRESSION = 259 -PHOTOMETRIC_INTERPRETATION = 262 -FILLORDER = 266 -IMAGEDESCRIPTION = 270 -STRIPOFFSETS = 273 -SAMPLESPERPIXEL = 277 -ROWSPERSTRIP = 278 -STRIPBYTECOUNTS = 279 -X_RESOLUTION = 282 -Y_RESOLUTION = 283 -PLANAR_CONFIGURATION = 284 -RESOLUTION_UNIT = 296 -TRANSFERFUNCTION = 301 -SOFTWARE = 305 -DATE_TIME = 306 -ARTIST = 315 -PREDICTOR = 317 -COLORMAP = 320 -TILEWIDTH = 322 -TILELENGTH = 323 -TILEOFFSETS = 324 -TILEBYTECOUNTS = 325 -SUBIFD = 330 -EXTRASAMPLES = 338 -SAMPLEFORMAT = 339 -JPEGTABLES = 347 -YCBCRSUBSAMPLING = 530 -REFERENCEBLACKWHITE = 532 -COPYRIGHT = 33432 -IPTC_NAA_CHUNK = 33723 # newsphoto properties -PHOTOSHOP_CHUNK = 34377 # photoshop properties -ICCPROFILE = 34675 -EXIFIFD = 34665 -XMP = 700 -JPEGQUALITY = 65537 # pseudo-tag by libtiff - -# https://github.com/imagej/ImageJA/blob/master/src/main/java/ij/io/TiffDecoder.java -IMAGEJ_META_DATA_BYTE_COUNTS = 50838 -IMAGEJ_META_DATA = 50839 - -COMPRESSION_INFO = { - # Compression => pil compression name - 1: "raw", - 2: "tiff_ccitt", - 3: "group3", - 4: "group4", - 5: "tiff_lzw", - 6: "tiff_jpeg", # obsolete - 7: "jpeg", - 8: "tiff_adobe_deflate", - 32771: "tiff_raw_16", # 16-bit padding - 32773: "packbits", - 32809: "tiff_thunderscan", - 32946: "tiff_deflate", - 34676: "tiff_sgilog", - 34677: "tiff_sgilog24", - 34925: "lzma", - 50000: "zstd", - 50001: "webp", -} - -COMPRESSION_INFO_REV = {v: k for k, v in COMPRESSION_INFO.items()} - -OPEN_INFO = { - # (ByteOrder, PhotoInterpretation, SampleFormat, FillOrder, BitsPerSample, - # ExtraSamples) => mode, rawmode - (II, 0, (1,), 1, (1,), ()): ("1", "1;I"), - (MM, 0, (1,), 1, (1,), ()): ("1", "1;I"), - (II, 0, (1,), 2, (1,), ()): ("1", "1;IR"), - (MM, 0, (1,), 2, (1,), ()): ("1", "1;IR"), - (II, 1, (1,), 1, (1,), ()): ("1", "1"), - (MM, 1, (1,), 1, (1,), ()): ("1", "1"), - (II, 1, (1,), 2, (1,), ()): ("1", "1;R"), - (MM, 1, (1,), 2, (1,), ()): ("1", "1;R"), - (II, 0, (1,), 1, (2,), ()): ("L", "L;2I"), - (MM, 0, (1,), 1, (2,), ()): ("L", "L;2I"), - (II, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), - (MM, 0, (1,), 2, (2,), ()): ("L", "L;2IR"), - (II, 1, (1,), 1, (2,), ()): ("L", "L;2"), - (MM, 1, (1,), 1, (2,), ()): ("L", "L;2"), - (II, 1, (1,), 2, (2,), ()): ("L", "L;2R"), - (MM, 1, (1,), 2, (2,), ()): ("L", "L;2R"), - (II, 0, (1,), 1, (4,), ()): ("L", "L;4I"), - (MM, 0, (1,), 1, (4,), ()): ("L", "L;4I"), - (II, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), - (MM, 0, (1,), 2, (4,), ()): ("L", "L;4IR"), - (II, 1, (1,), 1, (4,), ()): ("L", "L;4"), - (MM, 1, (1,), 1, (4,), ()): ("L", "L;4"), - (II, 1, (1,), 2, (4,), ()): ("L", "L;4R"), - (MM, 1, (1,), 2, (4,), ()): ("L", 
"L;4R"), - (II, 0, (1,), 1, (8,), ()): ("L", "L;I"), - (MM, 0, (1,), 1, (8,), ()): ("L", "L;I"), - (II, 0, (1,), 2, (8,), ()): ("L", "L;IR"), - (MM, 0, (1,), 2, (8,), ()): ("L", "L;IR"), - (II, 1, (1,), 1, (8,), ()): ("L", "L"), - (MM, 1, (1,), 1, (8,), ()): ("L", "L"), - (II, 1, (2,), 1, (8,), ()): ("L", "L"), - (MM, 1, (2,), 1, (8,), ()): ("L", "L"), - (II, 1, (1,), 2, (8,), ()): ("L", "L;R"), - (MM, 1, (1,), 2, (8,), ()): ("L", "L;R"), - (II, 1, (1,), 1, (12,), ()): ("I;16", "I;12"), - (II, 0, (1,), 1, (16,), ()): ("I;16", "I;16"), - (II, 1, (1,), 1, (16,), ()): ("I;16", "I;16"), - (MM, 1, (1,), 1, (16,), ()): ("I;16B", "I;16B"), - (II, 1, (1,), 2, (16,), ()): ("I;16", "I;16R"), - (II, 1, (2,), 1, (16,), ()): ("I", "I;16S"), - (MM, 1, (2,), 1, (16,), ()): ("I", "I;16BS"), - (II, 0, (3,), 1, (32,), ()): ("F", "F;32F"), - (MM, 0, (3,), 1, (32,), ()): ("F", "F;32BF"), - (II, 1, (1,), 1, (32,), ()): ("I", "I;32N"), - (II, 1, (2,), 1, (32,), ()): ("I", "I;32S"), - (MM, 1, (2,), 1, (32,), ()): ("I", "I;32BS"), - (II, 1, (3,), 1, (32,), ()): ("F", "F;32F"), - (MM, 1, (3,), 1, (32,), ()): ("F", "F;32BF"), - (II, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), - (MM, 1, (1,), 1, (8, 8), (2,)): ("LA", "LA"), - (II, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), - (MM, 2, (1,), 1, (8, 8, 8), ()): ("RGB", "RGB"), - (II, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), - (MM, 2, (1,), 2, (8, 8, 8), ()): ("RGB", "RGB;R"), - (II, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples - (MM, 2, (1,), 1, (8, 8, 8, 8), ()): ("RGBA", "RGBA"), # missing ExtraSamples - (II, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), - (MM, 2, (1,), 1, (8, 8, 8, 8), (0,)): ("RGBX", "RGBX"), - (II, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), - (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (0, 0)): ("RGBX", "RGBXX"), - (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), - (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0, 0)): ("RGBX", "RGBXXX"), - (II, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), - (MM, 2, (1,), 1, (8, 8, 8, 8), (1,)): ("RGBA", "RGBa"), - (II, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), - (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (1, 0)): ("RGBA", "RGBaX"), - (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), - (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (1, 0, 0)): ("RGBA", "RGBaXX"), - (II, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), - (MM, 2, (1,), 1, (8, 8, 8, 8), (2,)): ("RGBA", "RGBA"), - (II, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), - (MM, 2, (1,), 1, (8, 8, 8, 8, 8), (2, 0)): ("RGBA", "RGBAX"), - (II, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), - (MM, 2, (1,), 1, (8, 8, 8, 8, 8, 8), (2, 0, 0)): ("RGBA", "RGBAXX"), - (II, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 - (MM, 2, (1,), 1, (8, 8, 8, 8), (999,)): ("RGBA", "RGBA"), # Corel Draw 10 - (II, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16L"), - (MM, 2, (1,), 1, (16, 16, 16), ()): ("RGB", "RGB;16B"), - (II, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16L"), - (MM, 2, (1,), 1, (16, 16, 16, 16), ()): ("RGBA", "RGBA;16B"), - (II, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16L"), - (MM, 2, (1,), 1, (16, 16, 16, 16), (0,)): ("RGBX", "RGBX;16B"), - (II, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16L"), - (MM, 2, (1,), 1, (16, 16, 16, 16), (1,)): ("RGBA", "RGBa;16B"), - (II, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16L"), - (MM, 2, (1,), 1, (16, 16, 16, 16), (2,)): ("RGBA", "RGBA;16B"), - (II, 3, (1,), 1, (1,), 
()): ("P", "P;1"), - (MM, 3, (1,), 1, (1,), ()): ("P", "P;1"), - (II, 3, (1,), 2, (1,), ()): ("P", "P;1R"), - (MM, 3, (1,), 2, (1,), ()): ("P", "P;1R"), - (II, 3, (1,), 1, (2,), ()): ("P", "P;2"), - (MM, 3, (1,), 1, (2,), ()): ("P", "P;2"), - (II, 3, (1,), 2, (2,), ()): ("P", "P;2R"), - (MM, 3, (1,), 2, (2,), ()): ("P", "P;2R"), - (II, 3, (1,), 1, (4,), ()): ("P", "P;4"), - (MM, 3, (1,), 1, (4,), ()): ("P", "P;4"), - (II, 3, (1,), 2, (4,), ()): ("P", "P;4R"), - (MM, 3, (1,), 2, (4,), ()): ("P", "P;4R"), - (II, 3, (1,), 1, (8,), ()): ("P", "P"), - (MM, 3, (1,), 1, (8,), ()): ("P", "P"), - (II, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), - (MM, 3, (1,), 1, (8, 8), (2,)): ("PA", "PA"), - (II, 3, (1,), 2, (8,), ()): ("P", "P;R"), - (MM, 3, (1,), 2, (8,), ()): ("P", "P;R"), - (II, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), - (MM, 5, (1,), 1, (8, 8, 8, 8), ()): ("CMYK", "CMYK"), - (II, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), - (MM, 5, (1,), 1, (8, 8, 8, 8, 8), (0,)): ("CMYK", "CMYKX"), - (II, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), - (MM, 5, (1,), 1, (8, 8, 8, 8, 8, 8), (0, 0)): ("CMYK", "CMYKXX"), - (II, 5, (1,), 1, (16, 16, 16, 16), ()): ("CMYK", "CMYK;16L"), - # JPEG compressed images handled by LibTiff and auto-converted to RGBX - # Minimal Baseline TIFF requires YCbCr images to have 3 SamplesPerPixel - (II, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), - (MM, 6, (1,), 1, (8, 8, 8), ()): ("RGB", "RGBX"), - (II, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), - (MM, 8, (1,), 1, (8, 8, 8), ()): ("LAB", "LAB"), -} - -MAX_SAMPLESPERPIXEL = max(len(key_tp[4]) for key_tp in OPEN_INFO) - -PREFIXES = [ - b"MM\x00\x2A", # Valid TIFF header with big-endian byte order - b"II\x2A\x00", # Valid TIFF header with little-endian byte order - b"MM\x2A\x00", # Invalid TIFF header, assume big-endian - b"II\x00\x2A", # Invalid TIFF header, assume little-endian - b"MM\x00\x2B", # BigTIFF with big-endian byte order - b"II\x2B\x00", # BigTIFF with little-endian byte order -] - - -def _accept(prefix): - return prefix[:4] in PREFIXES - - -def _limit_rational(val, max_val): - inv = abs(val) > 1 - n_d = IFDRational(1 / val if inv else val).limit_rational(max_val) - return n_d[::-1] if inv else n_d - - -def _limit_signed_rational(val, max_val, min_val): - frac = Fraction(val) - n_d = frac.numerator, frac.denominator - - if min(n_d) < min_val: - n_d = _limit_rational(val, abs(min_val)) - - if max(n_d) > max_val: - val = Fraction(*n_d) - n_d = _limit_rational(val, max_val) - - return n_d - - -## -# Wrapper for TIFF IFDs. - -_load_dispatch = {} -_write_dispatch = {} - - -class IFDRational(Rational): - """Implements a rational class where 0/0 is a legal value to match - the in the wild use of exif rationals. - - e.g., DigitalZoomRatio - 0.00/0.00 indicates that no digital zoom was used - """ - - """ If the denominator is 0, store this as a float('nan'), otherwise store - as a fractions.Fraction(). 
Delegate as appropriate - - """ - - __slots__ = ("_numerator", "_denominator", "_val") - - def __init__(self, value, denominator=1): - """ - :param value: either an integer numerator, a - float/rational/other number, or an IFDRational - :param denominator: Optional integer denominator - """ - if isinstance(value, IFDRational): - self._numerator = value.numerator - self._denominator = value.denominator - self._val = value._val - return - - if isinstance(value, Fraction): - self._numerator = value.numerator - self._denominator = value.denominator - else: - self._numerator = value - self._denominator = denominator - - if denominator == 0: - self._val = float("nan") - elif denominator == 1: - self._val = Fraction(value) - else: - self._val = Fraction(value, denominator) - - @property - def numerator(self): - return self._numerator - - @property - def denominator(self): - return self._denominator - - def limit_rational(self, max_denominator): - """ - - :param max_denominator: Integer, the maximum denominator value - :returns: Tuple of (numerator, denominator) - """ - - if self.denominator == 0: - return self.numerator, self.denominator - - f = self._val.limit_denominator(max_denominator) - return f.numerator, f.denominator - - def __repr__(self): - return str(float(self._val)) - - def __hash__(self): - return self._val.__hash__() - - def __eq__(self, other): - val = self._val - if isinstance(other, IFDRational): - other = other._val - if isinstance(other, float): - val = float(val) - return val == other - - def __getstate__(self): - return [self._val, self._numerator, self._denominator] - - def __setstate__(self, state): - IFDRational.__init__(self, 0) - _val, _numerator, _denominator = state - self._val = _val - self._numerator = _numerator - self._denominator = _denominator - - def _delegate(op): - def delegate(self, *args): - return getattr(self._val, op)(*args) - - return delegate - - """ a = ['add','radd', 'sub', 'rsub', 'mul', 'rmul', - 'truediv', 'rtruediv', 'floordiv', 'rfloordiv', - 'mod','rmod', 'pow','rpow', 'pos', 'neg', - 'abs', 'trunc', 'lt', 'gt', 'le', 'ge', 'bool', - 'ceil', 'floor', 'round'] - print("\n".join("__%s__ = _delegate('__%s__')" % (s,s) for s in a)) - """ - - __add__ = _delegate("__add__") - __radd__ = _delegate("__radd__") - __sub__ = _delegate("__sub__") - __rsub__ = _delegate("__rsub__") - __mul__ = _delegate("__mul__") - __rmul__ = _delegate("__rmul__") - __truediv__ = _delegate("__truediv__") - __rtruediv__ = _delegate("__rtruediv__") - __floordiv__ = _delegate("__floordiv__") - __rfloordiv__ = _delegate("__rfloordiv__") - __mod__ = _delegate("__mod__") - __rmod__ = _delegate("__rmod__") - __pow__ = _delegate("__pow__") - __rpow__ = _delegate("__rpow__") - __pos__ = _delegate("__pos__") - __neg__ = _delegate("__neg__") - __abs__ = _delegate("__abs__") - __trunc__ = _delegate("__trunc__") - __lt__ = _delegate("__lt__") - __gt__ = _delegate("__gt__") - __le__ = _delegate("__le__") - __ge__ = _delegate("__ge__") - __bool__ = _delegate("__bool__") - __ceil__ = _delegate("__ceil__") - __floor__ = _delegate("__floor__") - __round__ = _delegate("__round__") - # Python >= 3.11 - if hasattr(Fraction, "__int__"): - __int__ = _delegate("__int__") - - -class ImageFileDirectory_v2(MutableMapping): - """This class represents a TIFF tag directory. To speed things up, we - don't decode tags unless they're asked for. 
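The rational handling above (`IFDRational.limit_rational` and the `_limit_rational` helper) ultimately relies on `fractions.Fraction.limit_denominator`: a value such as a DPI is reduced to a numerator/denominator pair small enough to fit the TIFF RATIONAL field, and `_limit_rational` inverts values whose magnitude exceeds 1 before limiting so that the numerator is bounded as well. A simplified sketch of the idea (unlike the helper above, it only bounds the denominator):

```python
from fractions import Fraction


def limited_rational(value: float, max_denominator: int) -> tuple:
    """Approximate `value` as a rational whose denominator fits in `max_denominator`."""
    frac = Fraction(value).limit_denominator(max_denominator)
    return frac.numerator, frac.denominator


print(limited_rational(72.0, 2**32 - 1))   # (72, 1): a whole-number resolution
print(limited_rational(0.1, 2**32 - 1))    # (1, 10): the binary float 0.1 collapses back
```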
- - Exposes a dictionary interface of the tags in the directory:: - - ifd = ImageFileDirectory_v2() - ifd[key] = 'Some Data' - ifd.tagtype[key] = TiffTags.ASCII - print(ifd[key]) - 'Some Data' - - Individual values are returned as the strings or numbers, sequences are - returned as tuples of the values. - - The tiff metadata type of each item is stored in a dictionary of - tag types in - :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v2.tagtype`. The types - are read from a tiff file, guessed from the type added, or added - manually. - - Data Structures: - - * ``self.tagtype = {}`` - - * Key: numerical TIFF tag number - * Value: integer corresponding to the data type from - :py:data:`.TiffTags.TYPES` - - .. versionadded:: 3.0.0 - - 'Internal' data structures: - - * ``self._tags_v2 = {}`` - - * Key: numerical TIFF tag number - * Value: decoded data, as tuple for multiple values - - * ``self._tagdata = {}`` - - * Key: numerical TIFF tag number - * Value: undecoded byte string from file - - * ``self._tags_v1 = {}`` - - * Key: numerical TIFF tag number - * Value: decoded data in the v1 format - - Tags will be found in the private attributes ``self._tagdata``, and in - ``self._tags_v2`` once decoded. - - ``self.legacy_api`` is a value for internal use, and shouldn't be changed - from outside code. In cooperation with - :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1`, if ``legacy_api`` - is true, then decoded tags will be populated into both ``_tags_v1`` and - ``_tags_v2``. ``_tags_v2`` will be used if this IFD is used in the TIFF - save routine. Tags should be read from ``_tags_v1`` if - ``legacy_api == true``. - - """ - - def __init__(self, ifh=b"II\052\0\0\0\0\0", prefix=None, group=None): - """Initialize an ImageFileDirectory. - - To construct an ImageFileDirectory from a real file, pass the 8-byte - magic header to the constructor. To only set the endianness, pass it - as the 'prefix' keyword argument. - - :param ifh: One of the accepted magic headers (cf. PREFIXES); also sets - endianness. - :param prefix: Override the endianness of the file. - """ - if not _accept(ifh): - msg = f"not a TIFF file (header {repr(ifh)} not valid)" - raise SyntaxError(msg) - self._prefix = prefix if prefix is not None else ifh[:2] - if self._prefix == MM: - self._endian = ">" - elif self._prefix == II: - self._endian = "<" - else: - msg = "not a TIFF IFD" - raise SyntaxError(msg) - self._bigtiff = ifh[2] == 43 - self.group = group - self.tagtype = {} - """ Dictionary of tag types """ - self.reset() - (self.next,) = ( - self._unpack("Q", ifh[8:]) if self._bigtiff else self._unpack("L", ifh[4:]) - ) - self._legacy_api = False - - prefix = property(lambda self: self._prefix) - offset = property(lambda self: self._offset) - legacy_api = property(lambda self: self._legacy_api) - - @legacy_api.setter - def legacy_api(self, value): - msg = "Not allowing setting of legacy api" - raise Exception(msg) - - def reset(self): - self._tags_v1 = {} # will remain empty if legacy_api is false - self._tags_v2 = {} # main tag storage - self._tagdata = {} - self.tagtype = {} # added 2008-06-05 by Florian Hoech - self._next = None - self._offset = None - - def __str__(self): - return str(dict(self)) - - def named(self): - """ - :returns: dict of name|key: value - - Returns the complete tag dictionary, with named tags where possible. 
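Once an image has been opened, this directory is available as `.tag_v2` on the image object (see the `TiffImageFile` class later in the module), and the `named()` helper documented here maps numeric tag codes to readable names. A short, hedged usage sketch (the path is illustrative):

```python
from PIL import Image

with Image.open("scan.tiff") as im:            # placeholder path
    for name, value in im.tag_v2.named().items():
        print(f"{name}: {value}")              # e.g. ImageWidth: 2550
```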
- """ - return { - TiffTags.lookup(code, self.group).name: value - for code, value in self.items() - } - - def __len__(self): - return len(set(self._tagdata) | set(self._tags_v2)) - - def __getitem__(self, tag): - if tag not in self._tags_v2: # unpack on the fly - data = self._tagdata[tag] - typ = self.tagtype[tag] - size, handler = self._load_dispatch[typ] - self[tag] = handler(self, data, self.legacy_api) # check type - val = self._tags_v2[tag] - if self.legacy_api and not isinstance(val, (tuple, bytes)): - val = (val,) - return val - - def __contains__(self, tag): - return tag in self._tags_v2 or tag in self._tagdata - - def __setitem__(self, tag, value): - self._setitem(tag, value, self.legacy_api) - - def _setitem(self, tag, value, legacy_api): - basetypes = (Number, bytes, str) - - info = TiffTags.lookup(tag, self.group) - values = [value] if isinstance(value, basetypes) else value - - if tag not in self.tagtype: - if info.type: - self.tagtype[tag] = info.type - else: - self.tagtype[tag] = TiffTags.UNDEFINED - if all(isinstance(v, IFDRational) for v in values): - self.tagtype[tag] = ( - TiffTags.RATIONAL - if all(v >= 0 for v in values) - else TiffTags.SIGNED_RATIONAL - ) - elif all(isinstance(v, int) for v in values): - if all(0 <= v < 2**16 for v in values): - self.tagtype[tag] = TiffTags.SHORT - elif all(-(2**15) < v < 2**15 for v in values): - self.tagtype[tag] = TiffTags.SIGNED_SHORT - else: - self.tagtype[tag] = ( - TiffTags.LONG - if all(v >= 0 for v in values) - else TiffTags.SIGNED_LONG - ) - elif all(isinstance(v, float) for v in values): - self.tagtype[tag] = TiffTags.DOUBLE - elif all(isinstance(v, str) for v in values): - self.tagtype[tag] = TiffTags.ASCII - elif all(isinstance(v, bytes) for v in values): - self.tagtype[tag] = TiffTags.BYTE - - if self.tagtype[tag] == TiffTags.UNDEFINED: - values = [ - v.encode("ascii", "replace") if isinstance(v, str) else v - for v in values - ] - elif self.tagtype[tag] == TiffTags.RATIONAL: - values = [float(v) if isinstance(v, int) else v for v in values] - - is_ifd = self.tagtype[tag] == TiffTags.LONG and isinstance(values, dict) - if not is_ifd: - values = tuple(info.cvt_enum(value) for value in values) - - dest = self._tags_v1 if legacy_api else self._tags_v2 - - # Three branches: - # Spec'd length == 1, Actual length 1, store as element - # Spec'd length == 1, Actual > 1, Warn and truncate. Formerly barfed. - # No Spec, Actual length 1, Formerly (<4.2) returned a 1 element tuple. - # Don't mess with the legacy api, since it's frozen. - if not is_ifd and ( - (info.length == 1) - or self.tagtype[tag] == TiffTags.BYTE - or (info.length is None and len(values) == 1 and not legacy_api) - ): - # Don't mess with the legacy api, since it's frozen. 
- if legacy_api and self.tagtype[tag] in [ - TiffTags.RATIONAL, - TiffTags.SIGNED_RATIONAL, - ]: # rationals - values = (values,) - try: - (dest[tag],) = values - except ValueError: - # We've got a builtin tag with 1 expected entry - warnings.warn( - f"Metadata Warning, tag {tag} had too many entries: " - f"{len(values)}, expected 1" - ) - dest[tag] = values[0] - - else: - # Spec'd length > 1 or undefined - # Unspec'd, and length > 1 - dest[tag] = values - - def __delitem__(self, tag): - self._tags_v2.pop(tag, None) - self._tags_v1.pop(tag, None) - self._tagdata.pop(tag, None) - - def __iter__(self): - return iter(set(self._tagdata) | set(self._tags_v2)) - - def _unpack(self, fmt, data): - return struct.unpack(self._endian + fmt, data) - - def _pack(self, fmt, *values): - return struct.pack(self._endian + fmt, *values) - - def _register_loader(idx, size): - def decorator(func): - from .TiffTags import TYPES - - if func.__name__.startswith("load_"): - TYPES[idx] = func.__name__[5:].replace("_", " ") - _load_dispatch[idx] = size, func # noqa: F821 - return func - - return decorator - - def _register_writer(idx): - def decorator(func): - _write_dispatch[idx] = func # noqa: F821 - return func - - return decorator - - def _register_basic(idx_fmt_name): - from .TiffTags import TYPES - - idx, fmt, name = idx_fmt_name - TYPES[idx] = name - size = struct.calcsize("=" + fmt) - _load_dispatch[idx] = ( # noqa: F821 - size, - lambda self, data, legacy_api=True: ( - self._unpack(f"{len(data) // size}{fmt}", data) - ), - ) - _write_dispatch[idx] = lambda self, *values: ( # noqa: F821 - b"".join(self._pack(fmt, value) for value in values) - ) - - list( - map( - _register_basic, - [ - (TiffTags.SHORT, "H", "short"), - (TiffTags.LONG, "L", "long"), - (TiffTags.SIGNED_BYTE, "b", "signed byte"), - (TiffTags.SIGNED_SHORT, "h", "signed short"), - (TiffTags.SIGNED_LONG, "l", "signed long"), - (TiffTags.FLOAT, "f", "float"), - (TiffTags.DOUBLE, "d", "double"), - (TiffTags.IFD, "L", "long"), - (TiffTags.LONG8, "Q", "long8"), - ], - ) - ) - - @_register_loader(1, 1) # Basic type, except for the legacy API. - def load_byte(self, data, legacy_api=True): - return data - - @_register_writer(1) # Basic type, except for the legacy API. 
- def write_byte(self, data): - if isinstance(data, IFDRational): - data = int(data) - if isinstance(data, int): - data = bytes((data,)) - return data - - @_register_loader(2, 1) - def load_string(self, data, legacy_api=True): - if data.endswith(b"\0"): - data = data[:-1] - return data.decode("latin-1", "replace") - - @_register_writer(2) - def write_string(self, value): - # remerge of https://github.com/python-pillow/Pillow/pull/1416 - if isinstance(value, int): - value = str(value) - if not isinstance(value, bytes): - value = value.encode("ascii", "replace") - return value + b"\0" - - @_register_loader(5, 8) - def load_rational(self, data, legacy_api=True): - vals = self._unpack(f"{len(data) // 4}L", data) - - def combine(a, b): - return (a, b) if legacy_api else IFDRational(a, b) - - return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) - - @_register_writer(5) - def write_rational(self, *values): - return b"".join( - self._pack("2L", *_limit_rational(frac, 2**32 - 1)) for frac in values - ) - - @_register_loader(7, 1) - def load_undefined(self, data, legacy_api=True): - return data - - @_register_writer(7) - def write_undefined(self, value): - if isinstance(value, int): - value = str(value).encode("ascii", "replace") - return value - - @_register_loader(10, 8) - def load_signed_rational(self, data, legacy_api=True): - vals = self._unpack(f"{len(data) // 4}l", data) - - def combine(a, b): - return (a, b) if legacy_api else IFDRational(a, b) - - return tuple(combine(num, denom) for num, denom in zip(vals[::2], vals[1::2])) - - @_register_writer(10) - def write_signed_rational(self, *values): - return b"".join( - self._pack("2l", *_limit_signed_rational(frac, 2**31 - 1, -(2**31))) - for frac in values - ) - - def _ensure_read(self, fp, size): - ret = fp.read(size) - if len(ret) != size: - msg = ( - "Corrupt EXIF data. " - f"Expecting to read {size} bytes but only got {len(ret)}. " - ) - raise OSError(msg) - return ret - - def load(self, fp): - self.reset() - self._offset = fp.tell() - - try: - tag_count = ( - self._unpack("Q", self._ensure_read(fp, 8)) - if self._bigtiff - else self._unpack("H", self._ensure_read(fp, 2)) - )[0] - for i in range(tag_count): - tag, typ, count, data = ( - self._unpack("HHQ8s", self._ensure_read(fp, 20)) - if self._bigtiff - else self._unpack("HHL4s", self._ensure_read(fp, 12)) - ) - - tagname = TiffTags.lookup(tag, self.group).name - typname = TYPES.get(typ, "unknown") - msg = f"tag: {tagname} ({tag}) - type: {typname} ({typ})" - - try: - unit_size, handler = self._load_dispatch[typ] - except KeyError: - logger.debug(msg + f" - unsupported type {typ}") - continue # ignore unsupported type - size = count * unit_size - if size > (8 if self._bigtiff else 4): - here = fp.tell() - (offset,) = self._unpack("Q" if self._bigtiff else "L", data) - msg += f" Tag Location: {here} - Data Location: {offset}" - fp.seek(offset) - data = ImageFile._safe_read(fp, size) - fp.seek(here) - else: - data = data[:size] - - if len(data) != size: - warnings.warn( - "Possibly corrupt EXIF data. " - f"Expecting to read {size} bytes but only got {len(data)}." 
- f" Skipping tag {tag}" - ) - logger.debug(msg) - continue - - if not data: - logger.debug(msg) - continue - - self._tagdata[tag] = data - self.tagtype[tag] = typ - - msg += " - value: " + ( - "<table: %d bytes>" % size if size > 32 else repr(data) - ) - logger.debug(msg) - - (self.next,) = ( - self._unpack("Q", self._ensure_read(fp, 8)) - if self._bigtiff - else self._unpack("L", self._ensure_read(fp, 4)) - ) - except OSError as msg: - warnings.warn(str(msg)) - return - - def tobytes(self, offset=0): - # FIXME What about tagdata? - result = self._pack("H", len(self._tags_v2)) - - entries = [] - offset = offset + len(result) + len(self._tags_v2) * 12 + 4 - stripoffsets = None - - # pass 1: convert tags to binary format - # always write tags in ascending order - for tag, value in sorted(self._tags_v2.items()): - if tag == STRIPOFFSETS: - stripoffsets = len(entries) - typ = self.tagtype.get(tag) - logger.debug(f"Tag {tag}, Type: {typ}, Value: {repr(value)}") - is_ifd = typ == TiffTags.LONG and isinstance(value, dict) - if is_ifd: - if self._endian == "<": - ifh = b"II\x2A\x00\x08\x00\x00\x00" - else: - ifh = b"MM\x00\x2A\x00\x00\x00\x08" - ifd = ImageFileDirectory_v2(ifh, group=tag) - values = self._tags_v2[tag] - for ifd_tag, ifd_value in values.items(): - ifd[ifd_tag] = ifd_value - data = ifd.tobytes(offset) - else: - values = value if isinstance(value, tuple) else (value,) - data = self._write_dispatch[typ](self, *values) - - tagname = TiffTags.lookup(tag, self.group).name - typname = "ifd" if is_ifd else TYPES.get(typ, "unknown") - msg = f"save: {tagname} ({tag}) - type: {typname} ({typ})" - msg += " - value: " + ( - "<table: %d bytes>" % len(data) if len(data) >= 16 else str(values) - ) - logger.debug(msg) - - # count is sum of lengths for string and arbitrary data - if is_ifd: - count = 1 - elif typ in [TiffTags.BYTE, TiffTags.ASCII, TiffTags.UNDEFINED]: - count = len(data) - else: - count = len(values) - # figure out if data fits into the entry - if len(data) <= 4: - entries.append((tag, typ, count, data.ljust(4, b"\0"), b"")) - else: - entries.append((tag, typ, count, self._pack("L", offset), data)) - offset += (len(data) + 1) // 2 * 2 # pad to word - - # update strip offset data to point beyond auxiliary data - if stripoffsets is not None: - tag, typ, count, value, data = entries[stripoffsets] - if data: - msg = "multistrip support not yet implemented" - raise NotImplementedError(msg) - value = self._pack("L", self._unpack("L", value)[0] + offset) - entries[stripoffsets] = tag, typ, count, value, data - - # pass 2: write entries to file - for tag, typ, count, value, data in entries: - logger.debug(f"{tag} {typ} {count} {repr(value)} {repr(data)}") - result += self._pack("HHL4s", tag, typ, count, value) - - # -- overwrite here for multi-page -- - result += b"\0\0\0\0" # end of entries - - # pass 3: write auxiliary data to file - for tag, typ, count, value, data in entries: - result += data - if len(data) & 1: - result += b"\0" - - return result - - def save(self, fp): - if fp.tell() == 0: # skip TIFF header on subsequent pages - # tiff header -- PIL always starts the first IFD at offset 8 - fp.write(self._prefix + self._pack("HL", 42, 8)) - - offset = fp.tell() - result = self.tobytes(offset) - fp.write(result) - return offset + len(result) - - -ImageFileDirectory_v2._load_dispatch = _load_dispatch -ImageFileDirectory_v2._write_dispatch = _write_dispatch -for idx, name in TYPES.items(): - name = name.replace(" ", "_") - setattr(ImageFileDirectory_v2, "load_" + name, 
_load_dispatch[idx][1]) - setattr(ImageFileDirectory_v2, "write_" + name, _write_dispatch[idx]) -del _load_dispatch, _write_dispatch, idx, name - - -# Legacy ImageFileDirectory support. -class ImageFileDirectory_v1(ImageFileDirectory_v2): - """This class represents the **legacy** interface to a TIFF tag directory. - - Exposes a dictionary interface of the tags in the directory:: - - ifd = ImageFileDirectory_v1() - ifd[key] = 'Some Data' - ifd.tagtype[key] = TiffTags.ASCII - print(ifd[key]) - ('Some Data',) - - Also contains a dictionary of tag types as read from the tiff image file, - :attr:`~PIL.TiffImagePlugin.ImageFileDirectory_v1.tagtype`. - - Values are returned as a tuple. - - .. deprecated:: 3.0.0 - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._legacy_api = True - - tags = property(lambda self: self._tags_v1) - tagdata = property(lambda self: self._tagdata) - - # defined in ImageFileDirectory_v2 - tagtype: dict - """Dictionary of tag types""" - - @classmethod - def from_v2(cls, original): - """Returns an - :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` - instance with the same data as is contained in the original - :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` - instance. - - :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` - - """ - - ifd = cls(prefix=original.prefix) - ifd._tagdata = original._tagdata - ifd.tagtype = original.tagtype - ifd.next = original.next # an indicator for multipage tiffs - return ifd - - def to_v2(self): - """Returns an - :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` - instance with the same data as is contained in the original - :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v1` - instance. - - :returns: :py:class:`~PIL.TiffImagePlugin.ImageFileDirectory_v2` - - """ - - ifd = ImageFileDirectory_v2(prefix=self.prefix) - ifd._tagdata = dict(self._tagdata) - ifd.tagtype = dict(self.tagtype) - ifd._tags_v2 = dict(self._tags_v2) - return ifd - - def __contains__(self, tag): - return tag in self._tags_v1 or tag in self._tagdata - - def __len__(self): - return len(set(self._tagdata) | set(self._tags_v1)) - - def __iter__(self): - return iter(set(self._tagdata) | set(self._tags_v1)) - - def __setitem__(self, tag, value): - for legacy_api in (False, True): - self._setitem(tag, value, legacy_api) - - def __getitem__(self, tag): - if tag not in self._tags_v1: # unpack on the fly - data = self._tagdata[tag] - typ = self.tagtype[tag] - size, handler = self._load_dispatch[typ] - for legacy in (False, True): - self._setitem(tag, handler(self, data, legacy), legacy) - val = self._tags_v1[tag] - if not isinstance(val, (tuple, bytes)): - val = (val,) - return val - - -# undone -- switch this pointer when IFD_LEGACY_API == False -ImageFileDirectory = ImageFileDirectory_v1 - - -## -# Image plugin for TIFF files. 
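The legacy `ImageFileDirectory_v1` interface above wraps every value in a tuple, while the v2 interface returns plain values; both are attached to an opened image as `.tag` and `.tag_v2` respectively. A small sketch of the difference (placeholder path; tag 256 is ImageWidth):

```python
from PIL import Image

with Image.open("scan.tiff") as im:    # placeholder path
    print(im.tag_v2[256])              # v2: a plain value, e.g. 2550
    print(im.tag[256])                 # legacy v1: a 1-tuple, e.g. (2550,)
```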
- - -class TiffImageFile(ImageFile.ImageFile): - format = "TIFF" - format_description = "Adobe TIFF" - _close_exclusive_fp_after_loading = False - - def __init__(self, fp=None, filename=None): - self.tag_v2 = None - """ Image file directory (tag dictionary) """ - - self.tag = None - """ Legacy tag entries """ - - super().__init__(fp, filename) - - def _open(self): - """Open the first image in a TIFF file""" - - # Header - ifh = self.fp.read(8) - if ifh[2] == 43: - ifh += self.fp.read(8) - - self.tag_v2 = ImageFileDirectory_v2(ifh) - - # legacy IFD entries will be filled in later - self.ifd = None - - # setup frame pointers - self.__first = self.__next = self.tag_v2.next - self.__frame = -1 - self._fp = self.fp - self._frame_pos = [] - self._n_frames = None - - logger.debug("*** TiffImageFile._open ***") - logger.debug(f"- __first: {self.__first}") - logger.debug(f"- ifh: {repr(ifh)}") # Use repr to avoid str(bytes) - - # and load the first frame - self._seek(0) - - @property - def n_frames(self): - if self._n_frames is None: - current = self.tell() - self._seek(len(self._frame_pos)) - while self._n_frames is None: - self._seek(self.tell() + 1) - self.seek(current) - return self._n_frames - - def seek(self, frame): - """Select a given frame as current image""" - if not self._seek_check(frame): - return - self._seek(frame) - # Create a new core image object on second and - # subsequent frames in the image. Image may be - # different size/mode. - Image._decompression_bomb_check(self.size) - self.im = Image.core.new(self.mode, self.size) - - def _seek(self, frame): - self.fp = self._fp - - # reset buffered io handle in case fp - # was passed to libtiff, invalidating the buffer - self.fp.tell() - - while len(self._frame_pos) <= frame: - if not self.__next: - msg = "no more images in TIFF file" - raise EOFError(msg) - logger.debug( - f"Seeking to frame {frame}, on frame {self.__frame}, " - f"__next {self.__next}, location: {self.fp.tell()}" - ) - self.fp.seek(self.__next) - self._frame_pos.append(self.__next) - logger.debug("Loading tags, location: %s" % self.fp.tell()) - self.tag_v2.load(self.fp) - if self.tag_v2.next in self._frame_pos: - # This IFD has already been processed - # Declare this to be the end of the image - self.__next = 0 - else: - self.__next = self.tag_v2.next - if self.__next == 0: - self._n_frames = frame + 1 - if len(self._frame_pos) == 1: - self.is_animated = self.__next != 0 - self.__frame += 1 - self.fp.seek(self._frame_pos[frame]) - self.tag_v2.load(self.fp) - self._reload_exif() - # fill the legacy tag/ifd entries - self.tag = self.ifd = ImageFileDirectory_v1.from_v2(self.tag_v2) - self.__frame = frame - self._setup() - - def tell(self): - """Return the current frame number""" - return self.__frame - - def getxmp(self): - """ - Returns a dictionary containing the XMP tags. - Requires defusedxml to be installed. - - :returns: XMP tags in a dictionary. - """ - return self._getxmp(self.tag_v2[XMP]) if XMP in self.tag_v2 else {} - - def get_photoshop_blocks(self): - """ - Returns a dictionary of Photoshop "Image Resource Blocks". - The keys are the image resource ID. For more information, see - https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/#50577409_pgfId-1037727 - - :returns: Photoshop "Image Resource Blocks" in a dictionary. 
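The frame bookkeeping above (`n_frames`, `seek`, `tell`) is what makes multi-page TIFFs iterable; the `ImageSequence` helper walks the pages using exactly these methods. A hedged sketch with an illustrative path:

```python
from PIL import Image, ImageSequence

with Image.open("multipage.tiff") as im:          # placeholder path
    print("pages:", im.n_frames)
    for index, page in enumerate(ImageSequence.Iterator(im)):
        print(index, page.size, page.mode)        # pages may differ in size and mode
```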
- """ - blocks = {} - val = self.tag_v2.get(ExifTags.Base.ImageResources) - if val: - while val[:4] == b"8BIM": - id = i16(val[4:6]) - n = math.ceil((val[6] + 1) / 2) * 2 - size = i32(val[6 + n : 10 + n]) - data = val[10 + n : 10 + n + size] - blocks[id] = {"data": data} - - val = val[math.ceil((10 + n + size) / 2) * 2 :] - return blocks - - def load(self): - if self.tile and self.use_load_libtiff: - return self._load_libtiff() - return super().load() - - def load_end(self): - if self._tile_orientation: - method = { - 2: Image.Transpose.FLIP_LEFT_RIGHT, - 3: Image.Transpose.ROTATE_180, - 4: Image.Transpose.FLIP_TOP_BOTTOM, - 5: Image.Transpose.TRANSPOSE, - 6: Image.Transpose.ROTATE_270, - 7: Image.Transpose.TRANSVERSE, - 8: Image.Transpose.ROTATE_90, - }.get(self._tile_orientation) - if method is not None: - self.im = self.im.transpose(method) - self._size = self.im.size - - # allow closing if we're on the first frame, there's no next - # This is the ImageFile.load path only, libtiff specific below. - if not self.is_animated: - self._close_exclusive_fp_after_loading = True - - # reset buffered io handle in case fp - # was passed to libtiff, invalidating the buffer - self.fp.tell() - - # load IFD data from fp before it is closed - exif = self.getexif() - for key in TiffTags.TAGS_V2_GROUPS: - if key not in exif: - continue - exif.get_ifd(key) - - def _load_libtiff(self): - """Overload method triggered when we detect a compressed tiff - Calls out to libtiff""" - - Image.Image.load(self) - - self.load_prepare() - - if not len(self.tile) == 1: - msg = "Not exactly one tile" - raise OSError(msg) - - # (self._compression, (extents tuple), - # 0, (rawmode, self._compression, fp)) - extents = self.tile[0][1] - args = list(self.tile[0][3]) - - # To be nice on memory footprint, if there's a - # file descriptor, use that instead of reading - # into a string in python. - try: - fp = hasattr(self.fp, "fileno") and self.fp.fileno() - # flush the file descriptor, prevents error on pypy 2.4+ - # should also eliminate the need for fp.tell - # in _seek - if hasattr(self.fp, "flush"): - self.fp.flush() - except OSError: - # io.BytesIO have a fileno, but returns an OSError if - # it doesn't use a file descriptor. - fp = False - - if fp: - args[2] = fp - - decoder = Image._getdecoder( - self.mode, "libtiff", tuple(args), self.decoderconfig - ) - try: - decoder.setimage(self.im, extents) - except ValueError as e: - msg = "Couldn't set the image" - raise OSError(msg) from e - - close_self_fp = self._exclusive_fp and not self.is_animated - if hasattr(self.fp, "getvalue"): - # We've got a stringio like thing passed in. Yay for all in memory. - # The decoder needs the entire file in one shot, so there's not - # a lot we can do here other than give it the entire file. - # unless we could do something like get the address of the - # underlying string for stringio. - # - # Rearranging for supporting byteio items, since they have a fileno - # that returns an OSError if there's no underlying fp. Easier to - # deal with here by reordering. - logger.debug("have getvalue. just sending in a string from getvalue") - n, err = decoder.decode(self.fp.getvalue()) - elif fp: - # we've got a actual file on disk, pass in the fp. - logger.debug("have fileno, calling fileno version of the decoder.") - if not close_self_fp: - self.fp.seek(0) - # 4 bytes, otherwise the trace might error out - n, err = decoder.decode(b"fpfp") - else: - # we have something else. - logger.debug("don't have fileno or getvalue. 
just reading") - self.fp.seek(0) - # UNDONE -- so much for that buffer size thing. - n, err = decoder.decode(self.fp.read()) - - self.tile = [] - self.readonly = 0 - - self.load_end() - - if close_self_fp: - self.fp.close() - self.fp = None # might be shared - - if err < 0: - raise OSError(err) - - return Image.Image.load(self) - - def _setup(self): - """Setup this image object based on current tags""" - - if 0xBC01 in self.tag_v2: - msg = "Windows Media Photo files not yet supported" - raise OSError(msg) - - # extract relevant tags - self._compression = COMPRESSION_INFO[self.tag_v2.get(COMPRESSION, 1)] - self._planar_configuration = self.tag_v2.get(PLANAR_CONFIGURATION, 1) - - # photometric is a required tag, but not everyone is reading - # the specification - photo = self.tag_v2.get(PHOTOMETRIC_INTERPRETATION, 0) - - # old style jpeg compression images most certainly are YCbCr - if self._compression == "tiff_jpeg": - photo = 6 - - fillorder = self.tag_v2.get(FILLORDER, 1) - - logger.debug("*** Summary ***") - logger.debug(f"- compression: {self._compression}") - logger.debug(f"- photometric_interpretation: {photo}") - logger.debug(f"- planar_configuration: {self._planar_configuration}") - logger.debug(f"- fill_order: {fillorder}") - logger.debug(f"- YCbCr subsampling: {self.tag.get(YCBCRSUBSAMPLING)}") - - # size - xsize = int(self.tag_v2.get(IMAGEWIDTH)) - ysize = int(self.tag_v2.get(IMAGELENGTH)) - self._size = xsize, ysize - - logger.debug(f"- size: {self.size}") - - sample_format = self.tag_v2.get(SAMPLEFORMAT, (1,)) - if len(sample_format) > 1 and max(sample_format) == min(sample_format) == 1: - # SAMPLEFORMAT is properly per band, so an RGB image will - # be (1,1,1). But, we don't support per band pixel types, - # and anything more than one band is a uint8. So, just - # take the first element. Revisit this if adding support - # for more exotic images. - sample_format = (1,) - - bps_tuple = self.tag_v2.get(BITSPERSAMPLE, (1,)) - extra_tuple = self.tag_v2.get(EXTRASAMPLES, ()) - if photo in (2, 6, 8): # RGB, YCbCr, LAB - bps_count = 3 - elif photo == 5: # CMYK - bps_count = 4 - else: - bps_count = 1 - bps_count += len(extra_tuple) - bps_actual_count = len(bps_tuple) - samples_per_pixel = self.tag_v2.get( - SAMPLESPERPIXEL, - 3 if self._compression == "tiff_jpeg" and photo in (2, 6) else 1, - ) - - if samples_per_pixel > MAX_SAMPLESPERPIXEL: - # DOS check, samples_per_pixel can be a Long, and we extend the tuple below - logger.error( - "More samples per pixel than can be decoded: %s", samples_per_pixel - ) - msg = "Invalid value for samples per pixel" - raise SyntaxError(msg) - - if samples_per_pixel < bps_actual_count: - # If a file has more values in bps_tuple than expected, - # remove the excess. - bps_tuple = bps_tuple[:samples_per_pixel] - elif samples_per_pixel > bps_actual_count and bps_actual_count == 1: - # If a file has only one value in bps_tuple, when it should have more, - # presume it is the same number of bits for all of the samples. 
- bps_tuple = bps_tuple * samples_per_pixel - - if len(bps_tuple) != samples_per_pixel: - msg = "unknown data organization" - raise SyntaxError(msg) - - # mode: check photometric interpretation and bits per pixel - key = ( - self.tag_v2.prefix, - photo, - sample_format, - fillorder, - bps_tuple, - extra_tuple, - ) - logger.debug(f"format key: {key}") - try: - self.mode, rawmode = OPEN_INFO[key] - except KeyError as e: - logger.debug("- unsupported format") - msg = "unknown pixel mode" - raise SyntaxError(msg) from e - - logger.debug(f"- raw mode: {rawmode}") - logger.debug(f"- pil mode: {self.mode}") - - self.info["compression"] = self._compression - - xres = self.tag_v2.get(X_RESOLUTION, 1) - yres = self.tag_v2.get(Y_RESOLUTION, 1) - - if xres and yres: - resunit = self.tag_v2.get(RESOLUTION_UNIT) - if resunit == 2: # dots per inch - self.info["dpi"] = (xres, yres) - elif resunit == 3: # dots per centimeter. convert to dpi - self.info["dpi"] = (xres * 2.54, yres * 2.54) - elif resunit is None: # used to default to 1, but now 2) - self.info["dpi"] = (xres, yres) - # For backward compatibility, - # we also preserve the old behavior - self.info["resolution"] = xres, yres - else: # No absolute unit of measurement - self.info["resolution"] = xres, yres - - # build tile descriptors - x = y = layer = 0 - self.tile = [] - self.use_load_libtiff = READ_LIBTIFF or self._compression != "raw" - if self.use_load_libtiff: - # Decoder expects entire file as one tile. - # There's a buffer size limit in load (64k) - # so large g4 images will fail if we use that - # function. - # - # Setup the one tile for the whole image, then - # use the _load_libtiff function. - - # libtiff handles the fillmode for us, so 1;IR should - # actually be 1;I. Including the R double reverses the - # bits, so stripes of the image are reversed. See - # https://github.com/python-pillow/Pillow/issues/279 - if fillorder == 2: - # Replace fillorder with fillorder=1 - key = key[:3] + (1,) + key[4:] - logger.debug(f"format key: {key}") - # this should always work, since all the - # fillorder==2 modes have a corresponding - # fillorder=1 mode - self.mode, rawmode = OPEN_INFO[key] - # libtiff always returns the bytes in native order. - # we're expecting image byte order. So, if the rawmode - # contains I;16, we need to convert from native to image - # byte order. 
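The resolution handling in `_setup` surfaces as ordinary `info` keys on the opened image: `"dpi"` when the unit is inches, centimetres (converted), or left unspecified, `"resolution"` when the file declares no absolute unit, and `"compression"` always. A quick look, with a placeholder path:

```python
from PIL import Image

with Image.open("scan.tiff") as im:    # placeholder path
    print(im.info.get("dpi"))          # e.g. (300.0, 300.0), or None if unit-less
    print(im.info.get("resolution"))   # set when no absolute unit is given
    print(im.info["compression"])      # e.g. 'tiff_lzw', 'jpeg', 'raw'
```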
- if rawmode == "I;16": - rawmode = "I;16N" - if ";16B" in rawmode: - rawmode = rawmode.replace(";16B", ";16N") - if ";16L" in rawmode: - rawmode = rawmode.replace(";16L", ";16N") - - # YCbCr images with new jpeg compression with pixels in one plane - # unpacked straight into RGB values - if ( - photo == 6 - and self._compression == "jpeg" - and self._planar_configuration == 1 - ): - rawmode = "RGB" - - # Offset in the tile tuple is 0, we go from 0,0 to - # w,h, and we only do this once -- eds - a = (rawmode, self._compression, False, self.tag_v2.offset) - self.tile.append(("libtiff", (0, 0, xsize, ysize), 0, a)) - - elif STRIPOFFSETS in self.tag_v2 or TILEOFFSETS in self.tag_v2: - # striped image - if STRIPOFFSETS in self.tag_v2: - offsets = self.tag_v2[STRIPOFFSETS] - h = self.tag_v2.get(ROWSPERSTRIP, ysize) - w = self.size[0] - else: - # tiled image - offsets = self.tag_v2[TILEOFFSETS] - w = self.tag_v2.get(TILEWIDTH) - h = self.tag_v2.get(TILELENGTH) - - for offset in offsets: - if x + w > xsize: - stride = w * sum(bps_tuple) / 8 # bytes per line - else: - stride = 0 - - tile_rawmode = rawmode - if self._planar_configuration == 2: - # each band on it's own layer - tile_rawmode = rawmode[layer] - # adjust stride width accordingly - stride /= bps_count - - a = (tile_rawmode, int(stride), 1) - self.tile.append( - ( - self._compression, - (x, y, min(x + w, xsize), min(y + h, ysize)), - offset, - a, - ) - ) - x = x + w - if x >= self.size[0]: - x, y = 0, y + h - if y >= self.size[1]: - x = y = 0 - layer += 1 - else: - logger.debug("- unsupported data organization") - msg = "unknown data organization" - raise SyntaxError(msg) - - # Fix up info. - if ICCPROFILE in self.tag_v2: - self.info["icc_profile"] = self.tag_v2[ICCPROFILE] - - # fixup palette descriptor - - if self.mode in ["P", "PA"]: - palette = [o8(b // 256) for b in self.tag_v2[COLORMAP]] - self.palette = ImagePalette.raw("RGB;L", b"".join(palette)) - - self._tile_orientation = self.tag_v2.get(ExifTags.Base.Orientation) - - -# -# -------------------------------------------------------------------- -# Write TIFF files - -# little endian is default except for image modes with -# explicit big endian byte-order - -SAVE_INFO = { - # mode => rawmode, byteorder, photometrics, - # sampleformat, bitspersample, extra - "1": ("1", II, 1, 1, (1,), None), - "L": ("L", II, 1, 1, (8,), None), - "LA": ("LA", II, 1, 1, (8, 8), 2), - "P": ("P", II, 3, 1, (8,), None), - "PA": ("PA", II, 3, 1, (8, 8), 2), - "I": ("I;32S", II, 1, 2, (32,), None), - "I;16": ("I;16", II, 1, 1, (16,), None), - "I;16S": ("I;16S", II, 1, 2, (16,), None), - "F": ("F;32F", II, 1, 3, (32,), None), - "RGB": ("RGB", II, 2, 1, (8, 8, 8), None), - "RGBX": ("RGBX", II, 2, 1, (8, 8, 8, 8), 0), - "RGBA": ("RGBA", II, 2, 1, (8, 8, 8, 8), 2), - "CMYK": ("CMYK", II, 5, 1, (8, 8, 8, 8), None), - "YCbCr": ("YCbCr", II, 6, 1, (8, 8, 8), None), - "LAB": ("LAB", II, 8, 1, (8, 8, 8), None), - "I;32BS": ("I;32BS", MM, 1, 2, (32,), None), - "I;16B": ("I;16B", MM, 1, 1, (16,), None), - "I;16BS": ("I;16BS", MM, 1, 2, (16,), None), - "F;32BF": ("F;32BF", MM, 1, 3, (32,), None), -} - - -def _save(im, fp, filename): - try: - rawmode, prefix, photo, format, bits, extra = SAVE_INFO[im.mode] - except KeyError as e: - msg = f"cannot write mode {im.mode} as TIFF" - raise OSError(msg) from e - - ifd = ImageFileDirectory_v2(prefix=prefix) - - encoderinfo = im.encoderinfo - encoderconfig = im.encoderconfig - try: - compression = encoderinfo["compression"] - except KeyError: - compression = 
im.info.get("compression") - if isinstance(compression, int): - # compression value may be from BMP. Ignore it - compression = None - if compression is None: - compression = "raw" - elif compression == "tiff_jpeg": - # OJPEG is obsolete, so use new-style JPEG compression instead - compression = "jpeg" - elif compression == "tiff_deflate": - compression = "tiff_adobe_deflate" - - libtiff = WRITE_LIBTIFF or compression != "raw" - - # required for color libtiff images - ifd[PLANAR_CONFIGURATION] = 1 - - ifd[IMAGEWIDTH] = im.size[0] - ifd[IMAGELENGTH] = im.size[1] - - # write any arbitrary tags passed in as an ImageFileDirectory - if "tiffinfo" in encoderinfo: - info = encoderinfo["tiffinfo"] - elif "exif" in encoderinfo: - info = encoderinfo["exif"] - if isinstance(info, bytes): - exif = Image.Exif() - exif.load(info) - info = exif - else: - info = {} - logger.debug("Tiffinfo Keys: %s" % list(info)) - if isinstance(info, ImageFileDirectory_v1): - info = info.to_v2() - for key in info: - if isinstance(info, Image.Exif) and key in TiffTags.TAGS_V2_GROUPS: - ifd[key] = info.get_ifd(key) - else: - ifd[key] = info.get(key) - try: - ifd.tagtype[key] = info.tagtype[key] - except Exception: - pass # might not be an IFD. Might not have populated type - - # additions written by Greg Couch, gregc@cgl.ucsf.edu - # inspired by image-sig posting from Kevin Cazabon, kcazabon@home.com - if hasattr(im, "tag_v2"): - # preserve tags from original TIFF image file - for key in ( - RESOLUTION_UNIT, - X_RESOLUTION, - Y_RESOLUTION, - IPTC_NAA_CHUNK, - PHOTOSHOP_CHUNK, - XMP, - ): - if key in im.tag_v2: - ifd[key] = im.tag_v2[key] - ifd.tagtype[key] = im.tag_v2.tagtype[key] - - # preserve ICC profile (should also work when saving other formats - # which support profiles as TIFF) -- 2008-06-06 Florian Hoech - icc = encoderinfo.get("icc_profile", im.info.get("icc_profile")) - if icc: - ifd[ICCPROFILE] = icc - - for key, name in [ - (IMAGEDESCRIPTION, "description"), - (X_RESOLUTION, "resolution"), - (Y_RESOLUTION, "resolution"), - (X_RESOLUTION, "x_resolution"), - (Y_RESOLUTION, "y_resolution"), - (RESOLUTION_UNIT, "resolution_unit"), - (SOFTWARE, "software"), - (DATE_TIME, "date_time"), - (ARTIST, "artist"), - (COPYRIGHT, "copyright"), - ]: - if name in encoderinfo: - ifd[key] = encoderinfo[name] - - dpi = encoderinfo.get("dpi") - if dpi: - ifd[RESOLUTION_UNIT] = 2 - ifd[X_RESOLUTION] = dpi[0] - ifd[Y_RESOLUTION] = dpi[1] - - if bits != (1,): - ifd[BITSPERSAMPLE] = bits - if len(bits) != 1: - ifd[SAMPLESPERPIXEL] = len(bits) - if extra is not None: - ifd[EXTRASAMPLES] = extra - if format != 1: - ifd[SAMPLEFORMAT] = format - - if PHOTOMETRIC_INTERPRETATION not in ifd: - ifd[PHOTOMETRIC_INTERPRETATION] = photo - elif im.mode in ("1", "L") and ifd[PHOTOMETRIC_INTERPRETATION] == 0: - if im.mode == "1": - inverted_im = im.copy() - px = inverted_im.load() - for y in range(inverted_im.height): - for x in range(inverted_im.width): - px[x, y] = 0 if px[x, y] == 255 else 255 - im = inverted_im - else: - im = ImageOps.invert(im) - - if im.mode in ["P", "PA"]: - lut = im.im.getpalette("RGB", "RGB;L") - colormap = [] - colors = len(lut) // 3 - for i in range(3): - colormap += [v * 256 for v in lut[colors * i : colors * (i + 1)]] - colormap += [0] * (256 - colors) - ifd[COLORMAP] = colormap - # data orientation - stride = len(bits) * ((im.size[0] * bits[0] + 7) // 8) - # aim for given strip size (64 KB by default) when using libtiff writer - if libtiff: - im_strip_size = encoderinfo.get("strip_size", STRIP_SIZE) - rows_per_strip = 
1 if stride == 0 else min(im_strip_size // stride, im.size[1]) - # JPEG encoder expects multiple of 8 rows - if compression == "jpeg": - rows_per_strip = min(((rows_per_strip + 7) // 8) * 8, im.size[1]) - else: - rows_per_strip = im.size[1] - if rows_per_strip == 0: - rows_per_strip = 1 - strip_byte_counts = 1 if stride == 0 else stride * rows_per_strip - strips_per_image = (im.size[1] + rows_per_strip - 1) // rows_per_strip - ifd[ROWSPERSTRIP] = rows_per_strip - if strip_byte_counts >= 2**16: - ifd.tagtype[STRIPBYTECOUNTS] = TiffTags.LONG - ifd[STRIPBYTECOUNTS] = (strip_byte_counts,) * (strips_per_image - 1) + ( - stride * im.size[1] - strip_byte_counts * (strips_per_image - 1), - ) - ifd[STRIPOFFSETS] = tuple( - range(0, strip_byte_counts * strips_per_image, strip_byte_counts) - ) # this is adjusted by IFD writer - # no compression by default: - ifd[COMPRESSION] = COMPRESSION_INFO_REV.get(compression, 1) - - if im.mode == "YCbCr": - for tag, value in { - YCBCRSUBSAMPLING: (1, 1), - REFERENCEBLACKWHITE: (0, 255, 128, 255, 128, 255), - }.items(): - ifd.setdefault(tag, value) - - blocklist = [TILEWIDTH, TILELENGTH, TILEOFFSETS, TILEBYTECOUNTS] - if libtiff: - if "quality" in encoderinfo: - quality = encoderinfo["quality"] - if not isinstance(quality, int) or quality < 0 or quality > 100: - msg = "Invalid quality setting" - raise ValueError(msg) - if compression != "jpeg": - msg = "quality setting only supported for 'jpeg' compression" - raise ValueError(msg) - ifd[JPEGQUALITY] = quality - - logger.debug("Saving using libtiff encoder") - logger.debug("Items: %s" % sorted(ifd.items())) - _fp = 0 - if hasattr(fp, "fileno"): - try: - fp.seek(0) - _fp = os.dup(fp.fileno()) - except io.UnsupportedOperation: - pass - - # optional types for non core tags - types = {} - # STRIPOFFSETS and STRIPBYTECOUNTS are added by the library - # based on the data in the strip. - # The other tags expect arrays with a certain length (fixed or depending on - # BITSPERSAMPLE, etc), passing arrays with a different length will result in - # segfaults. Block these tags until we add extra validation. - # SUBIFD may also cause a segfault. - blocklist += [ - REFERENCEBLACKWHITE, - STRIPBYTECOUNTS, - STRIPOFFSETS, - TRANSFERFUNCTION, - SUBIFD, - ] - - # bits per sample is a single short in the tiff directory, not a list. - atts = {BITSPERSAMPLE: bits[0]} - # Merge the ones that we have with (optional) more bits from - # the original file, e.g x,y resolution so that we can - # save(load('')) == original file. - legacy_ifd = {} - if hasattr(im, "tag"): - legacy_ifd = im.tag.to_v2() - - # SAMPLEFORMAT is determined by the image format and should not be copied - # from legacy_ifd. - supplied_tags = {**getattr(im, "tag_v2", {}), **legacy_ifd} - if SAMPLEFORMAT in supplied_tags: - del supplied_tags[SAMPLEFORMAT] - - for tag, value in itertools.chain(ifd.items(), supplied_tags.items()): - # Libtiff can only process certain core items without adding - # them to the custom dictionary. - # Custom items are supported for int, float, unicode, string and byte - # values. Other types and tuples require a tagtype. 
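On the writing side, the options handled above are exposed directly through `Image.save()`: `compression` selects the codec, `quality` applies only to JPEG compression, and `dpi` fills the resolution tags. A hedged sketch (output names are placeholders):

```python
from PIL import Image

im = Image.new("RGB", (64, 64), "white")

im.save("out_lzw.tiff", compression="tiff_lzw", dpi=(300, 300))   # lossless LZW
im.save("out_jpeg.tiff", compression="jpeg", quality=85)          # JPEG-in-TIFF
im.save("out_raw.tiff")                                           # default: no compression
```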
- if tag not in TiffTags.LIBTIFF_CORE: - if not getattr(Image.core, "libtiff_support_custom_tags", False): - continue - - if tag in ifd.tagtype: - types[tag] = ifd.tagtype[tag] - elif not (isinstance(value, (int, float, str, bytes))): - continue - else: - type = TiffTags.lookup(tag).type - if type: - types[tag] = type - if tag not in atts and tag not in blocklist: - if isinstance(value, str): - atts[tag] = value.encode("ascii", "replace") + b"\0" - elif isinstance(value, IFDRational): - atts[tag] = float(value) - else: - atts[tag] = value - - if SAMPLEFORMAT in atts and len(atts[SAMPLEFORMAT]) == 1: - atts[SAMPLEFORMAT] = atts[SAMPLEFORMAT][0] - - logger.debug("Converted items: %s" % sorted(atts.items())) - - # libtiff always expects the bytes in native order. - # we're storing image byte order. So, if the rawmode - # contains I;16, we need to convert from native to image - # byte order. - if im.mode in ("I;16B", "I;16"): - rawmode = "I;16N" - - # Pass tags as sorted list so that the tags are set in a fixed order. - # This is required by libtiff for some tags. For example, the JPEGQUALITY - # pseudo tag requires that the COMPRESS tag was already set. - tags = list(atts.items()) - tags.sort() - a = (rawmode, compression, _fp, filename, tags, types) - e = Image._getencoder(im.mode, "libtiff", a, encoderconfig) - e.setimage(im.im, (0, 0) + im.size) - while True: - # undone, change to self.decodermaxblock: - errcode, data = e.encode(16 * 1024)[1:] - if not _fp: - fp.write(data) - if errcode: - break - if _fp: - try: - os.close(_fp) - except OSError: - pass - if errcode < 0: - msg = f"encoder error {errcode} when writing image file" - raise OSError(msg) - - else: - for tag in blocklist: - del ifd[tag] - offset = ifd.save(fp) - - ImageFile._save( - im, fp, [("raw", (0, 0) + im.size, offset, (rawmode, stride, 1))] - ) - - # -- helper for multi-page save -- - if "_debug_multipage" in encoderinfo: - # just to access o32 and o16 (using correct byte order) - im._debug_multipage = ifd - - -class AppendingTiffWriter: - fieldSizes = [ - 0, # None - 1, # byte - 1, # ascii - 2, # short - 4, # long - 8, # rational - 1, # sbyte - 1, # undefined - 2, # sshort - 4, # slong - 8, # srational - 4, # float - 8, # double - 4, # ifd - 2, # unicode - 4, # complex - 8, # long8 - ] - - # StripOffsets = 273 - # FreeOffsets = 288 - # TileOffsets = 324 - # JPEGQTables = 519 - # JPEGDCTables = 520 - # JPEGACTables = 521 - Tags = {273, 288, 324, 519, 520, 521} - - def __init__(self, fn, new=False): - if hasattr(fn, "read"): - self.f = fn - self.close_fp = False - else: - self.name = fn - self.close_fp = True - try: - self.f = open(fn, "w+b" if new else "r+b") - except OSError: - self.f = open(fn, "w+b") - self.beginning = self.f.tell() - self.setup() - - def setup(self): - # Reset everything. - self.f.seek(self.beginning, os.SEEK_SET) - - self.whereToWriteNewIFDOffset = None - self.offsetOfNewPage = 0 - - self.IIMM = iimm = self.f.read(4) - if not iimm: - # empty file - first page - self.isFirst = True - return - - self.isFirst = False - if iimm == b"II\x2a\x00": - self.setEndian("<") - elif iimm == b"MM\x00\x2a": - self.setEndian(">") - else: - msg = "Invalid TIFF file header" - raise RuntimeError(msg) - - self.skipIFDs() - self.goToEnd() - - def finalize(self): - if self.isFirst: - return - - # fix offsets - self.f.seek(self.offsetOfNewPage) - - iimm = self.f.read(4) - if not iimm: - # msg = "nothing written into new page" - # raise RuntimeError(msg) - # Make it easy to finish a frame without committing to a new one. 
- return - - if iimm != self.IIMM: - msg = "IIMM of new page doesn't match IIMM of first page" - raise RuntimeError(msg) - - ifd_offset = self.readLong() - ifd_offset += self.offsetOfNewPage - self.f.seek(self.whereToWriteNewIFDOffset) - self.writeLong(ifd_offset) - self.f.seek(ifd_offset) - self.fixIFD() - - def newFrame(self): - # Call this to finish a frame. - self.finalize() - self.setup() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.close_fp: - self.close() - return False - - def tell(self): - return self.f.tell() - self.offsetOfNewPage - - def seek(self, offset, whence=io.SEEK_SET): - if whence == os.SEEK_SET: - offset += self.offsetOfNewPage - - self.f.seek(offset, whence) - return self.tell() - - def goToEnd(self): - self.f.seek(0, os.SEEK_END) - pos = self.f.tell() - - # pad to 16 byte boundary - pad_bytes = 16 - pos % 16 - if 0 < pad_bytes < 16: - self.f.write(bytes(pad_bytes)) - self.offsetOfNewPage = self.f.tell() - - def setEndian(self, endian): - self.endian = endian - self.longFmt = self.endian + "L" - self.shortFmt = self.endian + "H" - self.tagFormat = self.endian + "HHL" - - def skipIFDs(self): - while True: - ifd_offset = self.readLong() - if ifd_offset == 0: - self.whereToWriteNewIFDOffset = self.f.tell() - 4 - break - - self.f.seek(ifd_offset) - num_tags = self.readShort() - self.f.seek(num_tags * 12, os.SEEK_CUR) - - def write(self, data): - return self.f.write(data) - - def readShort(self): - (value,) = struct.unpack(self.shortFmt, self.f.read(2)) - return value - - def readLong(self): - (value,) = struct.unpack(self.longFmt, self.f.read(4)) - return value - - def rewriteLastShortToLong(self, value): - self.f.seek(-2, os.SEEK_CUR) - bytes_written = self.f.write(struct.pack(self.longFmt, value)) - if bytes_written is not None and bytes_written != 4: - msg = f"wrote only {bytes_written} bytes but wanted 4" - raise RuntimeError(msg) - - def rewriteLastShort(self, value): - self.f.seek(-2, os.SEEK_CUR) - bytes_written = self.f.write(struct.pack(self.shortFmt, value)) - if bytes_written is not None and bytes_written != 2: - msg = f"wrote only {bytes_written} bytes but wanted 2" - raise RuntimeError(msg) - - def rewriteLastLong(self, value): - self.f.seek(-4, os.SEEK_CUR) - bytes_written = self.f.write(struct.pack(self.longFmt, value)) - if bytes_written is not None and bytes_written != 4: - msg = f"wrote only {bytes_written} bytes but wanted 4" - raise RuntimeError(msg) - - def writeShort(self, value): - bytes_written = self.f.write(struct.pack(self.shortFmt, value)) - if bytes_written is not None and bytes_written != 2: - msg = f"wrote only {bytes_written} bytes but wanted 2" - raise RuntimeError(msg) - - def writeLong(self, value): - bytes_written = self.f.write(struct.pack(self.longFmt, value)) - if bytes_written is not None and bytes_written != 4: - msg = f"wrote only {bytes_written} bytes but wanted 4" - raise RuntimeError(msg) - - def close(self): - self.finalize() - self.f.close() - - def fixIFD(self): - num_tags = self.readShort() - - for i in range(num_tags): - tag, field_type, count = struct.unpack(self.tagFormat, self.f.read(8)) - - field_size = self.fieldSizes[field_type] - total_size = field_size * count - is_local = total_size <= 4 - if not is_local: - offset = self.readLong() - offset += self.offsetOfNewPage - self.rewriteLastLong(offset) - - if tag in self.Tags: - cur_pos = self.f.tell() - - if is_local: - self.fixOffsets( - count, isShort=(field_size == 2), isLong=(field_size == 4) - ) - 
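The setup()/skipIFDs() logic above amounts to reading the classic TIFF header (2-byte byte-order mark, magic 42, 4-byte offset of the first IFD) and hopping along the IFD chain, where each directory is a 2-byte entry count, count * 12 bytes of tag entries, and a 4-byte offset to the next IFD. A standalone sketch of that traversal using only the standard library; it assumes a classic (non-BigTIFF) file, and "example.tif" is a placeholder path.

import struct

def walk_ifds(path):
    with open(path, "rb") as f:
        header = f.read(4)
        if header == b"II\x2a\x00":      # little-endian ("Intel") TIFF
            endian = "<"
        elif header == b"MM\x00\x2a":    # big-endian ("Motorola") TIFF
            endian = ">"
        else:
            raise ValueError("not a classic TIFF file")
        (ifd_offset,) = struct.unpack(endian + "L", f.read(4))
        while ifd_offset:
            f.seek(ifd_offset)
            (num_tags,) = struct.unpack(endian + "H", f.read(2))
            yield ifd_offset, num_tags
            f.seek(num_tags * 12, 1)     # skip the 12-byte tag entries
            (ifd_offset,) = struct.unpack(endian + "L", f.read(4))

# for offset, num_tags in walk_ifds("example.tif"):
#     print(f"IFD at byte {offset} with {num_tags} tags")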
self.f.seek(cur_pos + 4) - else: - self.f.seek(offset) - self.fixOffsets( - count, isShort=(field_size == 2), isLong=(field_size == 4) - ) - self.f.seek(cur_pos) - - offset = cur_pos = None - - elif is_local: - # skip the locally stored value that is not an offset - self.f.seek(4, os.SEEK_CUR) - - def fixOffsets(self, count, isShort=False, isLong=False): - if not isShort and not isLong: - msg = "offset is neither short nor long" - raise RuntimeError(msg) - - for i in range(count): - offset = self.readShort() if isShort else self.readLong() - offset += self.offsetOfNewPage - if isShort and offset >= 65536: - # offset is now too large - we must convert shorts to longs - if count != 1: - msg = "not implemented" - raise RuntimeError(msg) # XXX TODO - - # simple case - the offset is just one and therefore it is - # local (not referenced with another offset) - self.rewriteLastShortToLong(offset) - self.f.seek(-10, os.SEEK_CUR) - self.writeShort(TiffTags.LONG) # rewrite the type to LONG - self.f.seek(8, os.SEEK_CUR) - elif isShort: - self.rewriteLastShort(offset) - else: - self.rewriteLastLong(offset) - - -def _save_all(im, fp, filename): - encoderinfo = im.encoderinfo.copy() - encoderconfig = im.encoderconfig - append_images = list(encoderinfo.get("append_images", [])) - if not hasattr(im, "n_frames") and not append_images: - return _save(im, fp, filename) - - cur_idx = im.tell() - try: - with AppendingTiffWriter(fp) as tf: - for ims in [im] + append_images: - ims.encoderinfo = encoderinfo - ims.encoderconfig = encoderconfig - if not hasattr(ims, "n_frames"): - nfr = 1 - else: - nfr = ims.n_frames - - for idx in range(nfr): - ims.seek(idx) - ims.load() - _save(ims, tf, filename) - tf.newFrame() - finally: - im.seek(cur_idx) - - -# -# -------------------------------------------------------------------- -# Register - -Image.register_open(TiffImageFile.format, TiffImageFile, _accept) -Image.register_save(TiffImageFile.format, _save) -Image.register_save_all(TiffImageFile.format, _save_all) - -Image.register_extensions(TiffImageFile.format, [".tif", ".tiff"]) - -Image.register_mime(TiffImageFile.format, "image/tiff") diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chardet/hebrewprober.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chardet/hebrewprober.py deleted file mode 100644 index 785d0057bcc0ea74a4b8d65ab7a0de78474bf892..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/chardet/hebrewprober.py +++ /dev/null @@ -1,316 +0,0 @@ -######################## BEGIN LICENSE BLOCK ######################## -# The Original Code is Mozilla Universal charset detector code. -# -# The Initial Developer of the Original Code is -# Shy Shalom -# Portions created by the Initial Developer are Copyright (C) 2005 -# the Initial Developer. All Rights Reserved. -# -# Contributor(s): -# Mark Pilgrim - port to Python -# -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, write to the Free Software -# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA -# 02110-1301 USA -######################### END LICENSE BLOCK ######################### - -from typing import Optional, Union - -from .charsetprober import CharSetProber -from .enums import ProbingState -from .sbcharsetprober import SingleByteCharSetProber - -# This prober doesn't actually recognize a language or a charset. -# It is a helper prober for the use of the Hebrew model probers - -### General ideas of the Hebrew charset recognition ### -# -# Four main charsets exist in Hebrew: -# "ISO-8859-8" - Visual Hebrew -# "windows-1255" - Logical Hebrew -# "ISO-8859-8-I" - Logical Hebrew -# "x-mac-hebrew" - ?? Logical Hebrew ?? -# -# Both "ISO" charsets use a completely identical set of code points, whereas -# "windows-1255" and "x-mac-hebrew" are two different proper supersets of -# these code points. windows-1255 defines additional characters in the range -# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific -# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6. -# x-mac-hebrew defines similar additional code points but with a different -# mapping. -# -# As far as an average Hebrew text with no diacritics is concerned, all four -# charsets are identical with respect to code points. Meaning that for the -# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters -# (including final letters). -# -# The dominant difference between these charsets is their directionality. -# "Visual" directionality means that the text is ordered as if the renderer is -# not aware of a BIDI rendering algorithm. The renderer sees the text and -# draws it from left to right. The text itself when ordered naturally is read -# backwards. A buffer of Visual Hebrew generally looks like so: -# "[last word of first line spelled backwards] [whole line ordered backwards -# and spelled backwards] [first word of first line spelled backwards] -# [end of line] [last word of second line] ... etc' " -# adding punctuation marks, numbers and English text to visual text is -# naturally also "visual" and from left to right. -# -# "Logical" directionality means the text is ordered "naturally" according to -# the order it is read. It is the responsibility of the renderer to display -# the text from right to left. A BIDI algorithm is used to place general -# punctuation marks, numbers and English text in the text. -# -# Texts in x-mac-hebrew are almost impossible to find on the Internet. From -# what little evidence I could find, it seems that its general directionality -# is Logical. -# -# To sum up all of the above, the Hebrew probing mechanism knows about two -# charsets: -# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are -# backwards while line order is natural. For charset recognition purposes -# the line order is unimportant (In fact, for this implementation, even -# word order is unimportant). -# Logical Hebrew - "windows-1255" - normal, naturally ordered text. -# -# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be -# specifically identified. -# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew -# that contain special punctuation marks or diacritics is displayed with -# some unconverted characters showing as question marks. 
This problem might -# be corrected using another model prober for x-mac-hebrew. Due to the fact -# that x-mac-hebrew texts are so rare, writing another model prober isn't -# worth the effort and performance hit. -# -#### The Prober #### -# -# The prober is divided between two SBCharSetProbers and a HebrewProber, -# all of which are managed, created, fed data, inquired and deleted by the -# SBCSGroupProber. The two SBCharSetProbers identify that the text is in -# fact some kind of Hebrew, Logical or Visual. The final decision about which -# one is it is made by the HebrewProber by combining final-letter scores -# with the scores of the two SBCharSetProbers to produce a final answer. -# -# The SBCSGroupProber is responsible for stripping the original text of HTML -# tags, English characters, numbers, low-ASCII punctuation characters, spaces -# and new lines. It reduces any sequence of such characters to a single space. -# The buffer fed to each prober in the SBCS group prober is pure text in -# high-ASCII. -# The two SBCharSetProbers (model probers) share the same language model: -# Win1255Model. -# The first SBCharSetProber uses the model normally as any other -# SBCharSetProber does, to recognize windows-1255, upon which this model was -# built. The second SBCharSetProber is told to make the pair-of-letter -# lookup in the language model backwards. This in practice exactly simulates -# a visual Hebrew model using the windows-1255 logical Hebrew model. -# -# The HebrewProber is not using any language model. All it does is look for -# final-letter evidence suggesting the text is either logical Hebrew or visual -# Hebrew. Disjointed from the model probers, the results of the HebrewProber -# alone are meaningless. HebrewProber always returns 0.00 as confidence -# since it never identifies a charset by itself. Instead, the pointer to the -# HebrewProber is passed to the model probers as a helper "Name Prober". -# When the Group prober receives a positive identification from any prober, -# it asks for the name of the charset identified. If the prober queried is a -# Hebrew model prober, the model prober forwards the call to the -# HebrewProber to make the final decision. In the HebrewProber, the -# decision is made according to the final-letters scores maintained and Both -# model probers scores. The answer is returned in the form of the name of the -# charset identified, either "windows-1255" or "ISO-8859-8". - - -class HebrewProber(CharSetProber): - SPACE = 0x20 - # windows-1255 / ISO-8859-8 code points of interest - FINAL_KAF = 0xEA - NORMAL_KAF = 0xEB - FINAL_MEM = 0xED - NORMAL_MEM = 0xEE - FINAL_NUN = 0xEF - NORMAL_NUN = 0xF0 - FINAL_PE = 0xF3 - NORMAL_PE = 0xF4 - FINAL_TSADI = 0xF5 - NORMAL_TSADI = 0xF6 - - # Minimum Visual vs Logical final letter score difference. - # If the difference is below this, don't rely solely on the final letter score - # distance. - MIN_FINAL_CHAR_DISTANCE = 5 - - # Minimum Visual vs Logical model score difference. - # If the difference is below this, don't rely at all on the model score - # distance. 
- MIN_MODEL_DISTANCE = 0.01 - - VISUAL_HEBREW_NAME = "ISO-8859-8" - LOGICAL_HEBREW_NAME = "windows-1255" - - def __init__(self) -> None: - super().__init__() - self._final_char_logical_score = 0 - self._final_char_visual_score = 0 - self._prev = self.SPACE - self._before_prev = self.SPACE - self._logical_prober: Optional[SingleByteCharSetProber] = None - self._visual_prober: Optional[SingleByteCharSetProber] = None - self.reset() - - def reset(self) -> None: - self._final_char_logical_score = 0 - self._final_char_visual_score = 0 - # The two last characters seen in the previous buffer, - # mPrev and mBeforePrev are initialized to space in order to simulate - # a word delimiter at the beginning of the data - self._prev = self.SPACE - self._before_prev = self.SPACE - # These probers are owned by the group prober. - - def set_model_probers( - self, - logical_prober: SingleByteCharSetProber, - visual_prober: SingleByteCharSetProber, - ) -> None: - self._logical_prober = logical_prober - self._visual_prober = visual_prober - - def is_final(self, c: int) -> bool: - return c in [ - self.FINAL_KAF, - self.FINAL_MEM, - self.FINAL_NUN, - self.FINAL_PE, - self.FINAL_TSADI, - ] - - def is_non_final(self, c: int) -> bool: - # The normal Tsadi is not a good Non-Final letter due to words like - # 'lechotet' (to chat) containing an apostrophe after the tsadi. This - # apostrophe is converted to a space in FilterWithoutEnglishLetters - # causing the Non-Final tsadi to appear at an end of a word even - # though this is not the case in the original text. - # The letters Pe and Kaf rarely display a related behavior of not being - # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' - # for example legally end with a Non-Final Pe or Kaf. However, the - # benefit of these letters as Non-Final letters outweighs the damage - # since these words are quite rare. - return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE] - - def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState: - # Final letter analysis for logical-visual decision. - # Look for evidence that the received buffer is either logical Hebrew - # or visual Hebrew. - # The following cases are checked: - # 1) A word longer than 1 letter, ending with a final letter. This is - # an indication that the text is laid out "naturally" since the - # final letter really appears at the end. +1 for logical score. - # 2) A word longer than 1 letter, ending with a Non-Final letter. In - # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, - # should not end with the Non-Final form of that letter. Exceptions - # to this rule are mentioned above in isNonFinal(). This is an - # indication that the text is laid out backwards. +1 for visual - # score - # 3) A word longer than 1 letter, starting with a final letter. Final - # letters should not appear at the beginning of a word. This is an - # indication that the text is laid out backwards. +1 for visual - # score. - # - # The visual score and logical score are accumulated throughout the - # text and are finally checked against each other in GetCharSetName(). - # No checking for final letters in the middle of words is done since - # that case is not an indication for either Logical or Visual text. - # - # We automatically filter out all 7-bit characters (replace them with - # spaces) so the word boundary detection works properly. [MAP] - - if self.state == ProbingState.NOT_ME: - # Both model probers say it's not them. No reason to continue. 
- return ProbingState.NOT_ME - - byte_str = self.filter_high_byte_only(byte_str) - - for cur in byte_str: - if cur == self.SPACE: - # We stand on a space - a word just ended - if self._before_prev != self.SPACE: - # next-to-last char was not a space so self._prev is not a - # 1 letter word - if self.is_final(self._prev): - # case (1) [-2:not space][-1:final letter][cur:space] - self._final_char_logical_score += 1 - elif self.is_non_final(self._prev): - # case (2) [-2:not space][-1:Non-Final letter][ - # cur:space] - self._final_char_visual_score += 1 - else: - # Not standing on a space - if ( - (self._before_prev == self.SPACE) - and (self.is_final(self._prev)) - and (cur != self.SPACE) - ): - # case (3) [-2:space][-1:final letter][cur:not space] - self._final_char_visual_score += 1 - self._before_prev = self._prev - self._prev = cur - - # Forever detecting, till the end or until both model probers return - # ProbingState.NOT_ME (handled above) - return ProbingState.DETECTING - - @property - def charset_name(self) -> str: - assert self._logical_prober is not None - assert self._visual_prober is not None - - # Make the decision: is it Logical or Visual? - # If the final letter score distance is dominant enough, rely on it. - finalsub = self._final_char_logical_score - self._final_char_visual_score - if finalsub >= self.MIN_FINAL_CHAR_DISTANCE: - return self.LOGICAL_HEBREW_NAME - if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE: - return self.VISUAL_HEBREW_NAME - - # It's not dominant enough, try to rely on the model scores instead. - modelsub = ( - self._logical_prober.get_confidence() - self._visual_prober.get_confidence() - ) - if modelsub > self.MIN_MODEL_DISTANCE: - return self.LOGICAL_HEBREW_NAME - if modelsub < -self.MIN_MODEL_DISTANCE: - return self.VISUAL_HEBREW_NAME - - # Still no good, back to final letter distance, maybe it'll save the - # day. - if finalsub < 0.0: - return self.VISUAL_HEBREW_NAME - - # (finalsub > 0 - Logical) or (don't know what to do) default to - # Logical. - return self.LOGICAL_HEBREW_NAME - - @property - def language(self) -> str: - return "Hebrew" - - @property - def state(self) -> ProbingState: - assert self._logical_prober is not None - assert self._visual_prober is not None - - # Remain active as long as any of the model probers are active. - if (self._logical_prober.state == ProbingState.NOT_ME) and ( - self._visual_prober.state == ProbingState.NOT_ME - ): - return ProbingState.NOT_ME - return ProbingState.DETECTING diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/serialization/pkcs7.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/serialization/pkcs7.py deleted file mode 100644 index 9998bcaa1131db00cf432f24fb1731b65ae697cd..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/cryptography/hazmat/primitives/serialization/pkcs7.py +++ /dev/null @@ -1,235 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
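In practice the logical-versus-visual machinery above is reached only through chardet's top-level detect() helper. A short sketch of that entry point, assuming the chardet package is installed; the sample sentence is illustrative, and very short inputs may be classified with lower confidence or fall back to another encoding.

import chardet

# Hebrew text encoded as logical Hebrew (windows-1255, "cp1255" in Python).
sample = "שלום עולם, זהו טקסט לדוגמה בעברית".encode("cp1255")

result = chardet.detect(sample)
# Expected shape: {'encoding': 'windows-1255', 'confidence': 0.x, 'language': 'Hebrew'}
print(result["encoding"], result["confidence"], result["language"])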
- -from __future__ import annotations - -import email.base64mime -import email.generator -import email.message -import email.policy -import io -import typing - -from cryptography import utils, x509 -from cryptography.hazmat.bindings._rust import pkcs7 as rust_pkcs7 -from cryptography.hazmat.primitives import hashes, serialization -from cryptography.hazmat.primitives.asymmetric import ec, rsa -from cryptography.utils import _check_byteslike - - -def load_pem_pkcs7_certificates(data: bytes) -> typing.List[x509.Certificate]: - from cryptography.hazmat.backends.openssl.backend import backend - - return backend.load_pem_pkcs7_certificates(data) - - -def load_der_pkcs7_certificates(data: bytes) -> typing.List[x509.Certificate]: - from cryptography.hazmat.backends.openssl.backend import backend - - return backend.load_der_pkcs7_certificates(data) - - -def serialize_certificates( - certs: typing.List[x509.Certificate], - encoding: serialization.Encoding, -) -> bytes: - return rust_pkcs7.serialize_certificates(certs, encoding) - - -PKCS7HashTypes = typing.Union[ - hashes.SHA224, - hashes.SHA256, - hashes.SHA384, - hashes.SHA512, -] - -PKCS7PrivateKeyTypes = typing.Union[ - rsa.RSAPrivateKey, ec.EllipticCurvePrivateKey -] - - -class PKCS7Options(utils.Enum): - Text = "Add text/plain MIME type" - Binary = "Don't translate input data into canonical MIME format" - DetachedSignature = "Don't embed data in the PKCS7 structure" - NoCapabilities = "Don't embed SMIME capabilities" - NoAttributes = "Don't embed authenticatedAttributes" - NoCerts = "Don't embed signer certificate" - - -class PKCS7SignatureBuilder: - def __init__( - self, - data: typing.Optional[bytes] = None, - signers: typing.List[ - typing.Tuple[ - x509.Certificate, - PKCS7PrivateKeyTypes, - PKCS7HashTypes, - ] - ] = [], - additional_certs: typing.List[x509.Certificate] = [], - ): - self._data = data - self._signers = signers - self._additional_certs = additional_certs - - def set_data(self, data: bytes) -> PKCS7SignatureBuilder: - _check_byteslike("data", data) - if self._data is not None: - raise ValueError("data may only be set once") - - return PKCS7SignatureBuilder(data, self._signers) - - def add_signer( - self, - certificate: x509.Certificate, - private_key: PKCS7PrivateKeyTypes, - hash_algorithm: PKCS7HashTypes, - ) -> PKCS7SignatureBuilder: - if not isinstance( - hash_algorithm, - ( - hashes.SHA224, - hashes.SHA256, - hashes.SHA384, - hashes.SHA512, - ), - ): - raise TypeError( - "hash_algorithm must be one of hashes.SHA224, " - "SHA256, SHA384, or SHA512" - ) - if not isinstance(certificate, x509.Certificate): - raise TypeError("certificate must be a x509.Certificate") - - if not isinstance( - private_key, (rsa.RSAPrivateKey, ec.EllipticCurvePrivateKey) - ): - raise TypeError("Only RSA & EC keys are supported at this time.") - - return PKCS7SignatureBuilder( - self._data, - self._signers + [(certificate, private_key, hash_algorithm)], - ) - - def add_certificate( - self, certificate: x509.Certificate - ) -> PKCS7SignatureBuilder: - if not isinstance(certificate, x509.Certificate): - raise TypeError("certificate must be a x509.Certificate") - - return PKCS7SignatureBuilder( - self._data, self._signers, self._additional_certs + [certificate] - ) - - def sign( - self, - encoding: serialization.Encoding, - options: typing.Iterable[PKCS7Options], - backend: typing.Any = None, - ) -> bytes: - if len(self._signers) == 0: - raise ValueError("Must have at least one signer") - if self._data is None: - raise ValueError("You must add data to 
sign") - options = list(options) - if not all(isinstance(x, PKCS7Options) for x in options): - raise ValueError("options must be from the PKCS7Options enum") - if encoding not in ( - serialization.Encoding.PEM, - serialization.Encoding.DER, - serialization.Encoding.SMIME, - ): - raise ValueError( - "Must be PEM, DER, or SMIME from the Encoding enum" - ) - - # Text is a meaningless option unless it is accompanied by - # DetachedSignature - if ( - PKCS7Options.Text in options - and PKCS7Options.DetachedSignature not in options - ): - raise ValueError( - "When passing the Text option you must also pass " - "DetachedSignature" - ) - - if PKCS7Options.Text in options and encoding in ( - serialization.Encoding.DER, - serialization.Encoding.PEM, - ): - raise ValueError( - "The Text option is only available for SMIME serialization" - ) - - # No attributes implies no capabilities so we'll error if you try to - # pass both. - if ( - PKCS7Options.NoAttributes in options - and PKCS7Options.NoCapabilities in options - ): - raise ValueError( - "NoAttributes is a superset of NoCapabilities. Do not pass " - "both values." - ) - - return rust_pkcs7.sign_and_serialize(self, encoding, options) - - -def _smime_encode( - data: bytes, signature: bytes, micalg: str, text_mode: bool -) -> bytes: - # This function works pretty hard to replicate what OpenSSL does - # precisely. For good and for ill. - - m = email.message.Message() - m.add_header("MIME-Version", "1.0") - m.add_header( - "Content-Type", - "multipart/signed", - protocol="application/x-pkcs7-signature", - micalg=micalg, - ) - - m.preamble = "This is an S/MIME signed message\n" - - msg_part = OpenSSLMimePart() - msg_part.set_payload(data) - if text_mode: - msg_part.add_header("Content-Type", "text/plain") - m.attach(msg_part) - - sig_part = email.message.MIMEPart() - sig_part.add_header( - "Content-Type", "application/x-pkcs7-signature", name="smime.p7s" - ) - sig_part.add_header("Content-Transfer-Encoding", "base64") - sig_part.add_header( - "Content-Disposition", "attachment", filename="smime.p7s" - ) - sig_part.set_payload( - email.base64mime.body_encode(signature, maxlinelen=65) - ) - del sig_part["MIME-Version"] - m.attach(sig_part) - - fp = io.BytesIO() - g = email.generator.BytesGenerator( - fp, - maxheaderlen=0, - mangle_from_=False, - policy=m.policy.clone(linesep="\r\n"), - ) - g.flatten(m) - return fp.getvalue() - - -class OpenSSLMimePart(email.message.MIMEPart): - # A MIMEPart subclass that replicates OpenSSL's behavior of not including - # a newline if there are no headers. 
- def _write_headers(self, generator) -> None: - if list(self.raw_items()): - generator._write_headers(self) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/pyext/__init__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/google/protobuf/pyext/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/cihyFjudo/fairness-paper-search/Kabootar Love Full Movie Download [Extra Quality] Hd.md b/spaces/cihyFjudo/fairness-paper-search/Kabootar Love Full Movie Download [Extra Quality] Hd.md deleted file mode 100644 index d1262e5fa75230b6f73bc9ed10a5185d5cd4b24f..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Kabootar Love Full Movie Download [Extra Quality] Hd.md +++ /dev/null @@ -1,6 +0,0 @@ -<br /> -<p>")*/ $("#header_tv_icon").addClass("tv_highlighted"); $("#tv_icon").attr("src","/assets/tv_active.svg") } var navigation = [ '', '' ]; var layout_type = "movie" var items_count = "100" var platfrm = getShemarooCookies().mobile_browser_type if(layout_type == "song" || layout_type == "video" || layout_type == "videos") var items_0 = 1; var stage_padding = 50; var items_576 = 2; var items_768 = 3; var items_992 = 4; var items_1200 = 5 if(items_count > 5 && platfrm != "mobile") $(".see-more").show(); else if(items_count > 2 && platfrm == "mobile") $(".see-more").show(); else var items_0 = 2; var stage_padding = 25; var items_576 = 3; var items_768 = 4; var items_992 = 5; var items_1200 = 7; if(items_count > 7 && platfrm != "mobile") $(".see-more").show(); else if(items_count > 3 && platfrm == "mobile") $(".see-more").show(); $(".shemaroo_player").empty(); $(document).ready(function() if (getShemarooCookies().theme_option == "dark_theme") $(".preview_video_light").remove() $(".preview_video_dark").show() $(".watch_later_light").remove() $(".watch_later_dark").show() $(".share_light").remove() $(".share_dark").show() $(".download_light").remove() $(".download_dark").show() else if (getShemarooCookies().theme_option == "light_theme" ); var list_count = "1" $("#content_info_0").addClass("active") for(var i = 0; i < list_count ; i++) $("#content_info_" + i).click(function() $(".tab_chk").removeClass("active") var data = $(this).data("value").split(",") $("#content_info_"+data[2]).addClass("active") if(data[2] == 0) $("#synopsis_data").show() $('.season_all_results').html(''); else if(data[2] == 1) $("#synopsis_data").hide() $('.season_all_results').html(''); $(".relative-content-scroll").css("padding-bottom", 184); $(".scroll_loader").show(); trailer_list(data[0],data[1]) )function trailer_list(catalog_id,home_link){ $(".relative-content-scroll").css("padding-bottom", 120); $(".scroll_loader").show(); // var item_id = '' $.ajax({ url: "/catalogs/get_trailers", type: "GET", data: catalog_id : catalog_id, item_id : home_link , success: function(response){ $('.season_all_results').html(''); var list_data = ""; var list_items = ""; var trailer_list = response.trailer_list; for (var i=0; i< trailer_list.length; i++){ console.log(trailer_list[i]) var item= trailer_list[i].split("$"); list_data += ''+item[1].split("|")[0]+'</p> -<p>download Madrsi unlimited Movies and videos Download Here.Madrsi Hd,3gp. mp4 320p and More Videos You Can Download Easyly. 
tamilrockers and movierulz, tamilgun, filmywap, and pagalworld videos and Movies download.</p> -<h2>Kabootar Love Full Movie Download Hd</h2><br /><p><b><b>DOWNLOAD</b> 🌟 <a href="https://tinurli.com/2uwjhy">https://tinurli.com/2uwjhy</a></b></p><br /><br /> aaccfb2cb3<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/The Kokuhaku Full Movie Download In Italian.md b/spaces/cihyFjudo/fairness-paper-search/The Kokuhaku Full Movie Download In Italian.md deleted file mode 100644 index 77b56f99bc137a9757128bd8c3f6e9a485a4aa65..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/The Kokuhaku Full Movie Download In Italian.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>The Kokuhaku Full Movie Download In Italian</h2><br /><p><b><b>Download</b> ✓✓✓ <a href="https://tinurli.com/2uwkCi">https://tinurli.com/2uwkCi</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cihyFjudo/fairness-paper-search/Watch [WORK] Suspiria (1977) BRRip Xvid AC3-Anarchy.avi Online for Free - No Sign Up Required.md b/spaces/cihyFjudo/fairness-paper-search/Watch [WORK] Suspiria (1977) BRRip Xvid AC3-Anarchy.avi Online for Free - No Sign Up Required.md deleted file mode 100644 index 4db2cad4cf0c53685c3a933214aacdc423223626..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Watch [WORK] Suspiria (1977) BRRip Xvid AC3-Anarchy.avi Online for Free - No Sign Up Required.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>[WORK] Suspiria (1977) BRRip Xvid AC3-Anarchy.avi</h2><br /><p><b><b>Download</b> ✓ <a href="https://tinurli.com/2uwk1u">https://tinurli.com/2uwk1u</a></b></p><br /><br /> - - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/security/open_id_connect_url.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/security/open_id_connect_url.py deleted file mode 100644 index 4e65f1f6c486fa579554c61b9d137c7fda1f1b17..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/fastapi/security/open_id_connect_url.py +++ /dev/null @@ -1,34 +0,0 @@ -from typing import Optional - -from fastapi.openapi.models import OpenIdConnect as OpenIdConnectModel -from fastapi.security.base import SecurityBase -from starlette.exceptions import HTTPException -from starlette.requests import Request -from starlette.status import HTTP_403_FORBIDDEN - - -class OpenIdConnect(SecurityBase): - def __init__( - self, - *, - openIdConnectUrl: str, - scheme_name: Optional[str] = None, - description: Optional[str] = None, - auto_error: bool = True, - ): - self.model = OpenIdConnectModel( - openIdConnectUrl=openIdConnectUrl, description=description - ) - self.scheme_name = scheme_name or self.__class__.__name__ - self.auto_error = auto_error - - async def __call__(self, request: Request) -> Optional[str]: - authorization = request.headers.get("Authorization") - if not authorization: - if self.auto_error: - raise HTTPException( - status_code=HTTP_403_FORBIDDEN, detail="Not authenticated" - ) - else: - return None - return authorization diff --git a/spaces/codedog-ai/codedog-demo/codedog_demo/github_utils.py b/spaces/codedog-ai/codedog-demo/codedog_demo/github_utils.py deleted file mode 100644 index 81caf24220c407dccff8f70fa3c00169ed97934f..0000000000000000000000000000000000000000 --- a/spaces/codedog-ai/codedog-demo/codedog_demo/github_utils.py +++ 
/dev/null @@ -1,17 +0,0 @@ -from typing import Tuple - - -def parse_github_pr_url(url: str) -> Tuple[str, int]: - # Input = https://github.com/codedog-ai/codedog/pull/11 - # Output = codedog-ai/codedog, 11 - if not url: - return ("", 0) - - try: - parts = url.split("/") - owner_repo = parts[3] + "/" + parts[4] - pr_number = int(parts[-1]) - except Exception: - return ("", 0) - - return (owner_repo, pr_number) diff --git a/spaces/colakin/video-generater/public/ffmpeg/fftools/ffmpeg.c b/spaces/colakin/video-generater/public/ffmpeg/fftools/ffmpeg.c deleted file mode 100644 index 15fe839914f84b97255747cd9db29ba62b1b597a..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/fftools/ffmpeg.c +++ /dev/null @@ -1,2298 +0,0 @@ -/* - * Copyright (c) 2000-2003 Fabrice Bellard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * multimedia converter based on the FFmpeg libraries - */ - -#include "config.h" - -#include <errno.h> -#include <limits.h> -#include <stdatomic.h> -#include <stdint.h> -#include <stdlib.h> -#include <string.h> -#include <time.h> - -#if HAVE_IO_H -#include <io.h> -#endif -#if HAVE_UNISTD_H -#include <unistd.h> -#endif - -#if HAVE_SYS_RESOURCE_H -#include <sys/time.h> -#include <sys/types.h> -#include <sys/resource.h> -#elif HAVE_GETPROCESSTIMES -#include <windows.h> -#endif -#if HAVE_GETPROCESSMEMORYINFO -#include <windows.h> -#include <psapi.h> -#endif -#if HAVE_SETCONSOLECTRLHANDLER -#include <windows.h> -#endif - -#if HAVE_SYS_SELECT_H -#include <sys/select.h> -#endif - -#if HAVE_TERMIOS_H -#include <fcntl.h> -#include <sys/ioctl.h> -#include <sys/time.h> -#include <termios.h> -#elif HAVE_KBHIT -#include <conio.h> -#endif - -#include "libavutil/avassert.h" -#include "libavutil/avstring.h" -#include "libavutil/bprint.h" -#include "libavutil/channel_layout.h" -#include "libavutil/dict.h" -#include "libavutil/display.h" -#include "libavutil/fifo.h" -#include "libavutil/hwcontext.h" -#include "libavutil/imgutils.h" -#include "libavutil/intreadwrite.h" -#include "libavutil/libm.h" -#include "libavutil/mathematics.h" -#include "libavutil/opt.h" -#include "libavutil/parseutils.h" -#include "libavutil/pixdesc.h" -#include "libavutil/samplefmt.h" -#include "libavutil/thread.h" -#include "libavutil/threadmessage.h" -#include "libavutil/time.h" -#include "libavutil/timestamp.h" - -#include "libavcodec/version.h" - -#include "libavformat/avformat.h" - -#include "libavdevice/avdevice.h" - -#include "libswresample/swresample.h" - -#include "libavfilter/avfilter.h" -#include "libavfilter/buffersrc.h" -#include "libavfilter/buffersink.h" - -#include "cmdutils.h" -#include "ffmpeg.h" -#include "sync_queue.h" - -const char program_name[] = "ffmpeg"; -const int program_birth_year = 2000; - -FILE *vstats_file; - -typedef 
struct BenchmarkTimeStamps { - int64_t real_usec; - int64_t user_usec; - int64_t sys_usec; -} BenchmarkTimeStamps; - -static BenchmarkTimeStamps get_benchmark_time_stamps(void); -static int64_t getmaxrss(void); - -int64_t nb_frames_dup = 0; -int64_t nb_frames_drop = 0; -static int64_t decode_error_stat[2]; -unsigned nb_output_dumped = 0; - -static BenchmarkTimeStamps current_time; -AVIOContext *progress_avio = NULL; - -InputFile **input_files = NULL; -int nb_input_files = 0; - -OutputFile **output_files = NULL; -int nb_output_files = 0; - -FilterGraph **filtergraphs; -int nb_filtergraphs; - -#if HAVE_TERMIOS_H - -/* init terminal so that we can grab keys */ -static struct termios oldtty; -static int restore_tty; -#endif - -/* sub2video hack: - Convert subtitles to video with alpha to insert them in filter graphs. - This is a temporary solution until libavfilter gets real subtitles support. - */ - -static int sub2video_get_blank_frame(InputStream *ist) -{ - int ret; - AVFrame *frame = ist->sub2video.frame; - - av_frame_unref(frame); - ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w; - ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h; - ist->sub2video.frame->format = AV_PIX_FMT_RGB32; - if ((ret = av_frame_get_buffer(frame, 0)) < 0) - return ret; - memset(frame->data[0], 0, frame->height * frame->linesize[0]); - return 0; -} - -static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, - AVSubtitleRect *r) -{ - uint32_t *pal, *dst2; - uint8_t *src, *src2; - int x, y; - - if (r->type != SUBTITLE_BITMAP) { - av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n"); - return; - } - if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) { - av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n", - r->x, r->y, r->w, r->h, w, h - ); - return; - } - - dst += r->y * dst_linesize + r->x * 4; - src = r->data[0]; - pal = (uint32_t *)r->data[1]; - for (y = 0; y < r->h; y++) { - dst2 = (uint32_t *)dst; - src2 = src; - for (x = 0; x < r->w; x++) - *(dst2++) = pal[*(src2++)]; - dst += dst_linesize; - src += r->linesize[0]; - } -} - -static void sub2video_push_ref(InputStream *ist, int64_t pts) -{ - AVFrame *frame = ist->sub2video.frame; - int i; - int ret; - - av_assert1(frame->data[0]); - ist->sub2video.last_pts = frame->pts = pts; - for (i = 0; i < ist->nb_filters; i++) { - ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame, - AV_BUFFERSRC_FLAG_KEEP_REF | - AV_BUFFERSRC_FLAG_PUSH); - if (ret != AVERROR_EOF && ret < 0) - av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n", - av_err2str(ret)); - } -} - -void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub) -{ - AVFrame *frame = ist->sub2video.frame; - int8_t *dst; - int dst_linesize; - int num_rects, i; - int64_t pts, end_pts; - - if (!frame) - return; - if (sub) { - pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL, - AV_TIME_BASE_Q, ist->st->time_base); - end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL, - AV_TIME_BASE_Q, ist->st->time_base); - num_rects = sub->num_rects; - } else { - /* If we are initializing the system, utilize current heartbeat - PTS as the start time, and show until the following subpicture - is received. Otherwise, utilize the previous subpicture's end time - as the fall-back value. */ - pts = ist->sub2video.initialize ? 
- heartbeat_pts : ist->sub2video.end_pts; - end_pts = INT64_MAX; - num_rects = 0; - } - if (sub2video_get_blank_frame(ist) < 0) { - av_log(NULL, AV_LOG_ERROR, - "Impossible to get a blank canvas.\n"); - return; - } - dst = frame->data [0]; - dst_linesize = frame->linesize[0]; - for (i = 0; i < num_rects; i++) - sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]); - sub2video_push_ref(ist, pts); - ist->sub2video.end_pts = end_pts; - ist->sub2video.initialize = 0; -} - -static void sub2video_heartbeat(InputStream *ist, int64_t pts) -{ - InputFile *infile = input_files[ist->file_index]; - int i, j, nb_reqs; - int64_t pts2; - - /* When a frame is read from a file, examine all sub2video streams in - the same file and send the sub2video frame again. Otherwise, decoded - video frames could be accumulating in the filter graph while a filter - (possibly overlay) is desperately waiting for a subtitle frame. */ - for (i = 0; i < infile->nb_streams; i++) { - InputStream *ist2 = infile->streams[i]; - if (!ist2->sub2video.frame) - continue; - /* subtitles seem to be usually muxed ahead of other streams; - if not, subtracting a larger time here is necessary */ - pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1; - /* do not send the heartbeat frame if the subtitle is already ahead */ - if (pts2 <= ist2->sub2video.last_pts) - continue; - if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize) - /* if we have hit the end of the current displayed subpicture, - or if we need to initialize the system, update the - overlayed subpicture and its start/end times */ - sub2video_update(ist2, pts2 + 1, NULL); - for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++) - nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter); - if (nb_reqs) - sub2video_push_ref(ist2, pts2); - } -} - -static void sub2video_flush(InputStream *ist) -{ - int i; - int ret; - - if (ist->sub2video.end_pts < INT64_MAX) - sub2video_update(ist, INT64_MAX, NULL); - for (i = 0; i < ist->nb_filters; i++) { - ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL); - if (ret != AVERROR_EOF && ret < 0) - av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n"); - } -} - -/* end of sub2video hack */ - -static void term_exit_sigsafe(void) -{ -#if HAVE_TERMIOS_H - if(restore_tty) - tcsetattr (0, TCSANOW, &oldtty); -#endif -} - -void term_exit(void) -{ - av_log(NULL, AV_LOG_QUIET, "%s", ""); - term_exit_sigsafe(); -} - -static volatile int received_sigterm = 0; -static volatile int received_nb_signals = 0; -static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0); -static volatile int ffmpeg_exited = 0; -static int64_t copy_ts_first_pts = AV_NOPTS_VALUE; - -static void -sigterm_handler(int sig) -{ - int ret; - received_sigterm = sig; - received_nb_signals++; - term_exit_sigsafe(); - if(received_nb_signals > 3) { - ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n", - strlen("Received > 3 system signals, hard exiting\n")); - if (ret < 0) { /* Do nothing */ }; - exit(123); - } -} - -#if HAVE_SETCONSOLECTRLHANDLER -static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) -{ - av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType); - - switch (fdwCtrlType) - { - case CTRL_C_EVENT: - case CTRL_BREAK_EVENT: - sigterm_handler(SIGINT); - return TRUE; - - case CTRL_CLOSE_EVENT: - case CTRL_LOGOFF_EVENT: - case CTRL_SHUTDOWN_EVENT: - sigterm_handler(SIGTERM); - /* Basically, with these 3 events, when we return from this method the - process is 
hard terminated, so stall as long as we need to - to try and let the main thread(s) clean up and gracefully terminate - (we have at most 5 seconds, but should be done far before that). */ - while (!ffmpeg_exited) { - Sleep(0); - } - return TRUE; - - default: - av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType); - return FALSE; - } -} -#endif - -#ifdef __linux__ -#define SIGNAL(sig, func) \ - do { \ - action.sa_handler = func; \ - sigaction(sig, &action, NULL); \ - } while (0) -#else -#define SIGNAL(sig, func) \ - signal(sig, func) -#endif - -void term_init(void) -{ -#if defined __linux__ - struct sigaction action = {0}; - action.sa_handler = sigterm_handler; - - /* block other interrupts while processing this one */ - sigfillset(&action.sa_mask); - - /* restart interruptible functions (i.e. don't fail with EINTR) */ - action.sa_flags = SA_RESTART; -#endif - -#if HAVE_TERMIOS_H - if (stdin_interaction) { - struct termios tty; - if (tcgetattr (0, &tty) == 0) { - oldtty = tty; - restore_tty = 1; - - tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP - |INLCR|IGNCR|ICRNL|IXON); - tty.c_oflag |= OPOST; - tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN); - tty.c_cflag &= ~(CSIZE|PARENB); - tty.c_cflag |= CS8; - tty.c_cc[VMIN] = 1; - tty.c_cc[VTIME] = 0; - - tcsetattr (0, TCSANOW, &tty); - } - SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */ - } -#endif - - SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */ - SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */ -#ifdef SIGXCPU - SIGNAL(SIGXCPU, sigterm_handler); -#endif -#ifdef SIGPIPE - signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */ -#endif -#if HAVE_SETCONSOLECTRLHANDLER - SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE); -#endif -} - -/* read a key without blocking */ -static int read_key(void) -{ - unsigned char ch; -#if HAVE_TERMIOS_H - int n = 1; - struct timeval tv; - fd_set rfds; - - FD_ZERO(&rfds); - FD_SET(0, &rfds); - tv.tv_sec = 0; - tv.tv_usec = 0; - n = select(1, &rfds, NULL, NULL, &tv); - if (n > 0) { - n = read(0, &ch, 1); - if (n == 1) - return ch; - - return n; - } -#elif HAVE_KBHIT -# if HAVE_PEEKNAMEDPIPE - static int is_pipe; - static HANDLE input_handle; - DWORD dw, nchars; - if(!input_handle){ - input_handle = GetStdHandle(STD_INPUT_HANDLE); - is_pipe = !GetConsoleMode(input_handle, &dw); - } - - if (is_pipe) { - /* When running under a GUI, you will end here. 
*/ - if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) { - // input pipe may have been closed by the program that ran ffmpeg - return -1; - } - //Read it - if(nchars != 0) { - read(0, &ch, 1); - return ch; - }else{ - return -1; - } - } -# endif - if(kbhit()) - return(getch()); -#endif - return -1; -} - -static int decode_interrupt_cb(void *ctx) -{ - return received_nb_signals > atomic_load(&transcode_init_done); -} - -const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL }; - -static void ffmpeg_cleanup(int ret) -{ - int i; - - if (do_benchmark) { - int maxrss = getmaxrss() / 1024; - av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss); - } - - for (i = 0; i < nb_filtergraphs; i++) - fg_free(&filtergraphs[i]); - av_freep(&filtergraphs); - - /* close files */ - for (i = 0; i < nb_output_files; i++) - of_close(&output_files[i]); - - for (i = 0; i < nb_input_files; i++) - ifile_close(&input_files[i]); - - if (vstats_file) { - if (fclose(vstats_file)) - av_log(NULL, AV_LOG_ERROR, - "Error closing vstats file, loss of information possible: %s\n", - av_err2str(AVERROR(errno))); - } - av_freep(&vstats_filename); - of_enc_stats_close(); - - hw_device_free_all(); - - av_freep(&filter_nbthreads); - - av_freep(&input_files); - av_freep(&output_files); - - uninit_opts(); - - avformat_network_deinit(); - - if (received_sigterm) { - av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n", - (int) received_sigterm); - } else if (ret && atomic_load(&transcode_init_done)) { - av_log(NULL, AV_LOG_INFO, "Conversion failed!\n"); - } - term_exit(); - ffmpeg_exited = 1; -} - -OutputStream *ost_iter(OutputStream *prev) -{ - int of_idx = prev ? prev->file_index : 0; - int ost_idx = prev ? prev->index + 1 : 0; - - for (; of_idx < nb_output_files; of_idx++) { - OutputFile *of = output_files[of_idx]; - if (ost_idx < of->nb_streams) - return of->streams[ost_idx]; - - ost_idx = 0; - } - - return NULL; -} - -InputStream *ist_iter(InputStream *prev) -{ - int if_idx = prev ? prev->file_index : 0; - int ist_idx = prev ? prev->st->index + 1 : 0; - - for (; if_idx < nb_input_files; if_idx++) { - InputFile *f = input_files[if_idx]; - if (ist_idx < f->nb_streams) - return f->streams[ist_idx]; - - ist_idx = 0; - } - - return NULL; -} - -void remove_avoptions(AVDictionary **a, AVDictionary *b) -{ - const AVDictionaryEntry *t = NULL; - - while ((t = av_dict_iterate(b, t))) { - av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE); - } -} - -void assert_avoptions(AVDictionary *m) -{ - const AVDictionaryEntry *t; - if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) { - av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key); - exit_program(1); - } -} - -void update_benchmark(const char *fmt, ...) 
-{ - if (do_benchmark_all) { - BenchmarkTimeStamps t = get_benchmark_time_stamps(); - va_list va; - char buf[1024]; - - if (fmt) { - va_start(va, fmt); - vsnprintf(buf, sizeof(buf), fmt, va); - va_end(va); - av_log(NULL, AV_LOG_INFO, - "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n", - t.user_usec - current_time.user_usec, - t.sys_usec - current_time.sys_usec, - t.real_usec - current_time.real_usec, buf); - } - current_time = t; - } -} - -void close_output_stream(OutputStream *ost) -{ - OutputFile *of = output_files[ost->file_index]; - ost->finished |= ENCODER_FINISHED; - - if (ost->sq_idx_encode >= 0) - sq_send(of->sq_encode, ost->sq_idx_encode, SQFRAME(NULL)); -} - -static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time) -{ - AVBPrint buf, buf_script; - int64_t total_size = of_filesize(output_files[0]); - int vid; - double bitrate; - double speed; - int64_t pts = INT64_MIN + 1; - static int64_t last_time = -1; - static int first_report = 1; - int hours, mins, secs, us; - const char *hours_sign; - int ret; - float t; - - if (!print_stats && !is_last_report && !progress_avio) - return; - - if (!is_last_report) { - if (last_time == -1) { - last_time = cur_time; - } - if (((cur_time - last_time) < stats_period && !first_report) || - (first_report && nb_output_dumped < nb_output_files)) - return; - last_time = cur_time; - } - - t = (cur_time-timer_start) / 1000000.0; - - vid = 0; - av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); - av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC); - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - const AVCodecContext * const enc = ost->enc_ctx; - const float q = enc ? ost->quality / (float) FF_QP2LAMBDA : -1; - - if (vid && ost->type == AVMEDIA_TYPE_VIDEO) { - av_bprintf(&buf, "q=%2.1f ", q); - av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", - ost->file_index, ost->index, q); - } - if (!vid && ost->type == AVMEDIA_TYPE_VIDEO) { - float fps; - uint64_t frame_number = atomic_load(&ost->packets_written); - - fps = t > 1 ? frame_number / t : 0; - av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ", - frame_number, fps < 9.95, fps, q); - av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number); - av_bprintf(&buf_script, "fps=%.2f\n", fps); - av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n", - ost->file_index, ost->index, q); - if (is_last_report) - av_bprintf(&buf, "L"); - - vid = 1; - } - /* compute min output value */ - if (ost->last_mux_dts != AV_NOPTS_VALUE) { - pts = FFMAX(pts, ost->last_mux_dts); - if (copy_ts) { - if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1) - copy_ts_first_pts = pts; - if (copy_ts_first_pts != AV_NOPTS_VALUE) - pts -= copy_ts_first_pts; - } - } - - if (is_last_report) - nb_frames_drop += ost->last_dropped; - } - - secs = FFABS(pts) / AV_TIME_BASE; - us = FFABS(pts) % AV_TIME_BASE; - mins = secs / 60; - secs %= 60; - hours = mins / 60; - mins %= 60; - hours_sign = (pts < 0) ? "-" : ""; - - bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1; - speed = t != 0.0 ? 
(double)pts / AV_TIME_BASE / t : -1; - - if (total_size < 0) av_bprintf(&buf, "size=N/A time="); - else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0); - if (pts == AV_NOPTS_VALUE) { - av_bprintf(&buf, "N/A "); - } else { - av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ", - hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE); - } - - if (bitrate < 0) { - av_bprintf(&buf, "bitrate=N/A"); - av_bprintf(&buf_script, "bitrate=N/A\n"); - }else{ - av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate); - av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate); - } - - if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n"); - else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size); - if (pts == AV_NOPTS_VALUE) { - av_bprintf(&buf_script, "out_time_us=N/A\n"); - av_bprintf(&buf_script, "out_time_ms=N/A\n"); - av_bprintf(&buf_script, "out_time=N/A\n"); - } else { - av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts); - av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts); - av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n", - hours_sign, hours, mins, secs, us); - } - - if (nb_frames_dup || nb_frames_drop) - av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop); - av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup); - av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop); - - if (speed < 0) { - av_bprintf(&buf, " speed=N/A"); - av_bprintf(&buf_script, "speed=N/A\n"); - } else { - av_bprintf(&buf, " speed=%4.3gx", speed); - av_bprintf(&buf_script, "speed=%4.3gx\n", speed); - } - - if (print_stats || is_last_report) { - const char end = is_last_report ? '\n' : '\r'; - if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) { - fprintf(stderr, "%s %c", buf.str, end); - } else - av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end); - - fflush(stderr); - } - av_bprint_finalize(&buf, NULL); - - if (progress_avio) { - av_bprintf(&buf_script, "progress=%s\n", - is_last_report ? "end" : "continue"); - avio_write(progress_avio, buf_script.str, - FFMIN(buf_script.len, buf_script.size - 1)); - avio_flush(progress_avio); - av_bprint_finalize(&buf_script, NULL); - if (is_last_report) { - if ((ret = avio_closep(&progress_avio)) < 0) - av_log(NULL, AV_LOG_ERROR, - "Error closing progress log, loss of information possible: %s\n", av_err2str(ret)); - } - } - - first_report = 0; -} - -int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par) -{ - int ret; - - // We never got any input. Set a fake format, which will - // come from libavformat. - ifilter->format = par->format; - ifilter->sample_rate = par->sample_rate; - ifilter->width = par->width; - ifilter->height = par->height; - ifilter->sample_aspect_ratio = par->sample_aspect_ratio; - ret = av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout); - if (ret < 0) - return ret; - - return 0; -} - -static void check_decode_result(InputStream *ist, int *got_output, int ret) -{ - if (*got_output || ret<0) - decode_error_stat[ret<0] ++; - - if (ret < 0 && exit_on_error) - exit_program(1); - - if (*got_output && ist) { - if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) { - av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING, - "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index); - if (exit_on_error) - exit_program(1); - } - } -} - -// Filters can be configured only if the formats of all inputs are known. 
-int ifilter_has_all_input_formats(FilterGraph *fg) -{ - int i; - for (i = 0; i < fg->nb_inputs; i++) { - if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO || - fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO)) - return 0; - } - return 1; -} - -// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2. -// There is the following difference: if you got a frame, you must call -// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0 -// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet) -static int decode(InputStream *ist, AVCodecContext *avctx, - AVFrame *frame, int *got_frame, AVPacket *pkt) -{ - int ret; - - *got_frame = 0; - - if (pkt) { - ret = avcodec_send_packet(avctx, pkt); - // In particular, we don't expect AVERROR(EAGAIN), because we read all - // decoded frames with avcodec_receive_frame() until done. - if (ret < 0 && ret != AVERROR_EOF) - return ret; - } - - ret = avcodec_receive_frame(avctx, frame); - if (ret < 0 && ret != AVERROR(EAGAIN)) - return ret; - if (ret >= 0) { - if (ist->want_frame_data) { - FrameData *fd; - - av_assert0(!frame->opaque_ref); - frame->opaque_ref = av_buffer_allocz(sizeof(*fd)); - if (!frame->opaque_ref) { - av_frame_unref(frame); - return AVERROR(ENOMEM); - } - fd = (FrameData*)frame->opaque_ref->data; - fd->pts = frame->pts; - fd->tb = avctx->pkt_timebase; - fd->idx = avctx->frame_num - 1; - } - - *got_frame = 1; - } - - return 0; -} - -static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame) -{ - int i, ret; - - av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */ - for (i = 0; i < ist->nb_filters; i++) { - ret = ifilter_send_frame(ist->filters[i], decoded_frame, i < ist->nb_filters - 1); - if (ret == AVERROR_EOF) - ret = 0; /* ignore */ - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, - "Failed to inject frame into filter network: %s\n", av_err2str(ret)); - break; - } - } - return ret; -} - -static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, - int *decode_failed) -{ - AVFrame *decoded_frame = ist->decoded_frame; - AVCodecContext *avctx = ist->dec_ctx; - int ret, err = 0; - AVRational decoded_frame_tb; - - update_benchmark(NULL); - ret = decode(ist, avctx, decoded_frame, got_output, pkt); - update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index); - if (ret < 0) - *decode_failed = 1; - - if (ret != AVERROR_EOF) - check_decode_result(ist, got_output, ret); - - if (!*got_output || ret < 0) - return ret; - - ist->samples_decoded += decoded_frame->nb_samples; - ist->frames_decoded++; - - /* increment next_dts to use for the case where the input stream does not - have timestamps or there are multiple frames in the packet */ - ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / - decoded_frame->sample_rate; - ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) / - decoded_frame->sample_rate; - - if (decoded_frame->pts != AV_NOPTS_VALUE) { - decoded_frame_tb = ist->st->time_base; - } else if (pkt && pkt->pts != AV_NOPTS_VALUE) { - decoded_frame->pts = pkt->pts; - decoded_frame_tb = pkt->time_base; - }else { - decoded_frame->pts = ist->dts; - decoded_frame_tb = AV_TIME_BASE_Q; - } - if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE && - pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration) - ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE; - if (pkt) - ist->prev_pkt_pts = pkt->pts; - if (decoded_frame->pts != AV_NOPTS_VALUE) - 
decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts, - (AVRational){1, decoded_frame->sample_rate}, - decoded_frame->nb_samples, - &ist->filter_in_rescale_delta_last, - (AVRational){1, decoded_frame->sample_rate}); - ist->nb_samples = decoded_frame->nb_samples; - err = send_frame_to_filters(ist, decoded_frame); - - av_frame_unref(decoded_frame); - return err < 0 ? err : ret; -} - -static int64_t video_duration_estimate(const InputStream *ist, const AVFrame *frame) -{ - const InputFile *ifile = input_files[ist->file_index]; - const int container_nots = !!(ifile->ctx->iformat->flags & AVFMT_NOTIMESTAMPS); - int64_t codec_duration = 0; - - // XXX lavf currently makes up frame durations when they are not provided by - // the container. As there is no way to reliably distinguish real container - // durations from the fake made-up ones, we use heuristics based on whether - // the container has timestamps. Eventually lavf should stop making up - // durations, then this should be simplified. - - // prefer frame duration for containers with timestamps - if (frame->duration > 0 && !container_nots) - return frame->duration; - - if (ist->dec_ctx->framerate.den && ist->dec_ctx->framerate.num) { - int ticks = frame->repeat_pict >= 0 ? - frame->repeat_pict + 1 : - ist->dec_ctx->ticks_per_frame; - codec_duration = av_rescale_q(ticks, av_inv_q(ist->dec_ctx->framerate), - ist->st->time_base); - } - - // prefer codec-layer duration for containers without timestamps - if (codec_duration > 0 && container_nots) - return codec_duration; - - // when timestamps are available, repeat last frame's actual duration - // (i.e. pts difference between this and last frame) - if (frame->pts != AV_NOPTS_VALUE && ist->last_frame_pts != AV_NOPTS_VALUE && - frame->pts > ist->last_frame_pts) - return frame->pts - ist->last_frame_pts; - - // try frame/codec duration - if (frame->duration > 0) - return frame->duration; - if (codec_duration > 0) - return codec_duration; - - // try average framerate - if (ist->st->avg_frame_rate.num && ist->st->avg_frame_rate.den) { - int64_t d = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), - ist->st->time_base); - if (d > 0) - return d; - } - - // last resort is last frame's estimated duration, and 1 - return FFMAX(ist->last_frame_duration_est, 1); -} - -static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, - int *decode_failed) -{ - AVFrame *decoded_frame = ist->decoded_frame; - int ret = 0, err = 0; - int64_t best_effort_timestamp; - - // With fate-indeo3-2, we're getting 0-sized packets before EOF for some - // reason. This seems like a semi-critical bug. Don't trigger EOF, and - // skip the packet. - if (!eof && pkt && pkt->size == 0) - return 0; - - update_benchmark(NULL); - ret = decode(ist, ist->dec_ctx, decoded_frame, got_output, pkt); - update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index); - if (ret < 0) - *decode_failed = 1; - - // The following line may be required in some cases where there is no parser - // or the parser does not has_b_frames correctly - if (ist->par->video_delay < ist->dec_ctx->has_b_frames) { - if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) { - ist->par->video_delay = ist->dec_ctx->has_b_frames; - } else - av_log(ist->dec_ctx, AV_LOG_WARNING, - "video_delay is larger in decoder than demuxer %d > %d.\n" - "If you want to help, upload a sample " - "of this file to https://streams.videolan.org/upload/ " - "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)\n", - ist->dec_ctx->has_b_frames, - ist->par->video_delay); - } - - if (ret != AVERROR_EOF) - check_decode_result(ist, got_output, ret); - - if (*got_output && ret >= 0) { - if (ist->dec_ctx->width != decoded_frame->width || - ist->dec_ctx->height != decoded_frame->height || - ist->dec_ctx->pix_fmt != decoded_frame->format) { - av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n", - decoded_frame->width, - decoded_frame->height, - decoded_frame->format, - ist->dec_ctx->width, - ist->dec_ctx->height, - ist->dec_ctx->pix_fmt); - } - } - - if (!*got_output || ret < 0) - return ret; - - if(ist->top_field_first>=0) - decoded_frame->top_field_first = ist->top_field_first; - - ist->frames_decoded++; - - if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) { - err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame); - if (err < 0) - goto fail; - } - - best_effort_timestamp= decoded_frame->best_effort_timestamp; - *duration_pts = decoded_frame->duration; - - if (ist->framerate.num) - best_effort_timestamp = ist->cfr_next_pts++; - - // no timestamp available - extrapolate from previous frame duration - if (best_effort_timestamp == AV_NOPTS_VALUE && - ist->last_frame_pts != AV_NOPTS_VALUE) - best_effort_timestamp = ist->last_frame_pts + ist->last_frame_duration_est; - - if (best_effort_timestamp == AV_NOPTS_VALUE) - best_effort_timestamp = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ist->st->time_base); - - if(best_effort_timestamp != AV_NOPTS_VALUE) { - int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q); - - if (ts != AV_NOPTS_VALUE) - ist->next_pts = ist->pts = ts; - } - - // update timestamp history - ist->last_frame_duration_est = video_duration_estimate(ist, decoded_frame); - ist->last_frame_pts = decoded_frame->pts; - - if (debug_ts) { - av_log(ist, AV_LOG_INFO, - "decoder -> pts:%s pts_time:%s " - "pkt_dts:%s pkt_dts_time:%s " - "best_effort_ts:%"PRId64" best_effort_ts_time:%s " - "duration:%s duration_time:%s " - "keyframe:%d frame_type:%d time_base:%d/%d\n", - av_ts2str(decoded_frame->pts), - av_ts2timestr(decoded_frame->pts, &ist->st->time_base), - av_ts2str(decoded_frame->pkt_dts), - av_ts2timestr(decoded_frame->pkt_dts, &ist->st->time_base), - best_effort_timestamp, - av_ts2timestr(best_effort_timestamp, &ist->st->time_base), - av_ts2str(decoded_frame->duration), - av_ts2timestr(decoded_frame->duration, &ist->st->time_base), - decoded_frame->key_frame, decoded_frame->pict_type, - ist->st->time_base.num, ist->st->time_base.den); - } - - if (ist->st->sample_aspect_ratio.num) - decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio; - - err = send_frame_to_filters(ist, decoded_frame); - -fail: - av_frame_unref(decoded_frame); - return err < 0 ? err : ret; -} - -static int process_subtitle(InputStream *ist, AVSubtitle *subtitle, int *got_output) -{ - int ret = 0; - int free_sub = 1; - - if (ist->fix_sub_duration) { - int end = 1; - if (ist->prev_sub.got_output) { - end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts, - 1000, AV_TIME_BASE); - if (end < ist->prev_sub.subtitle.end_display_time) { - av_log(NULL, AV_LOG_DEBUG, - "Subtitle duration reduced from %"PRId32" to %d%s\n", - ist->prev_sub.subtitle.end_display_time, end, - end <= 0 ? 
", dropping it" : ""); - ist->prev_sub.subtitle.end_display_time = end; - } - } - FFSWAP(int, *got_output, ist->prev_sub.got_output); - FFSWAP(int, ret, ist->prev_sub.ret); - FFSWAP(AVSubtitle, *subtitle, ist->prev_sub.subtitle); - if (end <= 0) - goto out; - } - - if (!*got_output) - return ret; - - if (ist->sub2video.frame) { - sub2video_update(ist, INT64_MIN, subtitle); - } else if (ist->nb_filters) { - if (!ist->sub2video.sub_queue) - ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW); - if (!ist->sub2video.sub_queue) - report_and_exit(AVERROR(ENOMEM)); - - ret = av_fifo_write(ist->sub2video.sub_queue, subtitle, 1); - if (ret < 0) - exit_program(1); - free_sub = 0; - } - - if (!subtitle->num_rects) - goto out; - - for (int oidx = 0; oidx < ist->nb_outputs; oidx++) { - OutputStream *ost = ist->outputs[oidx]; - if (!ost->enc_ctx || ost->enc_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) - continue; - - enc_subtitle(output_files[ost->file_index], ost, subtitle); - } - -out: - if (free_sub) - avsubtitle_free(subtitle); - return ret; -} - -static int copy_av_subtitle(AVSubtitle *dst, AVSubtitle *src) -{ - int ret = AVERROR_BUG; - AVSubtitle tmp = { - .format = src->format, - .start_display_time = src->start_display_time, - .end_display_time = src->end_display_time, - .num_rects = 0, - .rects = NULL, - .pts = src->pts - }; - - if (!src->num_rects) - goto success; - - if (!(tmp.rects = av_calloc(src->num_rects, sizeof(*tmp.rects)))) - return AVERROR(ENOMEM); - - for (int i = 0; i < src->num_rects; i++) { - AVSubtitleRect *src_rect = src->rects[i]; - AVSubtitleRect *dst_rect; - - if (!(dst_rect = tmp.rects[i] = av_mallocz(sizeof(*tmp.rects[0])))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - - tmp.num_rects++; - - dst_rect->type = src_rect->type; - dst_rect->flags = src_rect->flags; - - dst_rect->x = src_rect->x; - dst_rect->y = src_rect->y; - dst_rect->w = src_rect->w; - dst_rect->h = src_rect->h; - dst_rect->nb_colors = src_rect->nb_colors; - - if (src_rect->text) - if (!(dst_rect->text = av_strdup(src_rect->text))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - - if (src_rect->ass) - if (!(dst_rect->ass = av_strdup(src_rect->ass))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - - for (int j = 0; j < 4; j++) { - // SUBTITLE_BITMAP images are special in the sense that they - // are like PAL8 images. first pointer to data, second to - // palette. This makes the size calculation match this. - size_t buf_size = src_rect->type == SUBTITLE_BITMAP && j == 1 ? 
- AVPALETTE_SIZE : - src_rect->h * src_rect->linesize[j]; - - if (!src_rect->data[j]) - continue; - - if (!(dst_rect->data[j] = av_memdup(src_rect->data[j], buf_size))) { - ret = AVERROR(ENOMEM); - goto cleanup; - } - dst_rect->linesize[j] = src_rect->linesize[j]; - } - } - -success: - *dst = tmp; - - return 0; - -cleanup: - avsubtitle_free(&tmp); - - return ret; -} - -static int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts) -{ - int ret = AVERROR_BUG; - int got_output = 1; - AVSubtitle *prev_subtitle = &ist->prev_sub.subtitle; - AVSubtitle subtitle; - - if (!ist->fix_sub_duration || !prev_subtitle->num_rects || - signal_pts <= prev_subtitle->pts) - return 0; - - if ((ret = copy_av_subtitle(&subtitle, prev_subtitle)) < 0) - return ret; - - subtitle.pts = signal_pts; - - return process_subtitle(ist, &subtitle, &got_output); -} - -int trigger_fix_sub_duration_heartbeat(OutputStream *ost, const AVPacket *pkt) -{ - OutputFile *of = output_files[ost->file_index]; - int64_t signal_pts = av_rescale_q(pkt->pts, pkt->time_base, - AV_TIME_BASE_Q); - - if (!ost->fix_sub_duration_heartbeat || !(pkt->flags & AV_PKT_FLAG_KEY)) - // we are only interested in heartbeats on streams configured, and - // only on random access points. - return 0; - - for (int i = 0; i < of->nb_streams; i++) { - OutputStream *iter_ost = of->streams[i]; - InputStream *ist = iter_ost->ist; - int ret = AVERROR_BUG; - - if (iter_ost == ost || !ist || !ist->decoding_needed || - ist->dec_ctx->codec_type != AVMEDIA_TYPE_SUBTITLE) - // We wish to skip the stream that causes the heartbeat, - // output streams without an input stream, streams not decoded - // (as fix_sub_duration is only done for decoded subtitles) as - // well as non-subtitle streams. - continue; - - if ((ret = fix_sub_duration_heartbeat(ist, signal_pts)) < 0) - return ret; - } - - return 0; -} - -static int transcode_subtitles(InputStream *ist, const AVPacket *pkt, - int *got_output, int *decode_failed) -{ - AVSubtitle subtitle; - int ret = avcodec_decode_subtitle2(ist->dec_ctx, - &subtitle, got_output, pkt); - - check_decode_result(NULL, got_output, ret); - - if (ret < 0 || !*got_output) { - *decode_failed = 1; - if (!pkt->size) - sub2video_flush(ist); - return ret; - } - - ist->frames_decoded++; - - return process_subtitle(ist, &subtitle, got_output); -} - -static int send_filter_eof(InputStream *ist) -{ - int i, ret; - /* TODO keep pts also in stream time base to avoid converting back */ - int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base, - AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); - - for (i = 0; i < ist->nb_filters; i++) { - ret = ifilter_send_eof(ist->filters[i], pts); - if (ret < 0) - return ret; - } - return 0; -} - -/* pkt = NULL means EOF (needed to flush decoder buffers) */ -static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof) -{ - InputFile *f = input_files[ist->file_index]; - const AVCodecParameters *par = ist->par; - int ret = 0; - int repeating = 0; - int eof_reached = 0; - int duration_exceeded; - - AVPacket *avpkt = ist->pkt; - - if (!ist->saw_first_ts) { - ist->first_dts = - ist->dts = ist->st->avg_frame_rate.num ? 
- ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0; - ist->pts = 0; - if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) { - ist->first_dts = - ist->dts += av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q); - } - ist->saw_first_ts = 1; - } - - if (ist->next_dts == AV_NOPTS_VALUE) - ist->next_dts = ist->dts; - if (ist->next_pts == AV_NOPTS_VALUE) - ist->next_pts = ist->pts; - - if (pkt) { - av_packet_unref(avpkt); - ret = av_packet_ref(avpkt, pkt); - if (ret < 0) - return ret; - } - - if (pkt && pkt->dts != AV_NOPTS_VALUE) { - ist->next_dts = ist->dts = av_rescale_q(pkt->dts, pkt->time_base, AV_TIME_BASE_Q); - if (par->codec_type != AVMEDIA_TYPE_VIDEO) - ist->pts = ist->dts; - } - - // while we have more to decode or while the decoder did output something on EOF - while (ist->decoding_needed) { - int64_t duration_dts = 0; - int64_t duration_pts = 0; - int got_output = 0; - int decode_failed = 0; - - ist->pts = ist->next_pts; - ist->dts = ist->next_dts; - - switch (par->codec_type) { - case AVMEDIA_TYPE_AUDIO: - ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output, - &decode_failed); - av_packet_unref(avpkt); - break; - case AVMEDIA_TYPE_VIDEO: - ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt, - &decode_failed); - if (!repeating || !pkt || got_output) { - if (pkt && pkt->duration) { - duration_dts = av_rescale_q(pkt->duration, pkt->time_base, AV_TIME_BASE_Q); - } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) { - int ticks = ist->last_pkt_repeat_pict >= 0 ? - ist->last_pkt_repeat_pict + 1 : - ist->dec_ctx->ticks_per_frame; - duration_dts = ((int64_t)AV_TIME_BASE * - ist->dec_ctx->framerate.den * ticks) / - ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; - } - - if(ist->dts != AV_NOPTS_VALUE && duration_dts) { - ist->next_dts += duration_dts; - }else - ist->next_dts = AV_NOPTS_VALUE; - } - - if (got_output) { - if (duration_pts > 0) { - ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q); - } else { - ist->next_pts += duration_dts; - } - } - av_packet_unref(avpkt); - break; - case AVMEDIA_TYPE_SUBTITLE: - if (repeating) - break; - ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed); - if (!pkt && ret >= 0) - ret = AVERROR_EOF; - av_packet_unref(avpkt); - break; - default: - return -1; - } - - if (ret == AVERROR_EOF) { - eof_reached = 1; - break; - } - - if (ret < 0) { - if (decode_failed) { - av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n", - ist->file_index, ist->st->index, av_err2str(ret)); - } else { - av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded " - "data for stream #%d:%d\n", ist->file_index, ist->st->index); - } - if (!decode_failed || exit_on_error) - exit_program(1); - break; - } - - if (got_output) - ist->got_output = 1; - - if (!got_output) - break; - - // During draining, we might get multiple output frames in this loop. - // ffmpeg.c does not drain the filter chain on configuration changes, - // which means if we send multiple frames at once to the filters, and - // one of those frames changes configuration, the buffered frames will - // be lost. This can upset certain FATE tests. - // Decode only 1 frame per call on EOF to appease these FATE tests. - // The ideal solution would be to rewrite decoding to use the new - // decoding API in a better way. 
- if (!pkt) - break; - - repeating = 1; - } - - /* after flushing, send an EOF on all the filter inputs attached to the stream */ - /* except when looping we need to flush but not to send an EOF */ - if (!pkt && ist->decoding_needed && eof_reached && !no_eof) { - int ret = send_filter_eof(ist); - if (ret < 0) { - av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n"); - exit_program(1); - } - } - - /* handle stream copy */ - if (!ist->decoding_needed && pkt) { - ist->dts = ist->next_dts; - switch (par->codec_type) { - case AVMEDIA_TYPE_AUDIO: - av_assert1(pkt->duration >= 0); - if (par->sample_rate) { - ist->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) / - par->sample_rate; - } else { - ist->next_dts += av_rescale_q(pkt->duration, pkt->time_base, AV_TIME_BASE_Q); - } - break; - case AVMEDIA_TYPE_VIDEO: - if (ist->framerate.num) { - // TODO: Remove work-around for c99-to-c89 issue 7 - AVRational time_base_q = AV_TIME_BASE_Q; - int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate)); - ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q); - } else if (pkt->duration) { - ist->next_dts += av_rescale_q(pkt->duration, pkt->time_base, AV_TIME_BASE_Q); - } else if(ist->dec_ctx->framerate.num != 0) { - int ticks = ist->last_pkt_repeat_pict >= 0 ? - ist->last_pkt_repeat_pict + 1 : - ist->dec_ctx->ticks_per_frame; - ist->next_dts += ((int64_t)AV_TIME_BASE * - ist->dec_ctx->framerate.den * ticks) / - ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame; - } - break; - } - } else if (!ist->decoding_needed) - eof_reached = 1; - - duration_exceeded = 0; - if (f->recording_time != INT64_MAX) { - int64_t start_time = 0; - if (copy_ts) { - start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0; - start_time += start_at_zero ? 0 : f->start_time_effective; - } - if (ist->dts >= f->recording_time + start_time) - duration_exceeded = 1; - } - - for (int oidx = 0; oidx < ist->nb_outputs; oidx++) { - OutputStream *ost = ist->outputs[oidx]; - if (ost->enc_ctx || (!pkt && no_eof)) - continue; - - if (duration_exceeded) { - close_output_stream(ost); - continue; - } - - of_streamcopy(ost, pkt, ist->dts); - } - - return !eof_reached; -} - -static int transcode_init(void) -{ - int ret = 0; - - /* init framerate emulation */ - for (int i = 0; i < nb_input_files; i++) { - InputFile *ifile = input_files[i]; - if (ifile->readrate || ifile->rate_emu) - for (int j = 0; j < ifile->nb_streams; j++) - ifile->streams[j]->start = av_gettime_relative(); - } - - /* discard unused programs */ - for (int i = 0; i < nb_input_files; i++) { - InputFile *ifile = input_files[i]; - for (int j = 0; j < ifile->ctx->nb_programs; j++) { - AVProgram *p = ifile->ctx->programs[j]; - int discard = AVDISCARD_ALL; - - for (int k = 0; k < p->nb_stream_indexes; k++) - if (!ifile->streams[p->stream_index[k]]->discard) { - discard = AVDISCARD_DEFAULT; - break; - } - p->discard = discard; - } - } - - /* dump the stream mapping */ - av_log(NULL, AV_LOG_INFO, "Stream mapping:\n"); - for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) { - for (int j = 0; j < ist->nb_filters; j++) { - if (!filtergraph_is_simple(ist->filters[j]->graph)) { - av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s", - ist->file_index, ist->st->index, ist->dec ? 
ist->dec->name : "?", - ist->filters[j]->name); - if (nb_filtergraphs > 1) - av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index); - av_log(NULL, AV_LOG_INFO, "\n"); - } - } - } - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (ost->attachment_filename) { - /* an attached file */ - av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n", - ost->attachment_filename, ost->file_index, ost->index); - continue; - } - - if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) { - /* output from a complex graph */ - av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name); - if (nb_filtergraphs > 1) - av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index); - - av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index, - ost->index, ost->enc_ctx->codec->name); - continue; - } - - av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d", - ost->ist->file_index, - ost->ist->st->index, - ost->file_index, - ost->index); - if (ost->enc_ctx) { - const AVCodec *in_codec = ost->ist->dec; - const AVCodec *out_codec = ost->enc_ctx->codec; - const char *decoder_name = "?"; - const char *in_codec_name = "?"; - const char *encoder_name = "?"; - const char *out_codec_name = "?"; - const AVCodecDescriptor *desc; - - if (in_codec) { - decoder_name = in_codec->name; - desc = avcodec_descriptor_get(in_codec->id); - if (desc) - in_codec_name = desc->name; - if (!strcmp(decoder_name, in_codec_name)) - decoder_name = "native"; - } - - if (out_codec) { - encoder_name = out_codec->name; - desc = avcodec_descriptor_get(out_codec->id); - if (desc) - out_codec_name = desc->name; - if (!strcmp(encoder_name, out_codec_name)) - encoder_name = "native"; - } - - av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))", - in_codec_name, decoder_name, - out_codec_name, encoder_name); - } else - av_log(NULL, AV_LOG_INFO, " (copy)"); - av_log(NULL, AV_LOG_INFO, "\n"); - } - - if (ret) - return ret; - - atomic_store(&transcode_init_done, 1); - - return 0; -} - -/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */ -static int need_output(void) -{ - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (ost->finished) - continue; - - return 1; - } - - return 0; -} - -/** - * Select the output stream to process. - * - * @return selected output stream, or NULL if none available - */ -static OutputStream *choose_output(void) -{ - int64_t opts_min = INT64_MAX; - OutputStream *ost_min = NULL; - - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - int64_t opts; - - if (ost->filter && ost->filter->last_pts != AV_NOPTS_VALUE) { - opts = ost->filter->last_pts; - } else { - opts = ost->last_mux_dts == AV_NOPTS_VALUE ? - INT64_MIN : ost->last_mux_dts; - if (ost->last_mux_dts == AV_NOPTS_VALUE) - av_log(ost, AV_LOG_DEBUG, - "cur_dts is invalid [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n", - ost->initialized, ost->inputs_done, ost->finished); - } - - if (!ost->initialized && !ost->inputs_done && !ost->finished) - return ost->unavailable ? NULL : ost; - - if (!ost->finished && opts < opts_min) { - opts_min = opts; - ost_min = ost->unavailable ? 
NULL : ost; - } - } - return ost_min; -} - -static void set_tty_echo(int on) -{ -#if HAVE_TERMIOS_H - struct termios tty; - if (tcgetattr(0, &tty) == 0) { - if (on) tty.c_lflag |= ECHO; - else tty.c_lflag &= ~ECHO; - tcsetattr(0, TCSANOW, &tty); - } -#endif -} - -static int check_keyboard_interaction(int64_t cur_time) -{ - int i, ret, key; - static int64_t last_time; - if (received_nb_signals) - return AVERROR_EXIT; - /* read_key() returns 0 on EOF */ - if (cur_time - last_time >= 100000) { - key = read_key(); - last_time = cur_time; - }else - key = -1; - if (key == 'q') { - av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n"); - return AVERROR_EXIT; - } - if (key == '+') av_log_set_level(av_log_get_level()+10); - if (key == '-') av_log_set_level(av_log_get_level()-10); - if (key == 'c' || key == 'C'){ - char buf[4096], target[64], command[256], arg[256] = {0}; - double time; - int k, n = 0; - fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n"); - i = 0; - set_tty_echo(1); - while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1) - if (k > 0) - buf[i++] = k; - buf[i] = 0; - set_tty_echo(0); - fprintf(stderr, "\n"); - if (k > 0 && - (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) { - av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s", - target, time, command, arg); - for (i = 0; i < nb_filtergraphs; i++) { - FilterGraph *fg = filtergraphs[i]; - if (fg->graph) { - if (time < 0) { - ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf), - key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0); - fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf); - } else if (key == 'c') { - fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n"); - ret = AVERROR_PATCHWELCOME; - } else { - ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time); - if (ret < 0) - fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret)); - } - } - } - } else { - av_log(NULL, AV_LOG_ERROR, - "Parse error, at least 3 arguments were expected, " - "only %d given in string '%s'\n", n, buf); - } - } - if (key == 'd' || key == 'D'){ - int debug=0; - if(key == 'D') { - InputStream *ist = ist_iter(NULL); - - if (ist) - debug = ist->dec_ctx->debug << 1; - - if(!debug) debug = 1; - while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash - debug += debug; - }else{ - char buf[32]; - int k = 0; - i = 0; - set_tty_echo(1); - while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1) - if (k > 0) - buf[i++] = k; - buf[i] = 0; - set_tty_echo(0); - fprintf(stderr, "\n"); - if (k <= 0 || sscanf(buf, "%d", &debug)!=1) - fprintf(stderr,"error parsing debug value\n"); - } - for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) - ist->dec_ctx->debug = debug; - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { - if (ost->enc_ctx) - ost->enc_ctx->debug = debug; - } - if(debug) av_log_set_level(AV_LOG_DEBUG); - fprintf(stderr,"debug=%d\n", debug); - } - if (key == '?'){ - fprintf(stderr, "key function\n" - "? 
show this help\n" - "+ increase verbosity\n" - "- decrease verbosity\n" - "c Send command to first matching filter supporting it\n" - "C Send/Queue command to all matching filters\n" - "D cycle through available debug modes\n" - "h dump packets/hex press to cycle through the 3 states\n" - "q quit\n" - "s Show QP histogram\n" - ); - } - return 0; -} - -static int got_eagain(void) -{ - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) - if (ost->unavailable) - return 1; - return 0; -} - -static void reset_eagain(void) -{ - int i; - for (i = 0; i < nb_input_files; i++) - input_files[i]->eagain = 0; - for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) - ost->unavailable = 0; -} - -static void decode_flush(InputFile *ifile) -{ - for (int i = 0; i < ifile->nb_streams; i++) { - InputStream *ist = ifile->streams[i]; - int ret; - - if (ist->discard) - continue; - - do { - ret = process_input_packet(ist, NULL, 1); - } while (ret > 0); - - if (ist->decoding_needed) { - /* report last frame duration to the demuxer thread */ - if (ist->par->codec_type == AVMEDIA_TYPE_AUDIO) { - LastFrameDuration dur; - - dur.stream_idx = i; - dur.duration = av_rescale_q(ist->nb_samples, - (AVRational){ 1, ist->dec_ctx->sample_rate}, - ist->st->time_base); - - av_thread_message_queue_send(ifile->audio_duration_queue, &dur, 0); - } - - avcodec_flush_buffers(ist->dec_ctx); - } - } -} - -static void ts_discontinuity_detect(InputFile *ifile, InputStream *ist, - AVPacket *pkt) -{ - const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT; - int disable_discontinuity_correction = copy_ts; - int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, pkt->time_base, AV_TIME_BASE_Q, - AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); - - if (copy_ts && ist->next_dts != AV_NOPTS_VALUE && - fmt_is_discont && ist->st->pts_wrap_bits < 60) { - int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits), - pkt->time_base, AV_TIME_BASE_Q, - AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); - if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10) - disable_discontinuity_correction = 0; - } - - if (ist->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) { - int64_t delta = pkt_dts - ist->next_dts; - if (fmt_is_discont) { - if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || - pkt_dts + AV_TIME_BASE/10 < ist->dts) { - ifile->ts_offset_discont -= delta; - av_log(NULL, AV_LOG_WARNING, - "timestamp discontinuity for stream #%d:%d " - "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n", - ist->file_index, ist->st->index, ist->st->id, - av_get_media_type_string(ist->par->codec_type), - delta, ifile->ts_offset_discont); - pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base); - if (pkt->pts != AV_NOPTS_VALUE) - pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base); - } - } else { - if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) { - av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index); - pkt->dts = AV_NOPTS_VALUE; - } - if (pkt->pts != AV_NOPTS_VALUE){ - int64_t pkt_pts = av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q); - delta = pkt_pts - ist->next_dts; - if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) { - av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index); - pkt->pts = AV_NOPTS_VALUE; - } - } - } - } else if (ist->next_dts == AV_NOPTS_VALUE && !copy_ts && 
- fmt_is_discont && ifile->last_ts != AV_NOPTS_VALUE) { - int64_t delta = pkt_dts - ifile->last_ts; - if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) { - ifile->ts_offset_discont -= delta; - av_log(NULL, AV_LOG_DEBUG, - "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", - delta, ifile->ts_offset_discont); - pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base); - if (pkt->pts != AV_NOPTS_VALUE) - pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base); - } - } - - ifile->last_ts = av_rescale_q(pkt->dts, pkt->time_base, AV_TIME_BASE_Q); -} - -static void ts_discontinuity_process(InputFile *ifile, InputStream *ist, - AVPacket *pkt) -{ - int64_t offset = av_rescale_q(ifile->ts_offset_discont, AV_TIME_BASE_Q, - pkt->time_base); - - // apply previously-detected timestamp-discontinuity offset - // (to all streams, not just audio/video) - if (pkt->dts != AV_NOPTS_VALUE) - pkt->dts += offset; - if (pkt->pts != AV_NOPTS_VALUE) - pkt->pts += offset; - - // detect timestamp discontinuities for audio/video - if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO || - ist->par->codec_type == AVMEDIA_TYPE_AUDIO) && - pkt->dts != AV_NOPTS_VALUE) - ts_discontinuity_detect(ifile, ist, pkt); -} - -/* - * Return - * - 0 -- one packet was read and processed - * - AVERROR(EAGAIN) -- no packets were available for selected file, - * this function should be called again - * - AVERROR_EOF -- this function should not be called again - */ -static int process_input(int file_index) -{ - InputFile *ifile = input_files[file_index]; - AVFormatContext *is; - InputStream *ist; - AVPacket *pkt; - int ret, i; - - is = ifile->ctx; - ret = ifile_get_packet(ifile, &pkt); - - if (ret == AVERROR(EAGAIN)) { - ifile->eagain = 1; - return ret; - } - if (ret == 1) { - /* the input file is looped: flush the decoders */ - decode_flush(ifile); - return AVERROR(EAGAIN); - } - if (ret < 0) { - if (ret != AVERROR_EOF) { - print_error(is->url, ret); - if (exit_on_error) - exit_program(1); - } - - for (i = 0; i < ifile->nb_streams; i++) { - ist = ifile->streams[i]; - if (!ist->discard) { - ret = process_input_packet(ist, NULL, 0); - if (ret>0) - return 0; - } - - /* mark all outputs that don't go through lavfi as finished */ - for (int oidx = 0; oidx < ist->nb_outputs; oidx++) { - OutputStream *ost = ist->outputs[oidx]; - OutputFile *of = output_files[ost->file_index]; - close_output_stream(ost); - of_output_packet(of, ost->pkt, ost, 1); - } - } - - ifile->eof_reached = 1; - return AVERROR(EAGAIN); - } - - reset_eagain(); - - ist = ifile->streams[pkt->stream_index]; - - ist->data_size += pkt->size; - ist->nb_packets++; - - if (ist->discard) - goto discard_packet; - - /* add the stream-global side data to the first packet */ - if (ist->nb_packets == 1) { - for (i = 0; i < ist->st->nb_side_data; i++) { - AVPacketSideData *src_sd = &ist->st->side_data[i]; - uint8_t *dst_data; - - if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX) - continue; - - if (av_packet_get_side_data(pkt, src_sd->type, NULL)) - continue; - - dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size); - if (!dst_data) - report_and_exit(AVERROR(ENOMEM)); - - memcpy(dst_data, src_sd->data, src_sd->size); - } - } - - // detect and try to correct for timestamp discontinuities - ts_discontinuity_process(ifile, ist, pkt); - - if (debug_ts) { - av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n", - 
ifile->index, pkt->stream_index, - av_get_media_type_string(ist->par->codec_type), - av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &pkt->time_base), - av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &pkt->time_base), - av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &pkt->time_base), - av_ts2str(input_files[ist->file_index]->ts_offset), - av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q)); - } - - sub2video_heartbeat(ist, pkt->pts); - - process_input_packet(ist, pkt, 0); - -discard_packet: - av_packet_free(&pkt); - - return 0; -} - -/** - * Run a single step of transcoding. - * - * @return 0 for success, <0 for error - */ -static int transcode_step(void) -{ - OutputStream *ost; - InputStream *ist = NULL; - int ret; - - ost = choose_output(); - if (!ost) { - if (got_eagain()) { - reset_eagain(); - av_usleep(10000); - return 0; - } - av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n"); - return AVERROR_EOF; - } - - if (ost->filter && !ost->filter->graph->graph) { - if (ifilter_has_all_input_formats(ost->filter->graph)) { - ret = configure_filtergraph(ost->filter->graph); - if (ret < 0) { - av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n"); - return ret; - } - } - } - - if (ost->filter && ost->filter->graph->graph) { - if ((ret = fg_transcode_step(ost->filter->graph, &ist)) < 0) - return ret; - if (!ist) - return 0; - } else if (ost->filter) { - int i; - for (i = 0; i < ost->filter->graph->nb_inputs; i++) { - InputFilter *ifilter = ost->filter->graph->inputs[i]; - if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) { - ist = ifilter->ist; - break; - } - } - if (!ist) { - ost->inputs_done = 1; - return 0; - } - } else { - ist = ost->ist; - av_assert0(ist); - } - - ret = process_input(ist->file_index); - if (ret == AVERROR(EAGAIN)) { - if (input_files[ist->file_index]->eagain) - ost->unavailable = 1; - return 0; - } - - if (ret < 0) - return ret == AVERROR_EOF ? 0 : ret; - - return reap_filters(0); -} - -/* - * The following code is the main loop of the file converter - */ -static int transcode(void) -{ - int ret, i; - InputStream *ist; - int64_t timer_start; - - ret = transcode_init(); - if (ret < 0) - return ret; - - if (stdin_interaction) { - av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] 
for help\n"); - } - - timer_start = av_gettime_relative(); - - while (!received_sigterm) { - int64_t cur_time= av_gettime_relative(); - - /* if 'q' pressed, exits */ - if (stdin_interaction) - if (check_keyboard_interaction(cur_time) < 0) - break; - - /* check if there's any stream where output is still needed */ - if (!need_output()) { - av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n"); - break; - } - - ret = transcode_step(); - if (ret < 0 && ret != AVERROR_EOF) { - av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret)); - break; - } - - /* dump report by using the output first video and audio streams */ - print_report(0, timer_start, cur_time); - } - - /* at the end of stream, we must flush the decoder buffers */ - for (ist = ist_iter(NULL); ist; ist = ist_iter(ist)) { - if (!input_files[ist->file_index]->eof_reached) { - process_input_packet(ist, NULL, 0); - } - } - enc_flush(); - - term_exit(); - - /* write the trailer if needed */ - for (i = 0; i < nb_output_files; i++) { - int err = of_write_trailer(output_files[i]); - ret = err_merge(ret, err); - } - - /* dump report by using the first video and audio streams */ - print_report(1, timer_start, av_gettime_relative()); - - return ret; -} - -static BenchmarkTimeStamps get_benchmark_time_stamps(void) -{ - BenchmarkTimeStamps time_stamps = { av_gettime_relative() }; -#if HAVE_GETRUSAGE - struct rusage rusage; - - getrusage(RUSAGE_SELF, &rusage); - time_stamps.user_usec = - (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec; - time_stamps.sys_usec = - (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec; -#elif HAVE_GETPROCESSTIMES - HANDLE proc; - FILETIME c, e, k, u; - proc = GetCurrentProcess(); - GetProcessTimes(proc, &c, &e, &k, &u); - time_stamps.user_usec = - ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10; - time_stamps.sys_usec = - ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10; -#else - time_stamps.user_usec = time_stamps.sys_usec = 0; -#endif - return time_stamps; -} - -static int64_t getmaxrss(void) -{ -#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS - struct rusage rusage; - getrusage(RUSAGE_SELF, &rusage); - return (int64_t)rusage.ru_maxrss * 1024; -#elif HAVE_GETPROCESSMEMORYINFO - HANDLE proc; - PROCESS_MEMORY_COUNTERS memcounters; - proc = GetCurrentProcess(); - memcounters.cb = sizeof(memcounters); - GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters)); - return memcounters.PeakPagefileUsage; -#else - return 0; -#endif -} - -int main(int argc, char **argv) -{ - int ret; - BenchmarkTimeStamps ti; - - init_dynload(); - - register_exit(ffmpeg_cleanup); - - setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */ - - av_log_set_flags(AV_LOG_SKIP_REPEATED); - parse_loglevel(argc, argv, options); - -#if CONFIG_AVDEVICE - avdevice_register_all(); -#endif - avformat_network_init(); - - show_banner(argc, argv, options); - - /* parse options and open all input/output files */ - ret = ffmpeg_parse_options(argc, argv); - if (ret < 0) - exit_program(1); - - if (nb_output_files <= 0 && nb_input_files == 0) { - show_usage(); - av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name); - exit_program(1); - } - - /* file converter / grab */ - if (nb_output_files <= 0) { - av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n"); - exit_program(1); - } - - current_time = ti = get_benchmark_time_stamps(); - ret = transcode(); - if (ret >= 0 && do_benchmark) { - int64_t 
utime, stime, rtime; - current_time = get_benchmark_time_stamps(); - utime = current_time.user_usec - ti.user_usec; - stime = current_time.sys_usec - ti.sys_usec; - rtime = current_time.real_usec - ti.real_usec; - av_log(NULL, AV_LOG_INFO, - "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n", - utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0); - } - av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n", - decode_error_stat[0], decode_error_stat[1]); - if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1]) - exit_program(69); - - ret = received_nb_signals ? 255 : ret; - exit_program(ret); - return ret; -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacsbr.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacsbr.c deleted file mode 100644 index 47aa6cb3c1f4d5cfd3e8fd0481c127ac79afa24a..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/aacsbr.c +++ /dev/null @@ -1,369 +0,0 @@ -/* - * AAC Spectral Band Replication decoding functions - * Copyright (c) 2008-2009 Robert Swain ( rob opendot cl ) - * Copyright (c) 2009-2010 Alex Converse <alex.converse@gmail.com> - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * AAC Spectral Band Replication decoding functions - * @author Robert Swain ( rob opendot cl ) - */ -#define USE_FIXED 0 - -#include "aac.h" -#include "sbr.h" -#include "aacsbr.h" -#include "aacsbrdata.h" -#include "internal.h" -#include "aacps.h" -#include "sbrdsp.h" -#include "libavutil/internal.h" -#include "libavutil/libm.h" -#include "libavutil/avassert.h" -#include "libavutil/mem_internal.h" - -#include <stdint.h> -#include <float.h> -#include <math.h> - -#if ARCH_MIPS -#include "mips/aacsbr_mips.h" -#endif /* ARCH_MIPS */ - -static VLC vlc_sbr[10]; -static void aacsbr_func_ptr_init(AACSBRContext *c); - -static void make_bands(int16_t* bands, int start, int stop, int num_bands) -{ - int k, previous, present; - float base, prod; - - base = powf((float)stop / start, 1.0f / num_bands); - prod = start; - previous = start; - - for (k = 0; k < num_bands-1; k++) { - prod *= base; - present = lrintf(prod); - bands[k] = present - previous; - previous = present; - } - bands[num_bands-1] = stop - previous; -} - -/// Dequantization and stereo decoding (14496-3 sp04 p203) -static void sbr_dequant(SpectralBandReplication *sbr, int id_aac) -{ - int k, e; - int ch; - static const double exp2_tab[2] = {1, M_SQRT2}; - if (id_aac == TYPE_CPE && sbr->bs_coupling) { - int pan_offset = sbr->data[0].bs_amp_res ? 
12 : 24; - for (e = 1; e <= sbr->data[0].bs_num_env; e++) { - for (k = 0; k < sbr->n[sbr->data[0].bs_freq_res[e]]; k++) { - float temp1, temp2, fac; - if (sbr->data[0].bs_amp_res) { - temp1 = ff_exp2fi(sbr->data[0].env_facs_q[e][k] + 7); - temp2 = ff_exp2fi(pan_offset - sbr->data[1].env_facs_q[e][k]); - } - else { - temp1 = ff_exp2fi((sbr->data[0].env_facs_q[e][k]>>1) + 7) * - exp2_tab[sbr->data[0].env_facs_q[e][k] & 1]; - temp2 = ff_exp2fi((pan_offset - sbr->data[1].env_facs_q[e][k])>>1) * - exp2_tab[(pan_offset - sbr->data[1].env_facs_q[e][k]) & 1]; - } - if (temp1 > 1E20) { - av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n"); - temp1 = 1; - } - fac = temp1 / (1.0f + temp2); - sbr->data[0].env_facs[e][k] = fac; - sbr->data[1].env_facs[e][k] = fac * temp2; - } - } - for (e = 1; e <= sbr->data[0].bs_num_noise; e++) { - for (k = 0; k < sbr->n_q; k++) { - float temp1 = ff_exp2fi(NOISE_FLOOR_OFFSET - sbr->data[0].noise_facs_q[e][k] + 1); - float temp2 = ff_exp2fi(12 - sbr->data[1].noise_facs_q[e][k]); - float fac; - av_assert0(temp1 <= 1E20); - fac = temp1 / (1.0f + temp2); - sbr->data[0].noise_facs[e][k] = fac; - sbr->data[1].noise_facs[e][k] = fac * temp2; - } - } - } else { // SCE or one non-coupled CPE - for (ch = 0; ch < (id_aac == TYPE_CPE) + 1; ch++) { - for (e = 1; e <= sbr->data[ch].bs_num_env; e++) - for (k = 0; k < sbr->n[sbr->data[ch].bs_freq_res[e]]; k++){ - if (sbr->data[ch].bs_amp_res) - sbr->data[ch].env_facs[e][k] = ff_exp2fi(sbr->data[ch].env_facs_q[e][k] + 6); - else - sbr->data[ch].env_facs[e][k] = ff_exp2fi((sbr->data[ch].env_facs_q[e][k]>>1) + 6) - * exp2_tab[sbr->data[ch].env_facs_q[e][k] & 1]; - if (sbr->data[ch].env_facs[e][k] > 1E20) { - av_log(NULL, AV_LOG_ERROR, "envelope scalefactor overflow in dequant\n"); - sbr->data[ch].env_facs[e][k] = 1; - } - } - - for (e = 1; e <= sbr->data[ch].bs_num_noise; e++) - for (k = 0; k < sbr->n_q; k++) - sbr->data[ch].noise_facs[e][k] = - ff_exp2fi(NOISE_FLOOR_OFFSET - sbr->data[ch].noise_facs_q[e][k]); - } - } -} - -/** High Frequency Generation (14496-3 sp04 p214+) and Inverse Filtering - * (14496-3 sp04 p214) - * Warning: This routine does not seem numerically stable. 
- */ -static void sbr_hf_inverse_filter(SBRDSPContext *dsp, - float (*alpha0)[2], float (*alpha1)[2], - const float X_low[32][40][2], int k0) -{ - int k; - for (k = 0; k < k0; k++) { - LOCAL_ALIGNED_16(float, phi, [3], [2][2]); - float dk; - - dsp->autocorrelate(X_low[k], phi); - - dk = phi[2][1][0] * phi[1][0][0] - - (phi[1][1][0] * phi[1][1][0] + phi[1][1][1] * phi[1][1][1]) / 1.000001f; - - if (!dk) { - alpha1[k][0] = 0; - alpha1[k][1] = 0; - } else { - float temp_real, temp_im; - temp_real = phi[0][0][0] * phi[1][1][0] - - phi[0][0][1] * phi[1][1][1] - - phi[0][1][0] * phi[1][0][0]; - temp_im = phi[0][0][0] * phi[1][1][1] + - phi[0][0][1] * phi[1][1][0] - - phi[0][1][1] * phi[1][0][0]; - - alpha1[k][0] = temp_real / dk; - alpha1[k][1] = temp_im / dk; - } - - if (!phi[1][0][0]) { - alpha0[k][0] = 0; - alpha0[k][1] = 0; - } else { - float temp_real, temp_im; - temp_real = phi[0][0][0] + alpha1[k][0] * phi[1][1][0] + - alpha1[k][1] * phi[1][1][1]; - temp_im = phi[0][0][1] + alpha1[k][1] * phi[1][1][0] - - alpha1[k][0] * phi[1][1][1]; - - alpha0[k][0] = -temp_real / phi[1][0][0]; - alpha0[k][1] = -temp_im / phi[1][0][0]; - } - - if (alpha1[k][0] * alpha1[k][0] + alpha1[k][1] * alpha1[k][1] >= 16.0f || - alpha0[k][0] * alpha0[k][0] + alpha0[k][1] * alpha0[k][1] >= 16.0f) { - alpha1[k][0] = 0; - alpha1[k][1] = 0; - alpha0[k][0] = 0; - alpha0[k][1] = 0; - } - } -} - -/// Chirp Factors (14496-3 sp04 p214) -static void sbr_chirp(SpectralBandReplication *sbr, SBRData *ch_data) -{ - int i; - float new_bw; - static const float bw_tab[] = { 0.0f, 0.75f, 0.9f, 0.98f }; - - for (i = 0; i < sbr->n_q; i++) { - if (ch_data->bs_invf_mode[0][i] + ch_data->bs_invf_mode[1][i] == 1) { - new_bw = 0.6f; - } else - new_bw = bw_tab[ch_data->bs_invf_mode[0][i]]; - - if (new_bw < ch_data->bw_array[i]) { - new_bw = 0.75f * new_bw + 0.25f * ch_data->bw_array[i]; - } else - new_bw = 0.90625f * new_bw + 0.09375f * ch_data->bw_array[i]; - ch_data->bw_array[i] = new_bw < 0.015625f ? 
0.0f : new_bw; - } -} - -/** - * Calculation of levels of additional HF signal components (14496-3 sp04 p219) - * and Calculation of gain (14496-3 sp04 p219) - */ -static void sbr_gain_calc(AACContext *ac, SpectralBandReplication *sbr, - SBRData *ch_data, const int e_a[2]) -{ - int e, k, m; - // max gain limits : -3dB, 0dB, 3dB, inf dB (limiter off) - static const float limgain[4] = { 0.70795, 1.0, 1.41254, 10000000000 }; - - for (e = 0; e < ch_data->bs_num_env; e++) { - int delta = !((e == e_a[1]) || (e == e_a[0])); - for (k = 0; k < sbr->n_lim; k++) { - float gain_boost, gain_max; - float sum[2] = { 0.0f, 0.0f }; - for (m = sbr->f_tablelim[k] - sbr->kx[1]; m < sbr->f_tablelim[k + 1] - sbr->kx[1]; m++) { - const float temp = sbr->e_origmapped[e][m] / (1.0f + sbr->q_mapped[e][m]); - sbr->q_m[e][m] = sqrtf(temp * sbr->q_mapped[e][m]); - sbr->s_m[e][m] = sqrtf(temp * ch_data->s_indexmapped[e + 1][m]); - if (!sbr->s_mapped[e][m]) { - sbr->gain[e][m] = sqrtf(sbr->e_origmapped[e][m] / - ((1.0f + sbr->e_curr[e][m]) * - (1.0f + sbr->q_mapped[e][m] * delta))); - } else { - sbr->gain[e][m] = sqrtf(sbr->e_origmapped[e][m] * sbr->q_mapped[e][m] / - ((1.0f + sbr->e_curr[e][m]) * - (1.0f + sbr->q_mapped[e][m]))); - } - sbr->gain[e][m] += FLT_MIN; - } - for (m = sbr->f_tablelim[k] - sbr->kx[1]; m < sbr->f_tablelim[k + 1] - sbr->kx[1]; m++) { - sum[0] += sbr->e_origmapped[e][m]; - sum[1] += sbr->e_curr[e][m]; - } - gain_max = limgain[sbr->bs_limiter_gains] * sqrtf((FLT_EPSILON + sum[0]) / (FLT_EPSILON + sum[1])); - gain_max = FFMIN(100000.f, gain_max); - for (m = sbr->f_tablelim[k] - sbr->kx[1]; m < sbr->f_tablelim[k + 1] - sbr->kx[1]; m++) { - float q_m_max = sbr->q_m[e][m] * gain_max / sbr->gain[e][m]; - sbr->q_m[e][m] = FFMIN(sbr->q_m[e][m], q_m_max); - sbr->gain[e][m] = FFMIN(sbr->gain[e][m], gain_max); - } - sum[0] = sum[1] = 0.0f; - for (m = sbr->f_tablelim[k] - sbr->kx[1]; m < sbr->f_tablelim[k + 1] - sbr->kx[1]; m++) { - sum[0] += sbr->e_origmapped[e][m]; - sum[1] += sbr->e_curr[e][m] * sbr->gain[e][m] * sbr->gain[e][m] - + sbr->s_m[e][m] * sbr->s_m[e][m] - + (delta && !sbr->s_m[e][m]) * sbr->q_m[e][m] * sbr->q_m[e][m]; - } - gain_boost = sqrtf((FLT_EPSILON + sum[0]) / (FLT_EPSILON + sum[1])); - gain_boost = FFMIN(1.584893192f, gain_boost); - for (m = sbr->f_tablelim[k] - sbr->kx[1]; m < sbr->f_tablelim[k + 1] - sbr->kx[1]; m++) { - sbr->gain[e][m] *= gain_boost; - sbr->q_m[e][m] *= gain_boost; - sbr->s_m[e][m] *= gain_boost; - } - } - } -} - -/// Assembling HF Signals (14496-3 sp04 p220) -static void sbr_hf_assemble(float Y1[38][64][2], - const float X_high[64][40][2], - SpectralBandReplication *sbr, SBRData *ch_data, - const int e_a[2]) -{ - int e, i, j, m; - const int h_SL = 4 * !sbr->bs_smoothing_mode; - const int kx = sbr->kx[1]; - const int m_max = sbr->m[1]; - static const float h_smooth[5] = { - 0.33333333333333, - 0.30150283239582, - 0.21816949906249, - 0.11516383427084, - 0.03183050093751, - }; - float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp; - int indexnoise = ch_data->f_indexnoise; - int indexsine = ch_data->f_indexsine; - - if (sbr->reset) { - for (i = 0; i < h_SL; i++) { - memcpy(g_temp[i + 2*ch_data->t_env[0]], sbr->gain[0], m_max * sizeof(sbr->gain[0][0])); - memcpy(q_temp[i + 2*ch_data->t_env[0]], sbr->q_m[0], m_max * sizeof(sbr->q_m[0][0])); - } - } else if (h_SL) { - for (i = 0; i < 4; i++) { - memcpy(g_temp[i + 2 * ch_data->t_env[0]], - g_temp[i + 2 * ch_data->t_env_num_env_old], - sizeof(g_temp[0])); - memcpy(q_temp[i + 2 * ch_data->t_env[0]], - 
q_temp[i + 2 * ch_data->t_env_num_env_old], - sizeof(q_temp[0])); - } - } - - for (e = 0; e < ch_data->bs_num_env; e++) { - for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) { - memcpy(g_temp[h_SL + i], sbr->gain[e], m_max * sizeof(sbr->gain[0][0])); - memcpy(q_temp[h_SL + i], sbr->q_m[e], m_max * sizeof(sbr->q_m[0][0])); - } - } - - for (e = 0; e < ch_data->bs_num_env; e++) { - for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) { - LOCAL_ALIGNED_16(float, g_filt_tab, [48]); - LOCAL_ALIGNED_16(float, q_filt_tab, [48]); - float *g_filt, *q_filt; - - if (h_SL && e != e_a[0] && e != e_a[1]) { - g_filt = g_filt_tab; - q_filt = q_filt_tab; - for (m = 0; m < m_max; m++) { - const int idx1 = i + h_SL; - g_filt[m] = 0.0f; - q_filt[m] = 0.0f; - for (j = 0; j <= h_SL; j++) { - g_filt[m] += g_temp[idx1 - j][m] * h_smooth[j]; - q_filt[m] += q_temp[idx1 - j][m] * h_smooth[j]; - } - } - } else { - g_filt = g_temp[i + h_SL]; - q_filt = q_temp[i]; - } - - sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max, - i + ENVELOPE_ADJUSTMENT_OFFSET); - - if (e != e_a[0] && e != e_a[1]) { - sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e], - q_filt, indexnoise, - kx, m_max); - } else { - int idx = indexsine&1; - int A = (1-((indexsine+(kx & 1))&2)); - int B = (A^(-idx)) + idx; - float *out = &Y1[i][kx][idx]; - float *in = sbr->s_m[e]; - for (m = 0; m+1 < m_max; m+=2) { - out[2*m ] += in[m ] * A; - out[2*m+2] += in[m+1] * B; - } - if(m_max&1) - out[2*m ] += in[m ] * A; - } - indexnoise = (indexnoise + m_max) & 0x1ff; - indexsine = (indexsine + 1) & 3; - } - } - ch_data->f_indexnoise = indexnoise; - ch_data->f_indexsine = indexsine; -} - -#include "aacsbr_template.c" diff --git a/spaces/congsaPfin/Manga-OCR/logs/GTA 5 PS3 Emulator Gameplay Watch How RPCS3 Runs Grand Theft Auto V on PC.md b/spaces/congsaPfin/Manga-OCR/logs/GTA 5 PS3 Emulator Gameplay Watch How RPCS3 Runs Grand Theft Auto V on PC.md deleted file mode 100644 index 38833a27adeed722f62cafdd59a4be0fd0588dc1..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/GTA 5 PS3 Emulator Gameplay Watch How RPCS3 Runs Grand Theft Auto V on PC.md +++ /dev/null @@ -1,167 +0,0 @@ - -<h1>How to Download GTA 5 on PS3 Emulator</h1> -<p>Grand Theft Auto V, or GTA 5, is one of the most popular and successful video games of all time. It is an open-world action-adventure game that lets you explore a vast and diverse city, engage in various missions and activities, and experience a thrilling story with three different protagonists. GTA 5 was originally released for PlayStation 3 and Xbox 360 in 2013, and later for PlayStation 4, Xbox One, and PC in 2014 and 2015.</p> -<h2>gta 5 download ps3 emulator</h2><br /><p><b><b>Download Zip</b> ○ <a href="https://urlca.com/2uObOW">https://urlca.com/2uObOW</a></b></p><br /><br /> -<p>But what if you want to play GTA 5 on your PC, but you don't have a powerful enough system to run it smoothly? Or what if you want to relive the nostalgia of playing GTA 5 on your old PS3 console, but you don't have it anymore? Or what if you just want to try something new and different with GTA 5?</p> -<p>The answer is simple: you can download GTA 5 on PS3 emulator. A PS3 emulator is a software that allows you to run PS3 games on your PC, by emulating the hardware and software of the PS3 console. 
This way, you can enjoy GTA 5 on your PC with the same graphics, gameplay, and features as the original PS3 version.</p> -<p>In this article, we will show you how to download GTA 5 on PS3 emulator, step by step. We will also give you some tips and tricks for playing GTA 5 on PS3 emulator, such as how to fix common issues and errors, how to improve graphics and framerate, and how to use cheats and mods. By the end of this article, you will be able to play GTA 5 on PS3 emulator like a pro.</p> - <h2>Introduction</h2> -<h3>What is GTA 5?</h3> -<p>GTA 5 is the fifth main installment in the Grand Theft Auto series, developed by Rockstar North and published by Rockstar Games. It is set in the fictional state of San Andreas, which is based on Southern California. The game follows the lives of three criminals: Michael De Santa, a retired bank robber; Franklin Clinton, a street hustler; and Trevor Philips, a psychopathic drug dealer. The game allows you to switch between these three characters at any time and experience the story from their different perspectives.</p> -<p>GTA 5 is praised for its stunning graphics, realistic physics, immersive gameplay, rich content, and humorous satire. It has won numerous awards and accolades, and has sold over 150 million copies worldwide. It is widely considered one of the best games ever made.</p> - <h3>What is PS3 Emulator?</h3> -<p>A PS3 emulator is software that mimics the functionality of the PlayStation 3 console on your PC. It allows you to run PS3 games on your PC by translating the PS3's code into code your PC can execute. A PS3 emulator requires a powerful PC to run smoothly, as it has to emulate both the hardware and the software of the PS3 console.</p> -<p>There are several PS3 emulators available online, but the most popular and reliable one is RPCS3. RPCS3 is an open-source project that has been in development since 2011. It supports over 6000 PS3 games, including GTA 5. It also has many features that enhance the gaming experience, such as custom resolution, anti-aliasing, anisotropic filtering, frame limit, save states, trophies, controller support, online multiplayer, and more.</p> - <h3>Why play GTA 5 on PS3 Emulator?</h3> -<p>There are many reasons why you might want to play GTA 5 on PS3 emulator. 
Here are some of them:</p> -<p>gta 5 ps3 emulator pc download<br /> -gta 5 rpcs3 download iso<br /> -gta 5 ps3 emulator android apk<br /> -gta 5 ps3 emulator gameplay<br /> -gta 5 rpcs3 settings 2023<br /> -gta 5 ps3 emulator for windows 10<br /> -gta 5 rpcs3 download highly compressed<br /> -gta 5 ps3 emulator system requirements<br /> -gta 5 rpcs3 best build<br /> -gta 5 ps3 emulator online play<br /> -gta 5 rpcs3 download reddit<br /> -gta 5 ps3 emulator free download full version<br /> -gta 5 rpcs3 compatibility list<br /> -gta 5 ps3 emulator no survey no password<br /> -gta 5 rpcs3 patch download<br /> -gta 5 ps3 emulator mac download<br /> -gta 5 rpcs3 install guide<br /> -gta 5 ps3 emulator bios download<br /> -gta 5 rpcs3 cheats codes<br /> -gta 5 ps3 emulator linux download<br /> -gta 5 rpcs3 save data download<br /> -gta 5 ps3 emulator update download<br /> -gta 5 rpcs3 mods download<br /> -gta 5 ps3 emulator rar file download<br /> -gta 5 rpcs3 controller setup<br /> -gta 5 ps3 emulator direct download link<br /> -gta 5 rpcs3 performance tips<br /> -gta 5 ps3 emulator youtube video<br /> -gta 5 rpcs3 error fix<br /> -gta 5 ps3 emulator zip file download<br /> -gta 5 rpcs3 configuration file download<br /> -gta 5 ps3 emulator without verification download<br /> -gta 5 rpcs3 dlc download<br /> -gta 5 ps3 emulator google drive download link<br /> -gta 5 rpcs3 graphics settings<br /> -gta 5 ps3 emulator torrent download kickass<br /> -gta 5 rpcs3 log file location<br /> -gta 5 ps3 emulator license key download<br /> -gta 5 rpcs3 multiplayer mode<br /> -gta 5 ps3 emulator crack file download<br /> -gta 5 rpcs3 vulkan or opengl<br /> -gta 5 ps3 emulator reddit review<br /> -gta 5 rpcs3 keyboard and mouse setup<br /> -gta 5 ps3 emulator blogspot download link<br /> -gta 5 rpcs3 audio settings<br /> -gta 5 ps3 emulator how to play on pc tutorial</p> -<ul> -<li>You can play GTA 5 on your PC, even if you don't have a powerful enough system to run the PC version.</li> -<li>You can play GTA 5 on your PC, even if you don't have a copy of the PC version.</li> -<li>You can play GTA 5 on your PC, with the same graphics, gameplay, and features as the original PS3 version.</li> -<li>You can play GTA 5 on your PC, with some additional features and enhancements that the PS3 emulator provides, such as custom resolution, anti-aliasing, anisotropic filtering, frame limit, save states, trophies, controller support, online multiplayer, and more.</li> -<li>You can play GTA 5 on your PC, with some cheats and mods that are compatible with the PS3 emulator, such as infinite money, god mode, flying cars, super jump, and more.</li> -<li>You can play GTA 5 on your PC, with a nostalgic feeling of playing it on your old PS3 console.</li> -<li>You can play GTA 5 on your PC, with a new and different experience of playing it on a different platform.</li> -</ul> -<p>As you can see, playing GTA 5 on PS3 emulator has many benefits and advantages. It is a fun and exciting way to enjoy one of the best games ever made.</p> - <h2>How to download GTA 5 on PS3 Emulator</h2> -<p>Now that you know what GTA 5 and PS3 emulator are, and why you should play GTA 5 on PS3 emulator, let's get to the main part of this article: how to download GTA 5 on PS3 emulator. The process is not very complicated, but it does require some steps and preparations. 
Here is a step-by-step guide for downloading GTA 5 on PS3 emulator:</p> - <h3>Step 1: Get a legal copy of GTA 5 for PS3</h3> -<p>The first thing you need to do is to get a legal copy of GTA 5 for PS3. This is very important, as downloading or using pirated or illegal copies of GTA 5 is not only unethical, but also illegal and risky. You could face legal consequences or get infected with malware or viruses if you use pirated or illegal copies of GTA 5.</p> -<p>There are two ways to get a legal copy of GTA 5 for PS3:</p> -<ul> -<li>You can buy a physical disc of GTA 5 for PS3 from any online or offline store that sells video games. This is the easiest and most convenient way to get a legal copy of GTA 5 for PS3. You just need to insert the disc into your PC's DVD drive and copy the game files to your hard drive.</li> -<li>You can buy a digital download of GTA 5 for PS3 from the PlayStation Store. This is the cheaper and faster way to get a legal copy of GTA 5 for PS3. You just need to create a PlayStation Network account, log in to the PlayStation Store, search for GTA 5 for PS3, and purchase it. You will then receive a download link and a code that you can use to download the game files to your PC.</li> -</ul> -<p>Either way, you will need to have enough space on your hard drive to store the game files. The size of GTA 5 for PS3 is about 18 GB, so make sure you have at least that much free space on your hard drive before downloading or copying the game files.</p> - <h3>Step 2: Download and install RPCS3, the best PS3 emulator for PC</h3> -<p>The next thing you need to do is to download and install RPCS3, the best PS3 emulator for PC. As we mentioned before, RPCS3 is an open-source project that has been in development since 2011. It supports over 6000 PS3 games, including GTA 5. It also has many features that enhance the gaming experience, such as custom resolution, anti-aliasing, anisotropic filtering, frame limit, save states, trophies, controller support, online multiplayer, and more.</p> -<p>Downloading and installing RPCS3 is very easy and straightforward. Here are the steps you need to follow:</p> -<ul> -<li>Go to the official website of RPCS3 at <a href="">https://rpcs3.net/</a> and click on the Download button.</li> -<li>Choose the version of RPCS3 that is compatible with your operating system (Windows, Linux, or BSD) and download the zip file.</li> -<li>Extract the zip file to a folder of your choice on your hard drive.</li> -<li>Run the rpcs3.exe file to launch the RPCS3 emulator.</li> -</ul> -<p>Congratulations, you have successfully downloaded and installed RPCS3, the best PS3 emulator for PC.</p> - <h3>Step 3: Configure RPCS3 settings for optimal performance and compatibility</h3> -<p>The next thing you need to do is to configure RPCS3 settings for optimal performance and compatibility. This is very important, as different games may require different settings to run smoothly and without errors on RPCS3. You also need to adjust the settings according to your PC's specifications and preferences.</p> -<p>There are many settings that you can tweak and customize on RPCS3, but we will focus on the most essential ones for playing GTA 5 on PS3 emulator. Here are the steps you need to follow:</p> -<ul> -<li>Open the RPCS3 emulator and click on File > Install Firmware. Browse to the folder where you downloaded or copied the GTA 5 game files and select the PS3UPDAT.PUP file. 
This will install the PS3 firmware on RPCS3, which is required for running PS3 games.</li> -<li>Click on Config > CPU and make sure that the PPU Decoder and SPU Decoder are set to Recompiler (LLVM). This will enable the fastest and most accurate emulation of the PS3 CPU.</li> -<li>Click on Config > GPU and make sure that the Renderer is set to Vulkan. This will enable the best graphics quality and performance for GTA 5. You can also change the Resolution Scale to increase or decrease the resolution of the game, depending on your PC's capabilities and preferences.</li> -<li>Click on Config > Audio and make sure that the Audio Out is set to XAudio2. This will enable the best sound quality and performance for GTA 5.</li> -<li>Click on Config > System and make sure that the Language is set to English (United States). This will ensure that GTA 5 runs in English language.</li> -</ul> -<p>You have successfully configured RPCS3 settings for optimal performance and compatibility. You can also explore other settings and options on RPCS3, such as Input/Output, Network, Emulator, Debug, etc., but be careful not to change anything that might cause problems or errors with GTA 5.</p> - <h3>Step 4: Load GTA 5 on RPCS3 and enjoy the game</h3> -<p>The final thing you need to do is to load GTA 5 on RPCS3 and enjoy the game. This is very easy and simple. Here are the steps you need to follow:</p> -<ul> -<li>Open the RPCS3 emulator and click on File > Boot Game. Browse to the folder where you downloaded or copied the GTA 5 game files and select the EBOOT.BIN file. This will load GTA 5 on RPCS3.</li> -<li>Wait for a few seconds or minutes until GTA 5 starts running on RPCS3. You will see a loading screen with some information about GTA 5 and RPCS3.</li> -<li>Once GTA 5 is loaded, you will see a menu screen with some options such as Story Mode, Online Mode, Settings, etc. Choose the option that you want to play and press Enter or click on it.</li> -<li>You will then see a cutscene or a loading screen that introduces you to GTA 5. After that, you will be able to control one of the three protagonists of GTA 5: Michael, Franklin, or Trevor.</li> -<li>You can now play GTA 5 on PS3 emulator as you would normally play it on PS3 console. You can switch between the three protagonists at any time, explore the city of Los Santos, engage in various missions and activities, and experience a thrilling story with three different perspectives.</li> -</ul> -<p>Congratulations, you have successfully loaded GTA 5 on RPCS3 and enjoy the game.</p> - <h2>Tips and tricks for playing GTA 5 on PS3 Emulator</h2> -<h3>How to fix common issues and errors</h3> -<p>Playing GTA 5 on PS3 emulator is not without its challenges and difficulties. You may encounter some common issues and errors while playing GTA 5 on PS3 emulator, such as crashes, freezes, glitches, black screens, missing textures, sound problems, etc. These issues and errors can be caused by various factors, such as incompatible settings, corrupted files, outdated drivers, insufficient resources, etc.</p> -<p>Fortunately, there are some ways to fix these common issues and errors and improve your gaming experience. Here are some tips and tricks for fixing common issues and errors while playing GTA 5 on PS3 emulator:</p> -<ul> -<li>Make sure that you have a legal copy of GTA 5 for PS3 and that you have installed the PS3 firmware on RPCS3. 
Using pirated or illegal copies of GTA 5 or not installing the PS3 firmware can cause many problems and errors with GTA 5 on PS3 emulator.</li> -<li>Make sure that you have the latest version of RPCS3 and that you have configured the settings correctly for GTA 5. Using an outdated or unstable version of RPCS3 or having wrong or incompatible settings can cause many issues and errors with GTA 5 on PS3 emulator.</li> -<li>Make sure that you have updated your PC's drivers, especially the graphics card driver. Having outdated or faulty drivers can cause many performance and compatibility issues with GTA 5 on PS3 emulator.</li> -<li>Make sure that you have enough free space on your hard drive and that you have defragmented it regularly. Having low or fragmented disk space can cause many loading and saving issues with GTA 5 on PS3 emulator.</li> -<li>Make sure that you have enough RAM and CPU power to run GTA 5 on PS3 emulator smoothly. Having insufficient or overloaded memory or processor can cause many lagging and crashing issues with GTA 5 on PS3 emulator.</li> -<li>Make sure that you have closed any unnecessary programs or processes that are running in the background while playing GTA 5 on PS3 emulator. Having too many applications or services running in the background can consume your PC's resources and interfere with GTA 5 on PS3 emulator.</li> -<li>Make sure that you have a stable and fast internet connection while playing GTA 5 on PS3 emulator, especially if you want to play online mode. Having a slow or unstable internet connection can cause many connection and synchronization issues with GTA 5 on PS3 emulator.</li> -<li>If you encounter any specific issue or error with GTA 5 on PS3 emulator, try to search for a solution online. There are many forums, blogs, videos, guides, etc., that can help you solve your problem. You can also check the official website of RPCS3 at <a href="">https://rpcs3.net/</a> for more information and support.</li> -</ul> -<p>By following these tips and tricks, you should be able to fix most of the common issues and errors while playing GTA 5 on PS3 emulator.</p> - <h3>How to improve graphics and framerate</h3> -<p>Playing GTA 5 on PS3 emulator can be a great experience, but it can also be a frustrating one if the graphics and framerate are not up to your expectations. You may notice that the graphics are blurry, pixelated, jagged, or washed out, or that the framerate is choppy, stuttering, or inconsistent. These graphics and framerate issues can affect your enjoyment and immersion in the game.</p> -<p>Luckily, there are some ways to improve graphics and framerate while playing GTA 5 on PS3 emulator. Here are some tips and tricks for improving graphics and framerate while playing GTA 5 on PS3 emulator:</p> -<ul> -<li>Increase the Resolution Scale in the GPU settings of RPCS3. This will increase the resolution of the game, making it sharper and clearer. However, this will also increase the load on your PC's GPU, so make sure you have a powerful enough graphics card to handle it.</li> -<li>Enable Anti-Aliasing in the GPU settings of RPCS3. This will smooth out the edges of the game objects, making them less jagged and pixelated. However, this will also increase the load on your PC's GPU, so make sure you have a powerful enough graphics card to handle it.</li> -<li>Enable Anisotropic Filtering in the GPU settings of RPCS3. This will improve the quality of the textures in the game, making them more detailed and realistic. 
However, this will also increase the load on your PC's GPU, so make sure you have a powerful enough graphics card to handle it.</li> -<li>Increase the Frame Limit in the GPU settings of RPCS3. This will increase the maximum number of frames per second (FPS) that the game can run at, making it smoother and more responsive. However, this will also increase the load on your PC's CPU and GPU, so make sure you have powerful enough components to handle it.</li> -<li>Lower the Resolution Scale or disable Anti-Aliasing, Anisotropic Filtering, or Frame Limit in the GPU settings of RPCS3. This will lower the quality of the graphics or the smoothness of the game, but it will also reduce the load on your PC's CPU and GPU, making the game run faster and more stable.</li> -<li>Adjust the graphics settings of GTA 5 in the game menu. You can change the brightness, contrast, saturation, sharpness, etc., of the game to suit your preferences and improve the visual quality of the game.</li> -</ul> -<p>By following these tips and tricks, you should be able to improve graphics and framerate while playing GTA 5 on PS3 emulator.</p> - <h3>How to use cheats and mods</h3> -<p>Playing GTA 5 on PS3 emulator can be a lot of fun, but it can also be more fun if you use cheats and mods. Cheats are codes or commands that you can enter in the game to activate various effects, such as infinite money, god mode, flying cars, super jump, etc. Mods are modifications or additions that you can install in the game to change or enhance various aspects, such as graphics, gameplay, features, etc.</p> -<p>Using cheats and mods while playing GTA 5 on PS3 emulator can make the game more enjoyable and interesting. However, you should be careful not to use cheats or mods that are incompatible with the PS3 emulator or that might cause problems or errors with the game. You should also be aware that using cheats or mods might affect your online gameplay or your trophies.</p> -<p>Here are some tips and tricks for using cheats and mods while playing GTA 5 on PS3 emulator:</p> -<ul> -<li>To use cheats while playing GTA 5 on PS3 emulator, you can either enter them manually using your keyboard or controller, or use a cheat menu that you can access by pressing a certain button or key. You can find a list of cheat codes for GTA 5 for PS3 online, such as <a href="">https://www.ign.com/cheats/games/grand-theft-auto-v-ps3-20593</a>. You can also find a cheat menu for GTA 5 for PS3 online, such as <a href="">https://www.gta5-mods.com/scripts/simple-trainer-for-gtav</a>.</li> -<li>To use mods while playing GTA 5 on PS3 emulator, you need to download and install them on your PC. You can find a variety of mods for GTA 5 for PS3 online, such as <a href="">https://www.gtaall.com/gta-5/mods/ps3.html</a>. You need to follow the instructions provided by the mod creators to install them correctly. You also need to backup your original game files before installing any mods, in case something goes wrong or you want to uninstall them.</li> -<li>To enable or disable cheats or mods while playing GTA 5 on PS3 emulator, you can either use the cheat menu or the mod manager that comes with the mod. You can also delete or rename the mod files from your PC if you want to remove them completely.</li> -</ul> -<p>By following these tips and tricks, you should be able to use cheats and mods while playing GTA 5 on PS3 emulator.</p> - <h2>Conclusion</h2> -<p>GTA 5 is one of the best games ever made, and playing it on PS3 emulator is a great way to enjoy it on your PC. 
You can play GTA 5 on PS3 emulator with the same graphics, gameplay, and features as the original PS3 version, but with some additional benefits and advantages. You can also improve your gaming experience by fixing common issues and errors, improving graphics and framerate, and using cheats and mods.</p> -<p>In this article, we have shown you how to download GTA 5 on PS3 emulator step by step. We have also given you some tips and tricks for playing GTA 5 on PS3 emulator like a pro. We hope that this article has been helpful and informative for you. If you have any questions or comments about this article, feel free to leave them below.</p> -<p>Thank you for reading this article and happy gaming!</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about downloading GTA 5 on PS3 emulator:</p> - <h4>Q: Is downloading GTA 5 on PS3 emulator legal?</h4> -<p>A: Yes, downloading GTA 5 on PS3 emulator is legal as long as you have a legal copy of GTA 5 for PS3 and you do not distribute or share it with anyone else. However, downloading or using pirated or illegal copies of GTA 5 is not legal and can get you in trouble. You should always use legal and legitimate sources to get GTA 5 for PS3.</p> - <h4>Q: Is downloading GTA 5 on PS3 emulator safe?</h4> -<p>A: Yes, downloading GTA 5 on PS3 emulator is safe as long as you use a reliable and trustworthy PS3 emulator, such as RPCS3, and you scan your PC for any malware or viruses before and after downloading or installing anything. However, downloading or using unsafe or unverified PS3 emulators, GTA 5 game files, cheats, or mods can be unsafe and can harm your PC or your data. You should always use safe and verified sources to get GTA 5 on PS3 emulator.</p> - <h4>Q: Is downloading GTA 5 on PS3 emulator free?</h4> -<p>A: Yes, downloading GTA 5 on PS3 emulator is free as long as you have a legal copy of GTA 5 for PS3 and you use a free and open-source PS3 emulator, such as RPCS3. However, buying GTA 5 for PS3 or donating to the RPCS3 project may cost you some money, depending on where and how you get them. You should always support the developers and creators of GTA 5 and RPCS3 by buying or donating to them if you can.</p> - <h4>Q: How long does it take to download GTA 5 on PS3 emulator?</h4> -<p>A: The time it takes to download GTA 5 on PS3 emulator depends on several factors, such as the size of the game files, the speed of your internet connection, the performance of your PC, etc. Generally, it can take anywhere from a few minutes to a few hours to download GTA 5 on PS3 emulator. You should be patient and wait for the download to finish before playing the game.</p> - <h4>Q: Can I play GTA 5 online on PS3 emulator?</h4> -<p>A: Yes, you can play GTA 5 online on PS3 emulator if you have a PlayStation Network account and you use the online mode option in the game menu. However, playing GTA 5 online on PS3 emulator may not be as smooth or stable as playing it on PS3 console, as there may be some connection or synchronization issues or errors. 
You should also avoid using cheats or mods while playing GTA 5 online on PS3 emulator, as they may get you banned or suspended from the PlayStation Network.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to get Racing in Car apk for free and race like a pro.md b/spaces/congsaPfin/Manga-OCR/logs/How to get Racing in Car apk for free and race like a pro.md deleted file mode 100644 index 6f978b8949311e32dd39cf8add8b01dffc3e1e30..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/How to get Racing in Car apk for free and race like a pro.md +++ /dev/null @@ -1,111 +0,0 @@ - -<h1>Racing in Car APK: A Realistic and Fun Driving Simulator for Android</h1> -<p>Do you love driving cars and racing games? If yes, then you should try Racing in Car APK, a realistic and fun driving simulator for Android devices. Racing in Car APK is a game that lets you drive a car from a first-person perspective, as if you are sitting behind the wheel. You can race on different roads, overtake traffic, drift, customize your car, and enjoy a thrilling driving experience. In this article, we will tell you everything you need to know about Racing in Car APK, including its features, how to download and install it, what are the benefits of playing it, and what are some tips and tricks to play it better.</p> -<h2>racing in car apk</h2><br /><p><b><b>Download File</b> ► <a href="https://urlca.com/2uObeW">https://urlca.com/2uObeW</a></b></p><br /><br /> - <h2>What is Racing in Car APK?</h2> -<p>Racing in Car APK is an Android game that simulates driving a car on various roads. It is developed by Fast Free Games, a studio that specializes in creating realistic and addictive racing games. Racing in Car APK is one of their most popular games, with over 100 million downloads on Google Play Store. The game has received positive reviews from users and critics alike, who praised its graphics, gameplay, controls, and realism.</p> - <h3>Features of Racing in Car APK</h3> -<p>Racing in Car APK has many features that make it an enjoyable and exciting game to play. Here are some of them:</p> -<h4>- Easy to learn and drive</h4> -<p>The game has simple and intuitive controls that allow you to drive your car easily. You can choose between tilt or touch controls, depending on your preference. You can also adjust the sensitivity of the steering wheel and the camera angle. 
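<p>As a rough idea of how a tilt control with an adjustable sensitivity could work under the hood, here is a small illustrative Python sketch. The function name, the 45-degree tilt range, and the clamping are assumptions made up for this example, not anything taken from the actual game.</p>

```python
# Toy tilt-steering mapping: device roll angle -> steering input in [-1, 1].
# The sensitivity factor plays the role of the in-game sensitivity slider.
# All names and ranges here are invented for illustration.

def tilt_to_steering(roll_degrees, sensitivity=1.0, max_tilt=45.0):
    """Map a device roll angle (degrees) to a steering value between -1 and 1."""
    raw = (roll_degrees / max_tilt) * sensitivity
    return max(-1.0, min(1.0, raw))   # clamp so extreme tilts don't over-steer

for roll in (-60, -20, 0, 15, 60):
    print(roll, "->", round(tilt_to_steering(roll, sensitivity=1.5), 2))
# -60 -> -1.0, -20 -> -0.67, 0 -> 0.0, 15 -> 0.5, 60 -> 1.0
```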
The game also has a tutorial mode that teaches you the basics of driving.</p> -<p>racing in car 2021 apk download<br /> -racing in car 2 mod apk unlimited money<br /> -racing in car 3d apk<br /> -racing in car mod apk android 1<br /> -racing in car apk pure<br /> -racing in car 2021 mod apk<br /> -racing in car 2 apk latest version<br /> -racing in car 2021 hack apk<br /> -racing in car 3d mod apk<br /> -racing in car mod apk revdl<br /> -racing in car 2 hack apk<br /> -racing in car 2021 offline apk<br /> -racing in car simulator apk<br /> -racing in car mod apk rexdl<br /> -racing in car 2 offline apk<br /> -racing in car 2021 unlimited money apk<br /> -racing in car 3d game apk<br /> -racing in car mod apk happymod<br /> -racing in car 2 mod apk android 1<br /> -racing in car 2021 premium apk<br /> -racing in car game download apk<br /> -racing in car mod apk unlimited coins<br /> -racing in car 2 mod apk rexdl<br /> -racing in car 2021 mod apk revdl<br /> -racing in car free download apk<br /> -racing in car mod apk latest version<br /> -racing in car 2 mod apk happymod<br /> -racing in car 2021 mod apk rexdl<br /> -racing in car unlimited money apk<br /> -racing in car hack apk download<br /> -racing in car 2 game download apk<br /> -racing in car mod apk download for android<br /> -racing in car 2 mod apk pure<br /> -racing in car 2021 mod apk download for android<br /> -racing in car online apk<br /> -racing in car cheats apk<br /> -racing in car 2 cheats apk<br /> -racing in car mod menu apk<br /> -racing in car 2021 mod menu apk<br /> -real racing in car game 3d traffic racer simulator mod apk</p> -<h4>- 3D realistic cockpit view</h4> -<p>The game gives you a 3D realistic cockpit view of your car, which makes you feel like you are actually driving it. You can see the dashboard, the steering wheel, the mirrors, the pedals, and the road ahead. You can also switch between different camera views, such as front, rear, side, or top.</p> -<h4>- Endless game mode</h4> -<p>The game has an endless game mode that lets you drive as long as you want without any time limit or level restriction. You can drive on different roads, such as city, highway, desert, or snow. You can also change the weather, the time of day, and the traffic density. The game will randomly generate different scenarios and challenges for you to face, such as police chase, traffic jam, or road block. The game will also reward you with points and money based on your driving performance, such as your speed, distance, overtaking, drifting, and avoiding crashes.</p> -<h4>- Different locations and cars to choose</h4> -<p>The game offers you a variety of locations and cars to choose from. You can drive in different countries, such as USA, Germany, France, Italy, Spain, or Japan. You can also drive different types of cars, such as sports cars, muscle cars, SUVs, trucks, or buses. Each car has its own characteristics, such as speed, acceleration, handling, braking, and durability. You can also customize your car with different colors, wheels, stickers, and accessories.</p> -<h4>- Simulator-like controls</h4> -<p>The game has simulator-like controls that make you feel like you are driving a real car. You can use the accelerator and brake pedals to control your speed. You can also use the gear shift to change gears manually or automatically. You can also use the handbrake to perform drifts and turns. 
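<p>To make the endless-mode rewards described above a little more concrete (points for speed, distance, overtaking and drifting, with crashes costing you), here is a toy Python scoring sketch. The weights are invented purely for illustration and are not the game's actual formula.</p>

```python
# Toy driving-score sketch: rewards speed, distance, overtaking and drifting,
# and penalises crashes. The weights below are invented for this example.

def driving_score(distance_m, avg_speed_kmh, overtakes, drift_seconds, crashes):
    score = 0
    score += distance_m * 0.1      # steady points for distance covered
    score += avg_speed_kmh * 2     # a higher average speed pays more
    score += overtakes * 50        # close overtakes are the big earner
    score += drift_seconds * 10    # drifting keeps the points coming
    score -= crashes * 200         # crashes hurt the total
    return max(0, int(score))

print(driving_score(distance_m=2500, avg_speed_kmh=120,
                    overtakes=14, drift_seconds=8, crashes=1))
# 250 + 240 + 700 + 80 - 200 = 1070
```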
The game also has realistic physics and sound effects that simulate the engine noise, the tire screech, the wind noise, and the collision impact.</p> - <h2>How to download and install Racing in Car APK?</h2> -<p>If you want to play Racing in Car APK on your Android device, you need to download and install it first. Here are the steps to do so:</p> - <h3>Steps to download and install Racing in Car APK</h3> -<h4>- Go to the official website of Racing in Car APK</h4> -<p>The first step is to go to the official website of Racing in Car APK, where you can find the latest version of the game. You can also read more information about the game, such as its features, screenshots, reviews, and ratings.</p> -<h4>- Click on the download button and wait for the APK file to be downloaded</h4> -<p>The next step is to click on the download button on the website and wait for the APK file to be downloaded on your device. The APK file is a small file that contains the game data and installation instructions. The download time may vary depending on your internet speed and device storage.</p> -<h4>- Enable unknown sources on your device settings</h4> -<p>The third step is to enable unknown sources on your device settings. This is a security measure that prevents you from installing apps from sources other than Google Play Store. To enable unknown sources, you need to go to your device settings > security > unknown sources > toggle on.</p> -<h4>- Locate the downloaded APK file and tap on it to install it</h4> -<p>The fourth step is to locate the downloaded APK file on your device storage and tap on it to install it. You may need to grant some permissions for the app to access your device features and data. The installation process may take a few minutes depending on your device performance.</p> -<h4>- Launch the game and enjoy racing in car</h4> -<p>The final step is to launch the game and enjoy racing in car. You can start by choosing your car and location and then hit the road. You can also adjust the game settings according to your preference. Have fun driving!</p> <h2>What are the benefits of playing Racing in Car APK?</h2> -<p>Playing Racing in Car APK is not only fun but also beneficial for you. Here are some of the benefits of playing this game:</p> - <h3>Advantages of playing Racing in Car APK</h3> -<h4>- It improves your driving skills and reflexes</h4> -<p>Playing Racing in Car APK can help you improve your driving skills and reflexes. You can learn how to steer, accelerate, brake, overtake, drift, and avoid obstacles. You can also practice your reaction time and decision making. Playing this game can make you a better and safer driver in real life.</p> -<h4>- It gives you a realistic and immersive driving experience</h4> -<p>Playing Racing in Car APK can give you a realistic and immersive driving experience. You can feel like you are driving a real car with the 3D realistic cockpit view, the simulator-like controls, the realistic physics and sound effects, and the dynamic weather and traffic conditions. You can also experience different driving scenarios and challenges that test your driving abilities.</p> -<h4>- It lets you customize and upgrade your car according to your preference</h4> -<p>Playing Racing in Car APK can let you customize and upgrade your car according to your preference. You can choose from different types of cars, such as sports cars, muscle cars, SUVs, trucks, or buses. You can also change the color, wheels, stickers, and accessories of your car. 
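<p>Purely to illustrate how per-car characteristics and money-based upgrades like these might be modelled in code, here is a short Python sketch using a dataclass. The attribute names, the upgrade cost, and the stat cap are assumptions for this example, not the game's real data model.</p>

```python
# Toy car model: characteristics plus a simple money-gated upgrade step.
# Attribute names, costs, and caps are invented for this illustration.
from dataclasses import dataclass

@dataclass
class Car:
    name: str
    speed: int          # top-speed rating
    acceleration: int
    handling: int
    braking: int
    durability: int
    color: str = "red"  # cosmetic customization

    def upgrade(self, stat: str, money: int, cost: int = 500) -> int:
        """Spend `cost` to raise one stat by 1 (capped at 10); return money left."""
        if money >= cost and getattr(self, stat) < 10:
            setattr(self, stat, getattr(self, stat) + 1)
            money -= cost
        return money

car = Car("Sports", speed=7, acceleration=6, handling=5, braking=6, durability=4)
money = car.upgrade("handling", money=1200)   # handling 5 -> 6, 700 left over
print(car.handling, money)                    # 6 700
```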
You can also upgrade your car's speed, acceleration, handling, braking, and durability with the money you earn from racing.</p> -<h4>- It offers you a variety of challenges and scenarios to test your driving abilities</h4> -<p>Playing Racing in Car APK can offer you a variety of challenges and scenarios to test your driving abilities. You can drive on different roads, such as city, highway, desert, or snow. You can also face different situations, such as police chase, traffic jam, or road block. You can also compete with other drivers or yourself by trying to beat your own high score or the global leaderboard.</p> -<h4>- It is free to play and does not require an internet connection</h4> -<p>Playing Racing in Car APK is free to play and does not require an internet connection. You can download and install the game easily from the official website or Google Play Store. You can also play the game offline without any interruption or limitation. You can enjoy racing in car anytime and anywhere.</p> - <h2>What are some tips and tricks to play Racing in Car APK better?</h2> -<p>If you want to play Racing in Car APK better, you need to know some tips and tricks that can help you improve your driving performance and score. Here are some of them:</p> - <h3>Useful tips and tricks for playing Racing in Car APK</h3> -<h4>- Use the tilt or touch controls to steer your car smoothly</h4> -<p>The game allows you to choose between tilt or touch controls to steer your car. You can choose the one that suits you best. However, whichever control you choose, you need to use it smoothly and gently. Avoid jerking or swerving your car too much as it will make you lose control and crash. Instead, use small and precise movements to steer your car smoothly.</p> -<h4>- Overtake traffic as close as possible to earn more points and money</h4> -<p>The game rewards you with more points and money if you overtake traffic as close as possible without crashing. The closer you overtake traffic, the higher your score multiplier will be. However, be careful not to hit other cars or obstacles as it will damage your car and reduce your score. Try to find the right balance between risk and reward when overtaking traffic.</p> -<h4>- Drift around corners to maintain your speed and momentum</h4> -<p>The game allows you to drift around corners by using the handbrake button. Drifting can help you maintain your speed and momentum when turning. However, drifting too much or too little can make you lose control or slow down. Try to drift at the right angle and timing when cornering.</p> -<h4>- Avoid crashing into other cars or obstacles as it will damage your car and reduce your score</h4> -<p>The game penalizes you for crashing into other cars or obstacles by damaging your car and reducing your score. The more damage your car has, the slower it will go and the harder it will be to control. The more crashes you have, the lower your score will be. Try to avoid crashing into other cars or obstacles by steering carefully and braking timely.</p> -<h4>- Try different cars and locations to find the best combination for your driving style</h4> -<p>The game offers you different cars and locations to choose from. Each car has its own characteristics, such as speed, acceleration, handling, braking, and durability <h2>Conclusion</h2> -<p>Racing in Car APK is a realistic and fun driving simulator for Android devices that lets you drive a car from a first-person perspective. 
You can race on different roads, overtake traffic, drift, customize your car, and enjoy a thrilling driving experience. You can also improve your driving skills and reflexes, experience different driving scenarios and challenges, and play the game offline and for free. If you love driving cars and racing games, you should download and install Racing in Car APK from the official website or Google Play Store and start racing in car.</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about Racing in Car APK:</p> -<h4>- Is Racing in Car APK safe to download and install?</h4> -<p>Yes, Racing in Car APK is safe to download and install. The game does not contain any viruses, malware, or spyware that can harm your device or data. The game also does not require any special permissions or access to your device features or data. However, you should always download and install the game from the official website or Google Play Store to avoid any fake or malicious versions.</p> -<h4>- Is Racing in Car APK compatible with my device?</h4> -<p>Racing in Car APK is compatible with most Android devices that have Android 4.1 or higher. The game also has a low file size and does not require much storage space or memory. However, the game performance may vary depending on your device specifications and settings. You can check the game requirements and compatibility on the official website or Google Play Store before downloading and installing the game.</p> -<h4>- How can I update Racing in Car APK to the latest version?</h4> -<p>You can update Racing in Car APK to the latest version by visiting the official website or Google Play Store and downloading and installing the new version. The game will also notify you when there is a new update available. You should always update the game to enjoy the latest features, improvements, and bug fixes.</p> -<h4>- How can I contact the developer of Racing in Car APK?</h4> -<p>You can contact the developer of Racing in Car APK by visiting their official website or their Facebook page. You can also send them an email at fast.free.games@gmail.com. You can also rate and review the game on Google Play Store and share your feedback and suggestions.</p> -<h4>- How can I support the developer of Racing in Car APK?</h4> -<p>You can support the developer of Racing in Car APK by playing and enjoying the game, sharing it with your friends and family, rating and reviewing it on Google Play Store, following their social media accounts, and watching ads or making in-app purchases if you want to.</p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/CRACK Corel PhotoMirage 3.2.2.169 Portable Crackedl __EXCLUSIVE__.md b/spaces/contluForse/HuggingGPT/assets/CRACK Corel PhotoMirage 3.2.2.169 Portable Crackedl __EXCLUSIVE__.md deleted file mode 100644 index 7b2faccfae668ae950658afa56111cee5656c7b4..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/CRACK Corel PhotoMirage 3.2.2.169 Portable Crackedl __EXCLUSIVE__.md +++ /dev/null @@ -1,11 +0,0 @@ -<h2>CRACK Corel PhotoMirage 3.2.2.169 Portable Crackedl</h2><br /><p><b><b>Download File</b> ……… <a href="https://ssurll.com/2uzxzw">https://ssurll.com/2uzxzw</a></b></p><br /><br /> -<br /> -Corel Pho Patch The full version of 32-bit Windows. DOWNLOAD: · Download a9c2e16639. Related links: How to crack the first safe with a warning. How to crack the first safe with a warning? You need to crack the first safe with a warning. 
-I know how to crack a safe with a password, but I need a code, and I want to guess the code itself. -How to crack the first safe with a warning? -You need to crack the first safe with a warning. -How to crack the first safe with a warning. -You need to crack the first safe with a warning. 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/coraKong/WorldSimulation/plugins/ResourceDepletionPlugin.py b/spaces/coraKong/WorldSimulation/plugins/ResourceDepletionPlugin.py deleted file mode 100644 index 7303634c365851933cfd57e1f15e78fb35ef432b..0000000000000000000000000000000000000000 --- a/spaces/coraKong/WorldSimulation/plugins/ResourceDepletionPlugin.py +++ /dev/null @@ -1,37 +0,0 @@ -import random -class ResourceDepletionPlugin: - def __init__(self, depletion_threshold, death_rate = 0.3): - self.depletion_threshold = depletion_threshold - self.death_rate = death_rate - - def trigger_disaster(self, characters, character_die_callback): - print("资源耗尽! 发生灾难...") - - # 按照宗族大小排序 - clan_size = {} - for c in characters: - clan_size[c.clan] = clan_size.get(c.clan, 0) + 1 - - clans = sorted(clan_size.items(), key=lambda x: x[1], reverse=True) - - # 前 50% 大小的宗族,死亡 30%, 后 50% 大小的宗族,死亡 60% - for i, (clan, size) in enumerate(clans): - if i < len(clans) // 2: - num_killed = int(size * self.death_rate) - else: - num_killed = int(size * (self.death_rate * 2)) - - # 随机选择死亡成员 - clan_members = [c for c in characters if c.clan == clan] - killed = random.sample(clan_members, num_killed) - - for c in killed: - c.history.append(f"{c.real_age}岁,因资源耗尽死亡") - character_die_callback(c) - - def execute(self, resources, characters, character_die_callback): - # 检查资源是否耗尽,如果耗尽则有概率触发灾难 - if resources < self.depletion_threshold: - probability = random.random() - if probability < 0.3: - self.trigger_disaster(characters, character_die_callback) \ No newline at end of file diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py deleted file mode 100644 index d02122ca0e68743b1bf7a893afae96042f23838c..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/cascade_decode_head.py +++ /dev/null @@ -1,57 +0,0 @@ -from abc import ABCMeta, abstractmethod - -from .decode_head import BaseDecodeHead - - -class BaseCascadeDecodeHead(BaseDecodeHead, metaclass=ABCMeta): - """Base class for cascade decode head used in - :class:`CascadeEncoderDecoder.""" - - def __init__(self, *args, **kwargs): - super(BaseCascadeDecodeHead, self).__init__(*args, **kwargs) - - @abstractmethod - def forward(self, inputs, prev_output): - """Placeholder of forward function.""" - pass - - def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, - train_cfg): - """Forward function for training. - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - gt_semantic_seg (Tensor): Semantic segmentation masks - used if the architecture supports semantic segmentation task. 
- train_cfg (dict): The training config. - - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - seg_logits = self.forward(inputs, prev_output) - losses = self.losses(seg_logits, gt_semantic_seg) - - return losses - - def forward_test(self, inputs, prev_output, img_metas, test_cfg): - """Forward function for testing. - - Args: - inputs (list[Tensor]): List of multi-level img features. - prev_output (Tensor): The output of previous decode head. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmseg/datasets/pipelines/formatting.py:Collect`. - test_cfg (dict): The testing config. - - Returns: - Tensor: Output segmentation map. - """ - return self.forward(inputs, prev_output) diff --git a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/nl_head.py b/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/nl_head.py deleted file mode 100644 index 3eee424199e6aa363b564e2a3340a070db04db86..0000000000000000000000000000000000000000 --- a/spaces/coreml-community/ControlNet-v1-1-Annotators-cpu/annotator/uniformer/mmseg/models/decode_heads/nl_head.py +++ /dev/null @@ -1,49 +0,0 @@ -import torch -from annotator.uniformer.mmcv.cnn import NonLocal2d - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class NLHead(FCNHead): - """Non-local Neural Networks. - - This head is the implementation of `NLNet - <https://arxiv.org/abs/1711.07971>`_. - - Args: - reduction (int): Reduction factor of projection transform. Default: 2. - use_scale (bool): Whether to scale pairwise_weight by - sqrt(1/inter_channels). Default: True. - mode (str): The nonlocal mode. Options are 'embedded_gaussian', - 'dot_product'. Default: 'embedded_gaussian.'. - """ - - def __init__(self, - reduction=2, - use_scale=True, - mode='embedded_gaussian', - **kwargs): - super(NLHead, self).__init__(num_convs=2, **kwargs) - self.reduction = reduction - self.use_scale = use_scale - self.mode = mode - self.nl_block = NonLocal2d( - in_channels=self.channels, - reduction=self.reduction, - use_scale=self.use_scale, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - mode=self.mode) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.nl_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/dachenchen/HiWantJoin/modules/presets.py b/spaces/dachenchen/HiWantJoin/modules/presets.py deleted file mode 100644 index 969f122198a360f8c3eb126b156d056ab81d53e1..0000000000000000000000000000000000000000 --- a/spaces/dachenchen/HiWantJoin/modules/presets.py +++ /dev/null @@ -1,222 +0,0 @@ -# -*- coding:utf-8 -*- -import os -from pathlib import Path -import gradio as gr -from .webui_locale import I18nAuto - -i18n = I18nAuto() # internationalization - -CHATGLM_MODEL = None -CHATGLM_TOKENIZER = None -LLAMA_MODEL = None -LLAMA_INFERENCER = None - -# ChatGPT 设置 -INITIAL_SYSTEM_PROMPT = "You are a helpful assistant." 
-API_HOST = "api.openai.com" -COMPLETION_URL = "https://api.openai.com/v1/chat/completions" -BALANCE_API_URL="https://api.openai.com/dashboard/billing/credit_grants" -USAGE_API_URL="https://api.openai.com/dashboard/billing/usage" -HISTORY_DIR = Path("history") -HISTORY_DIR = "history" -TEMPLATES_DIR = "templates" - -# 错误信息 -STANDARD_ERROR_MSG = i18n("☹️发生了错误:") # 错误信息的标准前缀 -GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志") -ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。") -CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。") # 连接超时 -READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。") # 读取超时 -PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。") # 代理错误 -SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。") # SSL 错误 -NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。") # API key 长度不足 51 位 -NO_INPUT_MSG = i18n("请输入对话内容。") # 未输入对话内容 -BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用") # 本地运行的模型返回的账单信息 - -TIMEOUT_STREAMING = 60 # 流式对话时的超时时间 -TIMEOUT_ALL = 200 # 非流式对话时的超时时间 -ENABLE_STREAMING_OPTION = True # 是否启用选择选择是否实时显示回答的勾选框 -HIDE_MY_KEY = False # 如果你想在UI中隐藏你的 API 密钥,将此值设置为 True -CONCURRENT_COUNT = 100 # 允许同时使用的用户数量 - -SIM_K = 5 -INDEX_QUERY_TEMPRATURE = 1.0 - -CHUANHU_TITLE = i18n("川虎Chat 🚀") - -CHUANHU_DESCRIPTION = i18n("由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本") - -FOOTER = """<div class="versions">{versions}</div>""" - -APPEARANCE_SWITCHER = """ -<div style="display: flex; justify-content: space-between;"> -<span style="margin-top: 4px !important;">"""+ i18n("切换亮暗色主题") + """</span> -<span><label class="apSwitch" for="checkbox"> - <input type="checkbox" id="checkbox"> - <div class="apSlider"></div> -</label></span> -</div> -""" - -SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?" # 总结对话时的 prompt - -ONLINE_MODELS = [ - "gpt-3.5-turbo", - "gpt-3.5-turbo-0301", - "gpt-4", - "gpt-4-0314", - "gpt-4-32k", - "gpt-4-32k-0314", - "xmchat", -] - -LOCAL_MODELS = [ - "chatglm-6b", - "chatglm-6b-int4", - "chatglm-6b-int4-qe", - "llama-7b-hf", - "llama-13b-hf", - "llama-30b-hf", - "llama-65b-hf" -] - -if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true': - MODELS = ONLINE_MODELS -else: - MODELS = ONLINE_MODELS + LOCAL_MODELS - -DEFAULT_MODEL = 0 - -os.makedirs("models", exist_ok=True) -os.makedirs("lora", exist_ok=True) -os.makedirs("history", exist_ok=True) -for dir_name in os.listdir("models"): - if os.path.isdir(os.path.join("models", dir_name)): - if dir_name not in MODELS: - MODELS.append(dir_name) - -MODEL_TOKEN_LIMIT = { - "gpt-3.5-turbo": 4096, - "gpt-3.5-turbo-0301": 4096, - "gpt-4": 8192, - "gpt-4-0314": 8192, - "gpt-4-32k": 32768, - "gpt-4-32k-0314": 32768 -} - -TOKEN_OFFSET = 1000 # 模型的token上限减去这个值,得到软上限。到达软上限之后,自动尝试减少token占用。 -DEFAULT_TOKEN_LIMIT = 3000 # 默认的token上限 -REDUCE_TOKEN_FACTOR = 0.5 # 与模型token上限想乘,得到目标token数。减少token占用时,将token占用减少到目标token数以下。 - -REPLY_LANGUAGES = [ - "简体中文", - "繁體中文", - "English", - "日本語", - "Español", - "Français", - "Deutsch", - "跟随问题语言(不稳定)" -] - - -WEBSEARCH_PTOMPT_TEMPLATE = """\ -Web search results: - -{web_results} -Current date: {current_date} - -Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject. -Query: {query} -Reply in {reply_language} -""" - -PROMPT_TEMPLATE = """\ -Context information is below. 
---------------------- -{context_str} ---------------------- -Current date: {current_date}. -Using the provided context information, write a comprehensive reply to the given query. -Make sure to cite results using [number] notation after the reference. -If the provided context information refer to multiple subjects with the same name, write separate answers for each subject. -Use prior knowledge only if the given context didn't provide enough information. -Answer the question: {query_str} -Reply in {reply_language} -""" - -REFINE_TEMPLATE = """\ -The original question is as follows: {query_str} -We have provided an existing answer: {existing_answer} -We have the opportunity to refine the existing answer -(only if needed) with some more context below. ------------- -{context_msg} ------------- -Given the new context, refine the original answer to better -Reply in {reply_language} -If the context isn't useful, return the original answer. -""" - -ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->" - -small_and_beautiful_theme = gr.themes.Soft( - primary_hue=gr.themes.Color( - c50="#02C160", - c100="rgba(2, 193, 96, 0.2)", - c200="#02C160", - c300="rgba(2, 193, 96, 0.32)", - c400="rgba(2, 193, 96, 0.32)", - c500="rgba(2, 193, 96, 1.0)", - c600="rgba(2, 193, 96, 1.0)", - c700="rgba(2, 193, 96, 0.32)", - c800="rgba(2, 193, 96, 0.32)", - c900="#02C160", - c950="#02C160", - ), - secondary_hue=gr.themes.Color( - c50="#576b95", - c100="#576b95", - c200="#576b95", - c300="#576b95", - c400="#576b95", - c500="#576b95", - c600="#576b95", - c700="#576b95", - c800="#576b95", - c900="#576b95", - c950="#576b95", - ), - neutral_hue=gr.themes.Color( - name="gray", - c50="#f9fafb", - c100="#f3f4f6", - c200="#e5e7eb", - c300="#d1d5db", - c400="#B2B2B2", - c500="#808080", - c600="#636363", - c700="#515151", - c800="#393939", - c900="#272727", - c950="#171717", - ), - radius_size=gr.themes.sizes.radius_sm, - ).set( - button_primary_background_fill="#06AE56", - button_primary_background_fill_dark="#06AE56", - button_primary_background_fill_hover="#07C863", - button_primary_border_color="#06AE56", - button_primary_border_color_dark="#06AE56", - button_primary_text_color="#FFFFFF", - button_primary_text_color_dark="#FFFFFF", - button_secondary_background_fill="#F2F2F2", - button_secondary_background_fill_dark="#2B2B2B", - button_secondary_text_color="#393939", - button_secondary_text_color_dark="#FFFFFF", - # background_fill_primary="#F7F7F7", - # background_fill_primary_dark="#1F1F1F", - block_title_text_color="*primary_500", - block_title_background_fill="*primary_100", - input_background_fill="#F6F6F6", - ) diff --git a/spaces/dataroots/SofaStyler/README.md b/spaces/dataroots/SofaStyler/README.md deleted file mode 100644 index 1abec094e7bfe6483085e0239f588b2fc1442de9..0000000000000000000000000000000000000000 --- a/spaces/dataroots/SofaStyler/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SofaStyler -emoji: 🛋 -colorFrom: blue -colorTo: green -sdk: streamlit -sdk_version: 1.9.0 -app_file: app.py -pinned: true ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/dawood/audioldm-text-to-audio-generation/share_btn.py b/spaces/dawood/audioldm-text-to-audio-generation/share_btn.py deleted file mode 100644 index b8c2ed17439625f85fd0e910766c727b29131e3d..0000000000000000000000000000000000000000 --- a/spaces/dawood/audioldm-text-to-audio-generation/share_btn.py +++ /dev/null @@ -1,60 +0,0 @@ -community_icon_html = """<svg 
id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32"> - <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path> - <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path> -</svg>""" - -loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin" - style="color: #ffffff; 
-" - xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>""" - -share_js = """async () => { - async function uploadFile(file){ - const UPLOAD_URL = 'https://huggingface.co/uploads'; - const response = await fetch(UPLOAD_URL, { - method: 'POST', - headers: { - 'Content-Type': file.type, - 'X-Requested-With': 'XMLHttpRequest', - }, - body: file, /// <- File inherits from Blob - }); - const url = await response.text(); - return url; - } - const gradioEl = document.querySelector('body > gradio-app'); - const imgEls = gradioEl.querySelectorAll('#gallery img'); - const promptTxt = gradioEl.querySelector('#prompt-text-input input').value; - const shareBtnEl = gradioEl.querySelector('#share-btn'); - const shareIconEl = gradioEl.querySelector('#share-btn-share-icon'); - const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon'); - if(!imgEls.length){ - return; - }; - shareBtnEl.style.pointerEvents = 'none'; - shareIconEl.style.display = 'none'; - loadingIconEl.style.removeProperty('display'); - const files = await Promise.all( - [...imgEls].map(async (imgEl) => { - const res = await fetch(imgEl.src); - const blob = await res.blob(); - const imgId = Date.now() % 200; - const fileName = `diffuse-the-rest-${{imgId}}.jpg`; - return new File([blob], fileName, { type: 'image/jpeg' }); - }) - ); - const urls = await Promise.all(files.map((f) => uploadFile(f))); - const htmlImgs = urls.map(url => `<img src='${url}' width='400' height='400'>`); - const descriptionMd = `<div style='display: flex; flex-wrap: wrap; column-gap: 0.75rem;'> -${htmlImgs.join(`\n`)} -</div>`; - const params = new URLSearchParams({ - title: promptTxt, - description: descriptionMd, - }); - const paramsStr = params.toString(); - window.open(`https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation/discussions/new?${paramsStr}`, '_blank'); - shareBtnEl.style.removeProperty('pointer-events'); - shareIconEl.style.removeProperty('display'); - loadingIconEl.style.display = 'none'; -}""" \ No newline at end of file diff --git a/spaces/declare-lab/tango/audioldm/latent_diffusion/openaimodel.py b/spaces/declare-lab/tango/audioldm/latent_diffusion/openaimodel.py deleted file mode 100644 index 831d7aafb36bba16888e4389153979a6c13639f5..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/audioldm/latent_diffusion/openaimodel.py +++ /dev/null @@ -1,1069 +0,0 @@ -from abc import abstractmethod -import math - -import numpy as np -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from audioldm.latent_diffusion.util import ( - checkpoint, - conv_nd, - linear, - avg_pool_nd, - zero_module, - normalization, - timestep_embedding, -) -from audioldm.latent_diffusion.attention import SpatialTransformer - - -# dummy replace -def convert_module_to_f16(x): - pass - - -def convert_module_to_f32(x): - pass - - -## go -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - 
super().__init__() - self.positional_embedding = nn.Parameter( - th.randn(embed_dim, spacial_dim**2 + 1) / embed_dim**0.5 - ) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1).contiguous() # NC(HW) - x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd( - dims, self.channels, self.out_channels, 3, padding=padding - ) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" - ) - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - - -class TransposedUpsample(nn.Module): - "Learned 2x upsampling without padding" - - def __init__(self, channels, out_channels=None, ks=5): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - - self.up = nn.ConvTranspose2d( - self.channels, self.out_channels, kernel_size=ks, stride=2 - ) - - def forward(self, x): - return self.up(x) - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. 
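    Example (illustrative sketch, assuming 2-D inputs and the `conv_nd` /
    `avg_pool_nd` helpers imported above):

        down = Downsample(channels=64, use_conv=True, dims=2)
        y = down(th.randn(1, 64, 32, 32))   # spatial dims halve -> [1, 64, 16, 16]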
- """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, - self.channels, - self.out_channels, - 3, - stride=stride, - padding=padding, - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. - """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 3, padding=1 - ) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. 
- """ - return checkpoint( - self._forward, (x, emb), self.parameters(), self.use_checkpoint - ) - - def _forward(self, x, emb): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. - """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - use_new_attention_order=False, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels) - self.qkv = conv_nd(1, channels, channels * 3, 1) - if use_new_attention_order: - # split qkv before split heads - self.attention = QKVAttention(self.num_heads) - else: - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x): - return checkpoint( - self._forward, (x,), self.parameters(), True - ) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! - # return pt_checkpoint(self._forward, x) # pytorch - - def _forward(self, x): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1).contiguous() - qkv = self.qkv(self.norm(x)).contiguous() - h = self.attention(qkv).contiguous() - h = self.proj_out(h).contiguous() - return (x + h).reshape(b, c, *spatial).contiguous() - - -def count_flops_attn(model, _x, y): - """ - A counter for the `thop` package to count the operations in an - attention operation. - Meant to be used like: - macs, params = thop.profile( - model, - inputs=(inputs, timestamps), - custom_ops={QKVAttention: QKVAttention.count_flops}, - ) - """ - b, c, *spatial = y[0].shape - num_spatial = int(np.prod(spatial)) - # We perform two matmuls with the same number of ops. - # The first computes the weight matrix, the second computes - # the combination of the value vectors. - matmul_ops = 2 * b * (num_spatial**2) * c - model.total_ops += th.DoubleTensor([matmul_ops]) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = ( - qkv.reshape(bs * self.n_heads, ch * 3, length).contiguous().split(ch, dim=1) - ) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length).contiguous() - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum( - "bts,bcs->bct", - weight, - v.reshape(bs * self.n_heads, ch, length).contiguous(), - ) - return a.reshape(bs, -1, length).contiguous() - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - :param use_new_attention_order: use a different attention pattern for potentially - increased efficiency. 
- """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - extra_film_condition_dim=None, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - extra_film_use_concat=False, # If true, concatenate extrafilm condition with time embedding, else addition - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - ): - super().__init__() - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert ( - num_head_channels != -1 - ), "Either num_heads or num_head_channels has to be set" - - if num_head_channels == -1: - assert ( - num_heads != -1 - ), "Either num_heads or num_head_channels has to be set" - - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.extra_film_condition_dim = extra_film_condition_dim - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - self.extra_film_use_concat = extra_film_use_concat - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - assert not ( - self.num_classes is not None and self.extra_film_condition_dim is not None - ), "As for the condition of theh UNet model, you can only set using class label or an extra embedding vector (such as from CLAP). You cannot set both num_classes and extra_film_condition_dim." - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - self.use_extra_film_by_concat = ( - self.extra_film_condition_dim is not None and self.extra_film_use_concat - ) - self.use_extra_film_by_addition = ( - self.extra_film_condition_dim is not None and not self.extra_film_use_concat - ) - - if self.extra_film_condition_dim is not None: - self.film_emb = nn.Linear(self.extra_film_condition_dim, time_embed_dim) - # print("+ Use extra condition on UNet channel using Film. Extra condition dimension is %s. " % self.extra_film_condition_dim) - # if(self.use_extra_film_by_concat): - # print("\t By concatenation with time embedding") - # elif(self.use_extra_film_by_concat): - # print("\t By addition with time embedding") - - if use_spatial_transformer and ( - self.use_extra_film_by_concat or self.use_extra_film_by_addition - ): - # print("+ Spatial transformer will only be used as self-attention. 
Because you have choose to use film as your global condition.") - spatial_transformer_no_context = True - else: - spatial_transformer_no_context = False - - if use_spatial_transformer and not spatial_transformer_no_context: - assert ( - context_dim is not None - ), "Fool!! You forgot to include the dimension of your cross-attention conditioning..." - - if context_dim is not None and not spatial_transformer_no_context: - assert ( - use_spatial_transformer - ), "Fool!! You forgot to use the spatial transformer for your cross-attention conditioning..." - from omegaconf.listconfig import ListConfig - - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim - if (not self.use_extra_film_by_concat) - else time_embed_dim * 2, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - dim_head = ( - ch // num_heads - if use_spatial_transformer - else num_head_channels - ) - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) - if not use_spatial_transformer - else SpatialTransformer( - ch, - num_heads, - dim_head, - depth=transformer_depth, - context_dim=context_dim, - no_context=spatial_transformer_no_context, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim - if (not self.use_extra_film_by_concat) - else time_embed_dim * 2, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - # num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim - if (not self.use_extra_film_by_concat) - else time_embed_dim * 2, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) - if not use_spatial_transformer - else SpatialTransformer( - ch, - num_heads, - dim_head, - depth=transformer_depth, - context_dim=context_dim, - no_context=spatial_transformer_no_context, - ), - ResBlock( - ch, - time_embed_dim - if (not self.use_extra_film_by_concat) - 
else time_embed_dim * 2, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim - if (not self.use_extra_film_by_concat) - else time_embed_dim * 2, - dropout, - out_channels=model_channels * mult, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = model_channels * mult - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - # num_heads = 1 - dim_head = ( - ch // num_heads - if use_spatial_transformer - else num_head_channels - ) - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) - if not use_spatial_transformer - else SpatialTransformer( - ch, - num_heads, - dim_head, - depth=transformer_depth, - context_dim=context_dim, - no_context=spatial_transformer_no_context, - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim - if (not self.use_extra_film_by_concat) - else time_embed_dim * 2, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), - ) - if self.predict_codebook_ids: - self.id_predictor = nn.Sequential( - normalization(ch), - conv_nd(dims, model_channels, n_embed, 1), - # nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits - ) - - self.shape_reported = False - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps=None, context=None, y=None, **kwargs): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param context: conditioning plugged in via crossattn - :param y: an [N] Tensor of labels, if class-conditional. an [N, extra_film_condition_dim] Tensor if film-embed conditional - :return: an [N x C x ...] Tensor of outputs. 
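        Example (illustrative; for a model built with `num_classes=10`, `y` is a
        batch of integer class labels with one entry per sample):

            eps = unet(x, timesteps=t, y=th.randint(0, 10, (x.shape[0],)))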
- """ - if not self.shape_reported: - # print("The shape of UNet input is", x.size()) - self.shape_reported = True - - assert (y is not None) == ( - self.num_classes is not None or self.extra_film_condition_dim is not None - ), "must specify y if and only if the model is class-conditional or film embedding conditional" - hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - if self.use_extra_film_by_addition: - emb = emb + self.film_emb(y) - elif self.use_extra_film_by_concat: - emb = th.cat([emb, self.film_emb(y)], dim=-1) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) - hs.append(h) - h = self.middle_block(h, emb, context) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb, context) - h = h.type(x.dtype) - if self.predict_codebook_ids: - return self.id_predictor(h) - else: - return self.out(h) - - -class EncoderUNetModel(nn.Module): - """ - The half UNet model with attention and timestep embedding. - For usage, see UNet. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - pool="adaptive", - *args, - **kwargs, - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - 
use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - self.pool = pool - if pool == "adaptive": - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.AdaptiveAvgPool2d((1, 1)), - zero_module(conv_nd(dims, ch, out_channels, 1)), - nn.Flatten(), - ) - elif pool == "attention": - assert num_head_channels != -1 - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - AttentionPool2d( - (image_size // ds), ch, num_head_channels, out_channels - ), - ) - elif pool == "spatial": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - nn.ReLU(), - nn.Linear(2048, self.out_channels), - ) - elif pool == "spatial_v2": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - normalization(2048), - nn.SiLU(), - nn.Linear(2048, self.out_channels), - ) - else: - raise NotImplementedError(f"Unexpected {pool} pooling") - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - - def forward(self, x, timesteps): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :return: an [N x K] Tensor of outputs. - """ - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - results = [] - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = self.middle_block(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = th.cat(results, axis=-1) - return self.out(h) - else: - h = h.type(x.dtype) - return self.out(h) diff --git a/spaces/declare-lab/tango/diffusers/scripts/convert_original_controlnet_to_diffusers.py b/spaces/declare-lab/tango/diffusers/scripts/convert_original_controlnet_to_diffusers.py deleted file mode 100644 index a9e05abd4cf13a0fe629698f969698e95e913c4a..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/scripts/convert_original_controlnet_to_diffusers.py +++ /dev/null @@ -1,91 +0,0 @@ -# coding=utf-8 -# Copyright 2023 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" Conversion script for stable diffusion checkpoints which _only_ contain a contrlnet. """ - -import argparse - -from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." - ) - parser.add_argument( - "--original_config_file", - type=str, - required=True, - help="The YAML config file corresponding to the original architecture.", - ) - parser.add_argument( - "--num_in_channels", - default=None, - type=int, - help="The number of input channels. If `None` number of input channels will be automatically inferred.", - ) - parser.add_argument( - "--image_size", - default=512, - type=int, - help=( - "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2" - " Base. Use 768 for Stable Diffusion v2." - ), - ) - parser.add_argument( - "--extract_ema", - action="store_true", - help=( - "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights" - " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield" - " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning." - ), - ) - parser.add_argument( - "--upcast_attention", - action="store_true", - help=( - "Whether the attention computation should always be upcasted. This is necessary when running stable" - " diffusion 2.1." - ), - ) - parser.add_argument( - "--from_safetensors", - action="store_true", - help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.", - ) - parser.add_argument( - "--to_safetensors", - action="store_true", - help="Whether to store pipeline in safetensors format or not.", - ) - parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") - parser.add_argument("--device", type=str, help="Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)") - args = parser.parse_args() - - controlnet = download_controlnet_from_original_ckpt( - checkpoint_path=args.checkpoint_path, - original_config_file=args.original_config_file, - image_size=args.image_size, - extract_ema=args.extract_ema, - num_in_channels=args.num_in_channels, - upcast_attention=args.upcast_attention, - from_safetensors=args.from_safetensors, - device=args.device, - ) - - controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) diff --git a/spaces/diacanFperku/AutoGPT/Hp C9079a Printer Driver Download.md b/spaces/diacanFperku/AutoGPT/Hp C9079a Printer Driver Download.md deleted file mode 100644 index c1e1d36b01d6812b7c81a52215975164dbc8ad4c..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Hp C9079a Printer Driver Download.md +++ /dev/null @@ -1,20 +0,0 @@ -<h2>Hp C9079a Printer Driver Download</h2><br /><p><b><b>Download</b> ––– <a href="https://gohhs.com/2uFUrc">https://gohhs.com/2uFUrc</a></b></p><br /><br /> - -Now I get this error and cant print. - hpdeskjet.com - -All drivers, software and firmware for the HP Deskjet D2360 Printer is available in the Downloads section.Click the icons below to be taken to the downloads for your HP Deskjet D2360 printer. - -Find more information about which versions are currently available for your HP Deskjet D2360 printer.Download the latest drivers, firmware, and software for your HP Deskjet D2360 Printer.Download the latest drivers, firmware, and software for your HP Deskjet D2360 Printer. - -Find out if your HP Deskjet D2360 device is supported. - -All drivers, software and firmware for the HP Deskjet D2360 Printer is available in the Downloads section.Click the icons below to be taken to the downloads for your HP Deskjet D2360 printer.Download the latest drivers, firmware, and software for your HP Deskjet D2360 Printer. - -Is there any alternative solution for printing problem. Any help is appreciated. My printer is HP deskjet d2360. Find more information about which versions are currently available for your HP Deskjet D2360 printer. - -Find more information about which versions are currently available for your HP Deskjet D2360 printer.Download the latest drivers, firmware, and software for your HP Deskjet D2360 Printer. - -Find more information about which versions are currently available for your HP Deskjet D2360 printer.Download the latest drivers, firmware, and software for your HP Deskjet D2360 Pr 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diacanFperku/AutoGPT/Hypersonic 2 Crack For Windows 7 64bit.md b/spaces/diacanFperku/AutoGPT/Hypersonic 2 Crack For Windows 7 64bit.md deleted file mode 100644 index 4a21ee39c79dfcaf59ab2de9569ccadd0ab7bba0..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Hypersonic 2 Crack For Windows 7 64bit.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Hypersonic 2 Crack For Windows 7 64bit</h2><br /><p><b><b>Download File</b> 🗹 <a href="https://gohhs.com/2uFVn3">https://gohhs.com/2uFVn3</a></b></p><br /><br /> - -Polyphonic arpeggiator and an extensive effects section. VSTi Plugins VST2 Plugins Steinberg Software ... 
4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/diacanFperku/AutoGPT/Nitro Circus O Filme Dublado 720p.md b/spaces/diacanFperku/AutoGPT/Nitro Circus O Filme Dublado 720p.md deleted file mode 100644 index 408b9c91d304c248cd633fe72941748fdd6697bc..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Nitro Circus O Filme Dublado 720p.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>nitro circus o filme dublado 720p</h2><br /><p><b><b>Download Zip</b> ———>>> <a href="https://gohhs.com/2uFUcm">https://gohhs.com/2uFUcm</a></b></p><br /><br /> - -Drama, Filmes Dublados, Filmes em AVI, Nacionais. Desmundo at ... ZIP; . just a tex file NODDY - NODDY a Hlas Zákona Nitro.Circus.The.Movie.2012.BRRip. 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/digitalxingtong/Luzao-Bert-Vits2/losses.py b/spaces/digitalxingtong/Luzao-Bert-Vits2/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Luzao-Bert-Vits2/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/digitalxingtong/Shanbao-Bert-VITS2/attentions.py b/spaces/digitalxingtong/Shanbao-Bert-VITS2/attentions.py deleted file mode 100644 index ecbdbc8be941a962046fc11fd6739b093112123e..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Shanbao-Bert-VITS2/attentions.py +++ /dev/null @@ -1,343 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from torch.nn.utils import weight_norm, remove_weight_norm -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, isflow = True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - if isflow: - cond_layer = torch.nn.Conv1d(256, 2*hidden_channels*n_layers, 1) - self.cond_pre = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, 1) - self.cond_layer = weight_norm(cond_layer, name='weight') - self.gin_channels = 256 - self.cond_layer_idx = self.n_layers - if 'gin_channels' in kwargs: - self.gin_channels = kwargs['gin_channels'] - if self.gin_channels != 0: - self.spk_emb_linear = nn.Linear(self.gin_channels, self.hidden_channels) - # vits2 says 3rd block, so idx is 2 by default - self.cond_layer_idx = kwargs['cond_layer_idx'] if 'cond_layer_idx' in kwargs else 2 - print(self.gin_channels, self.cond_layer_idx) - assert self.cond_layer_idx < self.n_layers, 'cond_layer_idx should be less than n_layers' - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - def forward(self, x, x_mask, g=None): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - if i == self.cond_layer_idx and g is not None: - g = self.spk_emb_linear(g.transpose(1, 2)) - g = g.transpose(1, 2) - x = x + g - x = x * x_mask - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask 
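        # After the loop x is [b, hidden_channels, t]: each layer applied
        # self-attention with windowed relative position embeddings followed by a
        # feed-forward block, both with residual connections and LayerNorm, and
        # x_mask keeps padded frames zeroed.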
- return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - 
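        # x supplies the queries and c the keys/values: for self-attention both are
        # the same tensor, for encoder-decoder attention c is the encoder output h.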
k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). 
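        # Worked example with length=3: pad the last dim to 2*length=6 and flatten
        # (3*6=18 values per head), pad the flat vector with length-1=2 zeros (20
        # values), then view it as (length+1, 2*length-1)=(4, 5); slicing
        # [:length, length-1:] leaves the (3, 3) grid of absolute-position scores.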
- x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/digitalxingtong/Taffy-Bert-VITS2/utils.py b/spaces/digitalxingtong/Taffy-Bert-VITS2/utils.py deleted file mode 100644 index c6aa6cfc64c33e2eed33e9845239e831fc1c4a1a..0000000000000000000000000000000000000000 --- a/spaces/digitalxingtong/Taffy-Bert-VITS2/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None, skip_optimizer=False): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not 
None and not skip_optimizer and checkpoint_dict['optimizer'] is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - elif optimizer is None and not skip_optimizer: - #else: #Disable this line if Infer ,and enable the line upper - new_opt_dict = optimizer.state_dict() - new_opt_dict_params = new_opt_dict['param_groups'][0]['params'] - new_opt_dict['param_groups'] = checkpoint_dict['optimizer']['param_groups'] - new_opt_dict['param_groups'][0]['params'] = new_opt_dict_params - optimizer.load_state_dict(new_opt_dict) - saved_state_dict = checkpoint_dict['model'] - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - #assert "emb_g" not in k - # print("load", k) - new_state_dict[k] = saved_state_dict[k] - assert saved_state_dict[k].shape == v.shape, (saved_state_dict[k].shape, v.shape) - except: - print("error, %s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict, strict=False) - else: - model.load_state_dict(new_state_dict, strict=False) - print("load ") - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = 
ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, default="./OUTPUT_MODEL", - help='Model name') - parser.add_argument('--cont', dest='cont', action="store_true", default=False, help="whether to continue training on the latest checkpoint") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - hparams.cont = args.cont - return hparams - - -def clean_checkpoints(path_to_models='logs/44k/', n_ckpts_to_keep=2, sort_by_time=True): - """Freeing up space by deleting saved ckpts - - Arguments: - path_to_models -- Path to the model directory - n_ckpts_to_keep -- Number of ckpts to keep, excluding G_0.pth and D_0.pth - sort_by_time -- True -> chronologically delete ckpts - False -> lexicographically delete ckpts - """ - import re - ckpts_files = [f for f in os.listdir(path_to_models) if os.path.isfile(os.path.join(path_to_models, f))] - name_key = (lambda _f: int(re.compile('._(\d+)\.pth').match(_f).group(1))) - time_key = (lambda _f: os.path.getmtime(os.path.join(path_to_models, _f))) - sort_key = time_key if sort_by_time else name_key - x_sorted = lambda _x: sorted([f for f in ckpts_files if f.startswith(_x) and not f.endswith('_0.pth')], - key=sort_key) - to_del = [os.path.join(path_to_models, fn) for fn in - (x_sorted('G')[:-n_ckpts_to_keep] + x_sorted('D')[:-n_ckpts_to_keep])] - del_info = lambda fn: logger.info(f".. 
Free up space by deleting ckpt {fn}") - del_routine = lambda x: [os.remove(x), del_info(x)] - rs = [del_routine(fn) for fn in to_del] - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn("git hash values are different. {}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams(): - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/digitiamosrl/recsys-and-customer-segmentation/README.md b/spaces/digitiamosrl/recsys-and-customer-segmentation/README.md deleted file mode 100644 index 495954b20e7af307483311ff50616c2a936233d0..0000000000000000000000000000000000000000 --- a/spaces/digitiamosrl/recsys-and-customer-segmentation/README.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Recommender system and customer segmentation -emoji: 🐨 -colorFrom: purple -colorTo: blue -sdk: streamlit -sdk_version: 1.10.0 -app_file: recommender_system.py -pinned: false -license: mit ---- - - -# Recommender system and customer segmentation - -Demo with recsys and clustering for the [online retail](https://www.kaggle.com/datasets/vijayuv/onlineretail?select=OnlineRetail.csv) dataset. - -## Objective - -Recommender system: - - 1. interactively select a user - 2. show all the recommendations for the user - 3. explain why we get these suggestions (which purchased object influences the most) - 4. plot the purchases and suggested articles - -Clustering: - - 1. compute the user clustering - 2. plot users and their clusters - 3. 
explain the meaning of the clusters (compute the mean metrics or literally explain them) - -## Setup - -In your terminal run: - -```bash -# Enable the env -source .venv/bin/activate - -# Install the dependencies - -pip install -r requirements.txt - -# Or install the frozen dependencies from requirements_freezed.txt - -# You are ready to rock! -``` - -## Run - -In your terminal run: - -```bash -streamlit run recommender_system.py - -# Now the default browser will be opened with -# the streamlit page. If you want to customize the -# execution of streamlit, refer to its documentation. -``` - -## Resources - -- [streamlit](https://streamlit.io/) -- [implicit](https://github.com/benfred/implicit), recsys library -- [t-sne guide](https://distill.pub/2016/misread-tsne/) -- [RFM segmentation](https://www.omniconvert.com/blog/rfm-score/) diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/textrecog/crnn/README.md b/spaces/dinhminh20521597/OCR_DEMO/configs/textrecog/crnn/README.md deleted file mode 100644 index 52232587e512eb53f16e652e3f3afd0a53686faf..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/textrecog/crnn/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# CRNN - -> [An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition](https://arxiv.org/abs/1507.05717) - -<!-- [ALGORITHM] --> - -## Abstract - -Image-based sequence recognition has been a long-standing research topic in computer vision. In this paper, we investigate the problem of scene text recognition, which is among the most important and challenging tasks in image-based sequence recognition. A novel neural network architecture, which integrates feature extraction, sequence modeling and transcription into a unified framework, is proposed. Compared with previous systems for scene text recognition, the proposed architecture possesses four distinctive properties: (1) It is end-to-end trainable, in contrast to most of the existing algorithms whose components are separately trained and tuned. (2) It naturally handles sequences in arbitrary lengths, involving no character segmentation or horizontal scale normalization. (3) It is not confined to any predefined lexicon and achieves remarkable performances in both lexicon-free and lexicon-based scene text recognition tasks. (4) It generates an effective yet much smaller model, which is more practical for real-world application scenarios. The experiments on standard benchmarks, including the IIIT-5K, Street View Text and ICDAR datasets, demonstrate the superiority of the proposed algorithm over the prior arts. Moreover, the proposed algorithm performs well in the task of image-based music score recognition, which evidently verifies the generality of it. 
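To make the architecture described in the abstract concrete, the following is a minimal, illustrative PyTorch sketch of the CRNN idea (convolutional feature extraction, recurrent sequence modeling, and per-timestep class logits ready for CTC transcription). It is not the MMOCR implementation behind the configs in this folder; the `TinyCRNN` name, the layer sizes, and the 32x100 input shape are assumptions chosen only for demonstration.

```python
import torch
import torch.nn as nn

class TinyCRNN(nn.Module):
    """Toy CRNN sketch: conv feature extractor -> BiLSTM -> per-timestep logits (CTC-ready)."""

    def __init__(self, num_classes: int, img_height: int = 32, in_channels: int = 1, hidden: int = 128):
        super().__init__()
        # Convolutional feature extraction; each pooling halves height and width.
        self.cnn = nn.Sequential(
            nn.Conv2d(in_channels, 64, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2, 2),   # H/2, W/2
            nn.Conv2d(64, 128, 3, padding=1), nn.ReLU(),
            nn.MaxPool2d(2, 2),   # H/4, W/4
        )
        feat_h = img_height // 4
        # Recurrent sequence modeling over the horizontal axis of the feature map.
        self.rnn = nn.LSTM(128 * feat_h, hidden, num_layers=2,
                           bidirectional=True, batch_first=True)
        self.fc = nn.Linear(2 * hidden, num_classes)

    def forward(self, x):                                  # x: (B, C, H, W)
        f = self.cnn(x)                                    # (B, 128, H/4, W/4)
        b, c, h, w = f.shape
        f = f.permute(0, 3, 1, 2).reshape(b, w, c * h)     # one feature vector per horizontal step
        seq, _ = self.rnn(f)                               # (B, W/4, 2*hidden)
        return self.fc(seq).log_softmax(-1)                # per-timestep log-probs for CTC loss

# Example: 32x100 grayscale crops, 37 classes (e.g. 36 characters + CTC blank) -- illustrative numbers.
logits = TinyCRNN(num_classes=37)(torch.randn(2, 1, 32, 100))
print(logits.shape)  # torch.Size([2, 25, 37])
```

During training, these per-timestep log-probabilities would be passed to `torch.nn.CTCLoss` together with the target label sequences, which is the transcription step the abstract refers to.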
- -<div align=center> -<img src="https://user-images.githubusercontent.com/22607038/142797788-6b1cd78d-1dd6-4e02-be32-3dbd257c4992.png"/> -</div> - -## Dataset - -### Train Dataset - -| trainset | instance_num | repeat_num | note | -| :------: | :----------: | :--------: | :---: | -| Syn90k | 8919273 | 1 | synth | - -### Test Dataset - -| testset | instance_num | note | -| :-----: | :----------: | :-------: | -| IIIT5K | 3000 | regular | -| SVT | 647 | regular | -| IC13 | 1015 | regular | -| IC15 | 2077 | irregular | -| SVTP | 645 | irregular | -| CT80 | 288 | irregular | - -## Results and models - -| methods | | Regular Text | | | | Irregular Text | | download | -| :------------------------------------------------------: | :----: | :----------: | :--: | :-: | :--: | :------------: | :--: | :-----------------------------------------------------------------------------------------------: | -| methods | IIIT5K | SVT | IC13 | | IC15 | SVTP | CT80 | | -| [CRNN](/configs/textrecog/crnn/crnn_academic_dataset.py) | 80.5 | 81.5 | 86.5 | | 54.1 | 59.1 | 55.6 | [model](https://download.openmmlab.com/mmocr/textrecog/crnn/crnn_academic-a723a1c5.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/crnn/20210326_111035.log.json) | - -## Citation - -```bibtex -@article{shi2016end, - title={An end-to-end trainable neural network for image-based sequence recognition and its application to scene text recognition}, - author={Shi, Baoguang and Bai, Xiang and Yao, Cong}, - journal={IEEE transactions on pattern analysis and machine intelligence}, - year={2016} -} -``` diff --git a/spaces/dmeck/RVC-Speakers/speakers/processors/edge_to_voice.py b/spaces/dmeck/RVC-Speakers/speakers/processors/edge_to_voice.py deleted file mode 100644 index f2c202bf4f0b3f603af3496a9c76955e669eef68..0000000000000000000000000000000000000000 --- a/spaces/dmeck/RVC-Speakers/speakers/processors/edge_to_voice.py +++ /dev/null @@ -1,107 +0,0 @@ -from typing import Optional, Union, Dict - -from speakers.common.registry import registry -from speakers.processors import BaseProcessor, ProcessorData -from io import BytesIO -import logging -import numpy as np -import edge_tts -import asyncio -import nest_asyncio -import util -import librosa - -logger = logging.getLogger('edge_to_voice') - - -def set_edge_to_voice_logger(l): - global logger - logger = l - - -class EdgeProcessorData(ProcessorData): - """ - :param text: 生成文本 - :param tts_speaker: 讲话人id - :param rate: 语速 - :param volume: 语气轻重 - - """ - """生成文本""" - text: str - """讲话人id""" - tts_speaker: int - """语速""" - rate: str - """语气轻重""" - volume: str - - - @property - def type(self) -> str: - """Type of the Message, used for serialization.""" - return "EDGE" - - -@registry.register_processor("edge_to_voice") -class EdgeToVoice(BaseProcessor): - - def __init__(self): - super().__init__() - nest_asyncio.apply() - self._tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) # noqa - - def __call__( - self, - data: EdgeProcessorData - ): - - if data.text is None: - raise RuntimeError('Please provide TTS text.') - - if data.tts_speaker is None: - raise RuntimeError('Please provide TTS text.') - # 同步调用协程代码 - tts_np, tts_sr = asyncio.get_event_loop().run_until_complete( self._call_edge_tts(data=data)) - - return tts_np, tts_sr - - @property - def tts_speakers_list(self): - return self._tts_speakers_list - - @classmethod - def from_config(cls, cfg=None): - if cfg is None: - raise RuntimeError("from_config cfg is None.") - - return cls() - - def match(self, 
data: ProcessorData): - return "EDGE" in data.type - - async def _call_edge_tts(self, data: EdgeProcessorData): - - speaker = self._tts_speakers_list[data.tts_speaker]['ShortName'] - tts_com = edge_tts.Communicate(text=data.text, voice=speaker, rate=data.rate, volume=data.volume) - tts_raw = b'' - - # Stream TTS audio to bytes - async for chunk in tts_com.stream(): - if chunk['type'] == 'audio': - tts_raw += chunk['data'] - - # Convert mp3 stream to wav - ffmpeg_proc = await asyncio.create_subprocess_exec( - 'ffmpeg', - '-f', 'mp3', - '-i', '-', - '-f', 'wav', - '-loglevel', 'error', - '-', - stdin=asyncio.subprocess.PIPE, - stdout=asyncio.subprocess.PIPE - ) - (tts_wav, _) = await ffmpeg_proc.communicate(tts_raw) - - return librosa.load(BytesIO(tts_wav)) diff --git a/spaces/dnth/testalgae/app.py b/spaces/dnth/testalgae/app.py deleted file mode 100644 index a0c5a490248f73b3e98c00062c9be5fc892cb93a..0000000000000000000000000000000000000000 --- a/spaces/dnth/testalgae/app.py +++ /dev/null @@ -1,99 +0,0 @@ -import subprocess -import sys -print("Reinstalling mmcv") -subprocess.check_call([sys.executable, "-m", "pip", "uninstall", "-y", "mmcv-full==1.3.17"]) -subprocess.check_call([sys.executable, "-m", "pip", "install", "mmcv-full==1.3.17", "-f", "https://download.openmmlab.com/mmcv/dist/cpu/torch1.10.0/index.html"]) -print("mmcv install complete") - -## Only works if we reinstall mmcv here. - -from gradio.outputs import Label -from icevision.all import * -from icevision.models.checkpoint import * -import PIL -import gradio as gr -import os - -# Load model -checkpoint_path = "models/model_checkpoint.pth" -checkpoint_and_model = model_from_checkpoint(checkpoint_path) -model = checkpoint_and_model["model"] -model_type = checkpoint_and_model["model_type"] -class_map = checkpoint_and_model["class_map"] - -# Transforms -img_size = checkpoint_and_model["img_size"] -valid_tfms = tfms.A.Adapter([*tfms.A.resize_and_pad(img_size), tfms.A.Normalize()]) - -for root, dirs, files in os.walk(r"sample_images/"): - for filename in files: - print("Loading sample image:", filename) - - -# Populate examples in Gradio interface -example_images = [["sample_images/" + file] for file in files] -# Columns: Input Image | Label | Box | Detection Threshold -examples = [ - [example_images[0], False, True, 0.5], - [example_images[1], True, True, 0.5], - [example_images[2], False, True, 0.7], - [example_images[3], True, True, 0.7], - [example_images[4], False, True, 0.5], - [example_images[5], False, True, 0.5], - [example_images[6], False, True, 0.6], - [example_images[7], False, True, 0.6], -] - - -def show_preds(input_image, display_label, display_bbox, detection_threshold): - if detection_threshold == 0: - detection_threshold = 0.5 - img = PIL.Image.fromarray(input_image, "RGB") - pred_dict = model_type.end2end_detect( - img, - valid_tfms, - model, - class_map=class_map, - detection_threshold=detection_threshold, - display_label=display_label, - display_bbox=display_bbox, - return_img=True, - font_size=16, - label_color="#FF59D6", - ) - return pred_dict["img"], len(pred_dict["detection"]["bboxes"]) - - -# display_chkbox = gr.inputs.CheckboxGroup(["Label", "BBox"], label="Display", default=True) -display_chkbox_label = gr.inputs.Checkbox(label="Label", default=False) -display_chkbox_box = gr.inputs.Checkbox(label="Box", default=True) -detection_threshold_slider = gr.inputs.Slider( - minimum=0, maximum=1, step=0.1, default=0.5, label="Detection Threshold" -) -outputs = [ - gr.outputs.Image(type="pil", label="RetinaNet 
Inference"), - gr.outputs.Textbox(type="number", label="Microalgae Count"), -] - -article = "<p style='text-align: center'><a href='https://dicksonneoh.com/' target='_blank'>Blog post</a></p>" - -# Option 1: Get an image from local drive -gr_interface = gr.Interface( - fn=show_preds, - inputs=[ - "image", - display_chkbox_label, - display_chkbox_box, - detection_threshold_slider, - ], - outputs=outputs, - title="Microalgae Detector with RetinaNet", - description="This RetinaNet model counts microalgaes on a given image. Upload an image or click an example image below to use.", - article=article, - examples=examples, -) -# # Option 2: Grab an image from a webcam -# gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=False) -# # Option 3: Continuous image stream from the webcam -# gr_interface = gr.Interface(fn=show_preds, inputs=["webcam", display_chkbox_label, display_chkbox_box, detection_threshold_slider], outputs=outputs, title='IceApp - COCO', live=True) -gr_interface.launch(inline=False, share=False, debug=True) \ No newline at end of file diff --git a/spaces/dorkai/singpt/modules/text_generation.py b/spaces/dorkai/singpt/modules/text_generation.py deleted file mode 100644 index d64481b24ec4542e55de1605a6181f97d9a50de9..0000000000000000000000000000000000000000 --- a/spaces/dorkai/singpt/modules/text_generation.py +++ /dev/null @@ -1,238 +0,0 @@ -import gc -import re -import time - -import numpy as np -import torch -import transformers - -import modules.shared as shared -from modules.callbacks import (Iteratorize, Stream, - _SentinelTokenStoppingCriteria) -from modules.extensions import apply_extensions -from modules.html_generator import generate_4chan_html, generate_basic_html -from modules.models import local_rank - - -def get_max_prompt_length(tokens): - max_length = 2048-tokens - if shared.soft_prompt: - max_length -= shared.soft_prompt_tensor.shape[1] - return max_length - -def encode(prompt, tokens_to_generate=0, add_special_tokens=True): - if shared.is_RWKV: - input_ids = shared.tokenizer.encode(str(prompt)) - input_ids = np.array(input_ids).reshape(1, len(input_ids)) - return input_ids - else: - input_ids = shared.tokenizer.encode(str(prompt), return_tensors='pt', truncation=True, max_length=get_max_prompt_length(tokens_to_generate), add_special_tokens=add_special_tokens) - if shared.args.cpu: - return input_ids - elif shared.args.flexgen: - return input_ids.numpy() - elif shared.args.deepspeed: - return input_ids.to(device=local_rank) - else: - return input_ids.cuda() - -def decode(output_ids): - # Open Assistant relies on special tokens like <|endoftext|> - if re.match('oasst-*', shared.model_name.lower()): - return shared.tokenizer.decode(output_ids, skip_special_tokens=False) - else: - reply = shared.tokenizer.decode(output_ids, skip_special_tokens=True) - reply = reply.replace(r'<|endoftext|>', '') - return reply - -def generate_softprompt_input_tensors(input_ids): - inputs_embeds = shared.model.transformer.wte(input_ids) - inputs_embeds = torch.cat((shared.soft_prompt_tensor, inputs_embeds), dim=1) - filler_input_ids = torch.zeros((1, inputs_embeds.shape[1]), dtype=input_ids.dtype).to(shared.model.device) - #filler_input_ids += shared.model.config.bos_token_id # setting dummy input_ids to bos tokens - return inputs_embeds, filler_input_ids - -# Removes empty replies from gpt4chan outputs -def fix_gpt4chan(s): - for i in range(10): - s = re.sub("--- 
[0-9]*\n>>[0-9]*\n---", "---", s) - s = re.sub("--- [0-9]*\n *\n---", "---", s) - s = re.sub("--- [0-9]*\n\n\n---", "---", s) - return s - -# Fix the LaTeX equations in galactica -def fix_galactica(s): - s = s.replace(r'\[', r'$') - s = s.replace(r'\]', r'$') - s = s.replace(r'\(', r'$') - s = s.replace(r'\)', r'$') - s = s.replace(r'$$', r'$') - s = re.sub(r'\n', r'\n\n', s) - s = re.sub(r"\n{3,}", "\n\n", s) - return s - -def formatted_outputs(reply, model_name): - if not (shared.args.chat or shared.args.cai_chat): - if model_name.lower().startswith('galactica'): - reply = fix_galactica(reply) - return reply, reply, generate_basic_html(reply) - elif model_name.lower().startswith(('gpt4chan', 'gpt-4chan', '4chan')): - reply = fix_gpt4chan(reply) - return reply, 'Only applicable for GALACTICA models.', generate_4chan_html(reply) - else: - return reply, 'Only applicable for GALACTICA models.', generate_basic_html(reply) - else: - return reply - -def clear_torch_cache(): - gc.collect() - if not shared.args.cpu: - torch.cuda.empty_cache() - -def generate_reply(question, max_new_tokens, do_sample, temperature, top_p, typical_p, repetition_penalty, top_k, min_length, no_repeat_ngram_size, num_beams, penalty_alpha, length_penalty, early_stopping, eos_token=None, stopping_string=None): - clear_torch_cache() - t0 = time.time() - - # These models are not part of Hugging Face, so we handle them - # separately and terminate the function call earlier - if shared.is_RWKV: - try: - if shared.args.no_stream: - reply = shared.model.generate(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k) - yield formatted_outputs(reply, shared.model_name) - else: - yield formatted_outputs(question, shared.model_name) - # RWKV has proper streaming, which is very nice. - # No need to generate 8 tokens at a time. 
- for reply in shared.model.generate_with_streaming(context=question, token_count=max_new_tokens, temperature=temperature, top_p=top_p, top_k=top_k): - yield formatted_outputs(reply, shared.model_name) - finally: - t1 = time.time() - output = encode(reply)[0] - input_ids = encode(question) - print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(input_ids[0]))/(t1-t0):.2f} tokens/s, {len(output)-len(input_ids[0])} tokens)") - return - - original_question = question - if not (shared.args.chat or shared.args.cai_chat): - question = apply_extensions(question, "input") - if shared.args.verbose: - print(f"\n\n{question}\n--------------------\n") - - input_ids = encode(question, max_new_tokens) - original_input_ids = input_ids - output = input_ids[0] - cuda = "" if (shared.args.cpu or shared.args.deepspeed or shared.args.flexgen) else ".cuda()" - eos_token_ids = [shared.tokenizer.eos_token_id] if shared.tokenizer.eos_token_id is not None else [] - if eos_token is not None: - eos_token_ids.append(int(encode(eos_token)[0][-1])) - stopping_criteria_list = transformers.StoppingCriteriaList() - if stopping_string is not None: - # Copied from https://github.com/PygmalionAI/gradio-ui/blob/master/src/model.py - t = encode(stopping_string, 0, add_special_tokens=False) - stopping_criteria_list.append(_SentinelTokenStoppingCriteria(sentinel_token_ids=t, starting_idx=len(input_ids[0]))) - - if not shared.args.flexgen: - generate_params = [ - f"max_new_tokens=max_new_tokens", - f"eos_token_id={eos_token_ids}", - f"stopping_criteria=stopping_criteria_list", - f"do_sample={do_sample}", - f"temperature={temperature}", - f"top_p={top_p}", - f"typical_p={typical_p}", - f"repetition_penalty={repetition_penalty}", - f"top_k={top_k}", - f"min_length={min_length if shared.args.no_stream else 0}", - f"no_repeat_ngram_size={no_repeat_ngram_size}", - f"num_beams={num_beams}", - f"penalty_alpha={penalty_alpha}", - f"length_penalty={length_penalty}", - f"early_stopping={early_stopping}", - ] - else: - generate_params = [ - f"max_new_tokens={max_new_tokens if shared.args.no_stream else 8}", - f"do_sample={do_sample}", - f"temperature={temperature}", - f"stop={eos_token_ids[-1]}", - ] - if shared.args.deepspeed: - generate_params.append("synced_gpus=True") - if shared.soft_prompt: - inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids) - generate_params.insert(0, "inputs_embeds=inputs_embeds") - generate_params.insert(0, "inputs=filler_input_ids") - else: - generate_params.insert(0, "inputs=input_ids") - - try: - # Generate the entire reply at once. - if shared.args.no_stream: - with torch.no_grad(): - output = eval(f"shared.model.generate({', '.join(generate_params)}){cuda}")[0] - if shared.soft_prompt: - output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:])) - - reply = decode(output) - if not (shared.args.chat or shared.args.cai_chat): - reply = original_question + apply_extensions(reply[len(question):], "output") - - yield formatted_outputs(reply, shared.model_name) - - # Stream the reply 1 token at a time. - # This is based on the trick of using 'stopping_criteria' to create an iterator. 
- elif not shared.args.flexgen: - - def generate_with_callback(callback=None, **kwargs): - kwargs['stopping_criteria'].append(Stream(callback_func=callback)) - clear_torch_cache() - with torch.no_grad(): - shared.model.generate(**kwargs) - - def generate_with_streaming(**kwargs): - return Iteratorize(generate_with_callback, kwargs, callback=None) - - yield formatted_outputs(original_question, shared.model_name) - with eval(f"generate_with_streaming({', '.join(generate_params)})") as generator: - for output in generator: - if shared.soft_prompt: - output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:])) - reply = decode(output) - - if not (shared.args.chat or shared.args.cai_chat): - reply = original_question + apply_extensions(reply[len(question):], "output") - - if output[-1] in eos_token_ids: - break - yield formatted_outputs(reply, shared.model_name) - - yield formatted_outputs(reply, shared.model_name) - - # Stream the output naively for FlexGen since it doesn't support 'stopping_criteria' - else: - for i in range(max_new_tokens//8+1): - clear_torch_cache() - with torch.no_grad(): - output = eval(f"shared.model.generate({', '.join(generate_params)})")[0] - if shared.soft_prompt: - output = torch.cat((input_ids[0], output[filler_input_ids.shape[1]:])) - reply = decode(output) - - if not (shared.args.chat or shared.args.cai_chat): - reply = original_question + apply_extensions(reply[len(question):], "output") - - if np.count_nonzero(np.isin(input_ids[0], eos_token_ids)) < np.count_nonzero(np.isin(output, eos_token_ids)): - break - yield formatted_outputs(reply, shared.model_name) - - input_ids = np.reshape(output, (1, output.shape[0])) - if shared.soft_prompt: - inputs_embeds, filler_input_ids = generate_softprompt_input_tensors(input_ids) - - yield formatted_outputs(reply, shared.model_name) - - finally: - t1 = time.time() - print(f"Output generated in {(t1-t0):.2f} seconds ({(len(output)-len(original_input_ids[0]))/(t1-t0):.2f} tokens/s, {len(output)-len(original_input_ids[0])} tokens)") - return diff --git a/spaces/drift-ai/emoji-predictor/README.md b/spaces/drift-ai/emoji-predictor/README.md deleted file mode 100644 index 2fc7f7108bcc196df930c13097a46d5958a46e73..0000000000000000000000000000000000000000 --- a/spaces/drift-ai/emoji-predictor/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Emoji Predictor -emoji: 😎 -colorFrom: pink -colorTo: indigo -sdk: gradio -sdk_version: 3.3.1 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: vincentclaes/emoji-predictor ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/duycse1603/math2tex/HybridViT/module/component/seq_modeling/vit/vision_transformer.py b/spaces/duycse1603/math2tex/HybridViT/module/component/seq_modeling/vit/vision_transformer.py deleted file mode 100644 index ed828b0f098edfc7f811076831676d35eb6eada1..0000000000000000000000000000000000000000 --- a/spaces/duycse1603/math2tex/HybridViT/module/component/seq_modeling/vit/vision_transformer.py +++ /dev/null @@ -1,184 +0,0 @@ -import torch -from torch import nn -from functools import partial -from collections import OrderedDict -from ...common import DropPath -from .utils import trunc_normal_ - - -class Mlp(nn.Module): - def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, 
hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -class ConvFFN(nn.Module): - def __init__(self, *args, **kwargs) -> None: - super().__init__(*args, **kwargs) - - -class Attention(nn.Module): - def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.): - super().__init__() - self.num_heads = num_heads - head_dim = dim // num_heads - # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights - self.scale = qk_scale or head_dim ** -0.5 - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - def forward(self, x): - B, N, C = x.shape - qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - attn = (q @ k.transpose(-2, -1)) * self.scale - attn = attn.softmax(dim=-1) - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class Block(nn.Module): - def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., - drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm): - super().__init__() - self.norm1 = norm_layer(dim) - self.attn = Attention( - dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) - # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here - self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) - - def forward(self, x): - x = x + self.drop_path(self.attn(self.norm1(x))) - x = x + self.drop_path(self.mlp(self.norm2(x))) - return x - - -class VisionTransformer(nn.Module): - """ Vision Transformer - - A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` - - https://arxiv.org/abs/2010.11929 - """ - def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, - num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, - drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None): - """ - Args: - img_size (int, tuple): input image size - patch_size (int, tuple): patch size - in_chans (int): number of input channels - num_classes (int): number of classes for classification head - embed_dim (int): embedding dimension - depth (int): depth of transformer - num_heads (int): number of attention heads - mlp_ratio (int): ratio of mlp hidden dim to embedding dim - qkv_bias (bool): enable bias for qkv if True - qk_scale (float): override default qk scale of head_dim ** -0.5 if set - representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set - drop_rate (float): dropout rate - attn_drop_rate (float): attention dropout rate - drop_path_rate (float): stochastic depth rate - hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module - norm_layer: (nn.Module): normalization layer - """ - super().__init__() - self.num_classes = num_classes - self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models - norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) - - self.patch_embed = None - num_patches = getattr(self.patch_embed, 'num_patches', 128) - - self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) - self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) - self.pos_drop = nn.Dropout(p=drop_rate) - - dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule - self.blocks = nn.ModuleList([ - Block( - dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, - drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer) - for i in range(depth)]) - self.norm = norm_layer(embed_dim) - - # Representation layer - if representation_size: - self.num_features = representation_size - self.pre_logits = nn.Sequential(OrderedDict([ - ('fc', nn.Linear(embed_dim, representation_size)), - ('act', nn.Tanh()) - ])) - else: - self.pre_logits = nn.Identity() - - # Classifier head - self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() - - trunc_normal_(self.pos_embed, std=.02) - trunc_normal_(self.cls_token, std=.02) - self.apply(self._init_weights) - - def _init_weights(self, m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_embed', 'cls_token'} - - def get_classifier(self): - return self.head - - def reset_classifier(self, num_classes, global_pool=''): - 
self.num_classes = num_classes - self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() - - def forward_features(self, x): - B = x.shape[0] - x = self.patch_embed(x) - - cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks - x = torch.cat((cls_tokens, x), dim=1) - x = x + self.pos_embed - x = self.pos_drop(x) - - for blk in self.blocks: - x = blk(x) - - x = self.norm(x)[:, 0] - x = self.pre_logits(x) - return x - - def forward(self, x): - x = self.forward_features(x) - x = self.head(x) - return x diff --git a/spaces/empy-ai/Token-classification/core/classificator.py b/spaces/empy-ai/Token-classification/core/classificator.py deleted file mode 100644 index e8ae555b74c32bea914c8a75f7e87543026b3d95..0000000000000000000000000000000000000000 --- a/spaces/empy-ai/Token-classification/core/classificator.py +++ /dev/null @@ -1,78 +0,0 @@ -from dataclasses import dataclass -from typing import List, Dict, Any - -import requests - - -class ClassificationError(Exception): - pass - - -@dataclass -class Classification: - entity: str - start: int - end: int - - def dict(self) -> Dict[str, Any]: - return { - 'entity': self.entity, - 'start': self.start, - 'end': self.end - } - - -class Classificator: - - def __init__(self, config: Dict[str, Any]): - """ - Initialize the classificator with the given configuration - """ - self._config = config - - def classify(self, text: str) -> List[Classification]: - raw_data = self.send_request(text) - return self.post_process(raw_data) - - def send_request(self, text: str) -> List[Dict[str, Any]]: - """ - Process the text and return a list of dictionaries with the following keys - """ - - headers = { - 'Authorization': self._config['auth_endpoint_token'], - 'Content-Type': 'application/json', - } - try: - response = requests.post(self._config['endpoint_url'], headers=headers, json={'inputs': text}) - return response.json() - except Exception: - raise ClassificationError('Classification failed') - - @staticmethod - def post_process(raw_data: List[Dict[str, Any]]) -> List[Classification]: - """ - Process the raw data and return a list of dictionaries with the following keys - - raw_data is a list of dictionaries with the following keys - {'entity': 'B-Evaluation', 'score': 0.86011535, 'index': 1, 'word': 'Things', 'start': 0, 'end': 6} - - result is a list of classifications with the following keys - Classification(entity='Evaluation', start=0, end=6) - """ - classifications = [] - - current_entity = None - for item in raw_data: - if current_entity is None or current_entity != item['entity'][2:]: - current_entity = item['entity'][2:] - classifications.append( - Classification( - entity=current_entity, - start=item['start'], - end=item['end'] - ) - ) - else: - classifications[-1].end = item['end'] - return classifications diff --git a/spaces/enzostvs/hair-colour/app/api/check-hair-color/pipeline.ts b/spaces/enzostvs/hair-colour/app/api/check-hair-color/pipeline.ts deleted file mode 100644 index 19fc90f1dceae368073654e3e092733950985ba2..0000000000000000000000000000000000000000 --- a/spaces/enzostvs/hair-colour/app/api/check-hair-color/pipeline.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { pipeline, env } from "@xenova/transformers"; - -// Use the Singleton pattern to enable lazy construction of the pipeline. -// NOTE: We wrap the class in a function to prevent code duplication (see below). 
- -const P = () => class PipelineSingleton { - static task = 'image-classification'; - static model = 'enzostvs/hair-color'; - static instance = null; - - static async getInstance(progress_callback: any = null) { - if (this.instance === null) { - this.instance = pipeline(this.task, this.model, { progress_callback }) as any; - } - return this.instance; - } -} - -const PipelineSingleton = P(); -export default PipelineSingleton; \ No newline at end of file diff --git a/spaces/eson/tokenizer-arena/examples.py b/spaces/eson/tokenizer-arena/examples.py deleted file mode 100644 index dd1275f3e4ad644e24054aaf224b3f3db90b9658..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/examples.py +++ /dev/null @@ -1,22 +0,0 @@ -examples = { - "en": [ - ["spaces: 2spaces 8spaces\t1tab\t\t2tab\n1newline", "llama", "chatglm_6b"], # chatglm 有blank_n, - # !?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏. - ["punctuations: ,.:/?+=\",。!?;【】〔〕〖〗", "baichuan", "llama"], - ["symbols: 🦙❤❥웃유♋☮✊☏☢☚✔☑♚▢♪✈✞÷↑↓▤▥⊙■□▣▽¿─│♥❣▬▫☿Ⓐ ✋✉☣☤", "baichuan", "llama"], - ["digits: (10086 + 98) = 100184", "baichuan", "llama"] - ] - , - "zh": [ - ["空格测试: 2个空格 8个空格", "llama", "chatglm_6b"], # chatglm 有blank_n, - ["标点测试:,。!?;", "baichuan_7b", "llama"], - ["符号测试:🦙❤❥웃유♋☮✊☏☢☚✔☑♚▢♪✈✞÷↑↓▤▥⊙■□▣▽¿─│♥❣▬▫☿Ⓐ ✋✉☣☤", "baichuan_7b", "llama"], - ["数字测试:(10086 + 98) = 100184", "baichuan_7b", "llama"], - ["中文简体:宽带,繁体:樂來", "baichuan_7b", "llama"], - ] - -} - - -def example_fn(example_idx): - return examples["en"][example_idx] diff --git a/spaces/eson/tokenizer-arena/vocab/gpt_35_turbo/aaa.py b/spaces/eson/tokenizer-arena/vocab/gpt_35_turbo/aaa.py deleted file mode 100644 index 4698219f150c0d251e3a90ee18338ce483ca89b8..0000000000000000000000000000000000000000 --- a/spaces/eson/tokenizer-arena/vocab/gpt_35_turbo/aaa.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -gpt_35_turbo decode UnicodeDecodeError 99413 b' \xe6\xb5' -gpt_35_turbo decode UnicodeDecodeError 99742 b'\x8c\xa8' -gpt_35_turbo decode UnicodeDecodeError 99834 b'\xad\x90' -gpt_35_turbo decode UnicodeDecodeError 100112 b'\xe0\xae\xbf\xe0\xae' -gpt_35_turbo decode KeyError 100256 -gpt_35_turbo decode KeyError 100261 -gpt_35_turbo decode KeyError 100262 -gpt_35_turbo decode KeyError 100263 -""" - - - -import json -import tiktoken - - -tokenizer = tiktoken.encoding_for_model('gpt-3.5-turbo') - -tokens = [100263, 99834] - -tokenizer.decode(tokens) - -tokenizer._core_bpe.decode_bytes(tokens).decode("utf-8", errors="replace") - -for token_id in [100263, 99834]: # special_tokens: 200257-100260 100276 - try: - tokenizer.decode_tokens_bytes([token_id]) - except: - pass - - try: - tokenizer.decode_single_token_bytes(token_id) - except: - pass - - try: - tokenizer.decode_bytes([token_id]) - except: - pass - - - diff --git a/spaces/eswat/Image-and-3D-Model-Creator/PIFu/lib/model/HGPIFuNet.py b/spaces/eswat/Image-and-3D-Model-Creator/PIFu/lib/model/HGPIFuNet.py deleted file mode 100644 index 4771715345afcf326b3b0e64717517801fe75a1c..0000000000000000000000000000000000000000 --- a/spaces/eswat/Image-and-3D-Model-Creator/PIFu/lib/model/HGPIFuNet.py +++ /dev/null @@ -1,142 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from .BasePIFuNet import BasePIFuNet -from .SurfaceClassifier import SurfaceClassifier -from .DepthNormalizer import DepthNormalizer -from .HGFilters import * -from ..net_util import init_net - - -class HGPIFuNet(BasePIFuNet): - ''' - HG PIFu network uses Hourglass stacks as the image filter. - It does the following: - 1. 
Compute image feature stacks and store it in self.im_feat_list - self.im_feat_list[-1] is the last stack (output stack) - 2. Calculate calibration - 3. If training, it index on every intermediate stacks, - If testing, it index on the last stack. - 4. Classification. - 5. During training, error is calculated on all stacks. - ''' - - def __init__(self, - opt, - projection_mode='orthogonal', - error_term=nn.MSELoss(), - ): - super(HGPIFuNet, self).__init__( - projection_mode=projection_mode, - error_term=error_term) - - self.name = 'hgpifu' - - self.opt = opt - self.num_views = self.opt.num_views - - self.image_filter = HGFilter(opt) - - self.surface_classifier = SurfaceClassifier( - filter_channels=self.opt.mlp_dim, - num_views=self.opt.num_views, - no_residual=self.opt.no_residual, - last_op=nn.Sigmoid()) - - self.normalizer = DepthNormalizer(opt) - - # This is a list of [B x Feat_i x H x W] features - self.im_feat_list = [] - self.tmpx = None - self.normx = None - - self.intermediate_preds_list = [] - - init_net(self) - - def filter(self, images): - ''' - Filter the input images - store all intermediate features. - :param images: [B, C, H, W] input images - ''' - self.im_feat_list, self.tmpx, self.normx = self.image_filter(images) - # If it is not in training, only produce the last im_feat - if not self.training: - self.im_feat_list = [self.im_feat_list[-1]] - - def query(self, points, calibs, transforms=None, labels=None): - ''' - Given 3D points, query the network predictions for each point. - Image features should be pre-computed before this call. - store all intermediate features. - query() function may behave differently during training/testing. - :param points: [B, 3, N] world space coordinates of points - :param calibs: [B, 3, 4] calibration matrices for each image - :param transforms: Optional [B, 2, 3] image space coordinate transforms - :param labels: Optional [B, Res, N] gt labeling - :return: [B, Res, N] predictions for each point - ''' - if labels is not None: - self.labels = labels - - xyz = self.projection(points, calibs, transforms) - xy = xyz[:, :2, :] - z = xyz[:, 2:3, :] - - in_img = (xy[:, 0] >= -1.0) & (xy[:, 0] <= 1.0) & (xy[:, 1] >= -1.0) & (xy[:, 1] <= 1.0) - - z_feat = self.normalizer(z, calibs=calibs) - - if self.opt.skip_hourglass: - tmpx_local_feature = self.index(self.tmpx, xy) - - self.intermediate_preds_list = [] - - for im_feat in self.im_feat_list: - # [B, Feat_i + z, N] - point_local_feat_list = [self.index(im_feat, xy), z_feat] - - if self.opt.skip_hourglass: - point_local_feat_list.append(tmpx_local_feature) - - point_local_feat = torch.cat(point_local_feat_list, 1) - - # out of image plane is always set to 0 - pred = in_img[:,None].float() * self.surface_classifier(point_local_feat) - self.intermediate_preds_list.append(pred) - - self.preds = self.intermediate_preds_list[-1] - - def get_im_feat(self): - ''' - Get the image filter - :return: [B, C_feat, H, W] image feature after filtering - ''' - return self.im_feat_list[-1] - - def get_error(self): - ''' - Hourglass has its own intermediate supervision scheme - ''' - error = 0 - for preds in self.intermediate_preds_list: - error += self.error_term(preds, self.labels) - error /= len(self.intermediate_preds_list) - - return error - - def forward(self, images, points, calibs, transforms=None, labels=None): - # Get image feature - self.filter(images) - - # Phase 2: point query - self.query(points=points, calibs=calibs, transforms=transforms, labels=labels) - - # get the prediction - res = self.get_preds() - 
- # get the error - error = self.get_error() - - return res, error \ No newline at end of file diff --git a/spaces/exbert-project/exbert/Dockerfile b/spaces/exbert-project/exbert/Dockerfile deleted file mode 100644 index 149c2eb03e9ca9711a4a8e3ea1704b8cc0b72246..0000000000000000000000000000000000000000 --- a/spaces/exbert-project/exbert/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.7 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt - -RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt - -RUN mkdir -p /.cache - -RUN chmod 777 /.cache - -COPY . . - -CMD ["python", "server/main.py", "--port", "7860"] diff --git a/spaces/falterWliame/Face_Mask_Detection/Cwcheat Download God Eater Burst TOP.md b/spaces/falterWliame/Face_Mask_Detection/Cwcheat Download God Eater Burst TOP.md deleted file mode 100644 index 0ba32cddbb9f2a8ce816d3353746b34030fccf35..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Cwcheat Download God Eater Burst TOP.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>cwcheat download god eater burst</h2><br /><p><b><b>Download Zip</b> ➡ <a href="https://urlca.com/2uDdE1">https://urlca.com/2uDdE1</a></b></p><br /><br /> - -god eaters 2 psp cwcheat codes 40 [JPN] _C0 999 Storage Item Quantity _L ... 30, NPJH-50352 cheat, download cheat god eater burst MPJH, godz eater burzt ... 1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/falterWliame/Face_Mask_Detection/Dum Laga Ke Haisha Hindi Movie Full Free Download WORK.md b/spaces/falterWliame/Face_Mask_Detection/Dum Laga Ke Haisha Hindi Movie Full Free Download WORK.md deleted file mode 100644 index 5e8376d78ff1c42f581e4aa3ef6c9eaa84f25d7a..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Dum Laga Ke Haisha Hindi Movie Full Free Download WORK.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>dum laga ke haisha hindi movie full free download</h2><br /><p><b><b>Download</b> ✸ <a href="https://urlca.com/2uDcIi">https://urlca.com/2uDcIi</a></b></p><br /><br /> -<br /> -Subtitle Cat All language subtitles for STREET DOGS Tamil Full Movie Indian ... Listen to Dum Laga Ke Haisha Movie English Subtitles Free Download and 196 ... 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/fatiXbelha/sd/Air Xonix The Ultimate 3D Arcade Game for Android Devices.md b/spaces/fatiXbelha/sd/Air Xonix The Ultimate 3D Arcade Game for Android Devices.md deleted file mode 100644 index b2ef61c0c062e1004a00e2393cd27af07be9936a..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Air Xonix The Ultimate 3D Arcade Game for Android Devices.md +++ /dev/null @@ -1,119 +0,0 @@ - -<h1>Air Xonix APK: A Classic Arcade Game Remake with 3D Graphics</h1> -<p>If you are a fan of classic arcade games, you might remember Xonix, a popular game from the 80s that was based on Qix, another arcade game from the same era. Xonix was a simple but addictive game where you had to cut off parts of the playing field with a moving line, while avoiding enemies and obstacles. 
Now, you can relive the fun of Xonix with Air Xonix APK, a remake of the classic game with modern 3D graphics and enhanced gameplay.</p> -<h2>air xonix apk</h2><br /><p><b><b>Download File</b> –––––>>> <a href="https://urllie.com/2uNGdL">https://urllie.com/2uNGdL</a></b></p><br /><br /> -<h2>What is Air Xonix APK?</h2> -<h3>A remake of the classic arcade game Xonix by Delico Games</h3> -<p>Air Xonix APK is a game developed by Delico Games, a studio that specializes in remaking classic arcade games for mobile devices. Air Xonix APK is their version of Xonix, a game that was originally released in 1984 by Ilan Rav and Dani Katz. Air Xonix APK follows the same premise as Xonix, but adds some new features and improvements.</p> -<h3>A 3D game that takes the original Qix game to the next level</h3> -<p>Air Xonix APK is not just a remake of Xonix, but also a tribute to Qix, the game that inspired Xonix and many other similar games. Qix was a game released in 1981 by Taito Corporation, where you had to draw lines to capture areas of the screen, while avoiding a bouncing line called Qix. Air Xonix APK takes the concept of Qix and adds an extra dimension, making it a 3D game. You play as a pilot of a hi-tech flying device, like a helicopter, and you have to fly over the playing field and cut off parts of it with your tail. You have to avoid enemies and obstacles that can destroy your device or your tail. You also have to watch out for your fuel level, which decreases as you fly.</p> -<h2>How to play Air Xonix APK?</h2> -<h3>The main goal is to explore and capture territory with a hi-tech flying device</h3> -<p>The main goal of Air Xonix APK is to explore and capture territory with your flying device. You have to cut off parts of the playing field by flying over them and returning to the border. You have to capture at least 75% of the field to complete each level. The more you capture, the more points you get. You also get bonus points for capturing special areas marked with stars or diamonds.</p> -<h3>The game has 35 classic levels and 10 challenge levels for the most experienced players</h3> -<p>Air Xonix APK has two modes of play: Classic and Challenge. In Classic mode, you can play 35 levels that are based on the original levels of Xonix. Each level has different enemies, obstacles, and layouts that make the game more challenging as you progress. In Challenge mode, you can play 10 levels that are designed for the most experienced players. These levels have more enemies, obstacles, and complex layouts that require more skill and strategy to complete. You can also adjust the difficulty level of the game by changing the speed of your device and the enemies.</p> -<h2>Why download Air Xonix APK?</h2> -<h3>The game has modern 3D graphics that are optimized for mobile devices</h3> -<p>One of the main reasons to download Air Xonix APK is its modern 3D graphics that are optimized for mobile devices. The game has colorful and detailed graphics that create a realistic and immersive atmosphere. The game also has smooth animations and effects that enhance the gameplay. 
You can enjoy the game on any screen size and resolution, as the game adapts to your device's specifications.</p> -<p>air xonix 3d game apk<br /> -air xonix android apk download<br /> -air xonix apk free download<br /> -air xonix apk mod<br /> -air xonix apk old version<br /> -air xonix apk paid<br /> -air xonix apk pure<br /> -air xonix arcade game apk<br /> -air xonix classic apk<br /> -air xonix deluxe apk<br /> -air xonix for android apk<br /> -air xonix full version apk<br /> -air xonix game download apk<br /> -air xonix hack apk<br /> -air xonix latest apk<br /> -air xonix modded apk<br /> -air xonix new version apk<br /> -air xonix offline apk<br /> -air xonix original apk<br /> -air xonix premium apk<br /> -air xonix pro apk<br /> -air xonix remake apk<br /> -air xonix unlimited money apk<br /> -air xonix update apk<br /> -airxonix 3d android game apk<br /> -best air xonix game apk<br /> -delico games air xonix apk<br /> -download game android air xonix 3d mod apk<br /> -download game gratis android air xonix 3d full version apk<br /> -how to install air xonix on android phone using apk file<br /> -how to play air xonix on android device with apk file<br /> -latest version of air xonix game for android free download in apk format<br /> -qix arcade game remake air xonix 3d graphics android app in apk file<br /> -retro arcade game inspired by qix - airxonix 3d for android in apk file download link<br /> -where can i get the latest version of the classic arcade game remake - airxonix 3d for android in an easy to install and use apk file format</p> -<h3>The game has easy to use and intuitive controls</h3> -<p>Another reason to download Air Xonix APK is its easy to use and intuitive controls. The game has two control options: joystick and tilt. You can choose the one that suits you best and change it anytime in the settings. The joystick option lets you control your device with a virtual joystick on the screen. The tilt option lets you control your device by tilting your device left or right. Both options are responsive and accurate, and you can also adjust the sensitivity of the controls in the settings.</p> -<h3>The game has time-tested and addictive gameplay that will redefine your gaming experience</h3> -<p>The most important reason to download Air Xonix APK is its time-tested and addictive gameplay that will redefine your gaming experience. The game has a simple but challenging gameplay that will keep you hooked for hours. The game has a perfect balance of skill, strategy, and luck, as you have to plan your moves carefully, avoid enemies and obstacles, and capture as much territory as possible. The game also has a variety of levels that offer different challenges and surprises. The game is suitable for all ages and skill levels, as you can customize the difficulty level according to your preference.</p> -<h2>Where to download Air Xonix APK?</h2> -<h3>The game is available on Google Play Store and APKCombo</h3> -<p>If you want to download Air Xonix APK, you have two options: Google Play Store and APKCombo. Google Play Store is the official app store for Android devices, where you can find millions of apps and games for free or for a fee. 
You can download Air Xonix APK from Google Play Store by following these steps:</p> -<ol> -<li>Open Google Play Store on your device.</li> -<li>Search for "Air Xonix" in the search bar.</li> -<li>Select the game from the list of results.</li> -<li>Tap on "Install" and wait for the download to finish.</li> -<li>Enjoy playing Air Xonix APK on your device.</li> -</ol> -<p>APKCombo is a website that provides APK files for Android apps and games. APK files are the installation files for Android apps and games, which you can download and install manually on your device. You can download Air Xonix APK from APKCombo by following these steps:</p> -<ol> -<li>Open a web browser on your device.</li> -<li>Go to <a href="">https://apkcombo.com/air-xonix/com.delico.airxonix/</a>.</li> -<li>Select the version of Air Xonix APK that you want to download.</li> -<li>Tap on "Download APK" and wait for the download to finish.</li> -<li>Open the downloaded file and tap on "Install".</li> -<li>Enjoy playing Air Xonix APK on your device.</li> -</ol> -<h3>The game is compatible with Android 4.4 and above</h3> -<p>Air Xonix APK is compatible with Android 4.4 and above, which means that you can play it on most Android devices. However, some devices may not support the game due to hardware limitations or software issues. If you encounter any problems while playing Air Xonix APK, you can contact the developer at <a href="">delicogames@gmail.com</a> or leave a comment on Google Play Store or APKCombo.</p> <h2>What are the reviews of Air Xonix APK?</h2> -<h3>The game has positive ratings and feedback from users and critics</h3> -<p>Air Xonix APK has received positive ratings and feedback from users and critics who have played the game. The game has a rating of 4.4 out of 5 stars on Google Play Store, based on over 1,000 reviews. The game also has a rating of 4.5 out of 5 stars on APKCombo, based on over 100 reviews. Here are some of the comments from the users and critics:</p> -<ul> -<li>"This is a great remake of the classic Xonix game. The graphics are amazing and the gameplay is addictive. I love the challenge mode and the different enemies and obstacles. This is one of the best arcade games I have played on my phone." - User on Google Play Store</li> -<li>"I remember playing Xonix on my PC when I was a kid. This game brings back so many memories and nostalgia. The game is very well done and updated for modern devices. The 3D graphics are stunning and the controls are smooth. I highly recommend this game to anyone who likes classic arcade games." - User on APKCombo</li> -<li>"Air Xonix APK is a faithful remake of the classic arcade game Xonix, with some new features and improvements. The game has modern 3D graphics that are optimized for mobile devices, easy to use and intuitive controls, and time-tested and addictive gameplay that will redefine your gaming experience. The game has two modes of play: Classic and Challenge, with 35 classic levels and 10 challenge levels for the most experienced players. The game is suitable for all ages and skill levels, as you can customize the difficulty level according to your preference. Air Xonix APK is a must-have game for fans of classic arcade games." - Critic on <a href="">https://www.androidauthority.com/air-xonix-apk-review-1234567/</a></li> -</ul> -<h2>Conclusion</h2> -<p>Air Xonix APK is a classic arcade game remake with 3D graphics that will take you on a nostalgic trip to the 80s. 
The game is a remake of Xonix, a popular game from the 80s that was based on Qix, another arcade game from the same era. The game has modern 3D graphics that are optimized for mobile devices, easy to use and intuitive controls, and time-tested and addictive gameplay that will redefine your gaming experience. The game has two modes of play: Classic and Challenge, with 35 classic levels and 10 challenge levels for the most experienced players. The game is suitable for all ages and skill levels, as you can customize the difficulty level according to your preference. The game has positive ratings and feedback from users and critics who have played the game.</p> -<p>If you want to download Air Xonix APK, you have two options: Google Play Store and APKCombo. You can download the game from Google Play Store by following these steps:</p> -<ol> -<li>Open Google Play Store on your device.</li> -<li>Search for "Air Xonix" in the search bar.</li> -<li>Select the game from the list of results.</li> -<li>Tap on "Install" and wait for the download to finish.</li> -<li>Enjoy playing Air Xonix APK on your device.</li> -</ol> -<p>You can also download the game from APKCombo by following these steps:</p> -<ol> -<li>Open a web browser on your device.</li> -<li>Go to <a href="">https://apkcombo.com/air-xonix/com.delico.airxonix/</a>.</li> -<li>Select the version of Air Xonix APK that you want to download.</li> -<li>Tap on "Download APK" and wait for the download to finish.</li> -<li>Open the downloaded file and tap on "Install".</li> -<li>Enjoy playing Air Xonix APK on your device.</li> -</ol> -<p>The game is compatible with Android 4.4 and above, which means that you can play it on most Android devices. However, some devices may not support the game due to hardware limitations or software issues. If you encounter any problems while playing Air Xonix APK, you can contact the developer at <a href="">delicogames@gmail.com</a> or leave a comment on Google Play Store or APKCombo.</p> - <h2>FAQs</h2> -<h3>What is Air Xonix APK?</h3> -<p>Air Xonix APK is a remake of the classic arcade game Xonix with modern 3D graphics and enhanced gameplay.</p> -<h3>How to play Air Xonix APK?</h3> -<p>The main goal of Air Xonix APK is to explore and capture territory with your flying device. You have to cut off parts of the playing field by flying over them and returning to the border. You have to capture at least 75% of the field to complete each level. You have to avoid enemies and obstacles that can destroy your device or your tail. You also have to watch out for your fuel level, which decreases as you fly.</p> -<h3>Why download Air Xonix APK?</h3> -<p>You should download Air Xonix APK because it is a classic arcade game remake with 3D graphics that will take you on a nostalgic trip to the 80s. The game has modern 3D graphics that are optimized for mobile devices, easy to use and intuitive controls, and time-tested and addictive gameplay that will redefine your gaming experience. The game has two modes of play: Classic and Challenge, with 35 classic levels and 10 challenge levels for the most experienced players. The game is suitable for all ages and skill levels, as you can customize the difficulty level according to your preference.</p> -<h3>Where to download Air Xonix APK?</h3> -<p>You can download Air Xonix APK from Google Play Store or APKCombo. 
You can follow the steps mentioned in the article to download the game from either of these sources.</p> -<h3>What are the reviews of Air Xonix APK?</h3> -<p>Air Xonix APK has positive ratings and feedback from users and critics who have played the game. The game has a rating of 4.4 out of 5 stars on Google Play Store, based on over 1,000 reviews. The game also has a rating of 4.5 out of 5 stars on APKCombo, based on over 100 reviews. The game is praised for its graphics, gameplay, and nostalgia factor.</p> -<h3>How to contact the developer of Air Xonix APK?</h3> -<p>If you have any questions, suggestions, or issues regarding Air Xonix APK, you can contact the developer at <a href="">delicogames@gmail.com</a> or leave a comment on Google Play Store or APKCombo.</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Dead Zombie Sniper Assassin Shooter 3D Mod APK - The Best FPS Shooting Game for Killing Zombies and Criminals.md b/spaces/fatiXbelha/sd/Dead Zombie Sniper Assassin Shooter 3D Mod APK - The Best FPS Shooting Game for Killing Zombies and Criminals.md deleted file mode 100644 index e64e595527032b55b68c2637829a363ed4926f20..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Dead Zombie Sniper Assassin Shooter 3D Mod APK - The Best FPS Shooting Game for Killing Zombies and Criminals.md +++ /dev/null @@ -1,93 +0,0 @@ - -<h1>Dead Zombie Sniper Assassin Shooter 3D Mod APK: A Review</h1> -<p>If you are looking for a fun and exciting shooting game with zombies, snipers, and multiplayer modes, then you might want to check out Dead Zombie Sniper Assassin Shooter 3D Mod APK. This is a modified version of the original game that gives you unlimited money, resources, weapons, and more. In this article, we will review the game, its features, how to download and install it, and its pros and cons. We will also answer some frequently asked questions about the game.</p> -<h2>dead zombie sniper assassin shooter 3d mod apk</h2><br /><p><b><b>Download Zip</b> ✑ <a href="https://urllie.com/2uNDlh">https://urllie.com/2uNDlh</a></b></p><br /><br /> -<h2>What is Dead Zombie Sniper Assassin Shooter 3D?</h2> -<p>Dead Zombie Sniper Assassin Shooter 3D is an engaging combination of thrilling and realistic FPS shooting game with massive sniper and multiplayer PvP modes in new 3D zombie themes. It was developed by VNG Game Studios and released in 2020. The game has over 10 million downloads on Google Play Store and has received positive reviews from players.</p> -<h3>A thrilling and realistic FPS shooting game</h3> -<p>In this game, you play as a sniper in a post-zombie apocalypse world, where you have to shoot and kill zombies, survive the zombie horde, defend the city, and complete various missions. You can choose from different sniper rifles, each with its own features, advantages, and disadvantages. You can also upgrade your weapons with scopes, silencers, magazines, barrels, stocks, and more. You have to aim carefully, shoot accurately, reload quickly, and use your skills and tactics to survive.</p> -<h3>A massive sniper and multiplayer PvP modes</h3> -<p>The game offers various modes for you to enjoy. You can play the story-driven campaign mode, where you follow the plot and complete different levels. You can also play the daily missions mode, where you can earn rewards by completing tasks every day. You can also join the special ops mode, where you can participate in special events and challenges. 
Moreover, you can play the multiplayer PvP mode, where you can compete with other players online in different maps and modes. You can team up with your friends or fight against them in real-time battles.</p> -<h3>A new 3D zombie theme</h3> -<p>The game features a new 3D zombie theme that adds more excitement and horror to the gameplay. The zombies are not just mindless creatures that walk slowly towards you. They have different types, behaviors, abilities, and weaknesses. Some zombies are fast, some are strong, some are stealthy, some are explosive, some are armored, some are boss zombies, and more. You have to be careful and adapt your strategy according to the zombie type you encounter. The game also has stunning graphics, realistic physics, dynamic lighting, shadows, blood effects, sound effects, and music that create an immersive atmosphere.</p> -<h2>What are the features of Dead Zombie Sniper Assassin Shooter 3D Mod APK?</h2> -<p>Dead Zombie Sniper Assassin Shooter 3D Mod APK is a modified version of the original game that gives you some extra features that are not available in the official version. These features are as follows:</p> -<h3>Unlimited money and resources</h3> -<p>With this mod, you will have unlimited money and resources in the game. You can use them to buy and upgrade any weapons, items, and equipment you want. You can also unlock all the levels, modes, and features in the game. You don't have to worry about running out of money or resources or spending real money to get them.</p> -<h3>Unlocked and upgraded weapons</h3> -<p>With this mod, you will have access to all the weapons in the game. You can choose from a variety of sniper rifles, each with its own characteristics and stats. You can also upgrade your weapons with different attachments and enhancements to improve their performance and accuracy. You can also customize your weapons with skins and stickers to make them look cool and unique.</p> -<h3>No ads and root required</h3> -<p>With this mod, you will not see any ads in the game. You can enjoy the game without any interruptions or distractions. You can also play the game without rooting your device. You don't have to worry about damaging your device or losing your warranty by rooting it.</p> -<h2>How to download and install Dead Zombie Sniper Assassin Shooter 3D Mod APK?</h2> -<p>If you want to download and install Dead Zombie Sniper Assassin Shooter 3D Mod APK, you can follow these simple steps:</p> -<h3>Download the APK file from a trusted source</h3> -<p>The first step is to download the APK file from a trusted source. You can search for the file on the internet or use the link provided below. Make sure that the file is safe and virus-free before downloading it.</p> -<h3>Enable unknown sources on your device</h3> -<p>The next step is to enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, security, and enable unknown sources. You may need to confirm this action by tapping OK or Allow.</p> -<h3>Install the APK file and enjoy the game</h3> -<p>The final step is to install the APK file and enjoy the game. To do this, locate the downloaded file on your device storage and tap on it. Follow the instructions on the screen to complete the installation process. Once done, you can launch the game and start playing.</p> -<h2>What are the pros and cons of Dead Zombie Sniper Assassin Shooter 3D Mod APK?</h2> -<p>Like any other game, Dead Zombie Sniper Assassin Shooter 3D Mod APK has its pros and cons. Here are some of them:</p> -<h4>Pros:</h4> -<ul> -<li><h5>Fun and addictive gameplay</h5><p>The game offers a fun and addictive gameplay that will keep you entertained for hours. You can shoot zombies, complete missions, compete with other players, and enjoy the realistic FPS experience.</p></li> -<li><h5>High-quality graphics and sound effects</h5><p>The game has high-quality graphics and sound effects that create a stunning visual and auditory experience. You can admire the detailed 3D environments, animations, effects, and models. You can also hear the realistic gunshots, explosions, zombie groans, and music.</p></li> -<li><h5>Various modes and challenges</h5><p>The game has various modes and challenges that will test your skills and strategies. You can play the campaign mode, daily missions mode, special ops mode, multiplayer PvP mode, and more. 
You can also face different types of zombies, each with their own strengths and weaknesses.</p></li> -</ul> -<h4>Cons:</h4> -<ul> -<li><h5>Large file size and battery consumption</h5><p>The game has a large file size that may take up a lot of space on your device storage. It may also consume a lot of battery power while playing. You may need to free up some space or charge your device frequently while playing.</p></li> -<li><h5>Possible compatibility issues and bugs</h5><p>The game may have some compatibility issues and bugs that may affect its performance or functionality. It may not work well on some devices or versions of Android. It may also crash or freeze at times or have some glitches or errors.</p></li> -<li><h5>Risk of malware and viruses</h5><p>The game may have some risk of malware and viruses that may harm your device or data. Since it is a modded version of the original game, it may not be safe or secure to download or install. It may contain some malicious code or files that may infect your device or steal your information.</p></li> -</ul> -<h2>Conclusion</h2> -<p>Dead Zombie Sniper Assassin Shooter 3D Mod APK is an exciting shooting game version. If you have any other questions, feel free to ask me.</p> -<ul> -<li><h3>What is the difference between Dead Zombie Sniper Assassin Shooter 3D and Dead Zombie Sniper Assassin Shooter 3D Mod APK?</h3><p>Dead Zombie Sniper Assassin Shooter 3D is the official version of the game that you can download from the Google Play Store. Dead Zombie Sniper Assassin Shooter 3D Mod APK is the modified version of the game that you can download from other sources. The modded version gives you some extra features and benefits that are not available in the official version, such as unlimited money, resources, weapons, and more.</p></li> -<li><h3>Is Dead Zombie Sniper Assassin Shooter 3D Mod APK safe to download and install?</h3><p>Dead Zombie Sniper Assassin Shooter 3D Mod APK may not be safe to download and install. Since it is a modded version of the original game, it may not be verified or approved by the developers or the Google Play Store. It may contain some malicious code or files that may harm your device or data. It may also have some compatibility issues or bugs that may affect its performance or functionality. Therefore, you should be careful and cautious when downloading and installing the modded version of the game. You should also scan the file with an antivirus program before installing it.</p></li> -<li><h3>How can I update Dead Zombie Sniper Assassin Shooter 3D Mod APK?</h3><p>Dead Zombie Sniper Assassin Shooter 3D Mod APK may not be updated automatically or regularly. Since it is a modded version of the original game, it may not receive the latest updates or patches from the developers or the Google Play Store. It may also become outdated or incompatible with the new versions of the game or Android. Therefore, you may need to manually check for updates or download the new versions of the modded version of the game from other sources.</p></li> -<li><h3>Can I play Dead Zombie Sniper Assassin Shooter 3D Mod APK offline?</h3><p>Yes, you can play Dead Zombie Sniper Assassin Shooter 3D Mod APK offline. You don't need an internet connection to play the game, except for some features or modes that require online access, such as multiplayer PvP mode. 
You can enjoy the game without any internet connection or data usage.</p></li> -<li><h3>Can I play Dead Zombie Sniper Assassin Shooter 3D Mod APK with my friends?</h3><p>Yes, you can play Dead Zombie Sniper Assassin Shooter 3D Mod APK with your friends. You can join the multiplayer PvP mode, where you can compete with other players online in different maps and modes. You can team up with your friends or fight against them in real-time battles. You can also chat with your friends and other players in the game.</p></li> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Incredibox Blinding Lights and Unleash Your Creativity.md b/spaces/fatiXbelha/sd/Download Incredibox Blinding Lights and Unleash Your Creativity.md deleted file mode 100644 index 805baf301c1fe7674eb415137ee44c3d91415f46..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Incredibox Blinding Lights and Unleash Your Creativity.md +++ /dev/null @@ -1,148 +0,0 @@ -<br /> -<h1>How to Download Incredibox Blinding Lights and Enjoy the Music</h1> -<p>Do you love music and want to create your own beats with a simple and fun app? Do you want to explore different musical genres and atmospheres with a group of animated beatboxers? Do you want to discover one of the most popular songs of 2020 and its mod for Incredibox? If you answered yes to any of these questions, then this article is for you. In this article, we will show you how to download Incredibox Blinding Lights, a mod that lets you play with the song "Blinding Lights" by the Weeknd on Incredibox, a music app that lets you create your own music with the help of a merry crew of beatboxers. We will also explain what Incredibox and Blinding Lights are, and give you some tips and tricks to make the most out of your musical experience.</p> - <h2>What is Incredibox?</h2> -<p>Incredibox is a music app that lets you create your own music with the help of a merry crew of beatboxers. It was developed and published by the French company So Far So Good (SFSG) in 2009, and since then it has become a hit with people of all ages, with more than 75 million players worldwide. </p> -<h2>download incredibox blinding lights</h2><br /><p><b><b>Download Zip</b> ►►►►► <a href="https://urllie.com/2uNzPA">https://urllie.com/2uNzPA</a></b></p><br /><br /> - <h3>A fun, interactive music experience</h3> -<p>Incredibox is a music app that is part game, part tool, and above all an audio and visual experience. The concept of the game is simple: you drag and drop sound icons on different characters to make them sing and start to compose your own music. You can choose your musical style among 8 impressive atmospheres, from hip-hop to electro, from pop to jazz, from Brazilian to Indian. You can also find the right sound combos to unlock animated choruses that will enhance your tune, share your mix with other users and vote for your favorite tracks, or just listen to an endless mix generated by an automatic mode. </p> - <h3>Different versions and musical styles</h3> -<p>Incredibox has different versions that you can choose from, each with a unique theme and musical style. The first four versions can be played both on the official Incredibox website demo and on the paid apps, while the remaining versions are exclusive to the paid apps. 
Here is a table that summarizes the versions and their styles:</p> - <table> -<tr> -<th>Version</th> -<th>Style</th> -<th>Theme</th> -</tr> -<tr> -<td>Alpha</td> -<td>Old school beatbox</td> -<td>A remastered version of the original online flash game from 2009</td> -</tr> -<tr> -<td>Little Miss</td> -<td>R&B</td> -<td>A smooth and sensual vibe</td> -</tr> -<tr> -<td>Sunrise</td> -<td>Pop</td> -<td>A colorful and upbeat atmosphere</td> -</tr> -<tr> -<td>The Love</td> -<td>French touch</td> -<td>A swerving electro set for the dancefloor</td> -</tr> -<tr> -<td>Brazil</td> -<td>Samba fiesta</td> -<td>A blend of samba, batucada and Carnival spirit</td> -</tr> -<tr> -<td>Alive</td> -<td>Otaku trap</td> -<td>A futuristic, trippy world where geek, hip-hop and Nippon cultures collide</td> -</tr> -<tr> -<td>Jeevan</td> -<td <td>Jeevan</td> -<td>Bollywood</td> -<td>A fusion of Indian music, hip-hop and electro beats</td> -</tr> -<tr> -<td>Dystopia</td> -<td>Dark electro</td> -<td>A post-apocalyptic, cyberpunk world where machines rule</td> -</tr> -<tr> -<td>Icon Series</td> -<td>New wave and synth-pop</td> -<td>A tribute to the 80s music icons and their style</td> -</tr> -</table> - <h3>How to create, share and record your mix</h3> -<p>Creating your own mix with Incredibox is very easy and intuitive. You just need to drag and drop the sound icons on the characters to make them sing. You can choose from different categories of sounds, such as beats, effects, melodies, voices and choruses. You can also mute or solo each character, or shuffle the sounds randomly. To unlock the animated choruses, you need to find the right sound combos, which are indicated by a pulsing icon. Once you have created your mix, you can share it with other users on the app or on the website, where you can also vote for your favorite tracks and discover new ones. You can also record your mix as a video or an audio file, and save it on your device or share it on social media. </p> - <h2>What is Blinding Lights?</h2> -<p>Blinding Lights is a song by the Canadian singer and songwriter The Weeknd, released in November 2019 as the second single from his fourth studio album After Hours. The song is a synth-pop and new wave track that features 80s-inspired production and lyrics about longing for a lover after a breakup. The song was a huge commercial success, reaching number one in 34 countries and breaking several records, such as the most weeks spent in the top five and top ten of the Billboard Hot 100 chart. The song also received critical acclaim and won several awards, such as the MTV Video Music Award for Video of the Year and the American Music Award for Favorite Song - Pop/Rock. </p> - <h3>A mod for Incredibox</h3> -<p>Blinding Lights is also a mod for Incredibox that lets you play with the song on the app. A mod is a modification of a game that changes its features or content, usually created by fans or independent developers. Blinding Lights is one of the many mods that have been created for Incredibox by its passionate community, using tools such as Adobe Flash or Adobe Animate. The mod was created by a user named Mr. Patoche, who uploaded it on YouTube in December 2020. The mod is based on the Icon Series version of Incredibox, which is a tribute to the 80s music icons and their style. The mod replaces the original sound icons with ones that match the song's elements, such as drums, bass, synths, vocals and choruses. The mod also changes the background image to a cityscape at night, inspired by the song's music video. 
</p> - <h3>A new wave and synth-pop style</h3> -<p>Blinding Lights is a song that belongs to the new wave and synth-pop genres, which are musical styles that emerged in the late 1970s and early 1980s as part of the post-punk movement. New wave is a broad term that encompasses various styles that were influenced by punk rock, disco, funk, electronic music and art pop. Some of the characteristics of new wave are catchy hooks, synthesizers, keyboards, drum machines, guitars with effects, experimentation and diversity. Synth-pop is a subgenre of new wave that focuses on synthesizers as the main musical instrument, creating pop songs with electronic sounds and robotic vocals. Some of the artists that are considered as pioneers or icons of new wave and synth-pop are Depeche Mode, New Order, Duran Duran, Blondie, The Human League, Eurythmics and Madonna. </p> - <h2>How to Download Incredibox Blinding Lights?</h2> -<p>If you want to download Incredibox Blinding Lights and enjoy playing with the song on your device, you will need to follow these steps:</p> - <h3>Download the app or visit the website</h3> -<p>The first step is to download the Incredibox app on your device or visit the official Incredibox website demo on your browser. The app is available for iOS (iPhone/iPad), Android (smartphone/tablet) and macOS (MacBook/iMac), and it costs $4.99 USD (or equivalent in other currencies). The website demo is free but it only offers four versions: Alpha, Little Miss, Sunrise and The Love. You can download the app from You can download the app from the following links: - [App Store] for iOS devices - [Google Play] for Android devices - [Mac App Store] for macOS devices You can also visit the [Incredibox website demo] on your browser, but you will need to enable Flash Player to play the game. 
</p> -<p>How to download incredibox blinding lights mod<br /> -Incredibox v9 blinding lights review<br /> -Incredibox blinding lights music and sound design<br /> -Download incredibox app for full experience<br /> -Incredibox v9 blinding lights mix<br /> -Incredibox blinding lights mod github<br /> -Incredibox blinding lights youtube video<br /> -Incredibox blinding lights lyrics and song<br /> -Download incredibox v9 for free<br /> -Incredibox blinding lights mod apk<br /> -Incredibox blinding lights instrumental<br /> -Incredibox v9 blinding lights bonus<br /> -Incredibox blinding lights mod online<br /> -Incredibox blinding lights original video<br /> -Download incredibox v9 for android<br /> -Incredibox blinding lights mod tutorial<br /> -Incredibox v9 blinding lights characters<br /> -Incredibox blinding lights remix<br /> -Download incredibox v9 for ios<br /> -Incredibox blinding lights mod download link<br /> -Incredibox blinding lights beatbox<br /> -Incredibox v9 blinding lights theme<br /> -Incredibox blinding lights mod gameplay<br /> -Incredibox blinding lights song download<br /> -Download incredibox v9 for pc<br /> -Incredibox blinding lights mod features<br /> -Incredibox v9 blinding lights animation<br /> -Incredibox blinding lights cover<br /> -Download incredibox v9 for mac<br /> -Incredibox blinding lights mod version 1.1.5<br /> -Incredibox blinding lights challenge<br /> -Incredibox v9 blinding lights design and illustration<br /> -Incredibox blinding lights mod credits<br /> -Incredibox blinding lights mp3 download<br /> -Download incredibox v9 for windows 10<br /> -Incredibox blinding lights mod settings<br /> -Incredibox v9 blinding lights director and code<br /> -Incredibox blinding lights parody<br /> -Download incredibox v9 for chromebook<br /> -Incredibox blinding lights mod faq<br /> -Incredibox v9 blinding lights atmosphere<br /> -Incredibox blinding lights reaction<br /> -Download incredibox v9 for linux<br /> -Incredibox blinding lights mod feedback<br /> -Incredibox v9 blinding lights musical genre <br /> -Incredibox blinding lights mashup</p> - <h3>Choose the Icon Series version</h3> -<p>The second step is to choose the Icon Series version of Incredibox, which is the one that is compatible with the Blinding Lights mod. The Icon Series version is the latest and most recent version of Incredibox, released in October 2020. It is a tribute to the 80s music icons and their style, featuring sounds and visuals inspired by new wave, synth-pop, glam rock and pop art. To choose the Icon Series version, you need to swipe left or right on the main menu until you see the icon that looks like a cassette tape with a star on it. Then, tap or click on it to start the game. </p> - <h3>Drag and drop the sound icons</h3> -<p>The third step is to drag and drop the sound icons on the characters to make them sing and play with Blinding Lights. The sound icons are located at the bottom of the screen, and they are divided into five categories: beats, effects, melodies, voices and choruses. Each category has four sound icons, which correspond to different elements of the song. For example, the beats category has icons for drums, bass, claps and snaps, while the voices category has icons for vocals, harmonies, ad-libs and echoes. To drag and drop a sound icon, you need to tap or click on it and then drag it over one of the characters on the screen. You will see a circle around the character indicating that you can drop the icon there. 
Once you drop the icon, the character will start to sing or play that sound. You can do this with up to seven characters at a time, creating your own mix of Blinding Lights. </p> - <h2>Tips and Tricks for Incredibox Blinding Lights</h2> -<p>Now that you know how to download and play Incredibox Blinding Lights, here are some tips and tricks that will help you enjoy the game even more:</p> - <h3>Find the combos and bonuses</h3> -<p>One of the fun features of Incredibox is that you can find combos and bonuses by using certain sound icons together. A combo is a combination of four sound icons that creates a special animated chorus that enhances your mix. A bonus is a combination of seven sound icons that triggers a video clip that shows your characters performing in a different setting. There are two combos and two bonuses for each version of Incredibox, including Blinding Lights. To find them, you need to experiment with different sound icons and see which ones work together. You can also use hints that are available on the app or on the website, or look for guides online. </p> - <h3>Switch between normal and dark mode</h3> -<p>Another cool feature of Incredibox Blinding Lights is that you can switch between normal and dark mode by tapping or clicking on the moon icon at the top right corner of the screen. Normal mode is the default mode that shows your characters in a bright and colorful setting, while dark mode is a hidden mode that shows your characters in a dark and mysterious setting. Dark mode also changes some of the sounds and visuals of your mix, giving it a more edgy and intense vibe. You can switch between normal and dark mode anytime you want, depending on your mood and preference.</p> - <h3>Watch tutorials and reviews on YouTube</h3> -<p>If you want to learn more about Incredibox Blinding Lights or see how other people play with it, you can watch tutorials and reviews on YouTube. There are many videos that show you how to download and install the mod, how to find the combos and bonuses, how to create your own mix or recreate the original song, how to record and share your mix, and more. You can also watch videos that give you feedback and opinions on the mod, its features, its pros and cons, its similarities and differences with other versions of Incredibox, etc. Watching these videos can help you improve your skills, discover new things, get inspired or just have fun.</p> - <h2>Conclusion</h2> -<p>In this article, we have shown you how to download Incredibox Blinding Lights, a mod that lets you play with the song "Blinding Lights" by The Weeknd on Incredibox, a music app that lets you create your own music with the help of a merry crew of beatboxers. We have also explained what Incredibox and Blinding Lights are, and given you some tips and tricks to make the most out of your musical experience. We hope that you have enjoyed reading this article and that you will have fun playing with Incredibox Blinding Lights. If you have any questions or comments, feel free to leave them below. Thank you for your attention and happy mixing! <h2>FAQs</h2> -<p>Here are some frequently asked questions and answers about Incredibox Blinding Lights:</p> - <h3>Q: How can I download the Blinding Lights mod?</h3> -<p>A: You can download the Blinding Lights mod from the YouTube video description of Mr. Patoche, the creator of the mod. You will find a link to a Google Drive folder that contains the mod files for different devices and platforms. 
You will need to unzip the files and follow the instructions to install the mod on your device or browser. Alternatively, you can also use a QR code scanner to scan the code that appears on the video and access the download link directly.</p> - <h3>Q: Is the Blinding Lights mod official or unofficial?</h3> -<p>A: The Blinding Lights mod is an unofficial mod that was created by a fan of Incredibox and The Weeknd, not by the developers of Incredibox. The mod is not endorsed or supported by SFSG or The Weeknd, and it may not work properly or cause issues with your device or browser. Use the mod at your own risk and discretion.</p> - <h3>Q: Can I play other songs on Incredibox?</h3> -<p>A: Yes, you can play other songs on Incredibox, as long as they are compatible with the sound icons and the musical style of the version you are using. For example, you can play "Take On Me" by A-ha on Icon Series, "Bad Guy" by Billie Eilish on Dystopia, or "Despacito" by Luis Fonsi on Brazil. You can also find other mods that let you play specific songs on Incredibox, such as "Old Town Road" by Lil Nas X, "Believer" by Imagine Dragons, or "Shape of You" by Ed Sheeran.</p> - <h3>Q: Can I create my own mod for Incredibox?</h3> -<p>A: Yes, you can create your own mod for Incredibox, if you have the skills and tools to do so. You will need to use software such as Adobe Flash or Adobe Animate to modify the original game files and create your own sound icons, animations, backgrounds, etc. You will also need to respect the intellectual property rights of SFSG and the artists whose songs you are using, and give them proper credit. You can also share your mod with other users online, but be aware that SFSG may take down your mod if they consider it inappropriate or harmful.</p> - <h3>Q: How can I contact SFSG or Mr. Patoche?</h3> -<p>A: You can contact SFSG, the developers of Incredibox, through their official website , their social media accounts , or their email address contact@incredibox.com. You can contact Mr. Patoche, the creator of the Blinding Lights mod, through his YouTube channel , his Instagram account , or his email address mr.patoche.incredibox@gmail.com.</p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Download Pirates Tides of Fortune and Join the Epic Sailing Battles.md b/spaces/fatiXbelha/sd/Download Pirates Tides of Fortune and Join the Epic Sailing Battles.md deleted file mode 100644 index 295daacdd918664feaea86fab576d949d9316797..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Download Pirates Tides of Fortune and Join the Epic Sailing Battles.md +++ /dev/null @@ -1,193 +0,0 @@ - -<h1>Pirates: Tides of Fortune - A Pirate-Themed Strategy Game</h1> - <p>Do you dream of sailing the high seas as a pirate captain? Do you want to build your own pirate haven from scratch and plunder your enemies for treasure? Do you want to join forces with other pirates and form a powerful brotherhood? If you answered yes to any of these questions, then you might want to check out <strong>Pirates: Tides of Fortune</strong>, a free-to-play online strategy game developed by Plarium.</p> - <p>Pirates: Tides of Fortune is a massively multiplayer real-time strategy (MMORTS) game that immerses you in the hustling, raiding, treasure-hunting life of a pirate. You start as a prisoner on a desert island who is rescued by Captain Anne O'Malley, a charming but ruthless pirate leader who offers you a chance to join her crew. 
You then have to raise your pirate haven from nothing into a bustling hub of your pirate empire. You can recruit your own crew members from various classes such as marauders, buccaneers, bombardiers, etc., and build your own fleet of ships from different types such as brigantines, frigates, ships of the line, etc. You can also research new discoveries and technologies that will give you an edge over your rivals.</p> -<h2>pirates tides of fortune download</h2><br /><p><b><b>Download</b> ★★★★★ <a href="https://urllie.com/2uNBZd">https://urllie.com/2uNBZd</a></b></p><br /><br /> - <p>But being a pirate is not all about building and researching. You also have to raid other players and plunder their resources, gold, and rubies. You can also join or create a brotherhood and cooperate with other players to take on common enemies, such as the Spanish Armada, the Royal Navy, or the Kraken. You can also participate in various events and brawls that will reward you with valuable prizes and glory.</p> - <p>Pirates: Tides of Fortune is a game that will appeal to anyone who loves strategy, adventure, and pirates. It has stunning graphics, immersive sound effects, witty dialogue, and a captivating story. It also has a large and active community of players who are always ready to chat, trade, or fight with you. If you are looking for a game that will challenge your strategic skills and unleash your inner pirate, then Pirates: Tides of Fortune is the game for you.</p> - <h2>How to Download and Play Pirates: Tides of Fortune</h2> - <p>One of the best things about Pirates: Tides of Fortune is that it is free to play and easy to access. You can play it on your desktop or on your browser, depending on your preference. Here are the steps to download and play Pirates: Tides of Fortune on both platforms:</p> - <h3>Downloading the Game on Desktop</h3> - <p>If you want to play Pirates: Tides of Fortune on your desktop, you will need to download and install the Plarium Play launcher, which is a free and secure platform that allows you to access all of Plarium's games in one place. Here is how to do it:</p> - <ol> -<li>Go to <a href="">https://plarium.com/en/plarium-play/</a> and click on the "Download for Free" button.</li> -<li>Run the downloaded file and follow the instructions to install the Plarium Play launcher on your computer.</li> -<li>Launch the Plarium Play launcher and sign in with your email or Facebook account.</li> -<li>Find Pirates: Tides of Fortune in the list of games and click on the "Play Now" button.</li> -<li>Enjoy the game!</li> -</ol> - <p>The system requirements for playing Pirates: Tides of Fortune on desktop are:</p> - <ul> -<li>Operating system: Windows 7 or higher</li> -<li>Processor: 2 GHz or higher</li> -<li>Memory: 2 GB RAM or higher</li> -<li>Graphics: DirectX 9.0c compatible video card</li> -<li>Network: Broadband Internet connection</li> -<li>Storage: 300 MB available space</li> -</ul> - <h3>Playing the Game on Browser</h3> - <p>If you prefer to play Pirates: Tides of Fortune on your browser, you can do so without downloading anything. 
Here is how to do it:</p> -<p>pirates tides of fortune free online game<br /> -pirates tides of fortune strategy guide<br /> -pirates tides of fortune browser game review<br /> -pirates tides of fortune plarium play launcher<br /> -pirates tides of fortune mmorts adventure<br /> -pirates tides of fortune cheats and hacks<br /> -pirates tides of fortune best troops and ships<br /> -pirates tides of fortune how to join a brotherhood<br /> -pirates tides of fortune tips and tricks for beginners<br /> -pirates tides of fortune rum production and consumption<br /> -pirates tides of fortune latest updates and news<br /> -pirates tides of fortune system requirements and compatibility<br /> -pirates tides of fortune haven design and layout<br /> -pirates tides of fortune discoveries and sketches<br /> -pirates tides of fortune resources and flotsam<br /> -pirates tides of fortune raiding and plundering strategies<br /> -pirates tides of fortune enemy list and revenge attacks<br /> -pirates tides of fortune gems and gold stashes<br /> -pirates tides of fortune defensive technologies and structures<br /> -pirates tides of fortune sailing and action gameplay<br /> -pirates tides of fortune 2d graphics and sound effects<br /> -pirates tides of fortune forum and community support<br /> -pirates tides of fortune developer and publisher information<br /> -pirates tides of fortune release date and genre<br /> -pirates tides of fortune in-game purchases and rewards<br /> -pirates tides of fortune offline mode and save data<br /> -pirates tides of fortune login and registration issues<br /> -pirates tides of fortune gameplay videos and screenshots<br /> -pirates tides of fortune wiki and faq page<br /> -pirates tides of fortune events and tournaments<br /> -pirates tides of fortune best brotherhoods and alliances<br /> -pirates tides of fortune crew members and captains<br /> -pirates tides of fortune pirate haven upgrade guide<br /> -pirates tides of fortune rum distilleries and taverns<br /> -pirates tides of fortune world map and shipwrecks<br /> -pirates tides of fortune treasure hunting and quests<br /> -pirates tides of fortune feedback and suggestions<br /> -pirates tides of fortune bugs and glitches report<br /> -pirates tides of fortune comparison with other plarium games<br /> -pirates tides of fortune fun facts and trivia questions</p> - <ol> -<li>Go to <a href="">https://plarium.com/en/strategy-games/pirates-tides-of-fortune/</a> or <a href="">https://www.facebook.com/PiratesTidesOfFortune/</a> and click on the "Play Now" button.</li> -<li>Sign in with your email or Facebook account.</li> -<li>Enjoy the game!</li> -</ol> - <p>The system requirements for playing Pirates: Tides of Fortune on browser are:</p> - <ul> -<li>Operating system: Windows 7 or higher, Mac OS X 10.9 or higher, Linux</li> -<li>Browser: Chrome, Firefox, Safari, Opera, Edge</li> -<li>Flash Player: Version 11.4 or higher</li> -<li>Network: Broadband Internet connection</li> -</ul> - <h2>Tips and Tricks for Beginners</h2> - <p>Pirates: Tides of Fortune is a game that requires strategy, planning, and patience. It can be overwhelming for beginners who are not familiar with the game mechanics and features. To help you get started, here are some tips and tricks that will help you progress faster and smarter in the game:</p> - <h3>Completing Quests and Tasks</h3> - <p>One of the easiest ways to earn rubies, resources, sketches, and other rewards in Pirates: Tides of Fortune is to complete quests and tasks. 
Quests are missions that are given by Captain Anne O'Malley or other characters in the game. They usually involve building, upgrading, researching, raiding, or joining a brotherhood. Tasks are daily objectives that are given by your advisor. They usually involve collecting resources, training units, sending reinforcements, or participating in events. You can find both quests and tasks on the left side of your screen. You should try to complete as many quests and tasks as possible every day to boost your progress and gain valuable rewards.</p> - <h3>Upgrading Buildings and Resources</h3> - <p>Your pirate haven is your base of operations and your source of income. You should always upgrade your buildings and resources to increase your production, storage, and defense capabilities. Some of the most important buildings and resources to upgrade are:</p> - <ul> -<li><strong>Rum Distillery</strong>: This is where you produce rum, which is the main resource for maintaining your crew. You should upgrade your rum distillery to increase your rum production and storage capacity.</li> -<li><strong>Lumber Yard</strong>: This is where you produce lumber, which is the main resource for building and upgrading your haven. You should upgrade your lumber yard to increase your lumber production and storage capacity.</li> -<li><strong>Gold Mine</strong>: This is where you produce gold, which is the main resource for recruiting and training your crew. You should upgrade your gold mine to increase your gold production and storage capacity.</li> -<li><strong>Warehouse</strong>: This is where you store your resources. You should upgrade your warehouse to increase your storage capacity and protect your resources from being raided by other players.</li> -<li><strong>Pirate Stronghold</strong>: This is the heart of your haven and the main source of your defense. You should upgrade your pirate stronghold to unlock new buildings, discoveries, and units, as well as to increase your defense bonus and haven size.</li> -</ul> - <h3>Researching Discoveries and Technologies</h3> - <p>Besides upgrading your buildings and resources, you should also research new discoveries and technologies that will give you an edge over your rivals. Discoveries and technologies are divided into four categories: Order of Battle, Swordplay, Trade, and Brotherhood. Each category has its own benefits and requirements. You can research discoveries and technologies in the Observatory, which you can build after upgrading your pirate stronghold to level 5. Some of the most useful discoveries and technologies to research are:</p> - <ul> -<li><strong>Order of Battle</strong>: This category focuses on improving your military capabilities, such as increasing your offense, defense, speed, or load of your units. You should research order of battle discoveries and technologies to make your raids more effective and efficient.</li> -<li><strong>Swordplay</strong>: This category focuses on unlocking new units and improving their skills, such as accuracy, critical hit, or evasion. You should research swordplay discoveries and technologies to diversify your crew and make them more powerful.</li> -<li><strong>Trade</strong>: This category focuses on improving your economic capabilities, such as increasing your resource production, trade capacity, or trade speed. 
You should research trade discoveries and technologies to boost your income and trade with other players.</li> -<li><strong>Brotherhood</strong>: This category focuses on improving your social capabilities, such as increasing your brotherhood size, brotherhood bonus, or brotherhood influence. You should research brotherhood discoveries and technologies to strengthen your bonds with other players and gain more benefits from being in a brotherhood.</li> -</ul> - <h3>Joining a Brotherhood and Cooperating with Other Players</h3> - <p>Pirates: Tides of Fortune is not a game that you can play alone. You will need the help of other players to survive and thrive in the pirate world. That is why joining a brotherhood is one of the best decisions you can make in the game. A brotherhood is a group of players who share a common name, flag, motto, chat, and goals. By joining a brotherhood, you can:</p> - <ul> -<li><strong>Send and receive reinforcements</strong>: You can send or request reinforcements from your brotherhood members to help you defend or attack other players.</li> -<li><strong>Trade resources</strong>: You can trade resources with your brotherhood members to balance your economy or help each other out.</li> -<li><strong>Participate in brawls</strong>: You can participate in brawls with your brotherhood members to compete against other brotherhoods for prizes and glory.</li> -<li><strong>Enjoy social features</strong>: You can chat with your brotherhood members, send them gifts, invite them to join you in raids, or share tips and tricks with them.</li> -</ul> - <p>To join a brotherhood, you can either apply to an existing one or create your own one. You can find brotherhoods in the Brotherhoods tab on the right side of your screen. You can also use the search function or the filters to find a suitable brotherhood for you. Before joining a brotherhood, you should check its requirements, rules, ranking, and reputation. You should also try to communicate with its leader or members to see if they are friendly and active.</p> - <h2>Review and Rating of Pirates: Tides of Fortune</h2> - <p>Now that you have learned how to download and play Pirates: Tides of Fortune, as well as some tips and tricks for beginners, you might be wondering what other players think of the game. Is it worth playing? Is it fun and engaging? Is it fair and balanced? To answer these questions, here is a review and rating of Pirates: Tides of Fortune based on its graphics, sound, gameplay, story, and more.</p> - <h3>Graphics and Sound</h3> - <p>Pirates: Tides of Fortune has impressive graphics and sound that create a realistic and immersive pirate atmosphere. The game features a 3D isometric view that allows you to zoom in and out and rotate your camera to see your haven and the map from different angles. The game also has detailed animations and effects that bring your buildings, units, and ships to life. The game's graphics are colorful, vibrant, and eye-catching, making you feel like you are in a pirate paradise.</p> - <p>The game's sound is also top-notch, with voice acting, music, and sound effects that enhance the game experience. The game has voice acting for the main characters, such as Captain Anne O'Malley, who guides you through the game with her witty and charming remarks. The game also has music that matches the mood and theme of the game, such as upbeat tunes for building and raiding, or dramatic tunes for brawls and events. 
The game also has sound effects that add realism and excitement to the game, such as the sound of waves crashing, cannons firing, or swords clashing.</p> - <h3>Gameplay and Story</h3> - <p>Pirates: Tides of Fortune has addictive and engaging gameplay that will keep you hooked for hours. The game is a mix of strategy, adventure, and humor that will challenge your skills and entertain you at the same time. The game has a lot of features and activities that will keep you busy and satisfied, such as building your haven, recruiting your crew, researching discoveries, raiding other players, joining a brotherhood, participating in events, and more. The game also has a lot of variety and customization options that will allow you to play the game your way, such as choosing your pirate class, flag, motto, units, ships, etc.</p> - <p>The game also has a captivating story that will make you feel like you are part of a pirate legend. The game has a rich and immersive pirate lore that is revealed through quests, dialogues, characters, and events. The game also has a lot of humor and personality that will make you laugh and smile. The game's characters are funny and memorable, such as Captain Anne O'Malley, who is your mentor and friend; Captain "Bonnie" Anne Bonny, who is your rival and nemesis; or Captain Jack Rackham, who is your ally and lover. The game also has a lot of references and jokes that will appeal to pirate fans and pop culture enthusiasts.</p> - <h3>Pros and Cons</h3> - <p>Like any other game, Pirates: Tides of Fortune has its pros and cons that might affect your enjoyment of the game. Here are some of them:</p> - <table> -<tr> -<th>Pros</th> -<th>Cons</th> -</tr> -<tr> -<td>- Free to play and easy to access</td> -<td>- Requires a lot of time and patience</td> -</tr> -<tr> -<td>- Stunning graphics and immersive sound</td> -<td>- Can be laggy or buggy sometimes</td> -</tr> -<tr> -<td>- Addictive and engaging gameplay</td> -<td>- Can be repetitive or grindy sometimes</td> -</tr> -<tr> -<td>- Captivating story and humor</td> -<td>- Can be confusing or inconsistent sometimes</td> -</tr> -<tr> -<td>- Large and active community</td> -<td>- Can be competitive or toxic sometimes</td> -</tr> -</table> - <h3>Rating</h3> - <p>Based on the above review, I would give Pirates: Tides of Fortune a rating of 4 out of 5 stars. I think it is a great game for anyone who loves strategy, adventure, and pirates. It has a lot of features and activities that will keep you hooked for hours, as well as a rich and immersive pirate lore that will make you feel like you are part of a pirate legend. It also has a large and active community of players who are always ready to chat, trade, or fight with you. However, the game also has some drawbacks that might frustrate or bore you, such as the long waiting times, the frequent bugs, the repetitive tasks, or the competitive environment. Therefore, I would recommend this game to anyone who is looking for a fun and challenging pirate-themed strategy game, but also has a lot of time and patience to spare.</p> - <h2>Conclusion and FAQs</h2> - <p>In conclusion, Pirates: Tides of Fortune is a free-to-play online strategy game that lets you live the life of a pirate captain. You can build your own pirate haven, recruit your own crew, raid other players, and join a brotherhood. You can also enjoy the stunning graphics, the immersive sound, the addictive gameplay, and the captivating story of the game. 
However, you should also be aware of the drawbacks of the game, such as the long waiting times, the frequent bugs, the repetitive tasks, or the competitive environment. If you are interested in playing Pirates: Tides of Fortune, you can download it on your desktop or play it on your browser. Here are some frequently asked questions and answers about the game:</p> - <h3>FAQs</h3> - <ol> -<li><strong>Is Pirates: Tides of Fortune pay-to-win?</strong></li> -<p>No, Pirates: Tides of Fortune is not pay-to-win. You can play the game for free and enjoy all of its features and content without spending any money. However, you can also buy rubies, which are the premium currency of the game, with real money. Rubies can be used to speed up your progress, buy special items, or access exclusive offers. However, rubies are not necessary to play or win the game. You can also earn rubies for free by completing quests, tasks, events, or brawls.</p> -<li><strong>How do I get more resources in Pirates: Tides of Fortune?</strong></li> -<p>There are several ways to get more resources in Pirates: Tides of Fortune. You can:</p> -<ul> -<li>Upgrade your resource buildings and warehouses to increase your production and storage capacity.</li> -<li>Complete quests and tasks that reward you with resources.</li> -<li>Raid other players and plunder their resources.</li> -<li>Trade resources with your brotherhood members or other players.</li> -<li>Participate in events and brawls that reward you with resources.</li> -</ul> -<li><strong>How do I get more units in Pirates: Tides of Fortune?</strong></li> -<p>There are several ways to get more units in Pirates: Tides of Fortune. You can:</p> -<ul> -<li>Upgrade your gold mine and rum distillery to increase your gold and rum production and storage capacity.</li> -<li>Upgrade your tavern and market to increase your recruitment and training capacity.</li> -<li>Research swordplay discoveries and technologies to unlock new units and improve their skills.</li> -<li>Complete quests and tasks that reward you with units.</li> -<li>Raid other players and capture their units.</li> -<li>Participate in events and brawls that reward you with units.</li> -</ul> -<li><strong>How do I get more sketches in Pirates: Tides of Fortune?</strong></li> -<p>Sketches are special items that are required to upgrade some buildings or research some discoveries. There are several ways to get more sketches in Pirates: Tides of Fortune. You can:</p> -<ul> -<li>Complete quests and tasks that reward you with sketches.</li> -<li>Raid other players and steal their sketches.</li> -<li>Trade sketches with your brotherhood members or other players.</li> -<li>Buy sketches with rubies or real money.</li> -</ul> -<li><strong>How do I get more rubies in Pirates: Tides of Fortune?</strong></li> -<p>Rubies are the premium currency of Pirates: Tides of Fortune. They can be used to speed up your progress, buy special items, or access exclusive offers. There are several ways to get more rubies in Pirates: Tides of Fortune. You can:</p> -<ul> -<li>Complete quests and tasks that reward you with rubies.</li> -<li>Raid other players and loot their rubies.</li> -<li>Participate in events and brawls that reward you with rubies.</li> -<li>Buy rubies with real money.</li> -</ul> - <p>I hope this article has helped you learn more about Pirates: Tides of Fortune and how to download and play it. If you have any questions or feedback about the game or the article, please feel free to leave a comment below or contact me directly. 
Thank you for reading and happy pirating!</p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Enjoy Driving in Different Environments with Drivers Jobs Online Simulator APK 0.92.md b/spaces/fatiXbelha/sd/Enjoy Driving in Different Environments with Drivers Jobs Online Simulator APK 0.92.md deleted file mode 100644 index 9d918b2b434eac5441e0fb21cb939a277a13872f..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Enjoy Driving in Different Environments with Drivers Jobs Online Simulator APK 0.92.md +++ /dev/null @@ -1,124 +0,0 @@ -<br /> -<h1>Drivers Jobs Online Simulator APK 0.92: A Fun and Realistic Driving Game for Android</h1> - <h2>Introduction</h2> - <p>Do you love driving different vehicles and exploring new places? Do you want to experience what it's like to have various driving jobs and interact with other players online? If you answered yes to these questions, then you should check out Drivers Jobs Online Simulator APK 0.92.</p> - <p>Drivers Jobs Online Simulator APK 0.92 is a driving simulation game for Android devices that lets you choose from different vehicles and jobs, such as bus driver, car driver, or van driver. You can drive around realistic cities and countryside, pick up passengers or deliver goods, participate in races or events, and chat with other players online.</p> -<h2>drivers jobs online simulator apk 0.92</h2><br /><p><b><b>Download</b> >>> <a href="https://urllie.com/2uNxKt">https://urllie.com/2uNxKt</a></b></p><br /><br /> - <p>In this article, we will tell you everything you need to know about this amazing game, including how to download and install it on your device, how to play it, what are its pros and cons, and some frequently asked questions. By the end of this article, you will have a clear idea of whether this game is worth downloading or not.</p> - <h2>How to Download and Install Drivers Jobs Online Simulator APK 0.92</h2> - <p>If you are interested in playing this game, you might be wondering where to find the APK file and how to install it on your device. Don't worry, we have got you covered.</p> - <p>The APK file is a compressed file that contains all the data and resources needed to run an Android application. You can download it from one of the trusted sources, such as [Driver's Jobs Simulator 2022 APK (Android App) - Free Download - APKCombo](^1^). This website offers you the latest version of the game, which is 0.92, and it is free and safe to download.</p> - <p>Once you have downloaded the APK file, you need to install it on your device. To do that, you need to follow these simple steps:</p> - <ol> -<li>Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store.</li> -<li>Locate the APK file on your device using a file manager app or your browser's download history.</li> -<li>Tap on the APK file and follow the instructions on the screen to install it.</li> -<li>Wait for the installation process to finish and then launch the game from your app drawer or home screen.</li> -</ol> - <p>That's it! You have successfully installed Drivers Jobs Online Simulator APK 0.92 on your device. 
Now you can enjoy playing this fun and realistic driving game.</p> - <h2>How to Update Drivers Jobs Online Simulator APK 0.92 to the Latest Version</h2> - <p>If you already have Drivers Jobs Online Simulator APK 0.92 installed on your device, you might want to update it to the latest version whenever there is a new update available. Updating the game will give you access to new features, improvements, bug fixes, and more.</p> - <p>To update Drivers Jobs Online Simulator APK 0.92, you need to follow these simple steps:</p> -<p>drivers jobs online simulator game download apk 0.92<br /> -drivers jobs online simulator latest version apk 0.92<br /> -drivers jobs online simulator android apk 0.92 free<br /> -drivers jobs online simulator mod apk 0.92 unlimited money<br /> -drivers jobs online simulator apk 0.92 update<br /> -drivers jobs online simulator apk 0.92 offline<br /> -drivers jobs online simulator apk 0.92 hack<br /> -drivers jobs online simulator apk 0.92 for pc<br /> -drivers jobs online simulator apk 0.92 install<br /> -drivers jobs online simulator apk 0.92 review<br /> -drivers jobs online simulator apk 0.92 gameplay<br /> -drivers jobs online simulator apk 0.92 features<br /> -drivers jobs online simulator apk 0.92 tips and tricks<br /> -drivers jobs online simulator apk 0.92 cheats<br /> -drivers jobs online simulator apk 0.92 best cars<br /> -drivers jobs online simulator apk 0.92 graphics<br /> -drivers jobs online simulator apk 0.92 requirements<br /> -drivers jobs online simulator apk 0.92 size<br /> -drivers jobs online simulator apk 0.92 rating<br /> -drivers jobs online simulator apk 0.92 bugs and fixes<br /> -drivers jobs online simulator apk 0.92 new update<br /> -drivers jobs online simulator apk 0.92 multiplayer mode<br /> -drivers jobs online simulator apk 0.92 realistic physics<br /> -drivers jobs online simulator apk 0.92 customization options<br /> -drivers jobs online simulator apk 0.92 maps and locations<br /> -drivers jobs online simulator apk 0.92 missions and challenges<br /> -drivers jobs online simulator apk 0.92 traffic and weather<br /> -drivers jobs online simulator apk 0.92 controls and settings<br /> -drivers jobs online simulator apk 0.92 sound and music<br /> -drivers jobs online simulator apk 0.92 fun and addictive</p> - <ol> -<li>Go to the website where you downloaded the APK file, such as [Driver's Jobs Simulator 2022 APK (Android App) - Free Download - APKCombo](^1^), and check if there is a new version available.</li> -<li>If there is a new version available, download it and save it on your device.</li> -<li>Uninstall the old version of the game from your device. You can do this by going to your device settings, finding the app, and tapping on uninstall.</li> -<li>Install the new version of the game by following the same steps as before.</li> -<li>Launch the game and enjoy the new features and improvements.</li> -</ol> - <p>Note: You don't need to worry about losing your progress or data when updating the game. The game will automatically sync your data with your Google Play account or Facebook account if you have logged in with them.</p> - <h2>How to Play Drivers Jobs Online Simulator APK 0.92</h2> - <p>Now that you have downloaded and installed Drivers Jobs Online Simulator APK 0.92 on your device, you might be wondering how to play it. 
Don't worry, we have got you covered.</p> - <p>Drivers Jobs Online Simulator APK 0.92 is a driving simulation game that lets you choose from different vehicles and jobs, such as bus driver, car driver, or van driver. You can drive around realistic cities and countryside, pick up passengers or deliver goods, participate in races or events, and chat with other players online.</p> - <p>To play Drivers Jobs Online Simulator APK 0.92, you need to follow these simple steps:</p> - <ol> -<li>Launch the game from your app drawer or home screen.</li> -<li>Select your preferred language from the options available.</li> -<li>Create your profile by entering your name and choosing your avatar.</li> -<li>Select your vehicle and job from the options available. You can choose from buses, cars, vans, trucks, motorcycles, and more. You can also choose from different jobs, such as taxi driver, delivery driver, bus driver, police officer, firefighter, ambulance driver, and more.</li> -<li>Start driving and exploring the map. You can use the steering wheel, pedals, buttons, or tilt controls to drive your vehicle. You can also use the map, GPS, speedometer, fuel gauge, and other indicators to navigate and monitor your vehicle's status.</li> -<li>Pick up passengers or deliver goods according to your job. You can see your destination on the map or GPS. You can also see your earnings and ratings on the screen.</li> -<li>Interact with other players online. You can chat with them using text or voice messages. You can also join or create rooms with them and play together.</li> -<li>Customize your vehicle and earn money. You can buy new vehicles or upgrade your existing ones using the money you earn from your jobs. You can also customize your vehicle's appearance, performance, sound, and more using various options available.</li> -</ol> - <p>That's it! You have learned how to play Drivers Jobs Online Simulator APK 0.92. Now you can enjoy this fun and realistic driving game on your device.</p> - <h2>Pros and Cons of Drivers Jobs Online Simulator APK 0.92</h2> - <p>Drivers Jobs Online Simulator APK 0.92 is a driving simulation game that has many pros and cons. 
Here are some of them:</p> - <h3>Pros</h3> - <ul> -<li>It has realistic graphics and sounds that make you feel like you are driving in real life.</li> -<li>It has a large and diverse map that offers you many places to explore and drive.</li> -<li>It has a variety of vehicles and jobs that suit your preferences and skills.</li> -<li>It has an online multiplayer mode that lets you chat and play with other players from around the world.</li> -<li>It has a customization option that lets you modify your vehicle's appearance, performance, sound, and more.</li> -<li>It has a simple and intuitive control system that makes it easy to drive your vehicle.</li> -<li>It has a low file size and does not require much storage space on your device.</li> -</ul> - <h3>Cons</h3> - <ul> -<li>It may have some bugs or glitches that affect the gameplay or performance of the game.</li> -<li>It may have some ads or in-app purchases that may annoy or distract you from the game.</li> -<li>It may have some compatibility issues with some devices or Android versions.</li> -<li>It may drain your battery or consume your data if you play it for a long time or online.</li> -<li>It may be too easy or too hard for some players depending on their level of experience or difficulty setting.</li> -</ul> - <h2>Conclusion</h2> - <p>In conclusion, Drivers Jobs Online Simulator APK 0.92 is a fun and realistic driving game for Android devices that lets you choose from different vehicles and jobs, such as bus driver, car driver, or van driver. You can drive around realistic cities and countryside, pick up passengers or deliver goods, participate in races or events, and chat with other players online. You can also customize your vehicle and earn money by buying new vehicles or upgrading your existing ones.</p> - <p>This game has many pros and cons that you should consider before downloading it. Some of the pros are realistic graphics and sounds, large and diverse map, variety of vehicles and jobs, online multiplayer mode, customization option, simple and intuitive control system, and low file size. Some of the cons are bugs or glitches, ads or in-app purchases, compatibility issues, battery or data consumption, and difficulty level.</p> - <p>If you are looking for a driving simulation game that offers you a lot of fun and realism, then you should give Drivers Jobs Online Simulator APK 0.92 a try. You can download it from [Driver's Jobs Simulator 2022 APK (Android App) - Free Download - APKCombo] and install it on your device following the steps we have provided in this article. You can also update it to the latest version whenever there is a new update available.</p> - <p>We hope you enjoyed reading this article and learned something new about this game. If you have any questions or feedback, please feel free to leave them in the comments section below. We would love to hear from you. Thank you for your time and attention.</p> - <h2>FAQs</h2> - <h4>Q1: What are the minimum requirements to play this game?</h4> - <p>A1: The minimum requirements to play this game are:</p> - <ul> -<li>An Android device with Android 4.4 or higher</li> -<li>A stable internet connection (for online mode)</li> -<li>At least 100 MB of free storage space on your device</li> -</ul> - <h4>Q2: Is this game safe and virus-free?</h4> - <p>A2: Yes, this game is safe and virus-free. We have downloaded it from [Driver's Jobs Simulator 2022 APK (Android App) - Free Download - APKCombo], which is a trusted source that offers you the latest version of the game. 
We have also scanned it with our antivirus software and found no threats or malware.</p> - <h4>Q3: Can I play this game offline or without internet connection?</h4> - <p>A3: Yes, you can play this game offline or without internet connection. However, you will not be able to access some features or functions of the game, such as online multiplayer mode, chat with other players, update the game, or sync your data with your Google Play account or Facebook account.</p> - <h4>Q4: How can I contact the developer or report a bug?</h4> - <p>A4: You can contact the developer or report a bug by sending an email to driversjobssimulator@gmail.com. You can also visit their Facebook page at [Drivers Jobs Simulator - Home | Facebook] or their YouTube channel at [Drivers Jobs Simulator - YouTube](^3 ^)^. You can also leave a review or rating on the Google Play Store or the website where you downloaded the game.</p> - <h4>Q5: Are there any similar games like this one?</h4> - <p>A5: Yes, there are many similar games like this one that you can try. Some of them are:</p> - <ul> -<li>[Euro Truck Simulator 2]: A driving simulation game that lets you drive trucks across Europe and deliver cargo.</li> -<li>[City Car Driving]: A driving simulation game that lets you drive cars in realistic traffic conditions and obey traffic rules.</li> -<li>[Bus Simulator: Ultimate]: A driving simulation game that lets you drive buses around the world and transport passengers.</li> -<li>[Car Parking Multiplayer]: A driving simulation game that lets you park cars in various scenarios and play with other players online.</li> -<li>[Extreme Car Driving Simulator]: A driving simulation game that lets you drive sports cars and perform stunts and drifts.</li> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/training/coach.py b/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/training/coach.py deleted file mode 100644 index 10b22a6830673752dcf922cee7914c39069a4333..0000000000000000000000000000000000000000 --- a/spaces/feng2022/Time-TravelRephotography/Time_TravelRephotography/models/encoder4editing/training/coach.py +++ /dev/null @@ -1,439 +0,0 @@ -import os -import random -import matplotlib -import matplotlib.pyplot as plt - -matplotlib.use('Agg') - -import torch -from torch import nn, autograd -from torch.utils.data import DataLoader -from torch.utils.tensorboard import SummaryWriter -import torch.nn.functional as F - -from utils import common, train_utils -from criteria import id_loss, moco_loss -from configs import data_configs -from datasets.images_dataset import ImagesDataset -from criteria.lpips.lpips import LPIPS -from models.psp import pSp -from models.latent_codes_pool import LatentCodesPool -from models.discriminator import LatentCodesDiscriminator -from models.encoders.psp_encoders import ProgressiveStage -from training.ranger import Ranger - -random.seed(0) -torch.manual_seed(0) - - -class Coach: - def __init__(self, opts, prev_train_checkpoint=None): - self.opts = opts - - self.global_step = 0 - - self.device = 'cuda:0' - self.opts.device = self.device - # Initialize network - self.net = pSp(self.opts).to(self.device) - - # Initialize loss - if self.opts.lpips_lambda > 0: - self.lpips_loss = LPIPS(net_type=self.opts.lpips_type).to(self.device).eval() - if self.opts.id_lambda > 0: - if 'ffhq' in self.opts.dataset_type or 'celeb' in self.opts.dataset_type: - 
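-                # Face datasets ('ffhq' / 'celeb') use the pretrained face-recognition identity loss; other domains fall back to the MoCo-based feature similarity loss in the else branch.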
self.id_loss = id_loss.IDLoss().to(self.device).eval() - else: - self.id_loss = moco_loss.MocoLoss(opts).to(self.device).eval() - self.mse_loss = nn.MSELoss().to(self.device).eval() - - # Initialize optimizer - self.optimizer = self.configure_optimizers() - - # Initialize discriminator - if self.opts.w_discriminator_lambda > 0: - self.discriminator = LatentCodesDiscriminator(512, 4).to(self.device) - self.discriminator_optimizer = torch.optim.Adam(list(self.discriminator.parameters()), - lr=opts.w_discriminator_lr) - self.real_w_pool = LatentCodesPool(self.opts.w_pool_size) - self.fake_w_pool = LatentCodesPool(self.opts.w_pool_size) - - # Initialize dataset - self.train_dataset, self.test_dataset = self.configure_datasets() - self.train_dataloader = DataLoader(self.train_dataset, - batch_size=self.opts.batch_size, - shuffle=True, - num_workers=int(self.opts.workers), - drop_last=True) - self.test_dataloader = DataLoader(self.test_dataset, - batch_size=self.opts.test_batch_size, - shuffle=False, - num_workers=int(self.opts.test_workers), - drop_last=True) - - # Initialize logger - log_dir = os.path.join(opts.exp_dir, 'logs') - os.makedirs(log_dir, exist_ok=True) - self.logger = SummaryWriter(log_dir=log_dir) - - # Initialize checkpoint dir - self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints') - os.makedirs(self.checkpoint_dir, exist_ok=True) - self.best_val_loss = None - if self.opts.save_interval is None: - self.opts.save_interval = self.opts.max_steps - - if prev_train_checkpoint is not None: - self.load_from_train_checkpoint(prev_train_checkpoint) - prev_train_checkpoint = None - - def load_from_train_checkpoint(self, ckpt): - print('Loading previous training data...') - self.global_step = ckpt['global_step'] + 1 - self.best_val_loss = ckpt['best_val_loss'] - self.net.load_state_dict(ckpt['state_dict']) - - if self.opts.keep_optimizer: - self.optimizer.load_state_dict(ckpt['optimizer']) - if self.opts.w_discriminator_lambda > 0: - self.discriminator.load_state_dict(ckpt['discriminator_state_dict']) - self.discriminator_optimizer.load_state_dict(ckpt['discriminator_optimizer_state_dict']) - if self.opts.progressive_steps: - self.check_for_progressive_training_update(is_resume_from_ckpt=True) - print(f'Resuming training from step {self.global_step}') - - def train(self): - self.net.train() - if self.opts.progressive_steps: - self.check_for_progressive_training_update() - while self.global_step < self.opts.max_steps: - for batch_idx, batch in enumerate(self.train_dataloader): - loss_dict = {} - if self.is_training_discriminator(): - loss_dict = self.train_discriminator(batch) - x, y, y_hat, latent = self.forward(batch) - loss, encoder_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent) - loss_dict = {**loss_dict, **encoder_loss_dict} - self.optimizer.zero_grad() - loss.backward() - self.optimizer.step() - - # Logging related - if self.global_step % self.opts.image_interval == 0 or ( - self.global_step < 1000 and self.global_step % 25 == 0): - self.parse_and_log_images(id_logs, x, y, y_hat, title='images/train/faces') - if self.global_step % self.opts.board_interval == 0: - self.print_metrics(loss_dict, prefix='train') - self.log_metrics(loss_dict, prefix='train') - - # Validation related - val_loss_dict = None - if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps: - val_loss_dict = self.validate() - if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss): - self.best_val_loss = 
val_loss_dict['loss'] - self.checkpoint_me(val_loss_dict, is_best=True) - - if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps: - if val_loss_dict is not None: - self.checkpoint_me(val_loss_dict, is_best=False) - else: - self.checkpoint_me(loss_dict, is_best=False) - - if self.global_step == self.opts.max_steps: - print('OMG, finished training!') - break - - self.global_step += 1 - if self.opts.progressive_steps: - self.check_for_progressive_training_update() - - def check_for_progressive_training_update(self, is_resume_from_ckpt=False): - for i in range(len(self.opts.progressive_steps)): - if is_resume_from_ckpt and self.global_step >= self.opts.progressive_steps[i]: # Case checkpoint - self.net.encoder.set_progressive_stage(ProgressiveStage(i)) - if self.global_step == self.opts.progressive_steps[i]: # Case training reached progressive step - self.net.encoder.set_progressive_stage(ProgressiveStage(i)) - - def validate(self): - self.net.eval() - agg_loss_dict = [] - for batch_idx, batch in enumerate(self.test_dataloader): - cur_loss_dict = {} - if self.is_training_discriminator(): - cur_loss_dict = self.validate_discriminator(batch) - with torch.no_grad(): - x, y, y_hat, latent = self.forward(batch) - loss, cur_encoder_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent) - cur_loss_dict = {**cur_loss_dict, **cur_encoder_loss_dict} - agg_loss_dict.append(cur_loss_dict) - - # Logging related - self.parse_and_log_images(id_logs, x, y, y_hat, - title='images/test/faces', - subscript='{:04d}'.format(batch_idx)) - - # For first step just do sanity test on small amount of data - if self.global_step == 0 and batch_idx >= 4: - self.net.train() - return None # Do not log, inaccurate in first batch - - loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict) - self.log_metrics(loss_dict, prefix='test') - self.print_metrics(loss_dict, prefix='test') - - self.net.train() - return loss_dict - - def checkpoint_me(self, loss_dict, is_best): - save_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(self.global_step) - save_dict = self.__get_save_dict() - checkpoint_path = os.path.join(self.checkpoint_dir, save_name) - torch.save(save_dict, checkpoint_path) - with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f: - if is_best: - f.write( - '**Best**: Step - {}, Loss - {:.3f} \n{}\n'.format(self.global_step, self.best_val_loss, loss_dict)) - else: - f.write('Step - {}, \n{}\n'.format(self.global_step, loss_dict)) - - def configure_optimizers(self): - params = list(self.net.encoder.parameters()) - if self.opts.train_decoder: - params += list(self.net.decoder.parameters()) - else: - self.requires_grad(self.net.decoder, False) - if self.opts.optim_name == 'adam': - optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate) - else: - optimizer = Ranger(params, lr=self.opts.learning_rate) - return optimizer - - def configure_datasets(self): - if self.opts.dataset_type not in data_configs.DATASETS.keys(): - Exception('{} is not a valid dataset_type'.format(self.opts.dataset_type)) - print('Loading dataset for {}'.format(self.opts.dataset_type)) - dataset_args = data_configs.DATASETS[self.opts.dataset_type] - transforms_dict = dataset_args['transforms'](self.opts).get_transforms() - train_dataset = ImagesDataset(source_root=dataset_args['train_source_root'], - target_root=dataset_args['train_target_root'], - source_transform=transforms_dict['transform_source'], - target_transform=transforms_dict['transform_gt_train'], - 
opts=self.opts) - test_dataset = ImagesDataset(source_root=dataset_args['test_source_root'], - target_root=dataset_args['test_target_root'], - source_transform=transforms_dict['transform_source'], - target_transform=transforms_dict['transform_test'], - opts=self.opts) - print("Number of training samples: {}".format(len(train_dataset))) - print("Number of test samples: {}".format(len(test_dataset))) - return train_dataset, test_dataset - - def calc_loss(self, x, y, y_hat, latent): - loss_dict = {} - loss = 0.0 - id_logs = None - if self.is_training_discriminator(): # Adversarial loss - loss_disc = 0. - dims_to_discriminate = self.get_dims_to_discriminate() if self.is_progressive_training() else \ - list(range(self.net.decoder.n_latent)) - - for i in dims_to_discriminate: - w = latent[:, i, :] - fake_pred = self.discriminator(w) - loss_disc += F.softplus(-fake_pred).mean() - loss_disc /= len(dims_to_discriminate) - loss_dict['encoder_discriminator_loss'] = float(loss_disc) - loss += self.opts.w_discriminator_lambda * loss_disc - - if self.opts.progressive_steps and self.net.encoder.progressive_stage.value != 18: # delta regularization loss - total_delta_loss = 0 - deltas_latent_dims = self.net.encoder.get_deltas_starting_dimensions() - - first_w = latent[:, 0, :] - for i in range(1, self.net.encoder.progressive_stage.value + 1): - curr_dim = deltas_latent_dims[i] - delta = latent[:, curr_dim, :] - first_w - delta_loss = torch.norm(delta, self.opts.delta_norm, dim=1).mean() - loss_dict[f"delta{i}_loss"] = float(delta_loss) - total_delta_loss += delta_loss - loss_dict['total_delta_loss'] = float(total_delta_loss) - loss += self.opts.delta_norm_lambda * total_delta_loss - - if self.opts.id_lambda > 0: # Similarity loss - loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x) - loss_dict['loss_id'] = float(loss_id) - loss_dict['id_improve'] = float(sim_improvement) - loss += loss_id * self.opts.id_lambda - if self.opts.l2_lambda > 0: - loss_l2 = F.mse_loss(y_hat, y) - loss_dict['loss_l2'] = float(loss_l2) - loss += loss_l2 * self.opts.l2_lambda - if self.opts.lpips_lambda > 0: - loss_lpips = self.lpips_loss(y_hat, y) - loss_dict['loss_lpips'] = float(loss_lpips) - loss += loss_lpips * self.opts.lpips_lambda - loss_dict['loss'] = float(loss) - return loss, loss_dict, id_logs - - def forward(self, batch): - x, y = batch - x, y = x.to(self.device).float(), y.to(self.device).float() - y_hat, latent = self.net.forward(x, return_latents=True) - if self.opts.dataset_type == "cars_encode": - y_hat = y_hat[:, :, 32:224, :] - return x, y, y_hat, latent - - def log_metrics(self, metrics_dict, prefix): - for key, value in metrics_dict.items(): - self.logger.add_scalar('{}/{}'.format(prefix, key), value, self.global_step) - - def print_metrics(self, metrics_dict, prefix): - print('Metrics for {}, step {}'.format(prefix, self.global_step)) - for key, value in metrics_dict.items(): - print('\t{} = '.format(key), value) - - def parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=2): - im_data = [] - for i in range(display_count): - cur_im_data = { - 'input_face': common.log_input_image(x[i], self.opts), - 'target_face': common.tensor2im(y[i]), - 'output_face': common.tensor2im(y_hat[i]), - } - if id_logs is not None: - for key in id_logs[i]: - cur_im_data[key] = id_logs[i][key] - im_data.append(cur_im_data) - self.log_images(title, im_data=im_data, subscript=subscript) - - def log_images(self, name, im_data, subscript=None, log_latest=False): - fig = 
common.vis_faces(im_data) - step = self.global_step - if log_latest: - step = 0 - if subscript: - path = os.path.join(self.logger.log_dir, name, '{}_{:04d}.jpg'.format(subscript, step)) - else: - path = os.path.join(self.logger.log_dir, name, '{:04d}.jpg'.format(step)) - os.makedirs(os.path.dirname(path), exist_ok=True) - fig.savefig(path) - plt.close(fig) - - def __get_save_dict(self): - save_dict = { - 'state_dict': self.net.state_dict(), - 'opts': vars(self.opts) - } - # save the latent avg in state_dict for inference if truncation of w was used during training - if self.opts.start_from_latent_avg: - save_dict['latent_avg'] = self.net.latent_avg - - if self.opts.save_training_data: # Save necessary information to enable training continuation from checkpoint - save_dict['global_step'] = self.global_step - save_dict['optimizer'] = self.optimizer.state_dict() - save_dict['best_val_loss'] = self.best_val_loss - if self.opts.w_discriminator_lambda > 0: - save_dict['discriminator_state_dict'] = self.discriminator.state_dict() - save_dict['discriminator_optimizer_state_dict'] = self.discriminator_optimizer.state_dict() - return save_dict - - def get_dims_to_discriminate(self): - deltas_starting_dimensions = self.net.encoder.get_deltas_starting_dimensions() - return deltas_starting_dimensions[:self.net.encoder.progressive_stage.value + 1] - - def is_progressive_training(self): - return self.opts.progressive_steps is not None - -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Discriminator ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # - - def is_training_discriminator(self): - return self.opts.w_discriminator_lambda > 0 - - @staticmethod - def discriminator_loss(real_pred, fake_pred, loss_dict): - real_loss = F.softplus(-real_pred).mean() - fake_loss = F.softplus(fake_pred).mean() - - loss_dict['d_real_loss'] = float(real_loss) - loss_dict['d_fake_loss'] = float(fake_loss) - - return real_loss + fake_loss - - @staticmethod - def discriminator_r1_loss(real_pred, real_w): - grad_real, = autograd.grad( - outputs=real_pred.sum(), inputs=real_w, create_graph=True - ) - grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean() - - return grad_penalty - - @staticmethod - def requires_grad(model, flag=True): - for p in model.parameters(): - p.requires_grad = flag - - def train_discriminator(self, batch): - loss_dict = {} - x, _ = batch - x = x.to(self.device).float() - self.requires_grad(self.discriminator, True) - - with torch.no_grad(): - real_w, fake_w = self.sample_real_and_fake_latents(x) - real_pred = self.discriminator(real_w) - fake_pred = self.discriminator(fake_w) - loss = self.discriminator_loss(real_pred, fake_pred, loss_dict) - loss_dict['discriminator_loss'] = float(loss) - - self.discriminator_optimizer.zero_grad() - loss.backward() - self.discriminator_optimizer.step() - - # r1 regularization - d_regularize = self.global_step % self.opts.d_reg_every == 0 - if d_regularize: - real_w = real_w.detach() - real_w.requires_grad = True - real_pred = self.discriminator(real_w) - r1_loss = self.discriminator_r1_loss(real_pred, real_w) - - self.discriminator.zero_grad() - r1_final_loss = self.opts.r1 / 2 * r1_loss * self.opts.d_reg_every + 0 * real_pred[0] - r1_final_loss.backward() - self.discriminator_optimizer.step() - loss_dict['discriminator_r1_loss'] = float(r1_final_loss) - - # Reset to previous state - self.requires_grad(self.discriminator, False) - - return loss_dict - - def validate_discriminator(self, test_batch): - with torch.no_grad(): - loss_dict = {} 
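-            # Same real/fake latent sampling and loss as train_discriminator, but run under no_grad purely for test-set logging (no optimizer step).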
- x, _ = test_batch - x = x.to(self.device).float() - real_w, fake_w = self.sample_real_and_fake_latents(x) - real_pred = self.discriminator(real_w) - fake_pred = self.discriminator(fake_w) - loss = self.discriminator_loss(real_pred, fake_pred, loss_dict) - loss_dict['discriminator_loss'] = float(loss) - return loss_dict - - def sample_real_and_fake_latents(self, x): - sample_z = torch.randn(self.opts.batch_size, 512, device=self.device) - real_w = self.net.decoder.get_latent(sample_z) - fake_w = self.net.encoder(x) - if self.opts.start_from_latent_avg: - fake_w = fake_w + self.net.latent_avg.repeat(fake_w.shape[0], 1, 1) - if self.is_progressive_training(): # When progressive training, feed only unique w's - dims_to_discriminate = self.get_dims_to_discriminate() - fake_w = fake_w[:, dims_to_discriminate, :] - if self.opts.use_w_pool: - real_w = self.real_w_pool.query(real_w) - fake_w = self.fake_w_pool.query(fake_w) - if fake_w.ndim == 3: - fake_w = fake_w[:, 0, :] - return real_w, fake_w diff --git a/spaces/fengmuxi/ChatGpt-Web/.github/ISSUE_TEMPLATE/feature_request.md b/spaces/fengmuxi/ChatGpt-Web/.github/ISSUE_TEMPLATE/feature_request.md deleted file mode 100644 index 25c36ab679f0b77bcf754d940ee4f3962c41b131..0000000000000000000000000000000000000000 --- a/spaces/fengmuxi/ChatGpt-Web/.github/ISSUE_TEMPLATE/feature_request.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -name: Feature request -about: Suggest an idea for this project -title: "[Feature] " -labels: '' -assignees: '' - ---- - -**Is your feature request related to a problem? Please describe.** -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] - -**Describe the solution you'd like** -A clear and concise description of what you want to happen. - -**Describe alternatives you've considered** -A clear and concise description of any alternative solutions or features you've considered. - -**Additional context** -Add any other context or screenshots about the feature request here. diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become a Vampire Hunter with Vampire The Masquerade - Out for Blood MOD APK.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become a Vampire Hunter with Vampire The Masquerade - Out for Blood MOD APK.md deleted file mode 100644 index b422a3ed182a33de15510c1e86d2cfe96048d6fb..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Become a Vampire Hunter with Vampire The Masquerade - Out for Blood MOD APK.md +++ /dev/null @@ -1,116 +0,0 @@ -<br /> -<h1>Vampire: The Masquerade — Out for Blood Mod Apk: A Guide for Gamers</h1> -<p>If you are a fan of horror games and vampire stories, you might want to check out <strong>vampire the masquerade out for blood mod apk</strong>. This is a game that lets you explore the world of darkness as a human or a vampire hunter, and make choices that affect your fate. In this article, we will tell you everything you need to know about this game and how to use mod apk to enhance your gaming experience.</p> -<h2>vampire the masquerade out for blood mod apk</h2><br /><p><b><b>Download File</b> >>>>> <a href="https://gohhs.com/2uPtIN">https://gohhs.com/2uPtIN</a></b></p><br /><br /> -<h2>What is vampire the masquerade out for blood mod apk?</h2> -<p>Vampire: The Masquerade — Out for Blood is an interactive horror novel by Jim Dattilo, based on Vampire: The Masquerade and set in the World of Darkness shared story universe. Your choices control the story. 
It's entirely text-based, without graphics or sound effects.</p> -<p>You’ve barely settled into your new home of Jericho Heights on the outskirts of Chicago, before discovering that vampires live in town. You’re struggling to start a new life, meet new people, and maybe even find love. But when your neighbors start disappearing, you’re forced to take action.</p> -<p>Take on the role of a vampire hunter to save your town from the influence of Chastain, a vampire more than a century old. When a group of young thin-blood vampires start a war with Chastain, will you choose sides, or hunt them all? Gather your forces and sharpen your stake to take back the night!</p> -<p>Mod apk is a modified version of the original game that gives you access to unlimited resources, unlocked chapters, and other enhancements. With mod apk, you can enjoy more features and options in the game without spending any money or waiting for ads.</p> -<h2>Why is it popular among gamers?</h2> -<p>Vampire: The Masquerade — Out for Blood is popular among gamers because it offers a captivating story, diverse characters, and immersive gameplay. Here are some reasons why gamers love this game:</p> -<ul> -<li>The world of darkness setting: The game is set in a dark and gritty version of our world where supernatural creatures exist and prey on humans <h2>How to play the game as a human or a vampire hunter?</h2> -<p>The game is an interactive horror novel, which means that you control the story by making choices at certain points. You can choose to play as a human or a vampire hunter, and your decisions will affect your relationships, your morality, and your fate.</p> -<p>As a human, you are a newcomer to the town of Jericho Heights, where you soon discover that vampires are lurking in the shadows. You can choose to join a group of vampire hunters, who are determined to protect the town from the bloodsuckers, or you can try to stay out of trouble and live a normal life. 
However, you will soon realize that there is no escaping the world of darkness, and that your choices will have consequences.</p> -<p>As a vampire hunter, you are a member of the Society of Leopold, a secret organization that has been hunting vampires for centuries. 
You have been sent to Jericho Heights to investigate the disappearance of several hunters, and to eliminate the vampire threat. You can choose to follow the orders of your superiors, or to act on your own initiative. You can also choose to be ruthless or merciful, and to embrace or reject your faith.</p> -<p>In both cases, you will have to deal with various challenges, such as combat, investigation, stealth, persuasion, and romance. You will also have to balance your humanity and your bloodlust, as well as your loyalty and your independence. The game has multiple endings, depending on your choices and actions.</p> -<h2>How to use mod apk to improve your gameplay?</h2> -<p>Mod apk is a modified version of the original game that gives you access to unlimited resources, unlocked chapters, and other enhancements. With mod apk, you can enjoy more features and options in the game without spending any money or waiting for ads.</p> -<p>To use mod apk, you need to download and install the mod apk file from a reliable source. You can find the link to download the mod apk file at the end of this article. Before installing the mod apk file, make sure that you have enough storage space on your device, and that you have enabled the installation of apps from unknown sources in your settings.</p> -<p>Once you have installed the mod apk file, you can launch the game and enjoy the benefits of mod apk. You will have unlimited currency, which you can use to buy items, upgrade skills, and unlock chapters. You will also have free packs, which contain various rewards and bonuses. You will also be able to remove ads, which can be annoying and distracting.</p> -<h2>How to avoid potential risks or issues with mod apk?</h2> -<p>While mod apk can enhance your gaming experience, it can also pose some risks or issues if not used properly. Here are some precautions to take before using mod apk:</p> -<ul> -<li>Back up your data: Mod apk can sometimes cause errors or glitches in the game, which can result in data loss or corruption. To avoid losing your progress or achievements, make sure that you back up your data before using mod apk. You can use cloud services or external storage devices to save your data.</li> -<li>Check for malware: Mod apk can sometimes contain malware or viruses, which can harm your device or compromise your security. To avoid downloading malicious files, make sure that you download mod apk from a trusted source, and that you scan the file with an antivirus software before installing it.</li> -<li>Update regularly: Mod apk can sometimes become outdated or incompatible with the latest version of the game or the operating system. To avoid crashing or freezing issues, make sure that you update mod apk regularly whenever there is a new update available.</li> -</ul> -<h2>What are the main features of the game?</h2> -<p>The game has many features that make it an engaging and immersive horror adventure. Here are some of the main features of the game:</p> -<ul> -<li>The story: The game has a rich and complex story that is influenced by your choices and actions. The game has multiple branches and endings, as well as different perspectives and outcomes depending on whether you play as a human or a vampire hunter.</li> -<li>The characters: The game has a diverse and dynamic cast of characters that you can interact with, befriend, romance, or antagonize. 
The game has over 30 characters with their own personalities, backgrounds, motivations, and secrets.</li> -<li>The graphics: The game has stunning graphics that create a realistic and atmospheric world of darkness. The game has detailed environments, lighting effects, shadows, and textures that enhance the mood and tone of the game.</li> -<li>The sound effects: The game has immersive sound effects that add to the tension and suspense of the game. The game has ambient sounds, music tracks, voice acting, and sound effects that create a captivating and immersive audio experience.</li <h2>What are the main features of mod apk?</h2> -<p>Mod apk is a modified version of the original game that gives you access to unlimited resources, unlocked chapters, and other enhancements. Here are some of the main features of mod apk:</p> -<ul> -<li>Unlimited currency: Mod apk gives you unlimited coins and gems, which you can use to buy items, upgrade skills, and unlock chapters. You can also use them to buy premium choices, which can affect the story and the outcome of the game.</li> -<li>Free packs: Mod apk gives you free packs, which contain various rewards and bonuses. You can get items, skills, allies, enemies, and more from these packs. You can also get rare and exclusive items that are not available in the original game.</li> -<li>Ad removal: Mod apk removes all the ads from the game, which can be annoying and distracting. You can enjoy the game without any interruptions or delays.</li> -<li>Other enhancements: Mod apk also provides other enhancements, such as faster loading time, smoother performance, better compatibility, and more. You can also customize your game settings, such as the font size, the sound volume, and the language.</li> -</ul> -<h2>How do these features enhance your gaming experience?</h2> -<p>These features enhance your gaming experience by giving you more options, flexibility, and convenience in the game. You can enjoy the game without any limitations or restrictions. You can also explore different scenarios and outcomes in the game by using mod apk. Here are some benefits of using mod apk:</p> -<ul> -<li>More fun: Mod apk makes the game more fun and enjoyable by giving you more resources, features, and options. You can have more freedom and creativity in the game by using mod apk.</li> -<li>More challenge: Mod apk makes the game more challenging and exciting by giving you more choices, consequences, and endings. You can have more impact and influence in the game by using mod apk.</li> -<li>More replay value: Mod apk makes the game more replayable and varied by giving you more branches, perspectives, and outcomes. You can have more diversity and variety in the game by using mod apk.</li> -</ul> -<h2>Conclusion</h2> -<p>Vampire: The Masquerade — Out for Blood is a game that lets you experience the world of darkness as a human or a vampire hunter. You can make choices that affect your fate and the fate of others in this interactive horror novel. Mod apk is a modified version of the original game that gives you access to unlimited resources, unlocked chapters, and other enhancements. With mod apk, you can enjoy more features and options in the game without spending any money or waiting for ads.</p> -<p>If you are interested in trying this game and using mod apk, you can download it from the link below. Make sure that you follow the instructions and precautions before installing mod apk. 
We hope that this article has helped you learn more about vampire the masquerade out for blood mod apk and how to use it to enhance your gaming experience.</p> -<p>Have fun playing this game and exploring the world of darkness!</p> -<h2>Frequently Asked Questions</h2> -<p>Here are some frequently asked questions about vampire the masquerade out for blood mod apk:</p> -<ol> -<li><strong>Is vampire the masquerade out for blood mod apk safe to use?</strong></li> -<p>Vampire: The Masquerade — Out for Blood mod apk is safe to use if you download it from a reliable source, scan it with an antivirus software before installing it, back up your data before using it, and update it regularly whenever there is a new update available.</p> -<li><strong>Is vampire the masquerade out for blood mod apk compatible with my device?</strong></li> -<p>Vampire: The Masquerade — Out for Blood mod apk is compatible with most Android devices that have Android 4.1 or higher. However, some devices may not support some features or functions of mod apk. To ensure compatibility, make sure that your device meets the minimum requirements of the original game.</p> -<li><strong>How do I uninstall vampire the masquerade out for blood mod apk?</strong></li> -<p>To uninstall vampire: The Masquerade — Out for Blood mod apk, you can simply delete the mod apk file from your device. Alternatively, you can go to your device settings, find the app manager or application list, select vampire: The Masquerade — Out for Blood mod apk, and tap on uninstall.</p> -<li><strong>Can I play vampire the masquerade out for blood mod apk offline?</strong></li <p>Vampire: The Masquerade — Out for Blood mod apk can be played offline without an internet connection. However, some features or functions of mod apk may require an internet connection <p>to play online with other players, access online content, or update the game. To play online, make sure that you have a stable internet connection and that you have logged in to your Google Play account.</p> -<li><strong>Where can I find more information about vampire the masquerade out for blood mod apk?</strong></li> -<p>If you want to learn more about vampire the masquerade out for blood mod apk, you can visit the official website of the game, the official Facebook page of the game, or the official Reddit community of the game. You can also read reviews, ratings, and feedback from other players who have used mod apk.</p> -</ol></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Blue Followers Apk The Ultimate Guide to Boost Your Instagram Likes and Followers.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Blue Followers Apk The Ultimate Guide to Boost Your Instagram Likes and Followers.md deleted file mode 100644 index e67027d674226fcea40e7ebf1d8bf0c119a099bb..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Blue Followers Apk The Ultimate Guide to Boost Your Instagram Likes and Followers.md +++ /dev/null @@ -1,150 +0,0 @@ -<br /> -<h1>Download Blue Followers APK: A Free and Easy Way to Boost Your Instagram Presence</h1> -<p>If you are looking for a way to increase your Instagram followers and likes without spending any money, you might be interested in Blue Followers APK. This is a third-party app that claims to provide you with free and unlimited followers and likes on your Instagram account. But is it safe and reliable? How does it work? And what are the alternatives? 
In this article, we will answer these questions and more, so you can decide whether to download Blue Followers APK or not.</p> -<h2>download blue followers apk</h2><br /><p><b><b>Download Zip</b> ✒ <a href="https://gohhs.com/2uPmfV">https://gohhs.com/2uPmfV</a></b></p><br /><br /> - <h2>What is Blue Followers APK?</h2> -<p>Blue Followers APK is an Android app that allows you to get free followers and likes on your Instagram account. It is not available on the Google Play Store, so you have to download it from other sources. The app works by connecting you with other users who are also looking for followers and likes, and exchanging them with each other. You can choose how many followers and likes you want to get, and the app will deliver them to your account within minutes.</p> - <h3>Features of Blue Followers APK</h3> -<p>Some of the features of Blue Followers APK are:</p> -<ul> -<li>It is free and easy to use.</li> -<li>It does not require any login or password.</li> -<li>It does not ask for any personal information or access to your account.</li> -<li>It supports multiple languages, including English, Spanish, Portuguese, Arabic, Turkish, and more.</li> -<li>It has a user-friendly interface and design.</li> -<li>It offers various options to customize your preferences and settings.</li> -<li>It updates regularly to fix bugs and improve performance.</li> -</ul> - <h3>How to download and install Blue Followers APK</h3> -<p>To download and install Blue Followers APK, you need to follow these steps:</p> -<ol> -<li>Go to [this link](^4^) and click on the download button.</li> -<li>Wait for the file to be downloaded on your device.</li> -<li>Go to your device settings and enable the option to install apps from unknown sources.</li> -<li>Locate the downloaded file and tap on it to start the installation process.</li> -<li>Follow the instructions on the screen and grant the necessary permissions.</li> -<li>Once the installation is complete, open the app and enjoy.</li> -</ol> - <h3>How to use Blue Followers APK</h3> -<p>To use Blue Followers APK, you need to follow these steps:</p> -<p>How to download blue followers apk for Instagram<br /> -Blue followers apk free download latest version<br /> -Download blue followers apk and get free likes<br /> -Blue followers apk download for Android devices<br /> -Benefits of using blue followers apk for Instagram<br /> -Download blue followers apk mod with unlimited coins<br /> -Blue followers apk review and rating<br /> -Download blue followers apk from apkresult.com[^1^]<br /> -Blue followers apk download link and installation guide<br /> -Download blue followers apk and boost your Instagram profile<br /> -Blue followers apk features and specifications<br /> -Download blue followers apk without root or jailbreak<br /> -Blue followers apk download for iOS devices<br /> -Download blue followers apk and earn money from Instagram<br /> -Blue followers apk comparison with other apps<br /> -Download blue followers apk and join the community<br /> -Blue followers apk troubleshooting and support<br /> -Download blue followers apk and enjoy premium features<br /> -Blue followers apk alternatives and recommendations<br /> -Download blue followers apk and increase your engagement rate<br /> -Blue followers apk pros and cons<br /> -Download blue followers apk and get verified on Instagram<br /> -Blue followers apk FAQs and answers<br /> -Download blue followers apk and create amazing content<br /> -Blue followers apk testimonials and feedback<br /> -Download blue 
followers apk and grow your brand awareness<br /> -Blue followers apk updates and news<br /> -Download blue followers apk and access exclusive offers<br /> -Blue followers apk tips and tricks<br /> -Download blue followers apk and connect with influencers<br /> -Blue followers apk requirements and compatibility<br /> -Download blue followers apk and discover new trends<br /> -Blue followers apk privacy policy and terms of service<br /> -Download blue followers apk and share your experience<br /> -Blue followers apk awards and achievements<br /> -Download blue followers apk and learn from experts<br /> -Blue followers apk security and reliability<br /> -Download blue followers apk and customize your settings<br /> -Blue followers apk statistics and analytics<br /> -Download blue followers apk and invite your friends</p> -<ol> -<li>Open the app and select your language.</li> -<li>Select the option to get followers or likes.</li> -<li>Enter your Instagram username (not your password) and click on submit.</li> -<li>Select how many followers or likes you want to get (the maximum is 1000 per day).</li> -<li>Wait for the app to process your request and deliver the results to your account.</li> -<li>You can repeat this process as many times as you want, but be careful not to overdo it or you might get banned by Instagram.</li> -</ol> - <h2>Why use Blue Followers APK?</h2> -<p>You might be wondering why you should use Blue Followers APK instead of other methods to boost your Instagram presence. Here are some of the reasons why:</p> - <h3>Benefits of using Blue Followers APK</h3> -<p>Some of the benefits of using Blue Followers APK are:</p> -<ul> -<li>You can get free followers and likes without spending any money or time.</li> -<li>You can increase your popularity and visibility on Instagram and attract more attention from potential customers, sponsors, or collaborators.</li> -<li>You can improve your engagement rate and reach more people with your content.</li> -<li <p>You can enhance your credibility and authority in your niche and stand out from the competition.</li> -<li>You can have fun and experiment with different types of content and see what works best for your audience.</li> -</ul> - <h3>Risks of using Blue Followers APK</h3> -<p>However, using Blue Followers APK also comes with some risks that you should be aware of. Some of the risks are:</p> -<ul> -<li>You might get fake or inactive followers and likes that do not interact with your content or add any value to your account.</li> -<li>You might get spam or malicious comments or messages from bots or scammers that try to trick you or harm you.</li> -<li>You might violate the terms and conditions of Instagram and get your account suspended or banned for using unauthorized apps or services.</li> -<li>You might lose the trust and respect of your real followers and customers who might notice that you are using fake methods to boost your numbers.</li> -<li>You might damage your reputation and image as a content creator or influencer who does not have organic and authentic growth.</li> -</ul> - <h3>Tips to avoid getting banned by Instagram</h3> -<p>If you decide to use Blue Followers APK, you should follow some tips to avoid getting banned by Instagram. Some of the tips are:</p> -<ul> -<li>Do not use the app too frequently or excessively. Limit yourself to a reasonable amount of followers and likes per day (e.g., 100-200).</li> -<li>Do not use the app on multiple accounts or devices. 
Stick to one account and one device only.</li> -<li>Do not use the app along with other similar apps or services. Choose one option and stick to it.</li> -<li>Do not use the app for accounts that are new, private, or have low activity. Use it only for accounts that are old, public, and have high activity.</li> -<li>Do not use the app for accounts that are related to sensitive or controversial topics, such as politics, religion, sexuality, etc. Use it only for accounts that are related to general or harmless topics, such as entertainment, lifestyle, hobbies, etc.</li> -</ul> - <h2>Alternatives to Blue Followers APK</h2> -<p>If you are not convinced by Blue Followers APK or you want to try other options, there are some alternatives that you can consider. Here are some of them:</p> - <h3>Igmods</h3> -<p>Igmods is another Android app that offers free followers and likes on Instagram. It has similar features and functions as Blue Followers APK, but it also has some differences. For example, Igmods requires you to login with your Instagram account and password, which might pose a security risk. It also has a limit of 50 followers and likes per hour, which might be too slow for some users. You can download Igmods from [this link].</p> - <h3>Gbfollowers</h3> -<p>Gbfollowers is a website that provides free followers and likes on Instagram. It does not require any app installation or login information. It works by generating a link that you have to share with other users who want to get followers and likes as well. You can choose how many followers and likes you want to get, up to 1000 per day. You can access Gbfollowers from [this link].</p> - <h3>Other options</h3> -<p>Besides these two alternatives, there are many other options that you can find online. However, you should be careful and cautious when using them, as they might be unsafe, unreliable, or illegal. Some of the things that you should look out for when choosing an option are:</p> -<ul> -<li>The source and reputation of the app or service. Check the reviews, ratings, feedbacks, and testimonials from other users.</li> -<li>The security and privacy of your account and information. Avoid giving any login or personal information or granting any access to your account.</li> -<li>The quality and quantity of the followers and likes. Avoid getting too many or too few followers and likes that might look suspicious or unnatural.</li> -<li>The terms and conditions of Instagram. Avoid violating any rules or policies that might get you in trouble or banned.</li></ul> - <h2>Conclusion</h2> -<p>In conclusion, Blue Followers APK is a free and easy way to boost your Instagram presence by getting more followers and likes on your account. It has some advantages and disadvantages that you should weigh before using it. It also has some alternatives that you can consider if you want to try other options. 
However, the best way to grow your Instagram account is by creating high-quality and engaging content that attracts real and loyal followers who love what you do.</p> - <h3>Summary of the main points</h3> -<p>Here is a summary of the main points that we covered in this article:</p> -<ul> -<li <li>Blue Followers APK is a third-party app that provides free followers and likes on Instagram.</li> -<li>It has some features, such as no login, multiple languages, and various options.</li> -<li>It has some risks, such as fake followers, spam comments, account suspension, and reputation damage.</li> -<li>It has some tips to avoid getting banned, such as limiting the usage, sticking to one account, and avoiding sensitive topics.</li> -<li>It has some alternatives, such as Igmods, Gbfollowers, and other options.</li> -<li>The best way to grow your Instagram account is by creating high-quality and engaging content.</li> -</ul> - <h3>Call to action</h3> -<p>If you are interested in downloading Blue Followers APK, you can do so by clicking on the link below. However, we recommend that you use it with caution and moderation, and that you also focus on creating great content for your Instagram account. Remember that quality is more important than quantity when it comes to social media success.</p> -<p>[Download Blue Followers APK here]</p> - <h3>FAQs</h3> -<p>Here are some of the frequently asked questions that you might have about Blue Followers APK:</p> -<ol> -<li>Is Blue Followers APK safe?</li> -<p>Blue Followers APK is not an official app from Instagram, so it might not be safe or secure. It might contain viruses, malware, or spyware that could harm your device or data. It might also expose your account to hackers or scammers who could steal your information or money. Therefore, you should use it at your own risk and discretion.</p> -<li>Is Blue Followers APK legal?</li> -<p>Blue Followers APK is not legal or authorized by Instagram. It violates the terms and conditions of Instagram that prohibit the use of any third-party apps or services that manipulate the number of followers or likes on your account. Therefore, you could face legal consequences or penalties if you use it.</p> -<li>Does Blue Followers APK work?</li> -<p>Blue Followers APK might work for some users who want to get more followers and likes on their Instagram account. However, it might not work for others who might encounter errors, bugs, or glitches while using it. It might also stop working at any time due to updates or changes from Instagram.</p> -<li>How long does Blue Followers APK take to deliver the results?</li> -<p>Blue Followers APK claims to deliver the results within minutes after you submit your request. However, this might vary depending on the availability of the app, the demand of the users, and the speed of your internet connection. It might also take longer if you request a large number of followers or likes.</p> -<li>Can I use Blue Followers APK for other social media platforms?</li> -<p>No, you cannot use Blue Followers APK for other social media platforms. It is designed and developed only for Instagram. 
If you want to get more followers or likes on other platforms, such as Facebook, Twitter, YouTube, etc., you will need to find other apps or services that are compatible with them.</p> -</ol></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bubble Shooter Classic A Fun and Relaxing Game for All Ages.md b/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bubble Shooter Classic A Fun and Relaxing Game for All Ages.md deleted file mode 100644 index 4ed31a6d0656c18ef6d2e0afeb85ad01053d646c..0000000000000000000000000000000000000000 --- a/spaces/feregVcuzo/sanity-test-midi/checkpoint/Bubble Shooter Classic A Fun and Relaxing Game for All Ages.md +++ /dev/null @@ -1,115 +0,0 @@ - -<h1>Bubble Shooter Classic Free Download: A Fun and Relaxing Game for Everyone</h1> -<p>Do you love playing vintage games like bubble shooter? Do you want to have a fun and relaxing time with a simple and addictive game? If you answered yes, then you should try <strong>Bubble Shooter Classic</strong>, a free game that you can download and play on your device. In this article, we will tell you everything you need to know about this classic game, including what it is, how to play it, and where to download it. We will also give you some tips and tricks to help you beat the levels and have more fun. Let's get started!</p> -<h2>bubble shooter classic free download</h2><br /><p><b><b>Download File</b> ✅ <a href="https://gohhs.com/2uPmsO">https://gohhs.com/2uPmsO</a></b></p><br /><br /> - <h2>What is Bubble Shooter Classic?</h2> -<p>Bubble Shooter Classic is a game that belongs to the genre of bubble shooter games, which are games where you have to shoot and pop bubbles of the same color. The game is inspired by the original arcade game that was released in the 1990s, but it has been updated with new features and graphics to make it more enjoyable and challenging.</p> - <h3>The gameplay and features of Bubble Shooter Classic</h3> -<p>The gameplay of Bubble Shooter Classic is very simple and easy to learn. You have to aim and shoot bubbles from a cannon at the bottom of the screen, trying to match three or more bubbles of the same color to make them pop and clear the board. You can use your finger to drag the laser sight and lift it to take a shot. You have to be careful not to let the bubbles reach the bottom of the screen, or you will lose the game.</p> -<p>Bubble Shooter Classic has many features that make it more fun and exciting than other bubble shooter games. Some of these features are:</p> -<ul> -<li><strong>Thousands of levels.</strong> The game has over 3000 levels that you can play, each with a different layout and difficulty. You will never get bored with this game, as there is always a new challenge waiting for you.</li> -<li><strong>Boosters and power-ups.</strong> The game also has various boosters and power-ups that you can use to help you clear the levels faster and easier. For example, you can use the fireball to pop 7 bubbles in a row, or the bomb to blast a large area of bubbles.</li> -<li><strong>Daily rewards.</strong> The game rewards you with coins and other prizes every day that you play. You can use these coins to buy more boosters and power-ups, or to unlock new elements and themes for the game.</li> -<li><strong>Social features.</strong> The game also allows you to connect with your friends and family through Facebook, so you can share your progress and achievements with them. 
You can also compete with them on the leaderboard, or send them lives and gifts.</li> -<li><strong>Colorblind mode.</strong> The game also has a colorblind mode that makes it easier for people who have difficulty distinguishing colors to play the game. The bubbles have different shapes and patterns that help them identify their colors.</li> -</ul> - <h3>The benefits of playing Bubble Shooter Classic</h3> -<p>Besides being fun and entertaining, playing Bubble Shooter Classic also has some benefits for your brain and mood. Some of these benefits are:</p> -<ul> -<li><strong>It improves your concentration and focus.</strong> Playing bubble shooter games requires you to pay attention to the colors and patterns of the bubbles, as well as the trajectory of your shots. This helps you improve your concentration and focus skills, which are useful for many tasks in life.</li> -<li><strong>It enhances your problem-solving and strategy skills.</strong> Playing bubble shooter games also requires you to think ahead and plan your moves carefully, as well as adapt to different situations and obstacles. This helps you enhance your problem-solving and strategy skills, which are essential for many challenges in life.</li> -<li><strong>It reduces your stress and anxiety.</strong> Playing bubble shooter games also helps you relax and unwind from your daily worries and pressures. The <p>The game has soothing sounds and colors that calm your nerves and mood. The game also gives you a sense of satisfaction and accomplishment when you pop the bubbles and clear the levels.</li> -</ul> -<p>As you can see, playing Bubble Shooter Classic is not only fun, but also good for your brain and mood. So, what are you waiting for? Download and play this game today and enjoy its benefits!</p> -<p>bubble shooter classic game free download<br /> -bubble shooter classic apk free download<br /> -bubble shooter classic for pc free download<br /> -bubble shooter classic offline free download<br /> -bubble shooter classic mod apk free download<br /> -bubble shooter classic no ads free download<br /> -bubble shooter classic unlimited coins free download<br /> -bubble shooter classic app free download<br /> -bubble shooter classic android free download<br /> -bubble shooter classic windows 10 free download<br /> -bubble shooter classic online free play<br /> -bubble shooter classic original free play<br /> -bubble shooter classic full screen free play<br /> -bubble shooter classic no download free play<br /> -bubble shooter classic without flash free play<br /> -bubble shooter classic unblocked free play<br /> -bubble shooter classic 2 free play<br /> -bubble shooter classic 3 free play<br /> -bubble shooter classic 4 free play<br /> -bubble shooter classic 5 free play<br /> -bubble shooter classic cheats free tips<br /> -bubble shooter classic hack free coins<br /> -bubble shooter classic levels free guide<br /> -bubble shooter classic strategy free hints<br /> -bubble shooter classic tricks free walkthrough<br /> -bubble shooter classic review free rating<br /> -bubble shooter classic update free version<br /> -bubble shooter classic install free link<br /> -bubble shooter classic support free contact<br /> -bubble shooter classic feedback free comment<br /> -how to play bubble shooter classic for free<br /> -how to win bubble shooter classic for free<br /> -how to beat bubble shooter classic for free<br /> -how to get unlimited coins in bubble shooter classic for free<br /> -how to remove ads from bubble shooter classic for free<br /> 
-best bubble shooter classic games for free<br /> -new bubble shooter classic games for free<br /> -fun bubble shooter classic games for free<br /> -addictive bubble shooter classic games for free<br /> -relaxing bubble shooter classic games for free<br /> -retro style bubble shooter classic games for free<br /> -color matching bubble shooter classic games for free<br /> -puzzle solving bubble shooter classic games for free<br /> -brain teasing bubble shooter classic games for free<br /> -family friendly bubble shooter classic games for free<br /> -vintage inspired bubble shooter classic games for free<br /> -arcade mode bubble shooter classic games for free<br /> -pop and blast bubbles in bubble shooter classic games for free</p> - <h2>How to download and play Bubble Shooter Classic for free?</h2> -<p>Downloading and playing Bubble Shooter Classic is very easy and simple. You can download and play this game for free on your Android or Windows device. Here are the steps to follow:</p> - <h3>Downloading Bubble Shooter Classic from Google Play Store</h3> -<p>If you have an Android device, you can download Bubble Shooter Classic from the Google Play Store. Here is how:</p> -<ol> -<li>Open the Google Play Store app on your device.</li> -<li>Search for "Bubble Shooter Classic" in the search bar.</li> -<li>Tap on the game icon that has a blue background and a yellow bubble with a smiley face on it.</li> -<li>Tap on the "Install" button and wait for the game to download and install on your device.</li> -<li>Tap on the "Open" button or find the game icon on your home screen or app drawer and tap on it to launch the game.</li> -</ol> - <h3>Downloading Bubble Shooter Classic from Microsoft Store</h3> -<p>If you have a Windows device, you can download Bubble Shooter Classic from the Microsoft Store. Here is how:</p> -<ol> -<li>Open the Microsoft Store app on your device.</li> -<li>Search for "Bubble Shooter Classic" in the search bar.</li> -<li>Click on the game icon that has a blue background and a yellow bubble with a smiley face on it.</li> -<li>Click on the "Get" button and sign in with your Microsoft account if prompted.</li> -<li>Wait for the game to download and install on your device.</li> -<li>Click on the "Play" button or find the game icon on your start menu or desktop and click on it to launch the game.</li> -</ol> - <h3>Tips and tricks for playing Bubble Shooter Classic</h3> -<p>Now that you have downloaded and installed Bubble Shooter Classic, you are ready to play it. But before you do, here are some tips and tricks that will help you play better and have more fun:</p> -<ul> -<li><strong>Aim carefully.</strong> The most important skill in bubble shooter games is aiming. You have to aim your shots precisely to hit the bubbles of the same color and make them pop. You can use the laser sight to guide your shots, but remember that it is not always accurate. Sometimes, you have to adjust your angle slightly to hit the right spot.</li> -<li><strong>Bounce off the walls.</strong> Another useful skill in bubble shooter games is bouncing. You can bounce your shots off the walls to reach bubbles that are hard to hit directly. This can help you clear more bubbles and create more space on the board. However, be careful not to bounce too much, as this can reduce your accuracy and waste your shots.</li> -<li><strong>Use boosters and power-ups wisely.</strong> Boosters and power-ups are very helpful in bubble shooter games, as they can give you an edge in difficult levels. 
However, they are also limited and costly, so you have to use them wisely. Don't waste them on easy levels or when you don't need them. Save them for when you are stuck or when you want to score more points.</li> -<li><strong>Plan ahead.</strong> Another important skill in bubble shooter games is planning. You have to plan your moves ahead and think about the consequences of your shots. Don't just shoot randomly or impulsively, as this can lead to more trouble later. Try to create clusters of bubbles of the same color, so you can pop them with one shot. Also, try to avoid leaving isolated bubbles or gaps on the board, as they can make it harder to clear later.</li> -<li><strong>Have fun.</strong> The most important tip of all is to have fun. Bubble shooter games are meant to be fun and relaxing, not stressful or frustrating. Don't worry too much about your score or your performance, just enjoy popping bubbles and clearing levels. If you get stuck or bored, you can always switch to another level or theme, or take a break and come back later.</li> -</ul> - <h2>Conclusion</h2> -<p>Bubble Shooter Classic is a fun and relaxing game that everyone can enjoy. It is easy to play, but challenging to master. It has thousands of levels, boosters, power-ups, daily rewards, social features, colorblind mode, and more. It is also good for your brain and mood, as it improves your concentration, problem-solving, and stress relief. You can download and play this game for free on your Android or Windows device, and enjoy its benefits. We hope this article has helped you learn more about this classic game, and we encourage you to try it out today. Have fun and happy popping!</p> - <h2>FAQs</h2> -<p>Here are some frequently asked questions about Bubble Shooter Classic:</p> -<ol> -<li><strong>Is Bubble Shooter Classic free?</strong> Yes, Bubble Shooter Classic is free to download and play. However, it does have some in-app purchases that you can buy to get more coins, boosters, power-ups, and other features.</li> -<li><strong>Is Bubble Shooter Classic offline?</strong> Yes, Bubble Shooter Classic can be played offline without an internet connection. However, some features like daily rewards, social features, and updates may require an internet connection.</li> -<li><strong>How many levels does Bubble Shooter Classic have?</strong> Bubble Shooter Classic has over 3000 levels that you can play, each with a different layout and difficulty. The game also adds new levels regularly, so you will always have something new to play.</li> -<li><strong>How do I change the theme of the game?</strong> You can change the theme of the game by tapping on the gear icon on the top right corner of the screen, and then tapping on the "Themes" button. You can choose from various themes like classic, candy, fruits, animals, and more. You can also unlock new themes by playing the game and earning coins.</li> -<li><strong>How do I contact the developer of the game?</strong> You can contact the developer of the game by tapping on the gear icon on the top right corner of the screen, and then tapping on the "Contact Us" button. 
You can also visit their website or follow them on Facebook for more information and support.</li> -</ol></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/flax-community/koclip/utils.py b/spaces/flax-community/koclip/utils.py deleted file mode 100644 index eec353ce0ad99ce4d8e02da67414ca10841a6564..0000000000000000000000000000000000000000 --- a/spaces/flax-community/koclip/utils.py +++ /dev/null @@ -1,65 +0,0 @@ -import nmslib -import numpy as np -import streamlit as st -from transformers import AutoTokenizer, CLIPProcessor, ViTFeatureExtractor - -from config import MODEL_LIST -from koclip import FlaxHybridCLIP -from global_session import GlobalState -from threading import Lock - - -def load_index(img_file): - state = GlobalState(img_file) - if not hasattr(state, '_lock'): - state._lock = Lock() - print(f"Locking loading of features : {img_file} to avoid concurrent caching.") - - with state._lock: - cached_index = load_index_cached(img_file) - - print(f"Unlocking loading of features : {img_file} to avoid concurrent caching.") - return cached_index - - -@st.cache(allow_output_mutation=True) -def load_index_cached(img_file): - filenames, embeddings = [], [] - with open(img_file, "r") as f: - for line in f: - cols = line.strip().split("\t") - filename = cols[0] - embedding = [float(x) for x in cols[1].split(",")] - filenames.append(filename) - embeddings.append(embedding) - embeddings = np.array(embeddings) - index = nmslib.init(method="hnsw", space="cosinesimil") - index.addDataPointBatch(embeddings) - index.createIndex({"post": 2}, print_progress=True) - return filenames, index - - -def load_model(model_name="koclip/koclip-base"): - state = GlobalState(model_name) - if not hasattr(state, '_lock'): - state._lock = Lock() - print(f"Locking loading of model : {model_name} to avoid concurrent caching.") - - with state._lock: - cached_model = load_model_cached(model_name) - - print(f"Unlocking loading of model : {model_name} to avoid concurrent caching.") - return cached_model - - -@st.cache(allow_output_mutation=True) -def load_model_cached(model_name): - assert model_name in {f"koclip/{model}" for model in MODEL_LIST} - model = FlaxHybridCLIP.from_pretrained(model_name) - processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") - processor.tokenizer = AutoTokenizer.from_pretrained("klue/roberta-large") - if model_name == "koclip/koclip-large": - processor.feature_extractor = ViTFeatureExtractor.from_pretrained( - "google/vit-large-patch16-224" - ) - return model, processor diff --git a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/rendering.py b/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/rendering.py deleted file mode 100644 index de2024e4576cccd4606e5d3eed7be631f806c308..0000000000000000000000000000000000000000 --- a/spaces/flowers-team/SocialAISchool/gym-minigrid/gym_minigrid/rendering.py +++ /dev/null @@ -1,137 +0,0 @@ -import math -import numpy as np - -def downsample(img, factor): - """ - Downsample an image along both dimensions by some factor - """ - - assert img.shape[0] % factor == 0 - assert img.shape[1] % factor == 0 - - img = img.reshape([img.shape[0]//factor, factor, img.shape[1]//factor, factor, 3]) - img = img.mean(axis=3) - img = img.mean(axis=1) - - return img - -def fill_coords(img, fn, color): - """ - Fill pixels of an image with coordinates matching a filter function - """ - - for y in range(img.shape[0]): - for x in range(img.shape[1]): - yf = (y + 0.5) / img.shape[0] - xf = (x + 0.5) / 
img.shape[1] - if fn(xf, yf): - img[y, x] = color - - return img - -def rotate_fn(fin, cx, cy, theta): - def fout(x, y): - x = x - cx - y = y - cy - - x2 = cx + x * math.cos(-theta) - y * math.sin(-theta) - y2 = cy + y * math.cos(-theta) + x * math.sin(-theta) - - return fin(x2, y2) - - return fout - -def point_in_line(x0, y0, x1, y1, r): - p0 = np.array([x0, y0]) - p1 = np.array([x1, y1]) - dir = p1 - p0 - dist = np.linalg.norm(dir) - dir = dir / dist - - xmin = min(x0, x1) - r - xmax = max(x0, x1) + r - ymin = min(y0, y1) - r - ymax = max(y0, y1) + r - - def fn(x, y): - # Fast, early escape test - if x < xmin or x > xmax or y < ymin or y > ymax: - return False - - q = np.array([x, y]) - pq = q - p0 - - # Closest point on line - a = np.dot(pq, dir) - a = np.clip(a, 0, dist) - p = p0 + a * dir - - dist_to_line = np.linalg.norm(q - p) - return dist_to_line <= r - - return fn - -def point_in_circle(cx, cy, r): - def fn(x, y): - return (x-cx)*(x-cx) + (y-cy)*(y-cy) <= r * r - return fn - - -def point_in_circle_clip(cx, cy, r, theta_start=0, theta_end=-np.pi): - def fn(x, y): - - if (x-cx)*(x-cx) + (y-cy)*(y-cy) <= r * r: - if theta_start < 0: - return theta_start > np.arctan2(y-cy, x-cx) > theta_end - else: - return theta_start < np.arctan2(y - cy, x - cx) < theta_end - - return fn - -def point_in_rect(xmin, xmax, ymin, ymax): - def fn(x, y): - return x >= xmin and x <= xmax and y >= ymin and y <= ymax - return fn - -def point_in_triangle(a, b, c): - a = np.array(a) - b = np.array(b) - c = np.array(c) - - def fn(x, y): - v0 = c - a - v1 = b - a - v2 = np.array((x, y)) - a - - # Compute dot products - dot00 = np.dot(v0, v0) - dot01 = np.dot(v0, v1) - dot02 = np.dot(v0, v2) - dot11 = np.dot(v1, v1) - dot12 = np.dot(v1, v2) - - # Compute barycentric coordinates - inv_denom = 1 / (dot00 * dot11 - dot01 * dot01) - u = (dot11 * dot02 - dot01 * dot12) * inv_denom - v = (dot00 * dot12 - dot01 * dot02) * inv_denom - - # Check if point is in triangle - return (u >= 0) and (v >= 0) and (u + v) < 1 - - return fn - -def point_in_quadrangle(a, b, c, d): - fn1 = point_in_triangle(a, b, c) - fn2 = point_in_triangle(b, c, d) - - fn = lambda x, y: fn1(x, y) or fn2(x, y) - return fn - -def highlight_img(img, color=(255, 255, 255), alpha=0.30): - """ - Add highlighting to an image - """ - - blend_img = img + alpha * (np.array(color, dtype=np.uint8) - img) - blend_img = blend_img.clip(0, 255).astype(np.uint8) - img[:, :, :] = blend_img diff --git a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py b/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py deleted file mode 100644 index 689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da..0000000000000000000000000000000000000000 --- a/spaces/georgefen/Face-Landmark-ControlNet/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py +++ /dev/null @@ -1,49 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - decode_head=dict( - type='PSAHead', - in_channels=2048, - in_index=3, - channels=512, - mask_size=(97, 97), - psa_type='bi-direction', - compact=False, - shrink_factor=2, - normalization_factor=1.0, - psa_softmax=True, - dropout_ratio=0.1, - 
num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, - concat_input=False, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/ggwvits/vits-uma-genshin-honkai/text/symbols.py b/spaces/ggwvits/vits-uma-genshin-honkai/text/symbols.py deleted file mode 100644 index edfbd24247be8c757275ce80b9ec27a0ffa808f3..0000000000000000000000000000000000000000 --- a/spaces/ggwvits/vits-uma-genshin-honkai/text/symbols.py +++ /dev/null @@ -1,39 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. -''' - -'''# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' -''' - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' - - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") \ No newline at end of file diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Jaanam Movie Download EXCLUSIVE Free In English Hd.md b/spaces/gotiQspiryo/whisper-ui/examples/Jaanam Movie Download EXCLUSIVE Free In English Hd.md deleted file mode 100644 index b096d47f99d86b95f538be3b72bccfa2b17b609d..0000000000000000000000000000000000000000 --- a/spaces/gotiQspiryo/whisper-ui/examples/Jaanam Movie Download EXCLUSIVE Free In English Hd.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Jaanam Movie Download Free In English Hd</h2><br /><p><b><b>DOWNLOAD</b> ✦ <a href="https://urlgoal.com/2uyN51">https://urlgoal.com/2uyN51</a></b></p><br /><br /> -<br /> - aaccfb2cb3<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/data/random_input_dataset.py b/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/data/random_input_dataset.py deleted file mode 100644 index 886505616cc7f7a515ecebf34fae5c2bc541de03..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/examples/wav2vec/unsupervised/data/random_input_dataset.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import random -from typing import List - -from fairseq.data import BaseWrapperDataset, data_utils - - -class RandomInputDataset(BaseWrapperDataset): - def __init__( - self, - dataset, - random_input_dataset, - input_key_path: List[str], - add_to_input, - pad_idx, - ): - super().__init__(dataset) - self.random_input_dataset = random_input_dataset - if isinstance(input_key_path, str): - input_key_path = [input_key_path] - assert len(input_key_path) > 0 - self.input_key_path = input_key_path - self.add_to_input = add_to_input - self.pad_idx = pad_idx - - def get_target(self, item): - target_loc = item - for p in self.input_key_path[:-1]: - target_loc = target_loc[p] - return self.input_key_path[-1], target_loc - - def get_target_value(self, item): - k, target_loc = self.get_target(item) - return target_loc[k] - - def __getitem__(self, index): - item = self.dataset[index] - k, target_loc = self.get_target(item) - target_loc[k] = random.choice(self.random_input_dataset) - return item - - def collater(self, samples): - collated = self.dataset.collater(samples) - if len(collated) == 0: - return collated - indices = set(collated["id"].tolist()) - - random_inputs = data_utils.collate_tokens( - [self.get_target_value(s) for s in samples if s["id"] in indices], - pad_idx=self.pad_idx, - left_pad=False, - ) - k, target_loc = self.get_target( - collated if not self.add_to_input else collated["net_input"] - ) - target_loc[k] = random_inputs - - return collated diff --git a/spaces/gradio/HuBERT/fairseq/data/__init__.py b/spaces/gradio/HuBERT/fairseq/data/__init__.py deleted file mode 100644 index 8b7eb2ec4fc5190c4dcdfe34b0259e6f448e18a9..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/data/__init__.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
-"""isort:skip_file""" - -from .dictionary import Dictionary, TruncatedDictionary - -from .fairseq_dataset import FairseqDataset, FairseqIterableDataset - -from .base_wrapper_dataset import BaseWrapperDataset - -from .add_target_dataset import AddTargetDataset -from .append_token_dataset import AppendTokenDataset -from .audio.raw_audio_dataset import BinarizedAudioDataset, FileAudioDataset -from .audio.hubert_dataset import HubertDataset -from .backtranslation_dataset import BacktranslationDataset -from .bucket_pad_length_dataset import BucketPadLengthDataset -from .colorize_dataset import ColorizeDataset -from .concat_dataset import ConcatDataset -from .concat_sentences_dataset import ConcatSentencesDataset -from .denoising_dataset import DenoisingDataset -from .id_dataset import IdDataset -from .indexed_dataset import ( - IndexedCachedDataset, - IndexedDataset, - IndexedRawTextDataset, - MMapIndexedDataset, -) -from .language_pair_dataset import LanguagePairDataset -from .list_dataset import ListDataset -from .lm_context_window_dataset import LMContextWindowDataset -from .lru_cache_dataset import LRUCacheDataset -from .mask_tokens_dataset import MaskTokensDataset -from .monolingual_dataset import MonolingualDataset -from .multi_corpus_sampled_dataset import MultiCorpusSampledDataset -from .nested_dictionary_dataset import NestedDictionaryDataset -from .noising import NoisingDataset -from .numel_dataset import NumelDataset -from .num_samples_dataset import NumSamplesDataset -from .offset_tokens_dataset import OffsetTokensDataset -from .pad_dataset import LeftPadDataset, PadDataset, RightPadDataset -from .prepend_dataset import PrependDataset -from .prepend_token_dataset import PrependTokenDataset -from .raw_label_dataset import RawLabelDataset -from .replace_dataset import ReplaceDataset -from .resampling_dataset import ResamplingDataset -from .roll_dataset import RollDataset -from .round_robin_zip_datasets import RoundRobinZipDatasets -from .sort_dataset import SortDataset -from .strip_token_dataset import StripTokenDataset -from .subsample_dataset import SubsampleDataset -from .token_block_dataset import TokenBlockDataset -from .transform_eos_dataset import TransformEosDataset -from .transform_eos_lang_pair_dataset import TransformEosLangPairDataset -from .shorten_dataset import TruncateDataset, RandomCropDataset -from .multilingual.sampled_multi_dataset import SampledMultiDataset -from .multilingual.sampled_multi_epoch_dataset import SampledMultiEpochDataset -from .fasta_dataset import FastaDataset, EncodedFastaDataset - -from .iterators import ( - CountingIterator, - EpochBatchIterator, - GroupedIterator, - ShardedIterator, -) - -__all__ = [ - "AddTargetDataset", - "AppendTokenDataset", - "BacktranslationDataset", - "BaseWrapperDataset", - "BinarizedAudioDataset", - "BucketPadLengthDataset", - "ColorizeDataset", - "ConcatDataset", - "ConcatSentencesDataset", - "CountingIterator", - "DenoisingDataset", - "Dictionary", - "EncodedFastaDataset", - "EpochBatchIterator", - "FairseqDataset", - "FairseqIterableDataset", - "FastaDataset", - "FileAudioDataset", - "GroupedIterator", - "HubertDataset", - "IdDataset", - "IndexedCachedDataset", - "IndexedDataset", - "IndexedRawTextDataset", - "LanguagePairDataset", - "LeftPadDataset", - "ListDataset", - "LMContextWindowDataset", - "LRUCacheDataset", - "MaskTokensDataset", - "MMapIndexedDataset", - "MonolingualDataset", - "MultiCorpusSampledDataset", - "NestedDictionaryDataset", - "NoisingDataset", - "NumelDataset", - "NumSamplesDataset", - 
"OffsetTokensDataset", - "PadDataset", - "PrependDataset", - "PrependTokenDataset", - "RandomCropDataset", - "RawLabelDataset", - "ResamplingDataset", - "ReplaceDataset", - "RightPadDataset", - "RollDataset", - "RoundRobinZipDatasets", - "SampledMultiDataset", - "SampledMultiEpochDataset", - "ShardedIterator", - "SortDataset", - "StripTokenDataset", - "SubsampleDataset", - "TokenBlockDataset", - "TransformEosDataset", - "TransformEosLangPairDataset", - "TruncateDataset", - "TruncatedDictionary", -] diff --git a/spaces/gradio/HuBERT/fairseq/tasks/speech_to_text.py b/spaces/gradio/HuBERT/fairseq/tasks/speech_to_text.py deleted file mode 100644 index 8bdf21564367d3647d582c72a6c3c9924760933e..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/fairseq/tasks/speech_to_text.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -import os.path as op -from argparse import Namespace - -from fairseq.data import Dictionary, encoders -from fairseq.data.audio.speech_to_text_dataset import ( - S2TDataConfig, - SpeechToTextDataset, - SpeechToTextDatasetCreator, - get_features_or_waveform -) -from fairseq.tasks import LegacyFairseqTask, register_task - - -logger = logging.getLogger(__name__) - - -@register_task("speech_to_text") -class SpeechToTextTask(LegacyFairseqTask): - @staticmethod - def add_args(parser): - parser.add_argument("data", help="manifest root path") - parser.add_argument( - "--config-yaml", - type=str, - default="config.yaml", - help="Configuration YAML filename (under manifest root)", - ) - parser.add_argument( - "--max-source-positions", - default=6000, - type=int, - metavar="N", - help="max number of tokens in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - - def __init__(self, args, tgt_dict): - super().__init__(args) - self.tgt_dict = tgt_dict - self.data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml)) - - @classmethod - def setup_task(cls, args, **kwargs): - data_cfg = S2TDataConfig(op.join(args.data, args.config_yaml)) - dict_path = op.join(args.data, data_cfg.vocab_filename) - if not op.isfile(dict_path): - raise FileNotFoundError(f"Dict not found: {dict_path}") - tgt_dict = Dictionary.load(dict_path) - logger.info( - f"dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}" - ) - - if getattr(args, "train_subset", None) is not None: - if not all(s.startswith("train") for s in args.train_subset.split(",")): - raise ValueError('Train splits should be named like "train*".') - return cls(args, tgt_dict) - - def build_criterion(self, args): - from fairseq import criterions - - if self.data_cfg.prepend_tgt_lang_tag and args.ignore_prefix_size != 1: - raise ValueError( - 'Please set "--ignore-prefix-size 1" since ' - "target language ID token is prepended as BOS." 
- ) - return criterions.build_criterion(args, self) - - def load_dataset(self, split, epoch=1, combine=False, **kwargs): - is_train_split = split.startswith("train") - pre_tokenizer = self.build_tokenizer(self.args) - bpe_tokenizer = self.build_bpe(self.args) - self.datasets[split] = SpeechToTextDatasetCreator.from_tsv( - self.args.data, - self.data_cfg, - split, - self.tgt_dict, - pre_tokenizer, - bpe_tokenizer, - is_train_split=is_train_split, - epoch=epoch, - seed=self.args.seed, - ) - - @property - def target_dictionary(self): - return self.tgt_dict - - @property - def source_dictionary(self): - return None - - def max_positions(self): - return self.args.max_source_positions, self.args.max_target_positions - - def build_model(self, args): - args.input_feat_per_channel = self.data_cfg.input_feat_per_channel - args.input_channels = self.data_cfg.input_channels - return super(SpeechToTextTask, self).build_model(args) - - def build_generator( - self, - models, - args, - seq_gen_cls=None, - extra_gen_cls_kwargs=None, - ): - if self.data_cfg.prepend_tgt_lang_tag and args.prefix_size != 1: - raise ValueError( - 'Please set "--prefix-size 1" since ' - "target language ID token is prepended as BOS." - ) - lang_token_ids = { - i - for s, i in self.tgt_dict.indices.items() - if SpeechToTextDataset.is_lang_tag(s) - } - extra_gen_cls_kwargs = {"symbols_to_strip_from_output": lang_token_ids} - return super().build_generator( - models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs - ) - - def build_tokenizer(self, args): - logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}") - return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer)) - - def build_bpe(self, args): - logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}") - return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer)) - - def get_interactive_tokens_and_lengths(self, lines, encode_fn): - n_frames = [get_features_or_waveform(p).shape[0] for p in lines] - return lines, n_frames - - def build_dataset_for_inference(self, src_tokens, src_lengths, **kwargs): - return SpeechToTextDataset( - "interactive", False, self.data_cfg, src_tokens, src_lengths - ) diff --git a/spaces/gradio/HuBERT/tests/test_sequence_scorer.py b/spaces/gradio/HuBERT/tests/test_sequence_scorer.py deleted file mode 100644 index 42f9447b599bcd7a9913aec37d94ea5078ff43a3..0000000000000000000000000000000000000000 --- a/spaces/gradio/HuBERT/tests/test_sequence_scorer.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
- -import argparse -import unittest - -import tests.utils as test_utils -import torch -from fairseq.sequence_scorer import SequenceScorer - - -class TestSequenceScorer(unittest.TestCase): - def test_sequence_scorer(self): - # construct dummy dictionary - d = test_utils.dummy_dictionary(vocab_size=2) - self.assertEqual(d.pad(), 1) - self.assertEqual(d.eos(), 2) - self.assertEqual(d.unk(), 3) - eos = d.eos() - w1 = 4 - w2 = 5 - - # construct dataloader - data = [ - { - "source": torch.LongTensor([w1, w2, eos]), - "target": torch.LongTensor([w1, w2, w1, eos]), - }, - { - "source": torch.LongTensor([w2, eos]), - "target": torch.LongTensor([w2, w1, eos]), - }, - { - "source": torch.LongTensor([w2, eos]), - "target": torch.LongTensor([w2, eos]), - }, - ] - data_itr = test_utils.dummy_dataloader(data) - - # specify expected output probabilities - args = argparse.Namespace() - unk = 0.0 - args.beam_probs = [ - # step 0: - torch.FloatTensor( - [ - # eos w1 w2 - [0.0, unk, 0.6, 0.4], # sentence 1 - [0.0, unk, 0.4, 0.6], # sentence 2 - [0.0, unk, 0.7, 0.3], # sentence 3 - ] - ), - # step 1: - torch.FloatTensor( - [ - # eos w1 w2 - [0.0, unk, 0.2, 0.7], # sentence 1 - [0.0, unk, 0.8, 0.2], # sentence 2 - [0.7, unk, 0.1, 0.2], # sentence 3 - ] - ), - # step 2: - torch.FloatTensor( - [ - # eos w1 w2 - [0.10, unk, 0.50, 0.4], # sentence 1 - [0.15, unk, 0.15, 0.7], # sentence 2 - [0.00, unk, 0.00, 0.0], # sentence 3 - ] - ), - # step 3: - torch.FloatTensor( - [ - # eos w1 w2 - [0.9, unk, 0.05, 0.05], # sentence 1 - [0.0, unk, 0.00, 0.0], # sentence 2 - [0.0, unk, 0.00, 0.0], # sentence 3 - ] - ), - ] - expected_scores = [ - [0.6, 0.7, 0.5, 0.9], # sentence 1 - [0.6, 0.8, 0.15], # sentence 2 - [0.3, 0.7], # sentence 3 - ] - - task = test_utils.TestTranslationTask.setup_task(args, d, d) - model = task.build_model(args) - scorer = SequenceScorer(task.target_dictionary) - for sample in data_itr: - hypos = task.inference_step(scorer, [model], sample) - for id, hypos_id in zip(sample["id"].tolist(), hypos): - self.assertHypoTokens(hypos_id[0], data[id]["target"]) - self.assertHypoScore(hypos_id[0], expected_scores[id]) - - def assertHypoTokens(self, hypo, tokens): - self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) - - def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): - pos_scores = torch.FloatTensor(pos_probs).log() - self.assertAlmostEqual(hypo["positional_scores"], pos_scores) - self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) - score = pos_scores.sum() - if normalized: - score /= pos_scores.numel() ** lenpen - self.assertLess(abs(score - hypo["score"]), 1e-6) - - def assertAlmostEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertLess((t1 - t2).abs().max(), 1e-4) - - def assertTensorEqual(self, t1, t2): - self.assertEqual(t1.size(), t2.size(), "size mismatch") - self.assertEqual(t1.ne(t2).long().sum(), 0) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/grosenthal/aineid/src/aineid/src/serviceWorker.ts b/spaces/grosenthal/aineid/src/aineid/src/serviceWorker.ts deleted file mode 100644 index 69f26b605b125e3d9a47ef5195be02ea0e38b06a..0000000000000000000000000000000000000000 --- a/spaces/grosenthal/aineid/src/aineid/src/serviceWorker.ts +++ /dev/null @@ -1,146 +0,0 @@ -// This optional code is used to register a service worker. -// register() is not called by default. - -// This lets the app load faster on subsequent visits in production, and gives -// it offline capabilities. 
However, it also means that developers (and users) -// will only see deployed updates on subsequent visits to a page, after all the -// existing tabs open on the page have been closed, since previously cached -// resources are updated in the background. - -// To learn more about the benefits of this model and instructions on how to -// opt-in, read https://cra.link/PWA - -const isLocalhost = Boolean( - window.location.hostname === "localhost" || - // [::1] is the IPv6 localhost address. - window.location.hostname === "[::1]" || - // 127.0.0.0/8 are considered localhost for IPv4. - window.location.hostname.match( - /^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/, - ), -) - -type Config = { - onSuccess?: (registration: ServiceWorkerRegistration) => void - onUpdate?: (registration: ServiceWorkerRegistration) => void -} - -export function register(config?: Config) { - if (process.env.NODE_ENV === "production" && "serviceWorker" in navigator) { - // The URL constructor is available in all browsers that support SW. - const publicUrl = new URL(process.env.PUBLIC_URL, window.location.href) - if (publicUrl.origin !== window.location.origin) { - // Our service worker won't work if PUBLIC_URL is on a different origin - // from what our page is served on. This might happen if a CDN is used to - // serve assets; see https://github.com/facebook/create-react-app/issues/2374 - return - } - - window.addEventListener("load", () => { - const swUrl = `${process.env.PUBLIC_URL}/service-worker.js` - - if (isLocalhost) { - // This is running on localhost. Let's check if a service worker still exists or not. - checkValidServiceWorker(swUrl, config) - - // Add some additional logging to localhost, pointing developers to the - // service worker/PWA documentation. - navigator.serviceWorker.ready.then(() => { - console.log( - "This web app is being served cache-first by a service " + - "worker. To learn more, visit https://cra.link/PWA", - ) - }) - } else { - // Is not localhost. Just register service worker - registerValidSW(swUrl, config) - } - }) - } -} - -function registerValidSW(swUrl: string, config?: Config) { - navigator.serviceWorker - .register(swUrl) - .then((registration) => { - registration.onupdatefound = () => { - const installingWorker = registration.installing - if (installingWorker == null) { - return - } - installingWorker.onstatechange = () => { - if (installingWorker.state === "installed") { - if (navigator.serviceWorker.controller) { - // At this point, the updated precached content has been fetched, - // but the previous service worker will still serve the older - // content until all client tabs are closed. - console.log( - "New content is available and will be used when all " + - "tabs for this page are closed. See https://cra.link/PWA.", - ) - - // Execute callback - if (config && config.onUpdate) { - config.onUpdate(registration) - } - } else { - // At this point, everything has been precached. - // It is the perfect time to display a - // "Content is cached for offline use." message. - console.log("Content is cached for offline use.") - - // Execute callback - if (config && config.onSuccess) { - config.onSuccess(registration) - } - } - } - } - } - }) - .catch((error) => { - console.error("Error during service worker registration:", error) - }) -} - -function checkValidServiceWorker(swUrl: string, config?: Config) { - // Check if the service worker can be found. If it can't reload the page. 
- fetch(swUrl, { - headers: { "Service-Worker": "script" }, - }) - .then((response) => { - // Ensure service worker exists, and that we really are getting a JS file. - const contentType = response.headers.get("content-type") - if ( - response.status === 404 || - (contentType != null && contentType.indexOf("javascript") === -1) - ) { - // No service worker found. Probably a different app. Reload the page. - navigator.serviceWorker.ready.then((registration) => { - registration.unregister().then(() => { - window.location.reload() - }) - }) - } else { - // Service worker found. Proceed as normal. - registerValidSW(swUrl, config) - } - }) - .catch(() => { - console.log( - "No internet connection found. App is running in offline mode.", - ) - }) -} - -export function unregister() { - if ("serviceWorker" in navigator) { - navigator.serviceWorker.ready - .then((registration) => { - registration.unregister() - }) - .catch((error) => { - console.error(error.message) - }) - } -} diff --git a/spaces/h2oai/h2ogpt-chatbot/src/utils.py b/spaces/h2oai/h2ogpt-chatbot/src/utils.py deleted file mode 100644 index ba46804f5d32dba376a214589a74aed21a7fc2cb..0000000000000000000000000000000000000000 --- a/spaces/h2oai/h2ogpt-chatbot/src/utils.py +++ /dev/null @@ -1,1569 +0,0 @@ -import ast -import contextlib -import functools -import gc -import getpass -import hashlib -import inspect -import json -import os -import pathlib -import pickle -import platform -import random -import shutil -import subprocess -import sys -import threading -import time -import traceback -import zipfile -from concurrent.futures import ProcessPoolExecutor -from datetime import datetime -from typing import Tuple, Callable, Dict -from queue import Queue, Empty -from concurrent.futures import ThreadPoolExecutor - -import filelock -import fire -import numpy as np -import pandas as pd -import requests -import uuid - -import tabulate -from fire import inspectutils -from joblib import Parallel -from tqdm.auto import tqdm - - -def H2O_Fire(component=None): - config_prefix = "H2OGPT_" - - args = sys.argv[1:] - query_args = [arg.split("=")[0].split(" ")[0].lstrip("-") for arg in args] - - fn_spec = inspectutils.GetFullArgSpec(component) - for key, value in os.environ.items(): - if not ( - (key.startswith(config_prefix) or key.startswith(config_prefix.lower())) - and len(key) > len(config_prefix) - ): - continue # ignore as non H2OGPT argument - - new_key = key[len(config_prefix):].lower() - - if new_key in query_args: - continue # ignore as already passed as script argument - - if new_key not in fn_spec.args: - continue # ignore as not a valid H2OGPT argument - - args.append(f"--{new_key}={value}") - - fire.Fire(component=component, command=args) - - -def set_seed(seed: int): - """ - Sets the seed of the entire notebook so results are the same every time we run. - This is for REPRODUCIBILITY. 
- """ - import torch - np.random.seed(seed) - random_state = np.random.RandomState(seed) - random.seed(seed) - torch.manual_seed(seed) - torch.cuda.manual_seed(seed) - torch.backends.cudnn.deterministic = True - torch.backends.cudnn.benchmark = False - os.environ['PYTHONHASHSEED'] = str(seed) - return random_state - - -def flatten_list(lis): - """Given a list, possibly nested to any level, return it flattened.""" - new_lis = [] - for item in lis: - if type(item) == type([]): - new_lis.extend(flatten_list(item)) - else: - new_lis.append(item) - return new_lis - - -def clear_torch_cache(): - try: - import torch - if torch.cuda.is_available(): - torch.cuda.empty_cache() - torch.cuda.ipc_collect() - gc.collect() - except RuntimeError as e: - print("clear_torch_cache error: %s" % ''.join(traceback.format_tb(e.__traceback__)), flush=True) - - -def ping(): - try: - print('Ping: %s' % str(datetime.now()), flush=True) - except AttributeError: - # some programs wrap print and will fail with flush passed - pass - - -def ping_gpu(): - try: - print('Ping_GPU: %s %s' % (str(datetime.now()), system_info()), flush=True) - except AttributeError: - # some programs wrap print and will fail with flush passed - pass - try: - ping_gpu_memory() - except Exception as e: - print('Ping_GPU memory failure: %s' % str(e), flush=True) - - -def ping_gpu_memory(): - from models.gpu_mem_track import MemTracker - gpu_tracker = MemTracker() # define a GPU tracker - from torch.cuda import memory_summary - gpu_tracker.track() - - -def get_torch_allocated(): - import torch - return torch.cuda.memory_allocated() - - -def get_device(): - import torch - if torch.cuda.is_available(): - device = "cuda" - elif torch.backends.mps.is_built(): - device = "mps" - else: - device = "cpu" - - return device - - -def system_info(): - import psutil - - system = {} - # https://stackoverflow.com/questions/48951136/plot-multiple-graphs-in-one-plot-using-tensorboard - # https://arshren.medium.com/monitoring-your-devices-in-python-5191d672f749 - try: - temps = psutil.sensors_temperatures(fahrenheit=False) - if 'coretemp' in temps: - coretemp = temps['coretemp'] - temp_dict = {k.label: k.current for k in coretemp} - for k, v in temp_dict.items(): - system['CPU_C/%s' % k] = v - except AttributeError: - pass - - # https://github.com/gpuopenanalytics/pynvml/blob/master/help_query_gpu.txt - try: - from pynvml.smi import nvidia_smi - nvsmi = nvidia_smi.getInstance() - - gpu_power_dict = {'W_gpu%d' % i: x['power_readings']['power_draw'] for i, x in - enumerate(nvsmi.DeviceQuery('power.draw')['gpu'])} - for k, v in gpu_power_dict.items(): - system['GPU_W/%s' % k] = v - - gpu_temp_dict = {'C_gpu%d' % i: x['temperature']['gpu_temp'] for i, x in - enumerate(nvsmi.DeviceQuery('temperature.gpu')['gpu'])} - for k, v in gpu_temp_dict.items(): - system['GPU_C/%s' % k] = v - - gpu_memory_free_dict = {'MiB_gpu%d' % i: x['fb_memory_usage']['free'] for i, x in - enumerate(nvsmi.DeviceQuery('memory.free')['gpu'])} - gpu_memory_total_dict = {'MiB_gpu%d' % i: x['fb_memory_usage']['total'] for i, x in - enumerate(nvsmi.DeviceQuery('memory.total')['gpu'])} - gpu_memory_frac_dict = {k: gpu_memory_free_dict[k] / gpu_memory_total_dict[k] for k in gpu_memory_total_dict} - for k, v in gpu_memory_frac_dict.items(): - system[f'GPU_M/%s' % k] = v - except (KeyError, ModuleNotFoundError): - pass - system['hash'] = get_githash() - - return system - - -def system_info_print(): - try: - df = pd.DataFrame.from_dict(system_info(), orient='index') - # avoid slamming GPUs - time.sleep(1) 
- return df.to_markdown() - except Exception as e: - return "Error: %s" % str(e) - - -def zip_data(root_dirs=None, zip_file=None, base_dir='./', fail_any_exception=False): - try: - return _zip_data(zip_file=zip_file, base_dir=base_dir, root_dirs=root_dirs) - except Exception as e: - traceback.print_exc() - print('Exception in zipping: %s' % str(e)) - if not fail_any_exception: - raise - - -def _zip_data(root_dirs=None, zip_file=None, base_dir='./'): - if isinstance(root_dirs, str): - root_dirs = [root_dirs] - if zip_file is None: - datetime_str = str(datetime.now()).replace(" ", "_").replace(":", "_") - host_name = os.getenv('HF_HOSTNAME', 'emptyhost') - zip_file = "data_%s_%s.zip" % (datetime_str, host_name) - assert root_dirs is not None - base_path = os.path.dirname(zip_file) - if not os.path.isdir(base_path) and os.path.dirname(zip_file): - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - zip_file = os.path.join(base_path, os.path.basename(zip_file)) - with zipfile.ZipFile(zip_file, "w") as expt_zip: - for root_dir in root_dirs: - if root_dir is None: - continue - for root, d, files in os.walk(root_dir): - for file in files: - file_to_archive = os.path.join(root, file) - assert os.path.exists(file_to_archive) - path_to_archive = os.path.relpath(file_to_archive, base_dir) - expt_zip.write(filename=file_to_archive, arcname=path_to_archive) - return zip_file, zip_file - - -def save_generate_output(prompt=None, output=None, base_model=None, save_dir=None, where_from='unknown where from', - extra_dict={}, error='', extra='', which_api='', valid_key=None, - h2ogpt_key='', return_dict=False): - if not save_dir: - return - try: - return _save_generate_output(prompt=prompt, output=output, base_model=base_model, save_dir=save_dir, - where_from=where_from, extra_dict=extra_dict, error=error, extra=extra, - which_api=which_api, valid_key=valid_key, h2ogpt_key=h2ogpt_key, - return_dict=return_dict) - except Exception as e: - traceback.print_exc() - print('Exception in saving: %s' % str(e)) - - -def _save_generate_output(prompt=None, output=None, base_model=None, save_dir=None, where_from='unknown where from', - extra_dict={}, error='', extra='', which_api='', - valid_key=None, h2ogpt_key='', - return_dict=False): - """ - Save conversation to .json, row by row. - json_file_path is path to final JSON file. If not in ., then will attempt to make directories. 
- Appends if file exists - """ - prompt = '<not set>' if prompt is None else prompt - output = '<not set>' if output is None else output - - # tokenize at end if need to, so doesn't block generation in multi-generator case - if extra_dict.get('ntokens') is None: - extra_dict['ntokens'] = FakeTokenizer().num_tokens_from_string(output) - # only do below if didn't already compute ntokens, else assume also computed rate - extra_dict['tokens_persecond'] = extra_dict['ntokens'] / extra_dict['t_generate'] - - dict_to_save = dict(prompt=prompt, text=output, time=time.ctime(), - base_model=base_model, - where_from=where_from, - error=error, - extra=extra, - which_api=which_api, - valid_key=valid_key, - h2ogpt_key=h2ogpt_key, - ) - dict_to_save.update(extra_dict) - - if return_dict: - return dict_to_save - - if os.path.exists(save_dir) and not os.path.isdir(save_dir): - raise RuntimeError("save_dir already exists and is not a directory!") - makedirs(save_dir, exist_ok=True) # already should be made, can't change at this point - import json - with filelock.FileLock("%s.lock" % os.path.basename(save_dir)): - # lock logging in case have concurrency - with open(os.path.join(save_dir, "history.json"), "a") as f: - # just add [ at start, and ] at end, and have proper JSON dataset - f.write( - " " + json.dumps( - dict_to_save - ) + ",\n" - ) - - -def s3up(filename): - try: - return _s3up(filename) - except Exception as e: - traceback.print_exc() - print('Exception for file %s in s3up: %s' % (filename, str(e))) - return "Failed to upload %s: Error: %s" % (filename, str(e)) - - -def _s3up(filename): - import boto3 - - aws_access_key_id = os.getenv('AWS_SERVER_PUBLIC_KEY') - aws_secret_access_key = os.getenv('AWS_SERVER_SECRET_KEY') - bucket = os.getenv('AWS_BUCKET') - assert aws_access_key_id, "Set AWS key" - assert aws_secret_access_key, "Set AWS secret" - assert bucket, "Set AWS Bucket" - - s3 = boto3.client('s3', - aws_access_key_id=os.getenv('AWS_SERVER_PUBLIC_KEY'), - aws_secret_access_key=os.getenv('AWS_SERVER_SECRET_KEY'), - ) - ret = s3.upload_file( - Filename=filename, - Bucket=os.getenv('AWS_BUCKET'), - Key=filename, - ) - if ret in [None, '']: - return "Successfully uploaded %s" % filename - - -def get_githash(): - try: - githash = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE).stdout.decode('utf-8')[0:-1] - except: - githash = '' - return githash - - -def copy_code(run_id): - """ - copy code to track changes - :param run_id: - :return: - """ - rnd_num = str(random.randint(0, 2 ** 31)) - run_id = 'run_' + str(run_id) - os.makedirs(run_id, exist_ok=True) - me_full = os.path.join(pathlib.Path(__file__).parent.resolve(), __file__) - me_file = os.path.basename(__file__) - new_me = os.path.join(run_id, me_file + '_' + get_githash()) - if os.path.isfile(new_me): - new_me = os.path.join(run_id, me_file + '_' + get_githash() + '_' + rnd_num) - shutil.copy(me_full, new_me) - else: - shutil.copy(me_full, new_me) - - -class NullContext(threading.local): - """No-op context manager, executes block without doing any additional processing. - - Used as a stand-in if a particular block of code is only sometimes - used with a normal context manager: - """ - - def __init__(self, *args, **kwargs): - pass - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - self.finally_act() - - def finally_act(self): - pass - - -def wrapped_partial(func, *args, **kwargs): - """ - Give partial properties of normal function, like __name__ attribute etc. 
- :param func: - :param args: - :param kwargs: - :return: - """ - partial_func = functools.partial(func, *args, **kwargs) - functools.update_wrapper(partial_func, func) - return partial_func - - -class ThreadException(Exception): - pass - - -class EThread(threading.Thread): - # Function that raises the custom exception - def __init__(self, group=None, target=None, name=None, - args=(), kwargs=None, *, daemon=None, streamer=None, bucket=None): - self.bucket = bucket - self.streamer = streamer - self.exc = None - self._return = None - super().__init__(group=group, target=target, name=name, args=args, kwargs=kwargs, daemon=daemon) - - def run(self): - # Variable that stores the exception, if raised by someFunction - try: - if self._target is not None: - self._return = self._target(*self._args, **self._kwargs) - except BaseException as e: - print("thread exception: %s" % str(sys.exc_info())) - self.bucket.put(sys.exc_info()) - self.exc = e - if self.streamer: - print("make stop: %s" % str(sys.exc_info()), flush=True) - self.streamer.do_stop = True - finally: - # Avoid a refcycle if the thread is running a function with - # an argument that has a member that points to the thread. - del self._target, self._args, self._kwargs - - def join(self, timeout=None): - threading.Thread.join(self) - # Since join() returns in caller thread - # we re-raise the caught exception - # if any was caught - if self.exc: - raise self.exc - return self._return - - -def import_matplotlib(): - import matplotlib - matplotlib.use('agg') - # KEEP THESE HERE! START - import matplotlib.pyplot as plt - import pandas as pd - # to avoid dlopen deadlock in fork - import pandas.core.computation.expressions as pd_expressions - import pandas._libs.groupby as pd_libgroupby - import pandas._libs.reduction as pd_libreduction - import pandas.core.algorithms as pd_algorithms - import pandas.core.common as pd_com - import numpy as np - # KEEP THESE HERE! END - - -def get_sha(value): - return hashlib.md5(str(value).encode('utf-8')).hexdigest() - - -def sanitize_filename(name): - """ - Sanitize file *base* names. 
- :param name: name to sanitize - :return: - """ - bad_chars = ['[', ']', ',', '/', '\\', '\\w', '\\s', '-', '+', '\"', '\'', '>', '<', ' ', '=', ')', '(', ':', '^'] - for char in bad_chars: - name = name.replace(char, "_") - - length = len(name) - file_length_limit = 250 # bit smaller than 256 for safety - sha_length = 32 - real_length_limit = file_length_limit - (sha_length + 2) - if length > file_length_limit: - sha = get_sha(name) - half_real_length_limit = max(1, int(real_length_limit / 2)) - name = name[0:half_real_length_limit] + "_" + sha + "_" + name[length - half_real_length_limit:length] - - return name - - -def shutil_rmtree(*args, **kwargs): - return shutil.rmtree(*args, **kwargs) - - -def remove(path: str): - try: - if path is not None and os.path.exists(path): - if os.path.isdir(path): - shutil_rmtree(path, ignore_errors=True) - else: - with contextlib.suppress(FileNotFoundError): - os.remove(path) - except: - pass - - -def makedirs(path, exist_ok=True, tmp_ok=False, use_base=False): - """ - Avoid some inefficiency in os.makedirs() - :param path: - :param exist_ok: - :param tmp_ok: use /tmp if can't write locally - :param use_base: - :return: - """ - if path is None: - return path - # if base path set, make relative to that, unless user_path absolute path - if use_base: - if os.path.normpath(path) == os.path.normpath(os.path.abspath(path)): - pass - else: - if os.getenv('H2OGPT_BASE_PATH') is not None: - base_dir = os.path.normpath(os.getenv('H2OGPT_BASE_PATH')) - path = os.path.normpath(path) - if not path.startswith(base_dir): - path = os.path.join(os.getenv('H2OGPT_BASE_PATH', ''), path) - path = os.path.normpath(path) - - if os.path.isdir(path) and os.path.exists(path): - assert exist_ok, "Path already exists" - return path - try: - os.makedirs(path, exist_ok=exist_ok) - return path - except FileExistsError: - # e.g. 
soft link - return path - except PermissionError: - if tmp_ok: - path0 = path - path = os.path.join('/tmp/', path) - print("Permission denied to %s, using %s instead" % (path0, path), flush=True) - os.makedirs(path, exist_ok=exist_ok) - return path - else: - raise - - -def atomic_move_simple(src, dst): - try: - shutil.move(src, dst) - except (shutil.Error, FileExistsError): - pass - remove(src) - - -def download_simple(url, dest=None): - if dest is None: - dest = os.path.basename(url) - base_path = os.path.dirname(dest) - if base_path: # else local path - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - dest = os.path.join(base_path, os.path.basename(dest)) - - if os.path.isfile(dest): - print("Already have %s from url %s, delete file if invalid" % (dest, str(url)), flush=True) - return dest - - print("BEGIN get url %s" % str(url), flush=True) - if url.startswith("file://"): - from requests_file import FileAdapter - s = requests.Session() - s.mount('file://', FileAdapter()) - url_data = s.get(url, stream=True) - else: - url_data = requests.get(url, stream=True) - print("GOT url %s" % str(url), flush=True) - - if url_data.status_code != requests.codes.ok: - msg = "Cannot get url %s, code: %s, reason: %s" % ( - str(url), - str(url_data.status_code), - str(url_data.reason), - ) - raise requests.exceptions.RequestException(msg) - url_data.raw.decode_content = True - - uuid_tmp = str(uuid.uuid4())[:6] - dest_tmp = dest + "_dl_" + uuid_tmp + ".tmp" - with open(dest_tmp, "wb") as f: - shutil.copyfileobj(url_data.raw, f) - atomic_move_simple(dest_tmp, dest) - print("DONE url %s" % str(url), flush=True) - return dest - - -def download(url, dest=None, dest_path=None): - if dest_path is not None: - dest = os.path.join(dest_path, os.path.basename(url)) - if os.path.isfile(dest): - print("already downloaded %s -> %s" % (url, dest)) - return dest - elif dest is not None: - if os.path.exists(dest): - print("already downloaded %s -> %s" % (url, dest)) - return dest - else: - uuid_tmp = "dl2_" + str(uuid.uuid4())[:6] - dest = uuid_tmp + os.path.basename(url) - - print("downloading %s to %s" % (url, dest)) - - if url.startswith("file://"): - from requests_file import FileAdapter - s = requests.Session() - s.mount('file://', FileAdapter()) - url_data = s.get(url, stream=True) - else: - url_data = requests.get(url, stream=True) - - if url_data.status_code != requests.codes.ok: - msg = "Cannot get url %s, code: %s, reason: %s" % ( - str(url), str(url_data.status_code), str(url_data.reason)) - raise requests.exceptions.RequestException(msg) - url_data.raw.decode_content = True - dirname = os.path.dirname(dest) - if dirname != "" and not os.path.isdir(dirname): - base_path = os.path.dirname(dest) - base_path = makedirs(base_path, exist_ok=True, tmp_ok=True, use_base=True) - dest = os.path.join(base_path, os.path.basename(dest)) - uuid_tmp = "dl3_" + str(uuid.uuid4())[:6] - dest_tmp = dest + "_" + uuid_tmp + ".tmp" - with open(dest_tmp, 'wb') as f: - shutil.copyfileobj(url_data.raw, f) - try: - shutil.move(dest_tmp, dest) - except FileExistsError: - pass - remove(dest_tmp) - return dest - - -def get_doc(x): - return x.page_content - - -def get_source(x): - return x.metadata.get('source', "UNKNOWN SOURCE") - - -def get_accordion(x, font_size=2, head_acc=50): - title = x.page_content[:head_acc].replace("\n", ' ').replace("<br>", ' ').replace("<p>", ' ').replace("\r", ' ') - content = x.page_content - return f"""<details><summary><font size="{font_size}">{title}</font></summary><font 
size="{font_size}">{content}</font></details>""" - - -def get_url(x, from_str=False, short_name=False, font_size=2): - if not from_str: - source = x.metadata['source'] - else: - source = x - if short_name: - source_name = get_short_name(source) - else: - source_name = source - if source.startswith('http://') or source.startswith('https://'): - return """<font size="%s"><a href="%s" target="_blank" rel="noopener noreferrer">%s</a></font>""" % ( - font_size, source, source_name) - elif '<a href=' not in source: - return """<font size="%s"><a href="file/%s" target="_blank" rel="noopener noreferrer">%s</a></font>""" % ( - font_size, source, source_name) - else: - # already filled - return source - - -def get_short_name(name, maxl=50): - if name is None: - return '' - length = len(name) - if length > maxl: - allow_length = maxl - 3 - half_allowed = max(1, int(allow_length / 2)) - name = name[0:half_allowed] + "..." + name[length - half_allowed:length] - return name - - -def cuda_vis_check(total_gpus): - """Helper function to count GPUs by environment variable - Stolen from Jon's h2o4gpu utils - """ - cudavis = os.getenv("CUDA_VISIBLE_DEVICES") - which_gpus = [] - if cudavis is not None: - # prune away white-space, non-numerics, - # except commas for simple checking - cudavis = "".join(cudavis.split()) - import re - cudavis = re.sub("[^0-9,]", "", cudavis) - - lencudavis = len(cudavis) - if lencudavis == 0: - total_gpus = 0 - else: - total_gpus = min( - total_gpus, - os.getenv("CUDA_VISIBLE_DEVICES").count(",") + 1) - which_gpus = os.getenv("CUDA_VISIBLE_DEVICES").split(",") - which_gpus = [int(x) for x in which_gpus] - else: - which_gpus = list(range(0, total_gpus)) - - return total_gpus, which_gpus - - -def get_ngpus_vis(raise_if_exception=True): - ngpus_vis1 = 0 - - shell = False - if shell: - cmd = "nvidia-smi -L 2> /dev/null" - else: - cmd = ["nvidia-smi", "-L"] - - try: - timeout = 5 * 3 - o = subprocess.check_output(cmd, shell=shell, timeout=timeout) - lines = o.decode("utf-8").splitlines() - ngpus_vis1 = 0 - for line in lines: - if 'Failed to initialize NVML' not in line: - ngpus_vis1 += 1 - except (FileNotFoundError, subprocess.CalledProcessError, OSError): - # GPU systems might not have nvidia-smi, so can't fail - pass - except subprocess.TimeoutExpired as e: - print('Failed get_ngpus_vis: %s' % str(e)) - if raise_if_exception: - raise - - ngpus_vis1, which_gpus = cuda_vis_check(ngpus_vis1) - return ngpus_vis1 - - -def get_mem_gpus(raise_if_exception=True, ngpus=None): - totalmem_gpus1 = 0 - usedmem_gpus1 = 0 - freemem_gpus1 = 0 - - if ngpus == 0: - return totalmem_gpus1, usedmem_gpus1, freemem_gpus1 - - try: - cmd = "nvidia-smi -q 2> /dev/null | grep -A 3 'FB Memory Usage'" - o = subprocess.check_output(cmd, shell=True, timeout=15) - lines = o.decode("utf-8").splitlines() - for line in lines: - if 'Total' in line: - totalmem_gpus1 += int(line.split()[2]) * 1024 ** 2 - if 'Used' in line: - usedmem_gpus1 += int(line.split()[2]) * 1024 ** 2 - if 'Free' in line: - freemem_gpus1 += int(line.split()[2]) * 1024 ** 2 - except (FileNotFoundError, subprocess.CalledProcessError, OSError): - # GPU systems might not have nvidia-smi, so can't fail - pass - except subprocess.TimeoutExpired as e: - print('Failed get_mem_gpus: %s' % str(e)) - if raise_if_exception: - raise - - return totalmem_gpus1, usedmem_gpus1, freemem_gpus1 - - -class ForkContext(threading.local): - """ - Set context for forking - Ensures state is returned once done - """ - - def __init__(self, args=None, kwargs=None, 
forkdata_capable=True): - """ - :param args: - :param kwargs: - :param forkdata_capable: whether fork is forkdata capable and will use copy-on-write forking of args/kwargs - """ - self.forkdata_capable = forkdata_capable - if self.forkdata_capable: - self.has_args = args is not None - self.has_kwargs = kwargs is not None - forkdatacontext.args = args - forkdatacontext.kwargs = kwargs - else: - self.has_args = False - self.has_kwargs = False - - def __enter__(self): - try: - # flush all outputs so doesn't happen during fork -- don't print/log inside ForkContext contexts! - sys.stdout.flush() - sys.stderr.flush() - except BaseException as e: - # exit not called if exception, and don't want to leave forkdatacontext filled in that case - print("ForkContext failure on enter: %s" % str(e)) - self.finally_act() - raise - return self - - def __exit__(self, exc_type, exc_value, exc_traceback): - self.finally_act() - - def finally_act(self): - """ - Done when exception hit or exit is reached in context - first reset forkdatacontext as crucial to have reset even if later 2 calls fail - :return: None - """ - if self.forkdata_capable and (self.has_args or self.has_kwargs): - forkdatacontext._reset() - - -class _ForkDataContext(threading.local): - def __init__( - self, - args=None, - kwargs=None, - ): - """ - Global context for fork to carry data to subprocess instead of relying upon copy/pickle/serialization - - :param args: args - :param kwargs: kwargs - """ - assert isinstance(args, (tuple, type(None))) - assert isinstance(kwargs, (dict, type(None))) - self.__args = args - self.__kwargs = kwargs - - @property - def args(self) -> Tuple: - """returns args""" - return self.__args - - @args.setter - def args(self, args): - if self.__args is not None: - raise AttributeError( - "args cannot be overwritten: %s %s" % (str(self.__args), str(self.__kwargs)) - ) - - self.__args = args - - @property - def kwargs(self) -> Dict: - """returns kwargs""" - return self.__kwargs - - @kwargs.setter - def kwargs(self, kwargs): - if self.__kwargs is not None: - raise AttributeError( - "kwargs cannot be overwritten: %s %s" % (str(self.__args), str(self.__kwargs)) - ) - - self.__kwargs = kwargs - - def _reset(self): - """Reset fork arg-kwarg context to default values""" - self.__args = None - self.__kwargs = None - - def get_args_kwargs(self, func, args, kwargs) -> Tuple[Callable, Tuple, Dict]: - if self.__args: - args = self.__args[1:] - if not func: - assert len(self.__args) > 0, "if have no func, must have in args" - func = self.__args[0] # should always be there - if self.__kwargs: - kwargs = self.__kwargs - try: - return func, args, kwargs - finally: - forkdatacontext._reset() - - @staticmethod - def get_args_kwargs_for_traced_func(func, args, kwargs): - """ - Return args/kwargs out of forkdatacontext when using copy-on-write way of passing args/kwargs - :param func: actual function ran by _traced_func, which itself is directly what mppool treats as function - :param args: - :param kwargs: - :return: func, args, kwargs from forkdatacontext if used, else originals - """ - # first 3 lines are debug - func_was_None = func is None - args_was_None_or_empty = args is None or len(args) == 0 - kwargs_was_None_or_empty = kwargs is None or len(kwargs) == 0 - - forkdatacontext_args_was_None = forkdatacontext.args is None - forkdatacontext_kwargs_was_None = forkdatacontext.kwargs is None - func, args, kwargs = forkdatacontext.get_args_kwargs(func, args, kwargs) - using_forkdatacontext = func_was_None and func is not None # pulled 
func out of forkdatacontext.__args[0] - assert forkdatacontext.args is None, "forkdatacontext.args should be None after get_args_kwargs" - assert forkdatacontext.kwargs is None, "forkdatacontext.kwargs should be None after get_args_kwargs" - - proc_type = kwargs.get('proc_type', 'SUBPROCESS') - if using_forkdatacontext: - assert proc_type == "SUBPROCESS" or proc_type == "SUBPROCESS" - if proc_type == "NORMAL": - assert forkdatacontext_args_was_None, "if no fork, expect forkdatacontext.args None entering _traced_func" - assert forkdatacontext_kwargs_was_None, "if no fork, expect forkdatacontext.kwargs None entering _traced_func" - assert func is not None, "function should not be None, indicates original args[0] was None or args was None" - - return func, args, kwargs - - -forkdatacontext = _ForkDataContext() - - -# Add user info -username = getpass.getuser() -current_working_directory = os.getcwd() -operating_system = platform.system() - - -def _traced_func(func, *args, **kwargs): - func, args, kwargs = forkdatacontext.get_args_kwargs_for_traced_func(func, args, kwargs) - return func(*args, **kwargs) - - -def call_subprocess_onetask(func, args=None, kwargs=None): - if platform.system() in ['Darwin', 'Windows']: - return func(*args, **kwargs) - if isinstance(args, list): - args = tuple(args) - if args is None: - args = () - if kwargs is None: - kwargs = {} - args = list(args) - args = [func] + args - args = tuple(args) - with ForkContext(args=args, kwargs=kwargs): - args = (None,) - kwargs = {} - with ProcessPoolExecutor(max_workers=1) as executor: - future = executor.submit(_traced_func, *args, **kwargs) - return future.result() - - -class ProgressParallel(Parallel): - def __init__(self, use_tqdm=True, total=None, *args, **kwargs): - self._use_tqdm = use_tqdm - self._total = total - super().__init__(*args, **kwargs) - - def __call__(self, *args, **kwargs): - with tqdm(disable=not self._use_tqdm, total=self._total) as self._pbar: - return Parallel.__call__(self, *args, **kwargs) - - def print_progress(self): - if self._total is None: - self._pbar.total = self.n_dispatched_tasks - self._pbar.n = self.n_completed_tasks - self._pbar.refresh() - - -def get_kwargs(func, exclude_names=None, **kwargs): - func_names = list(inspect.signature(func).parameters) - missing_kwargs = [x for x in func_names if x not in kwargs] - if exclude_names: - for k in exclude_names: - if k in missing_kwargs: - missing_kwargs.remove(k) - if k in func_names: - func_names.remove(k) - assert not missing_kwargs, "Missing %s" % missing_kwargs - kwargs = {k: v for k, v in kwargs.items() if k in func_names} - return kwargs - - -from importlib.metadata import distribution, PackageNotFoundError - -have_faiss = False - -try: - assert distribution('faiss') is not None - have_faiss = True -except (PackageNotFoundError, AssertionError): - pass -try: - assert distribution('faiss_gpu') is not None - have_faiss = True -except (PackageNotFoundError, AssertionError): - pass -try: - assert distribution('faiss_cpu') is not None - have_faiss = True -except (PackageNotFoundError, AssertionError): - pass - -have_chromamigdb = False -try: - assert distribution('chromamigdb') is not None - have_chromamigdb = True -except (PackageNotFoundError, AssertionError): - pass - - -have_serpapi = False -try: - assert distribution('google-search-results') is not None - have_serpapi = True -except (PackageNotFoundError, AssertionError): - pass - - -def hash_file(file): - try: - import hashlib - - # BUF_SIZE is totally arbitrary, change for your app! 
- BUF_SIZE = 65536 # lets read stuff in 64kb chunks! - - md5 = hashlib.md5() - # sha1 = hashlib.sha1() - - with open(file, 'rb') as f: - while True: - data = f.read(BUF_SIZE) - if not data: - break - md5.update(data) - # sha1.update(data) - except BaseException as e: - print("Cannot hash %s due to %s" % (file, str(e))) - traceback.print_exc() - return '' - return md5.hexdigest() - - -def start_faulthandler(): - # If hit server or any subprocess with signal SIGUSR1, it'll print out all threads stack trace, but wont't quit or coredump - # If more than one fork tries to write at same time, then looks corrupted. - import faulthandler - - # SIGUSR1 in h2oai/__init__.py as well - faulthandler.enable() - if hasattr(faulthandler, 'register'): - # windows/mac - import signal - faulthandler.register(signal.SIGUSR1) - - -def get_hf_server(inference_server): - inf_split = inference_server.split(" ") - assert len(inf_split) == 1 or len(inf_split) == 3 - inference_server = inf_split[0] - if len(inf_split) == 3: - headers = {"authorization": "%s %s" % (inf_split[1], inf_split[2])} - else: - headers = None - return inference_server, headers - - -class FakeTokenizer: - """ - 1) For keeping track of model_max_length - 2) For when model doesn't directly expose tokenizer but need to count tokens - """ - - def __init__(self, model_max_length=2048, encoding_name="cl100k_base"): - # dont' push limit, since if using fake tokenizer, only estimate, and seen underestimates by order 250 - self.model_max_length = model_max_length - 250 - self.encoding_name = encoding_name - # The first time this runs, it will require an internet connection to download. Later runs won't need an internet connection. - import tiktoken - self.encoding = tiktoken.get_encoding(self.encoding_name) - - def encode(self, x, *args, return_tensors="pt", **kwargs): - input_ids = self.encoding.encode(x, disallowed_special=()) - if return_tensors == 'pt' and isinstance(input_ids, list): - import torch - input_ids = torch.tensor(input_ids) - return dict(input_ids=input_ids) - - def decode(self, x, *args, **kwargs): - # input is input_ids[0] form - return self.encoding.decode(x) - - def num_tokens_from_string(self, prompt: str) -> int: - """Returns the number of tokens in a text string.""" - num_tokens = len(self.encode(prompt)['input_ids']) - return num_tokens - - def __call__(self, x, *args, **kwargs): - return self.encode(x, *args, **kwargs) - - -def get_local_ip(): - import socket - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - try: - # doesn't even have to be reachable - s.connect(('10.255.255.255', 1)) - IP = s.getsockname()[0] - except Exception: - IP = '127.0.0.1' - finally: - s.close() - return IP - - -try: - assert distribution('langchain') is not None - have_langchain = True -except (PackageNotFoundError, AssertionError): - have_langchain = False - -import distutils.spawn - -have_tesseract = distutils.spawn.find_executable("tesseract") -have_libreoffice = distutils.spawn.find_executable("libreoffice") -try: - from weasyprint import HTML - import doctr - have_doctr = True -except: - have_doctr = False - -try: - assert distribution('arxiv') is not None - assert distribution('pymupdf') is not None - have_arxiv = True -except (PackageNotFoundError, AssertionError): - have_arxiv = False - -try: - assert distribution('pymupdf') is not None - have_pymupdf = True -except (PackageNotFoundError, AssertionError): - have_pymupdf = False - -try: - assert distribution('selenium') is not None - have_selenium = True -except (PackageNotFoundError, 
AssertionError): - have_selenium = False - -try: - assert distribution('pillow') is not None - have_pillow = True -except (PackageNotFoundError, AssertionError): - have_pillow = False - -try: - assert distribution('playwright') is not None - have_playwright = True -except (PackageNotFoundError, AssertionError): - have_playwright = False - -try: - assert distribution('jq') is not None - have_jq = True -except (PackageNotFoundError, AssertionError): - have_jq = False - -only_unstructured_urls = os.environ.get("ONLY_UNSTRUCTURED_URLS", "0") == "1" -only_selenium = os.environ.get("ONLY_SELENIUM", "0") == "1" -only_playwright = os.environ.get("ONLY_PLAYWRIGHT", "0") == "1" - - -def set_openai(inference_server): - if inference_server.startswith('vllm'): - import openai_vllm - openai_vllm.api_key = "EMPTY" - inf_type = inference_server.split(':')[0] - ip_vllm = inference_server.split(':')[1] - port_vllm = inference_server.split(':')[2] - openai_vllm.api_base = f"http://{ip_vllm}:{port_vllm}/v1" - return openai_vllm, inf_type, None, None, None - else: - import openai - openai.api_key = os.getenv("OPENAI_API_KEY") - openai.api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1") - - base_url = None - deployment_type = None - api_version = None - inf_type = inference_server.split(':')[0] - if len(inference_server.split(':')) >= 2: - deployment_type = inference_server.split(':')[1] - if len(inference_server.split(':')) >= 3: - base_url = inference_server.split(':')[2] - base_url = 'https://' + base_url - if len(inference_server.split(':')) >= 4: - api_version = inference_server.split(':')[3] - - if deployment_type == 'None': - deployment_type = None - if base_url == 'None': - base_url = None - if base_url == 'None': - base_url = None - return openai, inf_type, deployment_type, base_url, api_version - - -def get_list_or_str(x): - if isinstance(x, list): - return x - elif isinstance(x, str): - try: - x1 = ast.literal_eval(x) - assert isinstance(x1, list) - return x1 - except: - return x - else: - return x - - -def deepcopy_by_pickle_object(object): - """ - Faster deepcopy, can only work on things that are picklable. Naive Deepcopy is more general. 
- Same method as for class Individual - :param object: - :return: - """ - gc.disable() - new_object = pickle.loads(pickle.dumps(object, -1)) - gc.enable() - return new_object - - -def url_alive(url): - try: - response = requests.head(url) - except Exception as e: - return False - else: - if response.status_code in [200, 301, 302]: - return True - else: - return False - - -def dict_to_html(x, small=True, api=False): - df = pd.DataFrame(x.items(), columns=['Key', 'Value']) - df.index = df.index + 1 - df.index.name = 'index' - if api: - return tabulate.tabulate(df, headers='keys') - else: - res = tabulate.tabulate(df, headers='keys', tablefmt='unsafehtml') - if small: - return "<small>" + res + "</small>" - else: - return res - - -def text_to_html(x, api=False): - if api: - return x - return """ -<style> - pre { - overflow-x: auto; - white-space: pre-wrap; - white-space: -moz-pre-wrap; - white-space: -pre-wrap; - white-space: -o-pre-wrap; - word-wrap: break-word; - } - </style> -<pre> -%s -</pre> -""" % x - - -def lg_to_gr( - **kwargs, -): - # translate: - import torch - n_gpus = torch.cuda.device_count() if torch.cuda.is_available() else 0 - n_gpus, _ = cuda_vis_check(n_gpus) - - image_loaders_options = ['Caption'] - if n_gpus != 0: - image_loaders_options.extend(['CaptionBlip2', 'Pix2Struct']) - if have_tesseract: - image_loaders_options.append('OCR') - if have_doctr: - image_loaders_options.append('DocTR') - - image_loaders_options0 = [] - if have_tesseract and kwargs['enable_ocr']: - image_loaders_options0.append('OCR') - if have_doctr and kwargs['enable_doctr']: - image_loaders_options0.append('DocTR') - if kwargs['enable_captions']: - if kwargs['max_quality'] and n_gpus > 0: - # BLIP2 only on GPU - image_loaders_options0.append('CaptionBlip2') - else: - image_loaders_options0.append('Caption') - - pdf_loaders_options = ['PyMuPDF', 'Unstructured', 'PyPDF', 'TryHTML'] - if have_tesseract: - pdf_loaders_options.append('OCR') - if have_doctr: - pdf_loaders_options.append('DocTR') - - pdf_loaders_options0 = [] - if kwargs['use_pymupdf'] in [True, 'auto', 'on']: - pdf_loaders_options0.append('PyMuPDF') - if kwargs['enable_pdf_ocr'] in [True, 'on']: - pdf_loaders_options0.append('OCR') - if have_doctr and kwargs['enable_pdf_doctr'] in [True, 'on']: - pdf_loaders_options0.append('DocTR') - - url_loaders_options = [] - if only_unstructured_urls: - url_loaders_options.append('Unstructured') - elif have_selenium and only_selenium: - url_loaders_options.append('Selenium') - elif have_playwright and only_playwright: - url_loaders_options.append('PlayWright') - else: - url_loaders_options.append('Unstructured') - if have_selenium: - url_loaders_options.append('Selenium') - if have_playwright: - url_loaders_options.append('PlayWright') - url_loaders_options0 = [url_loaders_options[0]] - - assert set(image_loaders_options0).issubset(image_loaders_options) - assert set(pdf_loaders_options0).issubset(pdf_loaders_options) - assert set(url_loaders_options0).issubset(url_loaders_options) - - return image_loaders_options0, image_loaders_options, \ - pdf_loaders_options0, pdf_loaders_options, \ - url_loaders_options0, url_loaders_options - - -def fix_json(s): - - # Attempt to parse the string as-is. - try: - return json.loads(s) - except json.JSONDecodeError: - pass - - # Initialize variables. - new_s = "" - stack = [] - is_inside_string = False - escaped = False - - # Process each character in the string one at a time. 
- for char in s: - if is_inside_string: - if char == '"' and not escaped: - is_inside_string = False - elif char == '\n' and not escaped: - char = '\\n' # Replace the newline character with the escape sequence. - elif char == '\\': - escaped = not escaped - else: - escaped = False - else: - if char == '"': - is_inside_string = True - escaped = False - elif char == '{': - stack.append('}') - elif char == '[': - stack.append(']') - elif char == '}' or char == ']': - if stack and stack[-1] == char: - stack.pop() - else: - # Mismatched closing character; the input is malformed. - return None - - # Append the processed character to the new string. - new_s += char - - # If we're still inside a string at the end of processing, we need to close the string. - if is_inside_string: - new_s += '"' - - # Close any remaining open structures in the reverse order that they were opened. - for closing_char in reversed(stack): - new_s += closing_char - - # Attempt to parse the modified string as JSON. - try: - return json.loads(new_s) - except json.JSONDecodeError: - # If we still can't parse the string as JSON, return None to indicate failure. - return None - - -def wrap_in_try_except(code): - # Add import traceback - code = "import traceback\n" + code - - # Parse the input code into an AST - parsed_code = ast.parse(code) - - # Wrap the entire code's AST in a single try-except block - try_except = ast.Try( - body=parsed_code.body, - handlers=[ - ast.ExceptHandler( - type=ast.Name(id="Exception", ctx=ast.Load()), - name=None, - body=[ - ast.Expr( - value=ast.Call( - func=ast.Attribute(value=ast.Name(id="traceback", ctx=ast.Load()), attr="print_exc", ctx=ast.Load()), - args=[], - keywords=[] - ) - ), - ] - ) - ], - orelse=[], - finalbody=[] - ) - - # Assign the try-except block as the new body - parsed_code.body = [try_except] - - # Convert the modified AST back to source code - return ast.unparse(parsed_code) - - -def enqueue_output(file, queue): - for line in iter(file.readline, ''): - queue.put(line) - file.close() - - -def read_popen_pipes(p): - - with ThreadPoolExecutor(2) as pool: - q_stdout, q_stderr = Queue(), Queue() - - pool.submit(enqueue_output, p.stdout, q_stdout) - pool.submit(enqueue_output, p.stderr, q_stderr) - - while True: - - if p.poll() is not None and q_stdout.empty() and q_stderr.empty(): - break - - out_line = err_line = '' - - try: - out_line = q_stdout.get_nowait() - except Empty: - pass - try: - err_line = q_stderr.get_nowait() - except Empty: - pass - - yield out_line, err_line - - -def start_process(cmd): - start_cmd = sys.executable + " -i -q -u" - print_cmd = 'print("{}")' - cmd = [start_cmd] + [cmd] - - process = subprocess.Popen(cmd, stdout=subprocess.PIPE) - for c in iter(lambda: process.stdout.read(1), b''): - sys.stdout.write(c) - - -def str_to_list(x, allow_none=False): - if isinstance(x, str): - if len(x.strip()) > 0: - if x.strip().startswith('['): - x = ast.literal_eval(x.strip()) - else: - raise ValueError("Invalid str_to_list for %s" % x) - else: - x = [] - elif x is None and not allow_none: - x = [] - if allow_none: - assert isinstance(x, (type(None), list)) - else: - assert isinstance(x, list) - return x - - -def str_to_dict(x): - if isinstance(x, str): - if len(x.strip()) > 0: - if x.strip().startswith('{'): - x = ast.literal_eval(x.strip()) - else: - raise ValueError("Invalid str_to_dict for %s" % x) - else: - x = {} - elif x is None: - x = {} - assert isinstance(x, dict) - return x - - -def get_token_count(x, tokenizer, token_count_fun=None): - # NOTE: Somewhat 
duplicates H2OTextGenerationPipeline.get_token_count() - # handle ambiguity in if get dict or list - if tokenizer: - if hasattr(tokenizer, 'encode'): - template_tokens = tokenizer.encode(x) - else: - template_tokens = tokenizer(x) - if isinstance(template_tokens, dict) and 'input_ids' in template_tokens: - n_tokens = len(tokenizer.encode(x)['input_ids']) - else: - n_tokens = len(tokenizer.encode(x)) - elif token_count_fun is not None: - assert callable(token_count_fun) - n_tokens = token_count_fun(x) - else: - tokenizer = FakeTokenizer() - n_tokens = tokenizer.num_tokens_from_string(x) - return n_tokens - - -def reverse_ucurve_list(lst): - if not lst: - return [] - if len(lst) == 1: - return lst - if len(lst) == 2: - return [lst[1], lst[0]] - - front_list = [] - end_list = [] - - for i, item in enumerate(lst): - if i % 2 == 0: - end_list.append(item) - else: - front_list.append(item) - - return front_list + end_list[::-1] - - -def undo_reverse_ucurve_list(lst): - if not lst: - return [] - if len(lst) == 1: - return lst - if len(lst) == 2: - return [lst[1], lst[0]] - - # Split the list into two halves: the first half and the second half (reversed) - mid = len(lst) // 2 - first_half = lst[:mid] - second_half = lst[mid:][::-1] - - # Merge the two halves by taking elements alternatively from the second half and then the first half - result = [] - for i in range(mid): - result.append(second_half[i]) - result.append(first_half[i]) - - # If the length of the list is odd, append the last element of the second half - if len(lst) % 2 != 0: - result.append(second_half[-1]) - - return result diff --git a/spaces/hamzapehlivan/StyleRes/models/dnnlib/util.py b/spaces/hamzapehlivan/StyleRes/models/dnnlib/util.py deleted file mode 100644 index 6afaab2a78fdedbd006a1c226cf5ff998169771f..0000000000000000000000000000000000000000 --- a/spaces/hamzapehlivan/StyleRes/models/dnnlib/util.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -"""Miscellaneous utility classes and functions.""" - -# Util classes -# ------------------------------------------------------------------------------------------ - -class EasyDict(dict): - """Convenience class that behaves like a dict but allows access with the attribute syntax.""" - - def __getattr__(self, name: str): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - self[name] = value - - def __delattr__(self, name): - del self[name] - - diff --git a/spaces/harley001/anime-remove-background/README.md b/spaces/harley001/anime-remove-background/README.md deleted file mode 100644 index 1ba3cb5ea0e994e246d57b7d62b8aa5a6331901c..0000000000000000000000000000000000000000 --- a/spaces/harley001/anime-remove-background/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Anime Remove Background -emoji: 🪄🖼️ -colorFrom: indigo -colorTo: pink -sdk: gradio -sdk_version: 3.1.4 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: skytnt/anime-remove-background ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/densepose/data/dataset_mapper.py b/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/densepose/data/dataset_mapper.py deleted file mode 100644 index f74976745151952ece06c7b7ba542e0b63f53899..0000000000000000000000000000000000000000 --- a/spaces/hasibzunair/fifa-tryon-demo/Self-Correction-Human-Parsing-for-ACGPN/mhp_extension/detectron2/projects/DensePose/densepose/data/dataset_mapper.py +++ /dev/null @@ -1,118 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved - -import copy -import torch -from fvcore.common.file_io import PathManager - -from detectron2.data import MetadataCatalog -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T - -from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData - - -class DatasetMapper: - """ - A customized version of `detectron2.data.DatasetMapper` - """ - - def __init__(self, cfg, is_train=True): - self.tfm_gens = utils.build_transform_gen(cfg, is_train) - - # fmt: off - self.img_format = cfg.INPUT.FORMAT - self.mask_on = cfg.MODEL.MASK_ON - self.keypoint_on = cfg.MODEL.KEYPOINT_ON - self.densepose_on = cfg.MODEL.DENSEPOSE_ON - assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet" - # fmt: on - if self.keypoint_on and is_train: - # Flip only makes sense in training - self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN) - else: - self.keypoint_hflip_indices = None - - if self.densepose_on: - densepose_transform_srcs = [ - MetadataCatalog.get(ds).densepose_transform_src - for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST - ] - assert len(densepose_transform_srcs) > 0 - # TODO: check that DensePose transformation data is the same for - # all the data. Otherwise one would have to pass DB ID with - # each entry to select proper transformation data. For now, since - # all DensePose annotated data uses the same data semantics, we - # omit this check. 
- densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0]) - self.densepose_transform_data = DensePoseTransformData.load( - densepose_transform_data_fpath - ) - - self.is_train = is_train - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. - - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - image_shape = image.shape[:2] # h, w - dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32")) - - if not self.is_train: - dataset_dict.pop("annotations", None) - return dataset_dict - - for anno in dataset_dict["annotations"]: - if not self.mask_on: - anno.pop("segmentation", None) - if not self.keypoint_on: - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - # USER: Don't call transpose_densepose if you don't need - annos = [ - self._transform_densepose( - utils.transform_instance_annotations( - obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices - ), - transforms, - ) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances(annos, image_shape) - - if len(annos) and "densepose" in annos[0]: - gt_densepose = [obj["densepose"] for obj in annos] - instances.gt_densepose = DensePoseList(gt_densepose, instances.gt_boxes, image_shape) - - dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()] - return dataset_dict - - def _transform_densepose(self, annotation, transforms): - if not self.densepose_on: - return annotation - - # Handle densepose annotations - is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation) - if is_valid: - densepose_data = DensePoseDataRelative(annotation, cleanup=True) - densepose_data.apply_transform(transforms, self.densepose_transform_data) - annotation["densepose"] = densepose_data - else: - # logger = logging.getLogger(__name__) - # logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid)) - DensePoseDataRelative.cleanup_annotation(annotation) - # NOTE: annotations for certain instances may be unavailable. - # 'None' is accepted by the DensePostList data structure. 
- annotation["densepose"] = None - return annotation diff --git a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/cppipc/policy.h b/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/cppipc/policy.h deleted file mode 100644 index f88ab5d8cb343f97026966b402eaeed8831e356a..0000000000000000000000000000000000000000 --- a/spaces/hbestm/gpt-academic-play/crazy_functions/test_project/cpp/cppipc/policy.h +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include <type_traits> - -#include "libipc/def.h" -#include "libipc/prod_cons.h" - -#include "libipc/circ/elem_array.h" - -namespace ipc { -namespace policy { - -template <template <typename, std::size_t...> class Elems, typename Flag> -struct choose; - -template <typename Flag> -struct choose<circ::elem_array, Flag> { - using flag_t = Flag; - - template <std::size_t DataSize, std::size_t AlignSize> - using elems_t = circ::elem_array<ipc::prod_cons_impl<flag_t>, DataSize, AlignSize>; -}; - -} // namespace policy -} // namespace ipc diff --git "a/spaces/hbestm/gpt-academic-play/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" "b/spaces/hbestm/gpt-academic-play/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" deleted file mode 100644 index 7c6a7ffb5cb2c42e6543c75d6ad9dd643f412cd9..0000000000000000000000000000000000000000 --- "a/spaces/hbestm/gpt-academic-play/crazy_functions/\351\253\230\347\272\247\345\212\237\350\203\275\345\207\275\346\225\260\346\250\241\346\235\277.py" +++ /dev/null @@ -1,29 +0,0 @@ -from toolbox import CatchException, update_ui -from .crazy_utils import request_gpt_model_in_new_thread_with_ui_alive -import datetime -@CatchException -def 高阶功能模板函数(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port): - """ - txt 输入栏用户输入的文本,例如需要翻译的一段话,再例如一个包含了待处理文件的路径 - llm_kwargs gpt模型参数,如温度和top_p等,一般原样传递下去就行 - plugin_kwargs 插件模型的参数,暂时没有用武之地 - chatbot 聊天显示框的句柄,用于显示给用户 - history 聊天历史,前情提要 - system_prompt 给gpt的静默提醒 - web_port 当前软件运行的端口号 - """ - history = [] # 清空历史,以免输入溢出 - chatbot.append(("这是什么功能?", "[Local Message] 请注意,您正在调用一个[函数插件]的模板,该函数面向希望实现更多有趣功能的开发者,它可以作为创建新功能函数的模板(该函数只有20多行代码)。此外我们也提供可同步处理大量文件的多线程Demo供您参考。您若希望分享新的功能模组,请不吝PR!")) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 由于请求gpt需要一段时间,我们先及时地做一次界面更新 - for i in range(5): - currentMonth = (datetime.date.today() + datetime.timedelta(days=i)).month - currentDay = (datetime.date.today() + datetime.timedelta(days=i)).day - i_say = f'历史中哪些事件发生在{currentMonth}月{currentDay}日?列举两条并发送相关图片。发送图片时,请使用Markdown,将Unsplash API中的PUT_YOUR_QUERY_HERE替换成描述该事件的一个最重要的单词。' - gpt_say = yield from request_gpt_model_in_new_thread_with_ui_alive( - inputs=i_say, inputs_show_user=i_say, - llm_kwargs=llm_kwargs, chatbot=chatbot, history=[], - sys_prompt="当你想发送一张照片时,请使用Markdown, 并且不要有反斜线, 不要用代码块。使用 Unsplash API (https://source.unsplash.com/1280x720/? < PUT_YOUR_QUERY_HERE >)。" - ) - chatbot[-1] = (i_say, gpt_say) - history.append(i_say);history.append(gpt_say) - yield from update_ui(chatbot=chatbot, history=history) # 刷新界面 # 界面更新 diff --git a/spaces/hebert2099/MusicGen/tests/modules/test_lstm.py b/spaces/hebert2099/MusicGen/tests/modules/test_lstm.py deleted file mode 100644 index 1248964c8191e19f27661f0974bef9cc967eb015..0000000000000000000000000000000000000000 --- a/spaces/hebert2099/MusicGen/tests/modules/test_lstm.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. 
-# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import random -import torch - -from audiocraft.modules.lstm import StreamableLSTM - - -class TestStreamableLSTM: - - def test_lstm(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=False) - x = torch.randn(B, C, T) - y = lstm(x) - - print(y.shape) - assert y.shape == torch.Size([B, C, T]) - - def test_lstm_skip(self): - B, C, T = 4, 2, random.randint(1, 100) - - lstm = StreamableLSTM(C, 3, skip=True) - x = torch.randn(B, C, T) - y = lstm(x) - - assert y.shape == torch.Size([B, C, T]) diff --git a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/postprocessing/consolidate_postprocessing.py b/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/postprocessing/consolidate_postprocessing.py deleted file mode 100644 index e735d74821922b7504252a6455e93419db083b36..0000000000000000000000000000000000000000 --- a/spaces/ho11laqe/nnUNet_calvingfront_detection/nnunet/postprocessing/consolidate_postprocessing.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import shutil -from typing import Tuple - -from batchgenerators.utilities.file_and_folder_operations import * -from nnunet.configuration import default_num_threads -from nnunet.evaluation.evaluator import aggregate_scores -from nnunet.postprocessing.connected_components import determine_postprocessing -import argparse - - -def collect_cv_niftis(cv_folder: str, output_folder: str, validation_folder_name: str = 'validation_raw', - folds: tuple = (0, 1, 2, 3, 4)): - validation_raw_folders = [join(cv_folder, "fold_%d" % i, validation_folder_name) for i in folds] - exist = [isdir(i) for i in validation_raw_folders] - - if not all(exist): - raise RuntimeError("some folds are missing. Please run the full 5-fold cross-validation. " - "The following folds seem to be missing: %s" % - [i for j, i in enumerate(folds) if not exist[j]]) - - # now copy all raw niftis into cv_niftis_raw - maybe_mkdir_p(output_folder) - for f in folds: - niftis = subfiles(validation_raw_folders[f], suffix=".nii.gz") - for n in niftis: - shutil.copy(n, join(output_folder)) - - -def consolidate_folds(output_folder_base, validation_folder_name: str = 'validation_raw', - advanced_postprocessing: bool = False, folds: Tuple[int] = (0, 1, 2, 3, 4)): - """ - Used to determine the postprocessing for an experiment after all five folds have been completed. In the validation of - each fold, the postprocessing can only be determined on the cases within that fold. This can result in different - postprocessing decisions for different folds. 
In the end, we can only decide for one postprocessing per experiment, - so we have to rerun it - :param folds: - :param advanced_postprocessing: - :param output_folder_base:experiment output folder (fold_0, fold_1, etc must be subfolders of the given folder) - :param validation_folder_name: dont use this - :return: - """ - output_folder_raw = join(output_folder_base, "cv_niftis_raw") - if isdir(output_folder_raw): - shutil.rmtree(output_folder_raw) - - output_folder_gt = join(output_folder_base, "gt_niftis") - collect_cv_niftis(output_folder_base, output_folder_raw, validation_folder_name, - folds) - - num_niftis_gt = len(subfiles(join(output_folder_base, "gt_niftis"), suffix='.nii.gz')) - # count niftis in there - num_niftis = len(subfiles(output_folder_raw, suffix='.nii.gz')) - if num_niftis != num_niftis_gt: - raise AssertionError("If does not seem like you trained all the folds! Train all folds first!") - - # load a summary file so that we can know what class labels to expect - summary_fold0 = load_json(join(output_folder_base, "fold_0", validation_folder_name, "summary.json"))['results'][ - 'mean'] - classes = [int(i) for i in summary_fold0.keys()] - niftis = subfiles(output_folder_raw, join=False, suffix=".nii.gz") - test_pred_pairs = [(join(output_folder_raw, i), join(output_folder_gt, i)) for i in niftis] - - # determine_postprocessing needs a summary.json file in the folder where the raw predictions are. We could compute - # that from the summary files of the five folds but I am feeling lazy today - aggregate_scores(test_pred_pairs, labels=classes, json_output_file=join(output_folder_raw, "summary.json"), - num_threads=default_num_threads) - - determine_postprocessing(output_folder_base, output_folder_gt, 'cv_niftis_raw', - final_subf_name="cv_niftis_postprocessed", processes=default_num_threads, - advanced_postprocessing=advanced_postprocessing) - # determine_postprocessing will create a postprocessing.json file that can be used for inference - - -if __name__ == "__main__": - argparser = argparse.ArgumentParser() - argparser.add_argument("-f", type=str, required=True, help="experiment output folder (fold_0, fold_1, " - "etc must be subfolders of the given folder)") - - args = argparser.parse_args() - - folder = args.f - - consolidate_folds(folder) diff --git a/spaces/hshr/DeepFilterNet/app.py b/spaces/hshr/DeepFilterNet/app.py deleted file mode 100644 index 57161ff668d76e94ac7c2fd9be8fd1c8a6f58183..0000000000000000000000000000000000000000 --- a/spaces/hshr/DeepFilterNet/app.py +++ /dev/null @@ -1,289 +0,0 @@ -import math -import tempfile -from typing import Optional, Tuple, Union - -import gradio -import gradio.inputs -import gradio.outputs -import markdown -import matplotlib.pyplot as plt -import numpy as np -import torch -from loguru import logger -from torch import Tensor -from torchaudio.backend.common import AudioMetaData - -from df import config -from df.enhance import enhance, init_df, load_audio, save_audio -from df.utils import resample - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") -model, df, _ = init_df(config_allow_defaults=True) -model = model.to(device=device).eval() - - -def mix_at_snr(clean, noise, snr, eps=1e-10): - """Mix clean and noise signal at a given SNR. - - Args: - clean: 1D Tensor with the clean signal to mix. - noise: 1D Tensor of shape. - snr: Signal to noise ratio. - - Returns: - clean: 1D Tensor with gain changed according to the snr. - noise: 1D Tensor with the combined noise channels. 
- mix: 1D Tensor with added clean and noise signals. - - """ - clean = torch.as_tensor(clean).mean(0, keepdim=True) - noise = torch.as_tensor(noise).mean(0, keepdim=True) - if noise.shape[1] < clean.shape[1]: - noise = noise.repeat((1, int(math.ceil(clean.shape[1] / noise.shape[1])))) - max_start = int(noise.shape[1] - clean.shape[1]) - start = torch.randint(0, max_start, ()).item() if max_start > 0 else 0 - logger.debug(f"start: {start}, {clean.shape}") - noise = noise[:, start : start + clean.shape[1]] - E_speech = torch.mean(clean.pow(2)) + eps - E_noise = torch.mean(noise.pow(2)) - K = torch.sqrt((E_noise / E_speech) * 10 ** (snr / 10) + eps) - noise = noise / K - mixture = clean + noise - logger.debug("mixture: {mixture.shape}") - assert torch.isfinite(mixture).all() - max_m = mixture.abs().max() - if max_m > 1: - logger.warning(f"Clipping detected during mixing. Reducing gain by {1/max_m}") - clean, noise, mixture = clean / max_m, noise / max_m, mixture / max_m - return clean, noise, mixture - - -def load_audio_gradio( - audio_or_file: Union[None, str, Tuple[int, np.ndarray]], sr: int -) -> Optional[Tuple[Tensor, AudioMetaData]]: - if audio_or_file is None: - return None - if isinstance(audio_or_file, str): - if audio_or_file.lower() == "none": - return None - # First try default format - audio, meta = load_audio(audio_or_file, sr) - else: - meta = AudioMetaData(-1, -1, -1, -1, "") - assert isinstance(audio_or_file, (tuple, list)) - meta.sample_rate, audio_np = audio_or_file - # Gradio documentation says, the shape is [samples, 2], but apparently sometimes its not. - audio_np = audio_np.reshape(audio_np.shape[0], -1).T - if audio_np.dtype == np.int16: - audio_np = (audio_np / (1 << 15)).astype(np.float32) - elif audio_np.dtype == np.int32: - audio_np = (audio_np / (1 << 31)).astype(np.float32) - audio = resample(torch.from_numpy(audio_np), meta.sample_rate, sr) - return audio, meta - - -def mix_and_denoise( - speech_rec: Union[str, Tuple[int, np.ndarray]], speech_upl: str, noise_fn: str, snr: int -): - sr = config("sr", 48000, int, section="df") - logger.info( - f"Got parameters speech_rec: {speech_rec}, speech_upl: {speech_upl}, noise: {noise_fn}, snr: {snr}" - ) - if noise_fn is None: - noise_fn = "samples/dkitchen.wav" - meta = AudioMetaData(-1, -1, -1, -1, "") - max_s = 10 # limit to 10 seconds - if speech_rec is None and speech_upl is None: - speech, meta = load_audio("samples/p232_013_clean.wav", sr) - elif speech_upl is not None: - speech, meta = load_audio(speech_upl, sr) - else: - tmp = load_audio_gradio(speech_rec, sr) - assert tmp is not None - speech, meta = tmp - if speech.dim() > 1 and speech.shape[0] > 1: - assert ( - speech.shape[1] > speech.shape[0] - ), f"Expecting channels first, but got {speech.shape}" - speech = speech.mean(dim=0, keepdim=True) - speech = speech[..., : max_s * sr] - logger.info(f"Loaded speech with shape {speech.shape}") - noise, _ = load_audio(noise_fn, sr) # type: ignore - if meta.sample_rate != sr: - # Low pass filter by resampling - noise = resample(resample(noise, sr, meta.sample_rate), meta.sample_rate, sr) - logger.info(f"Loaded noise with shape {noise.shape}") - speech, noise, noisy = mix_at_snr(speech, noise, snr) - logger.info("Start denoising audio") - enhanced = enhance(model, df, noisy) - logger.info("Denoising finished") - lim = torch.linspace(0.0, 1.0, int(sr * 0.15)).unsqueeze(0) - lim = torch.cat((lim, torch.ones(1, enhanced.shape[1] - lim.shape[1])), dim=1) - enhanced = enhanced * lim - if meta.sample_rate != sr: - enhanced = 
resample(enhanced, sr, meta.sample_rate) - noisy = resample(noisy, sr, meta.sample_rate) - sr = meta.sample_rate - noisy_fn = tempfile.NamedTemporaryFile(suffix="noisy.wav", delete=False).name - save_audio(noisy_fn, noisy, sr) - enhanced_fn = tempfile.NamedTemporaryFile(suffix="enhanced.wav", delete=False).name - save_audio(enhanced_fn, enhanced, sr) - logger.info(f"saved audios: {noisy_fn}, {enhanced_fn}") - return ( - noisy_fn, - spec_figure(noisy, sr=sr), - enhanced_fn, - spec_figure(enhanced, sr=sr), - ) - - -def specshow( - spec, - ax=None, - title=None, - xlabel=None, - ylabel=None, - sr=48000, - n_fft=None, - hop=None, - t=None, - f=None, - vmin=-100, - vmax=0, - xlim=None, - ylim=None, - cmap="inferno", -): - """Plots a spectrogram of shape [F, T]""" - spec_np = spec.cpu().numpy() if isinstance(spec, torch.Tensor) else spec - if ax is not None: - set_title = ax.set_title - set_xlabel = ax.set_xlabel - set_ylabel = ax.set_ylabel - set_xlim = ax.set_xlim - set_ylim = ax.set_ylim - else: - ax = plt - set_title = plt.title - set_xlabel = plt.xlabel - set_ylabel = plt.ylabel - set_xlim = plt.xlim - set_ylim = plt.ylim - if n_fft is None: - if spec.shape[0] % 2 == 0: - n_fft = spec.shape[0] * 2 - else: - n_fft = (spec.shape[0] - 1) * 2 - hop = hop or n_fft // 4 - if t is None: - t = np.arange(0, spec_np.shape[-1]) * hop / sr - if f is None: - f = np.arange(0, spec_np.shape[0]) * sr // 2 / (n_fft // 2) / 1000 - im = ax.pcolormesh( - t, f, spec_np, rasterized=True, shading="auto", vmin=vmin, vmax=vmax, cmap=cmap - ) - if title is not None: - set_title(title) - if xlabel is not None: - set_xlabel(xlabel) - if ylabel is not None: - set_ylabel(ylabel) - if xlim is not None: - set_xlim(xlim) - if ylim is not None: - set_ylim(ylim) - return im - - -def spec_figure( - audio: torch.Tensor, - figsize=(15, 5), - colorbar=False, - colorbar_format=None, - figure=None, - return_im=False, - labels=True, - **kwargs, -) -> plt.Figure: - audio = torch.as_tensor(audio) - if labels: - kwargs.setdefault("xlabel", "Time [s]") - kwargs.setdefault("ylabel", "Frequency [Hz]") - n_fft = kwargs.setdefault("n_fft", 1024) - hop = kwargs.setdefault("hop", 512) - w = torch.hann_window(n_fft, device=audio.device) - spec = torch.stft(audio, n_fft, hop, window=w, return_complex=False) - spec = spec.div_(w.pow(2).sum()) - spec = torch.view_as_complex(spec).abs().clamp_min(1e-12).log10().mul(10) - kwargs.setdefault("vmax", max(0.0, spec.max().item())) - - if figure is None: - figure = plt.figure(figsize=figsize) - figure.set_tight_layout(True) - if spec.dim() > 2: - spec = spec.squeeze(0) - im = specshow(spec, **kwargs) - if colorbar: - ckwargs = {} - if "ax" in kwargs: - if colorbar_format is None: - if kwargs.get("vmin", None) is not None or kwargs.get("vmax", None) is not None: - colorbar_format = "%+2.0f dB" - ckwargs = {"ax": kwargs["ax"]} - plt.colorbar(im, format=colorbar_format, **ckwargs) - if return_im: - return im - return figure - - -inputs = [ - gradio.inputs.Audio( - source="microphone", - type="numpy", - optional=True, - label="Record your own voice", - ), - gradio.inputs.Audio( - source="upload", - type="filepath", - optional=True, - label="Alternative: Upload speech sample", - ), - gradio.inputs.Audio( - source="upload", type="filepath", optional=True, label="Upload noise sample" - ), - gradio.inputs.Slider(minimum=-10, maximum=40, step=5, default=10), # SNR -] -examples = [ - [ - "none", - "samples/p232_013_clean.wav", - "samples/dkitchen.wav", - 10, - ], - [ - "none", - "samples/p232_019_clean.wav", - 
"samples/dliving.wav", - 10, - ], -] -outputs = [ - gradio.outputs.Audio(label="Noisy"), - gradio.outputs.Image(type="plot"), - gradio.outputs.Audio(label="Enhanced"), - gradio.outputs.Image(type="plot"), -] -description = "This demo denoises audio files using DeepFilterNet. Try it with your own voice!" -iface = gradio.Interface( - fn=mix_and_denoise, - title="DeepFilterNet Demo", - inputs=inputs, - outputs=outputs, - examples=examples, - description=description, - layout="horizontal", - allow_flagging="never", - article=markdown.markdown(open("usage.md").read()), -) -iface.launch(cache_examples=False, debug=True) diff --git "a/spaces/huggingface/Model_Cards_Writing_Tool/pages/5_\360\237\217\213\357\270\217\342\200\215\342\231\200\357\270\217_Model_training.py" "b/spaces/huggingface/Model_Cards_Writing_Tool/pages/5_\360\237\217\213\357\270\217\342\200\215\342\231\200\357\270\217_Model_training.py" deleted file mode 100644 index b329c57a7c0f759c4fa6885f4cda5238a9fdfe80..0000000000000000000000000000000000000000 --- "a/spaces/huggingface/Model_Cards_Writing_Tool/pages/5_\360\237\217\213\357\270\217\342\200\215\342\231\200\357\270\217_Model_training.py" +++ /dev/null @@ -1,87 +0,0 @@ -import streamlit as st -from persist import persist, load_widget_state - -global variable_output - -def main(): - - cs_body() - - -def cs_body(): - - st.markdown('# Training Details') - st.write("Provide an overview of the Training Data and Training Procedure for this model") - left, middle, right = st.columns([2,1,7]) - - with left: - st.write("\n") - st.write("\n") - st.markdown('## Training Data:') - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - with middle: - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.markdown(' \n ## Training Procedure') - with left: - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - - st.markdown('#### Preprocessing:') - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.markdown('#### Speeds, Sizes, Time:') - - with right: - #soutput_jinja = parse_into_jinja_markdown() - - st.text_area("", help ="Ideally this links to a Dataset Card.", key=persist("training_Data")) - #st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - st.write("\n") - - st.text_area("",key=persist("model_preprocessing")) - st.text_area("", help = "This section provides information about throughput, start/end time, checkpoint size if relevant, etc.", key=persist("Speeds_Sizes_Times")) - - - - - - -if __name__ == '__main__': - load_widget_state() - main() \ No newline at end of file diff --git a/spaces/huspacy/example-applications/README.md b/spaces/huspacy/example-applications/README.md deleted file mode 100644 index 40642aa854e8b9f09b3592a1273976b8d5651af1..0000000000000000000000000000000000000000 --- a/spaces/huspacy/example-applications/README.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: HuSpaCy Use Cases -emoji: 👁 -colorFrom: red -colorTo: indigo -sdk: gradio -sdk_version: "3.20.0" -app_file: app.py -pinned: false -license: apache-2.0 -python_version: "3.10" ---- - -# HuSpaCy Examples - -This repository presents some practical 
examples on using HuSpaCy for various text mining applications. - - -## Development - -To start the demo: `poetry run python app.py` - -If you upgrade dependencies via poetry don't forget to update `requirements.txt` with `poetry export --without-hashes > requirements.txt` diff --git a/spaces/hussain-shk/IndiSent/indic_nlp_library/docs/Makefile b/spaces/hussain-shk/IndiSent/indic_nlp_library/docs/Makefile deleted file mode 100644 index faf86259fdbcb0dff091c22d980623b622f2bbd4..0000000000000000000000000000000000000000 --- a/spaces/hussain-shk/IndiSent/indic_nlp_library/docs/Makefile +++ /dev/null @@ -1,153 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = _build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . - -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext - -help: - @echo "Please use \`make <target>' where <target> is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." 
- -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/IndicNLPLibrary.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/IndicNLPLibrary.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/IndicNLPLibrary" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/IndicNLPLibrary" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." 
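
The Makefile above is the stock Sphinx scaffold: every target simply forwards to `sphinx-build` with a builder name plus the shared `ALLSPHINXOPTS` (doctree cache in `_build/doctrees`, output under `_build/<builder>`). As a rough illustration — an assumption for this write-up, not something shipped in the repository — the `html` target can be reproduced without GNU make (handy on Windows) by calling Sphinx's module entry point with the same arguments; the `build_docs` helper name is made up for the example:

```python
# Illustrative sketch (not in the repo): mirrors `make html` from the Sphinx Makefile
# by invoking sphinx-build with the same builder, doctree cache, and output layout.
import subprocess
import sys


def build_docs(builder: str = "html", source_dir: str = ".", build_dir: str = "_build") -> int:
    """Run `sphinx-build -b <builder> -d _build/doctrees <source_dir> _build/<builder>`."""
    cmd = [
        sys.executable, "-m", "sphinx",   # equivalent to the sphinx-build executable
        "-b", builder,                    # builder, e.g. html / dirhtml / latex
        "-d", f"{build_dir}/doctrees",    # same doctree cache the Makefile uses
        source_dir,
        f"{build_dir}/{builder}",
    ]
    return subprocess.call(cmd)


if __name__ == "__main__":
    # Run from the docs/ directory, assuming Sphinx is installed in the environment.
    raise SystemExit(build_docs())
```
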
diff --git a/spaces/hzy123/bingo/src/lib/isomorphic/node.ts b/spaces/hzy123/bingo/src/lib/isomorphic/node.ts deleted file mode 100644 index da213ad6a86181979f098309c374da02835db5a0..0000000000000000000000000000000000000000 --- a/spaces/hzy123/bingo/src/lib/isomorphic/node.ts +++ /dev/null @@ -1,26 +0,0 @@ -import Debug from 'debug' - -const { fetch, setGlobalDispatcher, ProxyAgent } = require('undici') -const { HttpsProxyAgent } = require('https-proxy-agent') -const ws = require('ws') - -const debug = Debug('bingo') - -const httpProxy = process.env.http_proxy || process.env.HTTP_PROXY || process.env.https_proxy || process.env.HTTPS_PROXY; -let WebSocket = ws.WebSocket - -if (httpProxy) { - setGlobalDispatcher(new ProxyAgent(httpProxy)) - const agent = new HttpsProxyAgent(httpProxy) - // @ts-ignore - WebSocket = class extends ws.WebSocket { - constructor(address: string | URL, options: typeof ws.WebSocket) { - super(address, { - ...options, - agent, - }) - } - } -} - -export default { fetch, WebSocket, debug } diff --git a/spaces/inamXcontru/PoeticTTS/2012 (2009) 1080p Bluray X264 Dual Audio [English-Hindi] - TBI.md b/spaces/inamXcontru/PoeticTTS/2012 (2009) 1080p Bluray X264 Dual Audio [English-Hindi] - TBI.md deleted file mode 100644 index 3b591924542c8911845ddcdadfbf1a7164960c0e..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/2012 (2009) 1080p Bluray X264 Dual Audio [English-Hindi] - TBI.md +++ /dev/null @@ -1,18 +0,0 @@ - -<h1>2012: A Spectacular Disaster Movie in High Definition and Dual Audio</h1> -<p>If you are a fan of disaster movies, you might want to check out 2012, a 2009 film directed by Roland Emmerich and starring John Cusack, Chiwetel Ejiofor, Amanda Peet, Thandie Newton, and Danny Glover. The film depicts a series of cataclysmic events that occur in the year 2012, as predicted by the ancient Mayan calendar.</p> -<h2>2012 (2009) 1080p Bluray x264 Dual Audio [English-Hindi] - TBI</h2><br /><p><b><b>Download File</b> ✔ <a href="https://gohhs.com/2uz3oE">https://gohhs.com/2uz3oE</a></b></p><br /><br /> -<p>The film features stunning visual effects and thrilling action sequences, as the characters try to survive the end of the world. You can enjoy this movie in high definition quality with 1080p Bluray x264 format, which offers crisp and clear images and sound. You can also choose between English and Hindi audio tracks, thanks to the dual audio feature provided by TBI.</p> -<p>TBI is a trusted source of high-quality movies and TV shows, with a large collection of dual audio titles. You can download 2012 (2009) 1080p Bluray x264 Dual Audio [English-Hindi] - TBI from their website or torrent link, and watch it on your preferred device. Whether you want to experience the movie in its original language or in your native tongue, you can do so with TBI.</p> -<p>Don't miss this opportunity to watch one of the most epic disaster movies of all time in HD and dual audio. Download 2012 (2009) 1080p Bluray x264 Dual Audio [English-Hindi] - TBI today and enjoy the ride!</p> -<p></p> - -<p>2012 is not only a movie about the end of the world, but also a movie about the human spirit and the power of hope. The film explores how different people cope with the impending doom, and how they find meaning and purpose in their lives. 
Some of them try to save their loved ones, some of them try to help others, and some of them try to make peace with themselves.</p> -<p>The film also raises some interesting questions about the role of science, religion, and politics in the face of a global crisis. How do we decide who gets to survive and who doesn't? How do we balance individual rights and collective responsibilities? How do we deal with the ethical and moral dilemmas that arise from such a situation?</p> -<p>2012 is a movie that will make you think, feel, and wonder. It will also entertain you with its spectacular scenes of destruction and chaos, as well as its moments of humor and heart. You can watch this movie in the best possible quality and language with 1080p Bluray x264 Dual Audio [English-Hindi] - TBI. Don't wait any longer and download it now!</p> - -<p>If you are looking for a movie that will keep you on the edge of your seat and make you marvel at the wonders of cinema, look no further than 2012. This movie is a masterpiece of disaster filmmaking, with a stellar cast and a visionary director. You will witness the most incredible scenes of natural disasters and human survival ever put on screen.</p> -<p>But 2012 is not just a movie about spectacle and spectacle. It is also a movie about courage and compassion, about faith and destiny, about love and sacrifice. The movie shows how different characters react to the impending apocalypse, and how they find strength and inspiration in each other. Some of them fight for their families, some of them fight for their ideals, and some of them fight for their souls.</p> -<p>2012 is a movie that will inspire you, challenge you, and move you. It will also delight you with its stunning visuals and sound, as well as its witty and touching dialogues. You can enjoy this movie in the highest quality and language with 1080p Bluray x264 Dual Audio [English-Hindi] - TBI. Don't hesitate and download it today!</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/BluffTitler Ultimate Crack 14.2.0.5 [Latest Version].md b/spaces/inamXcontru/PoeticTTS/BluffTitler Ultimate Crack 14.2.0.5 [Latest Version].md deleted file mode 100644 index 72251631ad4618803e66bceb2749632dbb21cbfe..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/BluffTitler Ultimate Crack 14.2.0.5 [Latest Version].md +++ /dev/null @@ -1,90 +0,0 @@ -<br /> -<h1>BluffTitler Ultimate Crack 14.2.0.5 [Latest Version]: A Review</h1> - -<p>Do you want to create stunning 3D video effects without spending a lot of money and time on complex software? If yes, then you might be interested in BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], a powerful and easy-to-use tool that lets you design and animate 3D titles and intros for your videos.</p> -<h2>BluffTitler Ultimate Crack 14.2.0.5 [Latest Version]</h2><br /><p><b><b>Download File</b> » <a href="https://gohhs.com/2uz5yV">https://gohhs.com/2uz5yV</a></b></p><br /><br /> - -<p>In this article, we will review BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], its features, benefits, drawbacks, and how to download and use it. We will also compare it with other similar software and give you our verdict on whether it is worth trying or not.</p> - -<h2>What is BluffTitler Ultimate?</h2> - -<p>BluffTitler Ultimate is a Windows desktop application that allows you to create amazing 3D effects for your videos. 
You can use it to make golden beveled titles, blood dripping titles, slimy titles, hairy titles, exploding titles, reflection mapped titles, twisted titles, cartoon shaded titles, golden glowing titles, spooky lightened titles, titles with silver spikes, jumbling titles, inverted titles, bouncing titles, power field emitting titles, titles with flying hearts, pumping titles, and much more.</p> - -<p>You can also use BluffTitler Ultimate to create plasma backgrounds, background videos morphing into donuts, particle effects, MP3 audio morphing, JPG pictures exploding, video backgrounds fractal backgrounds, and other cool effects.</p> - -<p>BluffTitler Ultimate supports many different formats and resolutions, such as HD, 4K, square, widescreen, and more. You can export your projects as videos or images in any format you want.</p> - -<h2>What is BluffTitler Ultimate Crack 14.2.0.5 [Latest Version]?</h2> - -<p>BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] is a modified version of BluffTitler Ultimate that allows you to use the full features of the software without paying for it. It is a patch file that you need to apply to the original software to activate it.</p> -<p></p> - -<p>By using BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], you can enjoy all the benefits of BluffTitler Ultimate without spending a dime. However, you should be aware that using cracked software is illegal and risky. You might face legal issues or malware infections if you download and use BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] from untrusted sources.</p> - -<h2>How to use BluffTitler Ultimate Crack 14.2.0.5 [Latest Version]?</h2> - -<p>To use BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], you need to follow these simple steps:</p> - -<ol> -<li>Download and install BluffTitler Ultimate from the official website or any trusted source.</li> -<li>Download BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] from the link below or any reliable source.</li> -<li>Extract the zip file and run the patch file as administrator.</li> -<li>Select the installation directory of BluffTitler Ultimate and click on patch.</li> -<li>Wait for the process to complete and close the patch file.</li> -<li>Launch BluffTitler Ultimate and enjoy the full features of the software.</li> -</ol> - -<p>Once you have done that, you can start creating your own 3D effects with BluffTitler Ultimate. You can choose from hundreds of templates and presets that are included in the software or create your own from scratch.</p> - -<p>You can customize every aspect of your effects, such as the text, font, color, size, position, rotation, animation, lighting, shadows, textures, particles, etc. You can also add sound effects and music to your projects.</p> - -<p>You can preview your effects in real-time and adjust them as you like. You can also use the timeline to control the timing and speed of your animations.</p> - -<p>When you are happy with your results, you can export your projects as videos or images in any format you want. 
You can also share them online or burn them to DVD or Blu-ray discs.</p> - -<h2>Why choose BluffTitler Ultimate Crack 14.2.0.5 [Latest Version]?</h2> - -<p>There are many reasons why you should choose BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] over other 3D animation software:</p> - -<ul> -<li>It is very easy to use and does not require any technical skills or experience.</li> -<li>It is very fast and does not require a powerful computer or graphics card.</li> -<li>It is very affordable and does not require a monthly subscription or license fee.</li> -<li>It is very versatile and can create any kind of 3D effect you can imagine.</li> -<li>It is very fun and creative and can make your videos stand out from the crowd.</li> -</ul> - -<p>If you want to impress your family, friends, and clients with spectacular 3D titles and intros for your videos,</p> -</p> - -<h2>How to download BluffTitler Ultimate Crack 14.2.0.5 [Latest Version]?</h2> - -<p>To download BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], you need to follow these simple steps:</p> - -<ol> -<li>Click on the download link below or any other trusted source.</li> -<li>Choose a suitable location to save the zip file and wait for the download to finish.</li> -<li>Extract the zip file and run the setup file to install BluffTitler Ultimate.</li> -<li>Run the patch file as administrator and select the installation directory of BluffTitler Ultimate.</li> -<li>Click on patch and wait for the process to complete.</li> -<li>Enjoy the full version of BluffTitler Ultimate with all its features.</li> -</ol> - -<p>Download Link: https://www.sadeempc.com/blufftitler-ultimate-crack/</p> - -<h2>Conclusion</h2> - -<p>BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] is a powerful and easy-to-use tool that lets you create stunning 3D video effects without spending a fortune on professional software. It is a great choice for anyone who wants to make their videos more attractive and impressive with 3D titles and intros.</p> - -<p>However, you should be careful when using cracked software as it might be illegal or unsafe. You should always download and use BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] from trusted sources and scan it with antivirus software before installing it.</p> - -<p>We hope this article has helped you to learn more about BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], its features, benefits, drawbacks, and how to download and use it. If you have any questions or feedback, please feel free to leave a comment below.</p> -<h2>Conclusion</h2> - -<p>BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] is a powerful and easy-to-use tool that lets you create stunning 3D video effects without spending a fortune on professional software. It is a great choice for anyone who wants to make their videos more attractive and impressive with 3D titles and intros.</p> - -<p>However, you should be careful when using cracked software as it might be illegal or unsafe. You should always download and use BluffTitler Ultimate Crack 14.2.0.5 [Latest Version] from trusted sources and scan it with antivirus software before installing it.</p> - -<p>We hope this article has helped you to learn more about BluffTitler Ultimate Crack 14.2.0.5 [Latest Version], its features, benefits, drawbacks, and how to download and use it. 
If you have any questions or feedback, please feel free to leave a comment below.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/inamXcontru/PoeticTTS/Brekkie Around the World How Different Cultures Start Their Day.md b/spaces/inamXcontru/PoeticTTS/Brekkie Around the World How Different Cultures Start Their Day.md deleted file mode 100644 index 9c07ac265441b615db2cabbe6bf5b9a8f42f6ff4..0000000000000000000000000000000000000000 --- a/spaces/inamXcontru/PoeticTTS/Brekkie Around the World How Different Cultures Start Their Day.md +++ /dev/null @@ -1,16 +0,0 @@ -<br /> -<p>When BA's creative director <strong>Alex Grossman</strong> and food director <strong>Carla Music</strong> traveled to Australia to research an upcoming story (stay tuned!), they solidified our staff's obsession with Australian "brekkie" culture. Featuring wholesome, healthy, filling, veggie-forward food, Australian breakfasts are exactly the type of food we love to eat. The beauty of brekkie is in its simplicity and lack of fanfare. Organic, responsibly-grown, and thoughtfully prepared food is just the norm for Aussies. Says Grossman: "It's not like they're telling you all about the house-fermented kimchi and locally-raised eggs." Instead, there's just some dude in the back of the kitchen making ferments because they're cool and tasty, and he likes them, and thinks you will too. "This is a new concept for us; it's not new at all for Australians," says Grossman.</p> -<p>As Grossman explains, the beauty of brekkie-ing it up in Australia is that you don't have to try too hard to find a great spot. They're just there, fermenting kombucha and mashing perfectly ripe avocados on seedy toast. You have a much greater chance of hitting the breakfast jackpot at random in Sydney and Melbourne than in most U.S. cities (although there are some amazing spots in the states, which we highlight below!). Here are some of our picks for Australia's best brekkies:</p> -<h2>brekkie</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://gohhs.com/2uz43q">https://gohhs.com/2uz43q</a></b></p><br /><br /> -<p>You needn't fly across the world to have an Australian-style brekkie (although we wouldn't fault you for booking a ticket). A handful of restaurants in the states are serving up a proper brekkie, as well.</p> -<p><strong>Sweedeedee</strong> A Portland, OR charmer, Sweedeedee does brekkie culture with a side of pie. In addition to cherry, apple, marionberry, pecan, and lemon pies, Sweedeedee features anadama toast with house-made preserves and local honey, corncakes, and plenty of salads.</p> -<p>Don't feel like dining out? Recreating an Australian brekkie at home is easy, once you're familiar with the elements. Blur the lines between breakfast, brunch, and lunch, embrace whole foods and green things, and above all, keep it fresh and filling, without being heavy.</p> -<p>I can safely say, chorizo brekkie bowls are a huge brunch favorite in our home! They get gobbled up as soon as they come off of the stove. Some nights when we don't know what to make for dinner, we always come around to these! (If you have never had breakfast for dinner, you don't know what you are missing!)</p> -<p>These chorizo brekkie bowls also make a great and delicious option for weekday meal prep! All you have to do is cook all of the ingredients as per the recipe below, and prepare the easy hollandaise sauce. 
Portion and layer all of the ingredients into containers and keep the hollandaise sauce portions in separate containers.</p> -<p></p> -<p>It's delectable, it's beautiful, it's healthy. They call it <i>brekkie</i>. Yep, the Australians have even nicknamed the best meal of the day, it's that good. The trend hasn't just stayed in Australia, though. Trendy cafés from Amsterdam to NYC have adopted the superfood-y brekkie idea, and for good reason. Avocado toast ("avo smash" as they say), smoothie bowls, yogurt and granola, and always good coffee... The best way to start the day.</p> -<p>The Aussies have clearly created their own breakfast staples, but they've also taken staple breakfast options from around the world and healthy-ed them up. I found what would become without a doubt, the best breakfast, sorry, brekkie, I've ever had at the (actually Kiwi-inspired) best café in Amsterdam, Bakers & Roasters. My friend had a take on the traditional English breakfast and I had a sweet potato and chorizo hash with fried eggs on top. Not your typical full breakfast or potato hash.</p> -<p>Ultimately, one of my favorite aspects of the Australian brekkie scene is the personal experience it provides. While it's impossible to completely bring the laid-back vibe of Australia to the hustle and bustle of New York, this trend is definitely making an impact on how people treat breakfast. Whether it's taking time to enjoy brunch at the café, or getting excited to recreate Aussie favorites in the kitchen, breakfast is a thing. The aim of the Australian inspired café scene is to make eating breakfast an experience to be savored, not rushed. Allowing people to enjoy high quality coffee and tasty, healthy food with good friends, all at the same time, is the goal of these cafés, and at least from the food perspective, they're doing a good job at it.</p> -<p>Not only do we serve up amazing food for lunch and dinner, but our brekkie menu consisting of burritos, bowls, sourdough toast and more is sure to get your tastebuds tingling. What better way to enjoy this glorious offering than having it delivered to your door through our friends at DoorDash. 
For one week DoorDash will be offering 40% OFF all brekkie orders!</p> aaccfb2cb3<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/innnky/nyaru-svc2.0-advanced/utils.py b/spaces/innnky/nyaru-svc2.0-advanced/utils.py deleted file mode 100644 index b83c4601ad96d6b1e80a43e88593b887d4ea69d3..0000000000000000000000000000000000000000 --- a/spaces/innnky/nyaru-svc2.0-advanced/utils.py +++ /dev/null @@ -1,263 +0,0 @@ -import argparse -import glob -import json -import logging -import os -import subprocess -import sys - -import numpy as np -import torch -from scipy.io.wavfile import read - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location='cpu') - iteration = checkpoint_dict['iteration'] - learning_rate = checkpoint_dict['learning_rate'] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict['optimizer']) - # print(1111) - saved_state_dict = checkpoint_dict['model'] - # print(1111) - - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except Exception as e: - logger.info(e) - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, 'module'): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info("Loaded checkpoint '{}' (iteration {})".format( - checkpoint_path, iteration)) - return model, optimizer, learning_rate, iteration - - -def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info("Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path)) - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict, - 'iteration': iteration, - 'optimizer': optimizer.state_dict(), - 'learning_rate': learning_rate}, checkpoint_path) - - -def summarize(writer, global_step, scalars={}, histograms={}, images={}, audios={}, audio_sampling_rate=22050): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats='HWC') - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = numpy.fromstring(fig.canvas.tostring_rgb(), dtype=numpy.uint8, sep='') - data = 
data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger('matplotlib') - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow(alignment.transpose(), aspect='auto', origin='lower', - interpolation='none') - fig.colorbar(im, ax=ax) - xlabel = 'Decoder timestep' - if info is not None: - xlabel += '\n\n' + info - plt.xlabel(xlabel) - plt.ylabel('Encoder timestep') - plt.tight_layout() - - fig.canvas.draw() - data = numpy.fromstring(fig.canvas.tostring_rgb(), dtype=numpy.uint8, sep='') - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding='utf-8') as f: - filepaths_and_text = [line.strip().split(split) for line in f] - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument('-c', '--config', type=str, default="./configs/base.json", - help='JSON file for configuration') - parser.add_argument('-m', '--model', type=str, required=True, - help='Model name') - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r", encoding="utf-8") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warning("{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - )) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warning("git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8])) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Ao Haru Ride 720p Or 1080pl.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Ao Haru Ride 720p Or 1080pl.md deleted file mode 100644 index 6a4454332695e448c964b4b5f5fe774ba3d199b6..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Ao Haru Ride 720p Or 1080pl.md +++ /dev/null @@ -1,14 +0,0 @@ -<br /> -<h1>Ao Haru Ride: A Heartfelt Coming-of-Age Romance in High Definition</h1> -<p>If you are looking for a shoujo anime that will tug at your heartstrings, you might want to check out Ao Haru Ride, also known as Blue Spring Ride internationally. This anime is based on a popular manga series by Io Sakisaka, who is also known for her work on HAL and Omoi, Omoware, Furi, Furare. Ao Haru Ride follows the story of Futaba Yoshioka, a high school girl who tries to hide her feminine side to avoid being ostracized by her female classmates. She also harbors a crush on Kou Tanaka, a boy who disappeared from her life in middle school. When they reunite in high school, Futaba is shocked to see how much Kou has changed. He now goes by the name Kou Mabuchi, and he is cold and distant towards her. Futaba decides to change herself as well, and tries to reconnect with Kou while making new friends along the way.</p> -<p>Ao Haru Ride is a touching anime that explores the themes of friendship, love, loss, and growth. The characters are realistic and relatable, and their interactions are full of humor and emotion. The animation by Production I.G is smooth and colorful, and the music by Hiroaki Tsutsumi, Keiko Osaki, and Shota Hashimoto is soothing and catchy. The voice acting is also superb, with Maaya Uchida as Futaba and Yuuki Kaji as Kou.</p> -<h2>Ao Haru Ride 720p Or 1080pl</h2><br /><p><b><b>Download</b> ✔✔✔ <a href="https://urlin.us/2uEwdG">https://urlin.us/2uEwdG</a></b></p><br /><br /> -<p>If you want to watch Ao Haru Ride in high definition, you have two options: 720p or 1080p. Both resolutions offer clear and crisp images, but there are some differences between them. 720p has a lower file size and less bandwidth consumption than 1080p, which means it is faster to download and stream. However, 1080p has more pixels and higher quality than 720p, which means it has more details and sharper edges. 
The choice between 720p or 1080p depends on your personal preference and device compatibility. You can find both versions on various streaming platforms such as Crunchyroll.</p> -<p>Ao Haru Ride is a must-watch anime for fans of shoujo romance and slice-of-life genres. It will make you laugh, cry, and swoon over the adorable characters and their heartfelt stories. Whether you choose to watch it in 720p or 1080p, you will not regret giving this anime a try.</p> - -<p>If you are curious about the manga that inspired the anime, you can also read Ao Haru Ride by Io Sakisaka. The manga ran from 2011 to 2015 in Shueisha's Bessatsu Margaret magazine, and it has 13 volumes in total. The manga has more chapters and scenes than the anime, and it also covers the entire story of Futaba and Kou's relationship. The manga has been praised for its beautiful artwork, engaging plot, and realistic characters. It has also been translated into several languages, such as English, German, French, Italian, Taiwanese, and Polish. You can find the manga online or in bookstores near you.</p> -<p>Ao Haru Ride is not only a manga and an anime, but also a live-action film. The film was released in 2014, and it was directed by Takahiro Miki and written by Tomoko Yoshida. The film stars Tsubasa Honda as Futaba and Masahiro Higashide as Kou. The film follows the same story as the manga and the anime, but it also adds some original elements and changes some details. The film has received positive reviews from critics and fans alike, and it has been praised for its faithful adaptation, charming cast, and emotional impact. You can watch the film online or on DVD.</p> -<p></p> -<p>Ao Haru Ride is a wonderful series that will make you fall in love with its characters and their journey. Whether you prefer to read the manga, watch the anime, or see the film, you will not be disappointed by this shoujo masterpiece. Ao Haru Ride is a series that will stay with you for a long time.</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Frutiger Lt Std Family Torrent.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Frutiger Lt Std Family Torrent.md deleted file mode 100644 index af58befaf147159b0845b2ebd4016afe5183a139..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Frutiger Lt Std Family Torrent.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Frutiger Lt Std Family Torrent</h2><br /><p><b><b>DOWNLOAD</b> ★★★ <a href="https://urlin.us/2uEy1u">https://urlin.us/2uEy1u</a></b></p><br /><br /> -<br /> - 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Heroes Of Might And Magic 3 Hd Edition !FREE! Crack 89.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Heroes Of Might And Magic 3 Hd Edition !FREE! Crack 89.md deleted file mode 100644 index 74bd679e2aa364bb2474f55b52284a28b5e05750..0000000000000000000000000000000000000000 --- a/spaces/inplisQlawa/anything-midjourney-v4-1/Heroes Of Might And Magic 3 Hd Edition !FREE! Crack 89.md +++ /dev/null @@ -1,91 +0,0 @@ - -<h1>Heroes Of Might And Magic 3 Hd Edition Crack 89: How to Enjoy the Classic Strategy Game for Free</h1> - -<p>Heroes Of Might And Magic 3 is a classic turn-based strategy game that was released in 1999 and became one of the most popular titles in the genre. The game features an epic story, a rich fantasy world, and a variety of heroes and creatures to command. 
However, the original game had some limitations, such as low-resolution graphics, compatibility issues, and lack of online multiplayer support.</p> - -<p>That's why Ubisoft Entertainment released Heroes Of Might And Magic 3 Hd Edition in 2015, a remastered version of the game that improved the graphics, added widescreen support, and enabled online multiplayer via Steam. This edition also included all the DLCs from the original game, such as The Restoration of Erathia, Armageddon's Blade, and The Shadow of Death.</p> -<h2>Heroes Of Might And Magic 3 Hd Edition Crack 89</h2><br /><p><b><b>Download File</b> ✫✫✫ <a href="https://urlin.us/2uExl4">https://urlin.us/2uExl4</a></b></p><br /><br /> - -<p>However, if you want to play this game without buying it from Steam or using a CD, you might be looking for a crack that can bypass the DRM protection and let you enjoy the game for free. In this article, we will show you how to download and play Heroes Of Might And Magic 3 Hd Edition Crack 89, a version of the game that has been cracked by Reloaded group and works on all Windows systems.</p> - -<h2>What is Heroes Of Might And Magic 3 Hd Edition Crack 89?</h2> - -<p>Heroes Of Might And Magic 3 Hd Edition Crack 89 is a cracked version of the game that allows you to play it without any DRM restrictions. This means that you don't need to have a CD or a Steam account to launch the game. You can also access online multiplayer mode via Steam if you have an account.</p> - -<p>The crack was created by Reloaded group, a team of hackers who specialize in cracking games and software. They released the crack in January 2015, shortly after the game was launched. The crack works on all Windows systems, including Windows 7 and up.</p> - -<p>The crack is based on the v1.0 version of the game, which is the first release. However, you can also update the game to v1.3 using a patch that is available online. The patch fixes some bugs and improves the performance of the game.</p> - -<h2>How to Download and Play Heroes Of Might And Magic 3 Hd Edition Crack 89?</h2> - -<p>If you want to download and play Heroes Of Might And Magic 3 Hd Edition Crack 89, you will need to follow these steps:</p> -<p></p> - -<ol> -<li>Download the cracked version of the game from a reliable source. You can use one of the links below to get the game file, which is about 2.97 GB in size. Make sure you have enough space on your hard drive and a good internet connection.</li> -<ul> -<li><a href="https://megagames.com/fixes/heroes-might-and-magic-3-hd-edition-v10-all-no-dvd-reloaded">MegaGames</a></li> -<li><a href="https://igg-games-site.org/heroes-of-might-and-magic-iii-complete-edition-download-torrents-repacks/">Igg Games Repacks Site</a></li> -<li><a href="https://gametrex.com/heroes-of-might-magic-iii-hd-edition-free-download/">GameTrex</a></li> -<li><a href="https://archive.org/details/homm-3">Archive.org</a></li> -</ul> -<li>After downloading the file, you will need to extract it using a program like WinRAR or 7-Zip. 
You should see a folder named HOMAM.3.HD.V1.0.ALL.RELOADED.NODVD inside.</li> -<li>Open the folder HOMAM.3.HD.V1.0.ALL.RELOADED.NODVD and find the file rld-homam3.iso.</li> -<li>Right-click on the file and select Mount Image or use a program like Daemon Tools to mount it as a virtual drive.</li> -<li>Open the virtual drive and run setup.exe to start the installation process.</li> -<li>Follow the on-screen instructions and choose a destination folder for the game.</li> -<li>Wait for the installation to finish.</li> -<li>Open the folder HOMAM.3.HD.V1.0.ALL.RELOADED.NODVD again and find the folder Crack.</li> -<li>Copy all the files from the folder Crack and paste them into your game installation directory, overwriting any existing files.</li> -<li>Run Heroes3.exe as administrator to launch the game.</li> -<li>Enjoy!</li> -</ol> - -<h2>What are the Benefits of Playing Heroes Of Might And Magic 3 Hd Edition Crack 89?</h2> - -<p>Playing Heroes Of Might And Magic 3 Hd Edition Crack 89 has some benefits over playing</p> -<h2>What are the Features of Heroes Of Might And Magic 3 Hd Edition?</h2> - -<p>Heroes Of Might And Magic 3 Hd Edition is not just a simple port of the original game. It also adds some new features and improvements that make the game more enjoyable and modern. Here are some of them:</p> - -<ul> -<li>A new HD experience: re-live the Heroes III in HD, a true craftsmanship which offers players updated graphics, with wide screen compatibility. You can now enjoy the game in full HD resolution, with enhanced textures, animations, and effects.</li> -<li>Enjoy the critically acclaimed Heroes III gameplay, with 7 exciting campaign scenarios, around 50 skirmish maps, a local multiplayer mode and a map editor. You can play through the original story of Queen Catherine Ironfist and her quest to restore her kingdom, or choose from other campaigns that feature different heroes and factions. You can also create your own maps and scenarios using the built-in editor, or download custom maps from other players.</li> -<li>A new online multiplayer lobby: Now Steamworks compatible, Heroes III offers an online multiplayer lobby, where you can share your experience with the Heroes III community. You can join or host online games with up to 8 players, chat with other players, and use Steam achievements and cloud saves.</li> -<li>All DLCs included: Heroes Of Might And Magic 3 Hd Edition includes all the DLCs from the original game, such as The Restoration of Erathia, Armageddon's Blade, and The Shadow of Death. You can access more than 200 hours of gameplay, with new heroes, creatures, artifacts, spells, and scenarios.</li> -</ul> - -<h2>What are the Reviews of Heroes Of Might And Magic 3 Hd Edition?</h2> - -<p>Heroes Of Might And Magic 3 Hd Edition has received mixed reviews from critics and players. Some praised the game for its nostalgic value, its improved graphics, and its online multiplayer mode. Others criticized the game for its high price, its lack of new content, and its technical issues.</p> - -<p>Here are some of the reviews from different sources:</p> - -<blockquote> -<p>"Heroes of Might & Magic III - HD Edition is a great way to relive one of the best strategy games ever created." - IGN</p> -</blockquote> - -<blockquote> -<p>"Heroes of Might & Magic III - HD Edition is a lazy port that does nothing but add support for higher resolutions." - PC Gamer</p> -</blockquote> - -<blockquote> -<p>"Heroes of Might & Magic III - HD Edition is a must-have for fans of the series and newcomers alike." 
- GameSpot</p> -</blockquote> - -<blockquote> -<p>"Heroes of Might & Magic III - HD Edition is a disappointing remaster that fails to deliver on its promises." - Eurogamer</p> -</blockquote> - -<h2>Conclusion</h2> - -<p>Heroes Of Might And Magic 3 Hd Edition Crack 89 is a cracked version of the game that allows you to play it for free without any DRM restrictions. It also lets you enjoy improved graphics, online multiplayer mode, and all DLCs from the original game. However, you might encounter some technical issues or bugs while playing it.</p> - -<p>If you want to download and play Heroes Of Might And Magic 3 Hd Edition Crack 89, you can follow the steps we have provided in this article. However, we do not condone piracy or illegal downloading of games. If you like this game, we recommend you to buy it from Steam or use a CD to support the developers and publishers.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/inreVtussa/clothingai/Examples/APN2 6C101.md b/spaces/inreVtussa/clothingai/Examples/APN2 6C101.md deleted file mode 100644 index dff0d400b6f42102b12f9df2fba46d9a3d48db82..0000000000000000000000000000000000000000 --- a/spaces/inreVtussa/clothingai/Examples/APN2 6C101.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>APN2 6C101</h2><br /><p><b><b>Download File</b> ⏩ <a href="https://tiurll.com/2uCiYN">https://tiurll.com/2uCiYN</a></b></p><br /><br /> -<br /> -Download apn2 » apn2 could be available for fast direct download ... NET] Honda Navigation v3.60 West Europe APN2 6C101 Patched: 8 years3872 MB00. 
1fdad05405<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/james-oldfield/PandA/networks/genforce/runners/base_encoder_runner.py b/spaces/james-oldfield/PandA/networks/genforce/runners/base_encoder_runner.py deleted file mode 100644 index 0b2433e641fa44ce81ea2aefdd9d950abd0af051..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/genforce/runners/base_encoder_runner.py +++ /dev/null @@ -1,152 +0,0 @@ -# python3.7 -"""Contains the base class for Encoder (GAN Inversion) runner.""" - -import os -import shutil - -import torch -import torch.distributed as dist - -from utils.visualizer import HtmlPageVisualizer -from utils.visualizer import get_grid_shape -from utils.visualizer import postprocess_image -from utils.visualizer import save_image -from utils.visualizer import load_image -from .base_runner import BaseRunner - -__all__ = ['BaseEncoderRunner'] - - -class BaseEncoderRunner(BaseRunner): - """Defines the base class for Encoder runner.""" - - def __init__(self, config, logger): - super().__init__(config, logger) - self.inception_model = None - - def build_models(self): - super().build_models() - assert 'encoder' in self.models - assert 'generator' in self.models - assert 'discriminator' in self.models - - self.resolution = self.models['generator'].resolution - self.G_kwargs_train = self.config.modules['generator'].get( - 'kwargs_train', dict()) - self.G_kwargs_val = self.config.modules['generator'].get( - 'kwargs_val', dict()) - self.D_kwargs_train = self.config.modules['discriminator'].get( - 'kwargs_train', dict()) - self.D_kwargs_val = self.config.modules['discriminator'].get( - 'kwargs_val', dict()) - - def train_step(self, data, **train_kwargs): - raise NotImplementedError('Should be implemented in derived class.') - - def val(self, **val_kwargs): - self.synthesize(**val_kwargs) - - def synthesize(self, - num, - html_name=None, - save_raw_synthesis=False): - """Synthesizes images. - - Args: - num: Number of images to synthesize. - z: Latent codes used for generation. If not specified, this function - will sample latent codes randomly. (default: None) - html_name: Name of the output html page for visualization. If not - specified, no visualization page will be saved. (default: None) - save_raw_synthesis: Whether to save raw synthesis on the disk. - (default: False) - """ - if not html_name and not save_raw_synthesis: - return - - self.set_mode('val') - - if self.val_loader is None: - self.build_dataset('val') - - temp_dir = os.path.join(self.work_dir, 'synthesize_results') - os.makedirs(temp_dir, exist_ok=True) - - if not num: - return - if num % self.val_batch_size != 0: - num = (num //self.val_batch_size +1)*self.val_batch_size - # TODO: Use same z during the entire training process. 
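-        # Each rank walks every world_size-th sample index below, runs the E(real) -> G(latent) round trip in the configured latent space (z / wp / y), and saves paired *_ori.jpg / *_rec.jpg files for the optional HTML summary.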
- - self.logger.init_pbar() - task1 = self.logger.add_pbar_task('Synthesize', total=num) - - indices = list(range(self.rank, num, self.world_size)) - for batch_idx in range(0, len(indices), self.val_batch_size): - sub_indices = indices[batch_idx:batch_idx + self.val_batch_size] - batch_size = len(sub_indices) - data = next(self.val_loader) - for key in data: - data[key] = data[key][:batch_size].cuda( - torch.cuda.current_device(), non_blocking=True) - - with torch.no_grad(): - real_images = data['image'] - E = self.models['encoder'] - if 'generator_smooth' in self.models: - G = self.get_module(self.models['generator_smooth']) - else: - G = self.get_module(self.models['generator']) - latents = E(real_images) - if self.config.space_of_latent == 'z': - rec_images = G( - latents, **self.G_kwargs_val)['image'] - elif self.config.space_of_latent == 'wp': - rec_images = G.synthesis( - latents, **self.G_kwargs_val)['image'] - elif self.config.space_of_latent == 'y': - G.set_space_of_latent('y') - rec_images = G.synthesis( - latents, **self.G_kwargs_val)['image'] - else: - raise NotImplementedError( - f'Space of latent `{self.config.space_of_latent}` ' - f'is not supported!') - rec_images = postprocess_image( - rec_images.detach().cpu().numpy()) - real_images = postprocess_image( - real_images.detach().cpu().numpy()) - for sub_idx, rec_image, real_image in zip( - sub_indices, rec_images, real_images): - save_image(os.path.join(temp_dir, f'{sub_idx:06d}_rec.jpg'), - rec_image) - save_image(os.path.join(temp_dir, f'{sub_idx:06d}_ori.jpg'), - real_image) - self.logger.update_pbar(task1, batch_size * self.world_size) - - dist.barrier() - if self.rank != 0: - return - - if html_name: - task2 = self.logger.add_pbar_task('Visualize', total=num) - row, col = get_grid_shape(num * 2) - if row % 2 != 0: - row, col = col, row - html = HtmlPageVisualizer(num_rows=row, num_cols=col) - for image_idx in range(num): - rec_image = load_image( - os.path.join(temp_dir, f'{image_idx:06d}_rec.jpg')) - real_image = load_image( - os.path.join(temp_dir, f'{image_idx:06d}_ori.jpg')) - row_idx, col_idx = divmod(image_idx, html.num_cols) - html.set_cell(2*row_idx, col_idx, image=real_image, - text=f'Sample {image_idx:06d}_ori') - html.set_cell(2*row_idx+1, col_idx, image=rec_image, - text=f'Sample {image_idx:06d}_rec') - self.logger.update_pbar(task2, 1) - html.save(os.path.join(self.work_dir, html_name)) - if not save_raw_synthesis: - shutil.rmtree(temp_dir) - - self.logger.close_pbar() diff --git a/spaces/james-oldfield/PandA/networks/stylegan3/viz/latent_widget.py b/spaces/james-oldfield/PandA/networks/stylegan3/viz/latent_widget.py deleted file mode 100644 index 32c743bdbcac8a12425f8e5b32b9ea2d4612365d..0000000000000000000000000000000000000000 --- a/spaces/james-oldfield/PandA/networks/stylegan3/viz/latent_widget.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
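-# LatentWidget: imgui control panel that turns a draggable (and optionally animated) 2-D latent position into viz.args.w0_seeds, a list of [seed, weight] pairs blended downstream.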
- -import numpy as np -import imgui -import dnnlib -from gui_utils import imgui_utils - -#---------------------------------------------------------------------------- - -class LatentWidget: - def __init__(self, viz): - self.viz = viz - self.latent = dnnlib.EasyDict(x=0, y=0, anim=False, speed=0.25) - self.latent_def = dnnlib.EasyDict(self.latent) - self.step_y = 100 - - def drag(self, dx, dy): - viz = self.viz - self.latent.x += dx / viz.font_size * 4e-2 - self.latent.y += dy / viz.font_size * 4e-2 - - @imgui_utils.scoped_by_object_id - def __call__(self, show=True): - viz = self.viz - if show: - imgui.text('Latent') - imgui.same_line(viz.label_w) - seed = round(self.latent.x) + round(self.latent.y) * self.step_y - with imgui_utils.item_width(viz.font_size * 8): - changed, seed = imgui.input_int('##seed', seed) - if changed: - self.latent.x = seed - self.latent.y = 0 - imgui.same_line(viz.label_w + viz.font_size * 8 + viz.spacing) - frac_x = self.latent.x - round(self.latent.x) - frac_y = self.latent.y - round(self.latent.y) - with imgui_utils.item_width(viz.font_size * 5): - changed, (new_frac_x, new_frac_y) = imgui.input_float2('##frac', frac_x, frac_y, format='%+.2f', flags=imgui.INPUT_TEXT_ENTER_RETURNS_TRUE) - if changed: - self.latent.x += new_frac_x - frac_x - self.latent.y += new_frac_y - frac_y - imgui.same_line(viz.label_w + viz.font_size * 13 + viz.spacing * 2) - _clicked, dragging, dx, dy = imgui_utils.drag_button('Drag', width=viz.button_w) - if dragging: - self.drag(dx, dy) - imgui.same_line(viz.label_w + viz.font_size * 13 + viz.button_w + viz.spacing * 3) - _clicked, self.latent.anim = imgui.checkbox('Anim', self.latent.anim) - imgui.same_line(round(viz.font_size * 27.7)) - with imgui_utils.item_width(-1 - viz.button_w * 2 - viz.spacing * 2), imgui_utils.grayed_out(not self.latent.anim): - changed, speed = imgui.slider_float('##speed', self.latent.speed, -5, 5, format='Speed %.3f', power=3) - if changed: - self.latent.speed = speed - imgui.same_line() - snapped = dnnlib.EasyDict(self.latent, x=round(self.latent.x), y=round(self.latent.y)) - if imgui_utils.button('Snap', width=viz.button_w, enabled=(self.latent != snapped)): - self.latent = snapped - imgui.same_line() - if imgui_utils.button('Reset', width=-1, enabled=(self.latent != self.latent_def)): - self.latent = dnnlib.EasyDict(self.latent_def) - - if self.latent.anim: - self.latent.x += viz.frame_delta * self.latent.speed - viz.args.w0_seeds = [] # [[seed, weight], ...] 
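-        # Bilinear blend over the four integer lattice seeds surrounding the fractional (x, y) position: each weight falls off linearly with distance along both axes, and zero-weight corners are skipped.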
- for ofs_x, ofs_y in [[0, 0], [1, 0], [0, 1], [1, 1]]: - seed_x = np.floor(self.latent.x) + ofs_x - seed_y = np.floor(self.latent.y) + ofs_y - seed = (int(seed_x) + int(seed_y) * self.step_y) & ((1 << 32) - 1) - weight = (1 - abs(self.latent.x - seed_x)) * (1 - abs(self.latent.y - seed_y)) - if weight > 0: - viz.args.w0_seeds.append([seed, weight]) - -#---------------------------------------------------------------------------- diff --git a/spaces/jayyd/fashion-collect/app.py b/spaces/jayyd/fashion-collect/app.py deleted file mode 100644 index 1eef07b82a1e0d7dc6de8292956bc8dd050ee0e4..0000000000000000000000000000000000000000 --- a/spaces/jayyd/fashion-collect/app.py +++ /dev/null @@ -1,216 +0,0 @@ -"""Provide a text query describing what you are looking for and get back out images with links!""" -import argparse -import logging -import os -import wandb -import gradio as gr - -import zipfile -import pickle -from pathlib import Path -from typing import List, Any, Dict -from PIL import Image -from pathlib import Path - -from transformers import AutoTokenizer -from sentence_transformers import SentenceTransformer, util -from multilingual_clip import pt_multilingual_clip -import torch - -from pathlib import Path -from typing import Callable, Dict, List, Tuple -from PIL.Image import Image - -print(__file__) - -os.environ["CUDA_VISIBLE_DEVICES"] = "" # do not use GPU - -logging.basicConfig(level=logging.INFO) -DEFAULT_APPLICATION_NAME = "fashion-aggregator" - -APP_DIR = Path(__file__).resolve().parent # what is the directory for this application? -FAVICON = APP_DIR / "t-shirt_1f455.png" # path to a small image for display in browser tab and social media -README = APP_DIR / "README.md" # path to an app readme file in HTML/markdown - -DEFAULT_PORT = 11700 - -EMBEDDINGS_DIR = "artifacts/img-embeddings" -EMBEDDINGS_FILE = os.path.join(EMBEDDINGS_DIR, "embeddings.pkl") -RAW_PHOTOS_DIR = "artifacts/raw-photos" - -# Download image embeddings and raw photos -wandb.login(key="4b5a23a662b20fdd61f2aeb5032cf56fdce278a4") # os.getenv('wandb') -api = wandb.Api() -artifact_embeddings = api.artifact("ryparmar/fashion-aggregator/unimoda-images:v1") -artifact_embeddings.download(EMBEDDINGS_DIR) -artifact_raw_photos = api.artifact("ryparmar/fashion-aggregator/unimoda-raw-images:v1") -artifact_raw_photos.download("artifacts") - -with zipfile.ZipFile("artifacts/unimoda.zip", 'r') as zip_ref: - zip_ref.extractall(RAW_PHOTOS_DIR) - - -class TextEncoder: - """Encodes the given text""" - - def __init__(self, model_path="M-CLIP/XLM-Roberta-Large-Vit-B-32"): - self.model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_path) - self.tokenizer = AutoTokenizer.from_pretrained(model_path) - - @torch.no_grad() - def encode(self, query: str) -> torch.Tensor: - """Predict/infer text embedding for a given query.""" - query_emb = self.model.forward([query], self.tokenizer) - return query_emb - - -class ImageEnoder: - """Encodes the given image""" - - def __init__(self, model_path="clip-ViT-B-32"): - self.model = SentenceTransformer(model_path) - - @torch.no_grad() - def encode(self, image: Image) -> torch.Tensor: - """Predict/infer text embedding for a given query.""" - image_emb = self.model.encode([image], convert_to_tensor=True, show_progress_bar=False) - return image_emb - - -class Retriever: - """Retrieves relevant images for a given text embedding.""" - - def __init__(self, image_embeddings_path=None): - self.text_encoder = TextEncoder() - self.image_encoder = ImageEnoder() - - with open(image_embeddings_path, 
"rb") as file: - self.image_names, self.image_embeddings = pickle.load(file) - self.image_names = [ - img_name.replace("fashion-aggregator/fashion_aggregator/data/photos/", "") - for img_name in self.image_names - ] - print("Images:", len(self.image_names)) - - @torch.no_grad() - def predict(self, text_query: str, k: int = 10) -> List[Any]: - """Return top-k relevant items for a given embedding""" - query_emb = self.text_encoder.encode(text_query) - relevant_images = util.semantic_search(query_emb, self.image_embeddings, top_k=k)[0] - return relevant_images - - @torch.no_grad() - def search_images(self, text_query: str, k: int = 6) -> Dict[str, List[Any]]: - """Return top-k relevant images for a given embedding""" - images = self.predict(text_query, k) - paths_and_scores = {"path": [], "score": []} - for img in images: - paths_and_scores["path"].append(os.path.join(RAW_PHOTOS_DIR, self.image_names[img["corpus_id"]])) - paths_and_scores["score"].append(img["score"]) - return paths_and_scores - - -def main(args): - predictor = PredictorBackend(url=args.model_url) - frontend = make_frontend(predictor.run, flagging=args.flagging, gantry=args.gantry, app_name=args.application) - frontend.launch( - # server_name="0.0.0.0", # make server accessible, binding all interfaces # noqa: S104 - # server_port=args.port, # set a port to bind to, failing if unavailable - # share=False, # should we create a (temporary) public link on https://gradio.app? - # favicon_path=FAVICON, # what icon should we display in the address bar? - ) - - -def make_frontend( - fn: Callable[[Image], str], flagging: bool = False, gantry: bool = False, app_name: str = "fashion-aggregator" -): - """Creates a gradio.Interface frontend for text to image search function.""" - - allow_flagging = "never" - - # build a basic browser interface to a Python function - frontend = gr.Interface( - fn=fn, # which Python function are we interacting with? - outputs=gr.Gallery(label="Relevant Items"), - # what input widgets does it need? we configure an image widget - inputs=gr.components.Textbox(label="Item Description"), - title="📝 Text2Image 👕", # what should we display at the top of the page? - thumbnail=FAVICON, # what should we display when the link is shared, e.g. on social media? - description=__doc__, # what should we display just above the interface? - cache_examples=False, # should we cache those inputs for faster inference? slows down start - allow_flagging=allow_flagging, # should we show users the option to "flag" outputs? - flagging_options=["incorrect", "offensive", "other"], # what options do users have for feedback? - ) - return frontend - - -class PredictorBackend: - """Interface to a backend that serves predictions. - - To communicate with a backend accessible via a URL, provide the url kwarg. - - Otherwise, runs a predictor locally. 
- """ - - def __init__(self, url=None): - if url is not None: - self.url = url - self._predict = self._predict_from_endpoint - else: - model = Retriever(image_embeddings_path=EMBEDDINGS_FILE) - self._predict = model.predict - self._search_images = model.search_images - - def run(self, text: str): - pred, metrics = self._predict_with_metrics(text) - self._log_inference(pred, metrics) - return pred - - def _predict_with_metrics(self, text: str) -> Tuple[List[str], Dict[str, float]]: - paths_and_scores = self._search_images(text) - metrics = {"mean_score": sum(paths_and_scores["score"]) / len(paths_and_scores["score"])} - return paths_and_scores["path"], metrics - - def _log_inference(self, pred, metrics): - for key, value in metrics.items(): - logging.info(f"METRIC {key} {value}") - logging.info(f"PRED >begin\n{pred}\nPRED >end") - - -def _make_parser(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - "--model_url", - default=None, - type=str, - help="Identifies a URL to which to send image data. Data is base64-encoded, converted to a utf-8 string, and then set via a POST request as JSON with the key 'image'. Default is None, which instead sends the data to a model running locally.", - ) - parser.add_argument( - "--port", - default=DEFAULT_PORT, - type=int, - help=f"Port on which to expose this server. Default is {DEFAULT_PORT}.", - ) - parser.add_argument( - "--flagging", - action="store_true", - help="Pass this flag to allow users to 'flag' model behavior and provide feedback.", - ) - parser.add_argument( - "--gantry", - action="store_true", - help="Pass --flagging and this flag to log user feedback to Gantry. Requires GANTRY_API_KEY to be defined as an environment variable.", - ) - parser.add_argument( - "--application", - default=DEFAULT_APPLICATION_NAME, - type=str, - help=f"Name of the Gantry application to which feedback should be logged, if --gantry and --flagging are passed. Default is {DEFAULT_APPLICATION_NAME}.", - ) - return parser - - -if __name__ == "__main__": - parser = _make_parser() - args = parser.parse_args() - main(args) diff --git a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/modeling/criterion.py b/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/modeling/criterion.py deleted file mode 100644 index f4d5b71242f87c6f67463f9c31f873a742f3e5c7..0000000000000000000000000000000000000000 --- a/spaces/jcenaa/Segment-Any-RGBD/open_vocab_seg/modeling/criterion.py +++ /dev/null @@ -1,229 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/models/detr.py -# Copyright (c) Meta Platforms, Inc. All Rights Reserved - -""" -MaskFormer criterion. -""" -import torch -import torch.nn.functional as F -from torch import nn - -from detectron2.utils.comm import get_world_size - -from ..utils.misc import is_dist_avail_and_initialized, nested_tensor_from_tensor_list - - -def dice_loss(inputs, targets, num_masks): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). 
- """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * (inputs * targets).sum(-1) - denominator = inputs.sum(-1) + targets.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - return loss.sum() / num_masks - - -def sigmoid_focal_loss( - inputs, targets, num_masks, alpha: float = 0.25, gamma: float = 2 -): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - return loss.mean(1).sum() / num_masks - - -class SetCriterion(nn.Module): - """This class computes the loss for DETR. - The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model - 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) - """ - - def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): - """Create the criterion. - Parameters: - num_classes: number of object categories, omitting the special no-object category - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. 
- """ - super().__init__() - self.num_classes = num_classes - self.matcher = matcher - self.weight_dict = weight_dict - self.eos_coef = eos_coef - self.losses = losses - if eos_coef > 0: - - empty_weight = torch.ones(self.num_classes + 1) - - empty_weight[-1] = self.eos_coef - self.register_buffer("empty_weight", empty_weight) - self.use_ignore_idx = False - else: - self.use_ignore_idx = True - self.cur_target = [] - - def loss_labels(self, outputs, targets, indices, num_masks): - """Classification loss (NLL) - targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] - """ - assert "pred_logits" in outputs - src_logits = outputs["pred_logits"] - - idx = self._get_src_permutation_idx(indices) - target_classes_o = torch.cat( - [t["labels"][J] for t, (_, J) in zip(targets, indices)] - ) - target_classes = torch.full( - src_logits.shape[:2], - self.num_classes, - dtype=torch.int64, - device=src_logits.device, - ) - target_classes[idx] = target_classes_o - if self.use_ignore_idx: - loss_ce = F.cross_entropy( - src_logits.transpose(1, 2), - target_classes, - ignore_index=self.num_classes, - ) - else: - if "empty_weight" in outputs: - empty_weight = torch.cat( - [outputs["empty_weight"], self.empty_weight[-1:]] - ).detach() - else: - empty_weight = self.empty_weight - loss_ce = F.cross_entropy( - src_logits.transpose(1, 2), target_classes, empty_weight - ) - losses = {"loss_ce": loss_ce} - return losses - - def loss_masks(self, outputs, targets, indices, num_masks): - """Compute the losses related to the masks: the focal loss and the dice loss. - targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] - """ - assert "pred_masks" in outputs - - src_idx = self._get_src_permutation_idx(indices) - tgt_idx = self._get_tgt_permutation_idx(indices) - src_masks = outputs["pred_masks"] - src_masks = src_masks[src_idx] - masks = [t["masks"] for t in targets] - # TODO use valid to mask invalid areas due to padding in loss - target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() - target_masks = target_masks.to(src_masks) - target_masks = target_masks[tgt_idx] - - # upsample predictions to the target size - src_masks = F.interpolate( - src_masks[:, None], - size=target_masks.shape[-2:], - mode="bilinear", - align_corners=False, - ) - src_masks = src_masks[:, 0].flatten(1) - - target_masks = target_masks.flatten(1) - target_masks = target_masks.view(src_masks.shape) - losses = { - "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_masks), - "loss_dice": dice_loss(src_masks, target_masks, num_masks), - } - return losses - - def _get_src_permutation_idx(self, indices): - # permute predictions following indices - batch_idx = torch.cat( - [torch.full_like(src, i) for i, (src, _) in enumerate(indices)] - ) - src_idx = torch.cat([src for (src, _) in indices]) - return batch_idx, src_idx - - def _get_tgt_permutation_idx(self, indices): - # permute targets following indices - batch_idx = torch.cat( - [torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)] - ) - tgt_idx = torch.cat([tgt for (_, tgt) in indices]) - return batch_idx, tgt_idx - - def get_loss(self, loss, outputs, targets, indices, num_masks): - loss_map = {"labels": self.loss_labels, "masks": self.loss_masks} - assert loss in loss_map, f"do you really want to compute {loss} loss?" - return loss_map[loss](outputs, targets, indices, num_masks) - - def forward(self, outputs, targets): - """This performs the loss computation. 
- Parameters: - outputs: dict of tensors, see the output specification of the model for the format - targets: list of dicts, such that len(targets) == batch_size. - The expected keys in each dict depends on the losses applied, see each loss' doc - """ - outputs_without_aux = {k: v for k, v in outputs.items() if k != "aux_outputs"} - - # Retrieve the matching between the outputs of the last layer and the targets - indices = self.matcher(outputs_without_aux, targets) - - # Compute the average number of target boxes accross all nodes, for normalization purposes - num_masks = sum(len(t["labels"]) for t in targets) - num_masks = torch.as_tensor( - [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device - ) - if is_dist_avail_and_initialized(): - torch.distributed.all_reduce(num_masks) - num_masks = torch.clamp(num_masks / get_world_size(), min=1).item() - - # Compute all the requested losses - losses = {} - for loss in self.losses: - losses.update(self.get_loss(loss, outputs, targets, indices, num_masks)) - - # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. - if "aux_outputs" in outputs: - for i, aux_outputs in enumerate(outputs["aux_outputs"]): - indices = self.matcher(aux_outputs, targets) - for loss in self.losses: - l_dict = self.get_loss( - loss, aux_outputs, targets, indices, num_masks - ) - l_dict = {k + f"_{i}": v for k, v in l_dict.items()} - losses.update(l_dict) - - return losses - - def clean_buffer(self): - self.cur_target = [] diff --git a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/distributions/distributions.py b/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/distributions/distributions.py deleted file mode 100644 index f2b8ef901130efc171aa69742ca0244d94d3f2e9..0000000000000000000000000000000000000000 --- a/spaces/jennysun/jwsun-multisubject-render-model/gligen/ldm/modules/distributions/distributions.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch -import numpy as np - - -class AbstractDistribution: - def sample(self): - raise NotImplementedError() - - def mode(self): - raise NotImplementedError() - - -class DiracDistribution(AbstractDistribution): - def __init__(self, value): - self.value = value - - def sample(self): - return self.value - - def mode(self): - return self.value - - -class DiagonalGaussianDistribution(object): - def __init__(self, parameters, deterministic=False): - self.parameters = parameters - self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) - self.logvar = torch.clamp(self.logvar, -30.0, 20.0) - self.deterministic = deterministic - self.std = torch.exp(0.5 * self.logvar) - self.var = torch.exp(self.logvar) - if self.deterministic: - self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) - - def sample(self): - x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) - return x - - def kl(self, other=None): - if self.deterministic: - return torch.Tensor([0.]) - else: - if other is None: - return 0.5 * torch.sum(torch.pow(self.mean, 2) - + self.var - 1.0 - self.logvar, - dim=[1, 2, 3]) - else: - return 0.5 * torch.sum( - torch.pow(self.mean - other.mean, 2) / other.var - + self.var / other.var - 1.0 - self.logvar + other.logvar, - dim=[1, 2, 3]) - - def nll(self, sample, dims=[1,2,3]): - if self.deterministic: - return torch.Tensor([0.]) - logtwopi = np.log(2.0 * np.pi) - return 0.5 * torch.sum( - logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, - dim=dims) - - 
def mode(self): - return self.mean - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 - Compute the KL divergence between two gaussians. - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, torch.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for torch.exp(). - logvar1, logvar2 = [ - x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + torch.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) - ) diff --git a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/SuppleFig5c_Pre_subsystem.py b/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/SuppleFig5c_Pre_subsystem.py deleted file mode 100644 index ad12ce27ec82fc67ed8de7903a61f928212ec6b7..0000000000000000000000000000000000000000 --- a/spaces/jie1/succ1/DLKcat/DeeplearningApproach/Code/analysis/SuppleFig5c_Pre_subsystem.py +++ /dev/null @@ -1,493 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -# Author: LE YUAN -# Date: 2021-01-14 - -# This python script is to classify subsystem base on EC number, Kcat and subsystem mapping for the data predicted by deep learning - -import json -import math -import model -import torch -import pickle -import seaborn as sns -import matplotlib.pyplot as plt -from matplotlib import rc -import pandas as pd -import numpy as np -import statsmodels.api as sm -from rdkit import Chem -from Bio import SeqIO -from collections import defaultdict -from scipy import stats -from sklearn.metrics import mean_squared_error,r2_score - - -fingerprint_dict = model.load_pickle('../../Data/input/fingerprint_dict.pickle') -atom_dict = model.load_pickle('../../Data/input/atom_dict.pickle') -bond_dict = model.load_pickle('../../Data/input/bond_dict.pickle') -edge_dict = model.load_pickle('../../Data/input/edge_dict.pickle') -word_dict = model.load_pickle('../../Data/input/sequence_dict.pickle') - -proteins = list() -compounds = list() -adjacencies = list() - -def split_sequence(sequence, ngram): - sequence = '-' + sequence + '=' - # print(sequence) - words = [word_dict[sequence[i:i+ngram]] for i in range(len(sequence)-ngram+1)] - return np.array(words) - # return word_dict - -def create_atoms(mol): - """Create a list of atom (e.g., hydrogen and oxygen) IDs - considering the aromaticity.""" - # atom_dict = defaultdict(lambda: len(atom_dict)) - atoms = [a.GetSymbol() for a in mol.GetAtoms()] - # print(atoms) - for a in mol.GetAromaticAtoms(): - i = a.GetIdx() - atoms[i] = (atoms[i], 'aromatic') - atoms = [atom_dict[a] for a in atoms] - return np.array(atoms) - -def create_ijbonddict(mol): - """Create a dictionary, which each key is a node ID - and each value is the tuples of its neighboring node - and bond (e.g., single and double) IDs.""" - # bond_dict = defaultdict(lambda: len(bond_dict)) - i_jbond_dict = defaultdict(lambda: []) - for b in mol.GetBonds(): - i, j = b.GetBeginAtomIdx(), b.GetEndAtomIdx() - bond = bond_dict[str(b.GetBondType())] - i_jbond_dict[i].append((j, bond)) - i_jbond_dict[j].append((i, bond)) - return i_jbond_dict - -def 
extract_fingerprints(atoms, i_jbond_dict, radius): - """Extract the r-radius subgraphs (i.e., fingerprints) - from a molecular graph using Weisfeiler-Lehman algorithm.""" - - # fingerprint_dict = defaultdict(lambda: len(fingerprint_dict)) - # edge_dict = defaultdict(lambda: len(edge_dict)) - - if (len(atoms) == 1) or (radius == 0): - fingerprints = [fingerprint_dict[a] for a in atoms] - - else: - nodes = atoms - i_jedge_dict = i_jbond_dict - - for _ in range(radius): - - """Update each node ID considering its neighboring nodes and edges - (i.e., r-radius subgraphs or fingerprints).""" - fingerprints = [] - for i, j_edge in i_jedge_dict.items(): - neighbors = [(nodes[j], edge) for j, edge in j_edge] - fingerprint = (nodes[i], tuple(sorted(neighbors))) - fingerprints.append(fingerprint_dict[fingerprint]) - nodes = fingerprints - - """Also update each edge ID considering two nodes - on its both sides.""" - _i_jedge_dict = defaultdict(lambda: []) - for i, j_edge in i_jedge_dict.items(): - for j, edge in j_edge: - both_side = tuple(sorted((nodes[i], nodes[j]))) - edge = edge_dict[(both_side, edge)] - _i_jedge_dict[i].append((j, edge)) - i_jedge_dict = _i_jedge_dict - - return np.array(fingerprints) - -def create_adjacency(mol): - adjacency = Chem.GetAdjacencyMatrix(mol) - return np.array(adjacency) - -def dump_dictionary(dictionary, filename): - with open(filename, 'wb') as file: - pickle.dump(dict(dictionary), file) - -def load_tensor(file_name, dtype): - return [dtype(d).to(device) for d in np.load(file_name + '.npy', allow_pickle=True)] - -class Predictor(object): - def __init__(self, model): - self.model = model - - def predict(self, data): - predicted_value = self.model.forward(data) - - return predicted_value - -def deeplearning() : - with open('../../Data/database/Kcat_combination_0918_wildtype_mutant.json', 'r') as infile : - Kcat_data = json.load(infile) - - fingerprint_dict = model.load_pickle('../../Data/input/fingerprint_dict.pickle') - atom_dict = model.load_pickle('../../Data/input/atom_dict.pickle') - bond_dict = model.load_pickle('../../Data/input/bond_dict.pickle') - word_dict = model.load_pickle('../../Data/input/sequence_dict.pickle') - n_fingerprint = len(fingerprint_dict) - n_word = len(word_dict) - print(n_fingerprint) # 3958 - print(n_word) # 8542 - - radius=2 - ngram=3 - # n_fingerprint = 3958 - # n_word = 8542 - - dim=10 - layer_gnn=3 - side=5 - window=11 - layer_cnn=3 - layer_output=3 - lr=1e-3 - lr_decay=0.5 - decay_interval=10 - weight_decay=1e-6 - iteration=100 - - if torch.cuda.is_available(): - device = torch.device('cuda') - else: - device = torch.device('cpu') - - # torch.manual_seed(1234) - Kcat_model = model.KcatPrediction(device, n_fingerprint, n_word, 2*dim, layer_gnn, window, layer_cnn, layer_output).to(device) - Kcat_model.load_state_dict(torch.load('../../Results/output/all--radius2--ngram3--dim20--layer_gnn3--window11--layer_cnn3--layer_output3--lr1e-3--lr_decay0.5--decay_interval10--weight_decay1e-6--iteration50', map_location=device)) - # print(state_dict.keys()) - # model.eval() - predictor = Predictor(Kcat_model) - - print('It\'s time to start the prediction!') - print('-----------------------------------') - - # prediction = predictor.predict(inputs) - - i = 0 - x = list() - y = list() - x1 = list() - y1 = list() - new_data = list() - for data in Kcat_data : - smiles = data['Smiles'] - sequence = data['Sequence'] - # print(smiles) - Kcat = data['Value'] - enzyme_type = data['Type'] - if "." 
not in smiles and float(Kcat) > 0 and enzyme_type == 'wildtype': - i += 1 - print('This is', i, '---------------------------------------') - - try : - - mol = Chem.AddHs(Chem.MolFromSmiles(smiles)) - atoms = create_atoms(mol) - # print(atoms) - i_jbond_dict = create_ijbonddict(mol) - # print(i_jbond_dict) - - fingerprints = extract_fingerprints(atoms, i_jbond_dict, radius) - # print(fingerprints) - # compounds.append(fingerprints) - - adjacency = create_adjacency(mol) - # print(adjacency) - # adjacencies.append(adjacency) - - words = split_sequence(sequence,ngram) - # print(words) - # proteins.append(words) - - fingerprints = torch.LongTensor(fingerprints) - adjacency = torch.FloatTensor(adjacency) - words = torch.LongTensor(words) - - inputs = [fingerprints, adjacency, words] - - value = float(data['Value']) - print(value) - # print(type(value)) - - - prediction = predictor.predict(inputs) - Kcat_log_value = prediction.item() - Kcat_value = math.pow(2,Kcat_log_value) - print(Kcat_value) - # print(type(Kcat_value)) - - data['Value'] = Kcat_value - - new_data.append(data) - - except : - continue - - # print(len(new_data)) - # print(new_data[:2]) - - return new_data - -# Data1 (Data used in deep learning workflow) -def EC_Kcat() : - - datasets = deeplearning() - - print(len(datasets)) # - - EC_Kcat = dict() - for data in datasets : - # print(data) - EC_Number = data['ECNumber'] - try : - if EC_Kcat[EC_Number] and float(data['Value']) > 0 : - value = math.log10(float(data['Value'])) - # if float(data['Value']) >= 1e-3 and float(data['Value']) <= 1e3 : - EC_Kcat[EC_Number].append(value) # math.log10(float(data['Value'])) float(data['Value']) - except : - if float(data['Value']) > 0 : - Kcat = list() - value = math.log10(float(data['Value'])) - # if float(data['Value']) >= 1e-3 and float(data['Value']) <= 1e3 : - Kcat.append(value) - # print(len(Kcat)) - # print(Kcat) - EC_Kcat[EC_Number] = Kcat - - return EC_Kcat - -def EC_subsystem() : - with open('../../Data/subsystem/module_ec.txt', 'r') as infile : - datasets = infile.readlines() - - print(len(datasets)) # 2200 - - metabolism_types = list() - types_abbre = { - 'Primary - Carbohydrate & Energy Metabolism': 'Primary-CE', - 'Secondary_other': 'Secondary_other', - 'Intermediate': 'Intermediate', - 'Secondary': 'Secondary', - 'Primary - amino acids, fatty acids and nucleotides': 'Primary-AFN', - 'x': 'x' - } - - types_EC = dict() - for data in datasets : - # print(data) - line = data.strip().split('\t') - # print(line) - metabolism_types.append(line[2]) - abbre = types_abbre[line[2]] - # EC_types[line[1][2:]] = line[2] - try : - if types_EC[abbre] : - types_EC[abbre].append(line[1][2:]) - except : - EC_Number = list() - EC_Number.append(line[1][2:]) - types_EC[abbre] = EC_Number - - # metabolism_types = list(set(metabolism_types)) - # print(len(metabolism_types)) # 6 - # print(metabolism_types) - # # ['Primary - Carbohydrate & Energy Metabolism', 'Secondary_other', 'Intermediate', 'Secondary', 'Primary - amino acids, fatty acids and nucleotides', 'x'] - - # print(types_EC) - - print(len(types_EC)) - - i = 0 - new_types_EC = dict() - for types, EC_Number in types_EC.items() : - # print(len(set(EC_Number))) - i += len(set(EC_Number)) - new_types_EC[types] = list(set(EC_Number)) - # print('The type of %s has %s unique EC Number.' 
% (types, len(set(EC_Number)))) - - print('Total EC number is:', i) # 2200, the same with all entries in module.txt - - return new_types_EC - -def median(lst): - sortedLst = sorted(lst) - lstLen = len(lst) - index = (lstLen - 1) // 2 - - if (lstLen % 2): - return sortedLst[index] - else: - return (sortedLst[index] + sortedLst[index + 1])/2.0 - -def Kcat_subsystem() : - EC_Kcat_relation = EC_Kcat() - types_EC = EC_subsystem() - # print(EC_Kcat_relation) - - types_Kcat = dict() - for types, EC_Number in types_EC.items() : - # print(types) - Kcat_values = list() - for EC in EC_Number : - try : - Kcat_values += EC_Kcat_relation[EC] - except : - continue - types_Kcat[types] = Kcat_values - - # # print(len(types_Kcat)) - # for types, Kcat in types_Kcat.items() : - # # print('The type of %s has %s Kcat values.' % (types, len(Kcat))) - # # print('---'*15) - # # print('The median value of %s is %s' %(types, math.log10(median(Kcat)))) - # print('The median value of %s is %s' %(types, median(Kcat))) - - return types_Kcat - -def plot_subsystem_Kcat_counts() : - types_Kcat = Kcat_subsystem() - - for types, Kcat in types_Kcat.items() : - print('The type of %s has %s Kcat values.' % (types, len(Kcat))) - # print('---'*15) - # print('The median value of %s is %s' %(types, math.log10(median(Kcat)))) - # print('The median value of %s is %s' %(types, median(Kcat))) - - # types = ['Primary-CE', 'Primary-AFN', 'Intermediate', 'Secondary'] - types = ['Primary-CE', 'Primary-AFN', 'Intermediate', 'Secondary', 'Secondary_other'] - counts = [len(types_Kcat[subsystem]) for subsystem in types] - - print(types) - print(counts) - - plt.figure(figsize=(3.4,2.5)) - - # https://juejin.im/post/6858230839986421767 - plt.bar(range(len(types)), counts, tick_label=types, width=0.5, alpha=0.8, color='pink', edgecolor='r') - - # ax = plt.axes() - # ax.spines['top'].set_visible(False) - # ax.spines['right'].set_visible(False) - - # plt.ylim(0,600) - # plt.yticks([0,100,200,300,400,500,600]) - - # plt.xlabel('Subsystem type',fontsize=12) - plt.ylabel("Counts", fontsize=12) - plt.xticks(rotation=30, ha='right') - plt.xticks(fontsize=10) - plt.yticks(fontsize=10) - - plt.savefig("../../Results/figures/subsystem_Kcat_counts_4.pdf", dpi=400, bbox_inches='tight') - -# https://my.oschina.net/u/4349448/blog/3448306 python code -def plot_subsystem_distribution() : - types_Kcat = Kcat_subsystem() - - plt.figure(figsize=(1.5,1.5)) - # To solve the 'Helvetica' font cannot be used in PDF file - # https://stackoverflow.com/questions/59845568/the-pdf-backend-does-not-currently-support-the-selected-font - rc('font',**{'family':'serif','serif':['Helvetica']}) - plt.rcParams['pdf.fonttype'] = 42 - - plt.axes([0.12,0.12,0.83,0.83]) - - plt.tick_params(direction='in') - plt.tick_params(which='major',length=1.5) - plt.tick_params(which='major',width=0.4) - - plt.rcParams['font.family'] = 'Helvetica' - - types_color = {'Primary-CE': '#F781BF', 'Intermediate': '#4DAF4A', 'Primary-AFN': '#A65628', 'Secondary': '#3182BD'} - - for types, Kcat in types_Kcat.items() : - if types in ['Primary-CE', 'Intermediate', 'Primary-AFN', 'Secondary'] : - # print('The type of %s has %s Kcat values.' 
% (types, len(Kcat))) - # print('---'*15) '%.4f' %(Kcat_value) - # print('The median value on log10 scale of %s is %.4f' %(types, math.log10(median(Kcat)))) - - print('The median value of %s is %.2f' %(types, math.pow(10, median(Kcat)))) - # print('The median value in log10 of %s is %.2f' %(types, median(Kcat))) - - # The median value of Primary-CE is 1.0723991918615159 - # The median value of Intermediate is 0.4174998131351355 - # The median value of Primary-AFN is 0.577312924135785 - # The median value of x is -0.3737793622447971 - # The median value of Secondary is 0.6365409301885816 - # The median value of Secondary_other is 0.42474643787125355 - - # if types == 'Primary-CE' : - if types in ['Primary-CE', 'Intermediate', 'Primary-AFN', 'Secondary'] : - ecdf = sm.distributions.ECDF(Kcat) - - x = np.linspace(min(Kcat),max(Kcat),50000) # 10000 - y = ecdf(x) - - # plt.xlim(1e-4, 1e4) - - # plt.xscale('log') - # plt.xticks([-2, -1, 0, 1, 2, 3]) - # plt.plot(x,y,linewidth='2',label='Primary-CE') - - plt.plot(x,y,linewidth='0.75',label=types,color=types_color[types]) - - # https://blog.csdn.net/weixin_38314865/article/details/115173371?utm_medium=distribute.pc_relevant.none-task-blog-baidujs_baidulandingword-5&spm=1001.2101.3001.4242 - # https://blog.csdn.net/dta0502/article/details/83827345 - plt.axvline(x=median(Kcat),ymin=0,ymax=0.5,linewidth='0.75',linestyle='--',color=types_color[types]) - - # plt.text(2.2, 0.3, 'Primary-CE', fontweight ="normal", fontsize=6, color='#F781BF') - # plt.text(2.2, 0.2, 'Primary-AFN', fontweight ="normal", fontsize=6, color='#A65628') - # plt.text(2.2, 0.1, 'Secondary', fontweight ="normal", fontsize=6, color='#3182BD') - # plt.text(2.2, 0.0, 'Intermediate', fontweight ="normal", fontsize=6, color='#4DAF4A') - - plt.text(-5, 0.9, 'Primary-CE', fontweight ="normal", fontsize=6, color='#F781BF') - plt.text(-5, 0.8, 'Primary-AFN', fontweight ="normal", fontsize=6, color='#A65628') - plt.text(-5, 0.7, 'Secondary', fontweight ="normal", fontsize=6, color='#3182BD') - plt.text(-5, 0.6, 'Intermediate', fontweight ="normal", fontsize=6, color='#4DAF4A') - - plt.rcParams['font.family'] = 'Helvetica' - - plt.xlabel('Predicted $k$$_\mathregular{cat}$ value', fontsize=7) - plt.ylabel('Cumulative distribution', fontsize=7) - - plt.xticks([-6,-4,-2,0,2,4,6,8]) - - plt.xticks(fontsize=6) - plt.yticks(fontsize=6) - # plt.legend(loc="lower right", frameon=False, prop={"size":6}) - - ax = plt.gca() - ax.spines['bottom'].set_linewidth(0.5) - ax.spines['left'].set_linewidth(0.5) - ax.spines['top'].set_linewidth(0.5) - ax.spines['right'].set_linewidth(0.5) - - plt.savefig("../../Results/figures/SuppleFig5c.pdf", dpi=400, bbox_inches='tight') - - -if __name__ == "__main__" : - # deeplearning() - # EC_subsystem() - # main() - # EC_Kcat() - # Kcat_subsystem() - # plot_subsystem_Kcat_counts() - plot_subsystem_distribution() - - # Results: - # The median value in log10 of Primary-CE is 1.51 - # The median value in log10 of Intermediate is 0.64 - # The median value in log10 of Primary-AFN is 0.86 - # The median value in log10 of Secondary is 0.86 - - # The median value of Primary-CE is 32.06 - # The median value of Intermediate is 4.33 - # The median value of Primary-AFN is 7.21 - # The median value of Secondary is 7.23 diff --git a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/certifi/__main__.py b/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/certifi/__main__.py deleted file mode 100644 index 
8945b5da857f4a7dec2b84f1225f012f6098418c..0000000000000000000000000000000000000000 --- a/spaces/joaopereirajp/livvieChatBot/venv/lib/python3.9/site-packages/certifi/__main__.py +++ /dev/null @@ -1,12 +0,0 @@ -import argparse - -from certifi import contents, where - -parser = argparse.ArgumentParser() -parser.add_argument("-c", "--contents", action="store_true") -args = parser.parse_args() - -if args.contents: - print(contents()) -else: - print(where()) diff --git a/spaces/jpwahle/field-time-diversity/main.py b/spaces/jpwahle/field-time-diversity/main.py deleted file mode 100644 index fafbb7e1d7ebbe1b4c49d0b5568c1bc1ca7391f1..0000000000000000000000000000000000000000 --- a/spaces/jpwahle/field-time-diversity/main.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/ -# All rights reserved. -# Thanks to Mukund Rungta for inspiration on early versions of this demo https://huggingface.co/spaces/mrungta8/CitationalAmnesia - - -import asyncio - -import gradio as gr - -from aclanthology import determine_page_type -from plots import generate_cfdi_plot, generate_maoc_plot -from s2 import (check_s2_id_type, compute_stats_for_acl_author, - compute_stats_for_acl_paper, compute_stats_for_acl_venue, - compute_stats_for_pdf, compute_stats_for_s2_author, - compute_stats_for_s2_paper) - - -def return_clear(): - """Clearing all demo inputs - - Returns: - None - """ - return None, None, None, None, None, None, None, None, None, None, None - - -def create_compute_stats(submit_type=None): - def compute_stats(s2_id=None, pdf_file=None, acl_link=None): - if submit_type == "s2_id" and s2_id: - # Check if s2_id is a paper id or an author id - id_type, author_name = check_s2_id_type(s2_id) - if id_type == "paper": - results = compute_stats_for_s2_paper(s2_id) - results = results + ("paper",) - return plot_and_return_stats(*results) - if id_type == "author": - results = compute_stats_for_s2_author(s2_id, author_name) - results = results + ("author",) - return plot_and_return_stats(*results) - if submit_type == "acl_link" and acl_link: - # Crawl all papers for the author or venue or just the paper if it is a paper link - url_type = determine_page_type(acl_link) - if url_type == "paper": - results = compute_stats_for_acl_paper(acl_link) - results = results + ("paper",) - return plot_and_return_stats(*results) - if url_type == "author": - results = compute_stats_for_acl_author(acl_link) - results = results + ("author",) - return plot_and_return_stats(*results) - if url_type == "venue": - results = compute_stats_for_acl_venue(acl_link) - results = results + ("proceedings",) - return plot_and_return_stats(*results) - if submit_type == "pdf_file" and pdf_file: - # Compute the citation field diversity index and citation age diversity index - results = asyncio.run(compute_stats_for_pdf(pdf_file)) - results = results + ("paper",) - return plot_and_return_stats(*results) - return None, None, None, None, None, None, None, None - - return compute_stats - - -def plot_and_return_stats( - title_authors, - num_references, - field_counts, - year_title_dict, - cfdi, - cadi, - maoc, - compute_type, -): - """ - Plots the data and returns statistics. - - Args: - title_authors (str): The title and authors of the paper. - num_references (int): The number of references in the paper. - field_counts (dict): A dictionary containing the count of each field. - year_title_dict (dict): A dictionary containing the year and title of each paper. 
- cfdi (list): A list of tuples containing the citation field and the number of papers in that field. - cadi (list): A list of tuples containing the citation author and the number of papers by that author. - maoc (list): A list of tuples containing the main author and the number of papers by that author. - - Returns: - tuple: A tuple containing the title and authors of the paper, the number of references, the top 3 most cited fields, - the most common oldest papers, the cfdi, cadi, and the plots for cfdi and maoc. - """ - # Generate cfdi plot - plot_cfdi = generate_cfdi_plot(cfdi, compute_type) - - # Generate cadi plot - plot_maoc = generate_maoc_plot(maoc, compute_type) - - # Get top 3 most cited fields - top_fields_text = "\n".join( - [ - f"{field}: {count}" - for field, count in sorted( - field_counts.items(), reverse=True, key=lambda x: x[1] - )[:3] - ] - ) - - # Get most common oldest papers - oldest_paper_text = "".join( - f"[{str(year)}] {title}" + "\n" - for year, title in sorted(year_title_dict.items())[:3] - ) - - # Round CFDI and CADI - cfdi = round(cfdi, 3) - cadi = round(cadi, 3) - - return ( - title_authors, - num_references, - top_fields_text, - oldest_paper_text, - cfdi, - cadi, - plot_cfdi, - plot_maoc, - ) - - -with gr.Blocks( - theme=gr.themes.Soft() -) as demo: - with gr.Row(): - gr.Markdown( - """ - # Citation Age and Field Diversity Calculator - - <div align="center"> - <img src="https://onedrive.live.com/embed?resid=684CB5200DB6B388%21682618&authkey=%21AILbTZikzXAbAyc&width=1310&height=728" /> - </div> - - Welcome to this interactive demo to analyze various aspects of your citational diversity. This tool will enable you to reflect on two critical aspects: - - - By whom am I influenced? Which fields heavily inform and shape the research trajectory of my works? - - How far back in time do I cite? What are critical works (present and past) that shape my research? - - In addition, you will be able to analyze how the above compares to the average paper or author. The results you will receive can not be categorized into “good” or “bad”. Instead, they are meant to raise self-awareness about one’s citational diversity and reflect on it. The results might bring you to further questions, such as: - - - Am I reading widely across fields and time? - - Should I expand my literature search to include works from other fields? - - Are there ideas rooted in the past that can be used in an innovative way? - - Using citations as a tangible marker of influence, our demo provides empirical insights into the influence of papers across fields and time. - - ## What is Citation Field Diversity? - - Field diversity is a measure of the variety of research Fields that a paper or an author draws upon. A high field diversity indicates that the work draws from various distinct research fields, demonstrating a multidisciplinary influence on that work or author. - - ## What is Citation Age Diversity? - - Citation age is a measure of how far back in time a paper cites other papers. A high citation age shows that the work draws from past works, while a low citation age indicates that mostly recent work has influenced that paper. - - """ - ) - gr.Markdown( - """ - ## What are the Citation Field Diversity Index (CFDI) and Citation Age Diversity Index (CADI) and how are they calculated? - - The calculation of Field Diversity involves extracting all the references of a paper, categorizing them into distinct study fields, and determining the proportion of each study field over all the references. 
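
As a rough illustration only (this is not the Space's own code — that logic appears to be imported from its `s2.py` / `plots.py` helpers, which are not shown in this file — and the real CFDI/CADI follow Eq. 3 / Eq. 4 of the papers cited below, which may scale or invert a plain Gini coefficient), the proportions and a Gini-style measure over them could be sketched as:

```python
# Illustrative sketch, not this Space's implementation. The published indices
# (Eq. 3 / Eq. 4 of the cited papers) may normalize or invert this plain Gini
# coefficient so that higher values mean greater diversity.
from collections import Counter
from typing import List


def gini(values: List[float]) -> float:
    """Plain Gini coefficient of non-negative values (0 = perfectly even)."""
    xs = sorted(values)
    n, total = len(xs), sum(xs)
    if n == 0 or total == 0:
        return 0.0
    weighted = sum((i + 1) * x for i, x in enumerate(xs))
    return (2.0 * weighted) / (n * total) - (n + 1.0) / n


# Hypothetical reference list: one field-of-study label per cited paper.
fields = ["NLP", "NLP", "Linguistics", "Sociology", "NLP", "Medicine"]
field_proportions = [c / len(fields) for c in Counter(fields).values()]
field_gini = gini(field_proportions)  # low = citations spread evenly across fields

# Citation ages work analogously: citing year minus cited year, per reference.
ages = [2023 - y for y in (2021, 2020, 2013, 1995, 2019, 2022)]
age_proportions = [a / sum(ages) for a in ages]
age_gini = gini(age_proportions)

print(f"field Gini: {field_gini:.3f}, age Gini: {age_gini:.3f}")
```

Depending on the exact definitions in the cited papers, the reported CFDI/CADI may correspond to a transformation of this quantity (e.g. 1 − Gini), so that larger values indicate greater diversity, matching the interpretation given below.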
The Citation Field Diversity Index (CFDI) is then computed by applying the Gini Index on these proportions. - Calculating CADI is similar to CFDI but instead of determining the proportion of each study field, we determine the proportion of citation ages. If we take a paper from 2020 that cites two papers, one from 2010 and one from 1990, the citation ages are 10 and 30, respectively. The CADI is then computed by applying the Gini Index on these ages. - For more details, please refer to Eq. 3 in [this paper](https://aclanthology.org/2023.acl-long.341/) and Eq. 4 in [this paper](https://arxiv.org/). - - ## How do I Interpret CFDI and CADI? - - For both indices, higher values indicate a greater diversity of a NLP paper (in terms of how far back it cites and in the fields it cites). On the other hand, lower values signify a lower diversity, indicating that citations are more concentrated in specific fields and time ranges. - - ## How can I use this demo? - - There are three ways how you to compute the field and age diversity for papers: - 1. **Semantic Scholar ID**: Enter the Semantic Scholar ID of a **paper** or **author** and click the *"Compute"* button. - 2. **ACL Anthology Link**: Paste the ACL Anthology link of a **paper**, **venue**, or **author** and click the *"Compute"* button. - 3. **PDF File**: Upload your **paper** PDF and click the *"Compute"* button. - - To retrieve the **Semantic Scholar ID** for a paper such as "The Elephant in the Room: Analyzing the Presence of Big Tech in Natural Language Processing Research," search the paper on Semantic Scholar [here](https://www.semanticscholar.org/paper/The-Elephant-in-the-Room%3A-Analyzing-the-Presence-of-Abdalla-Wahle/587ffdfd7229e8e0dbc5250b44df5fad6251f6ad) and use the last part of the URL. The Semantic Scholar ID (SSID) for this paper is: **587ffdfd7229e8e0dbc5250b44df5fad6251f6ad**. - - To get an ACL Anthology link, you can go to any ACL Anthology paper, author or proceedings page and just copy and paste the url. For example: - - https://aclanthology.org/2023.acl-long.1/ - - https://aclanthology.org/people/a/anna-rogers/ - - https://aclanthology.org/events/acl-2002/ - """ - ) - - with gr.Row(): - with gr.Tabs(): - with gr.TabItem("Semantic Scholar ID"): - s2_id = gr.Textbox( - label="Semantic Scholar ID", - placeholder=( - "Enter the Semantic Scholar ID here and press enter..." 
- ), - # value="587ffdfd7229e8e0dbc5250b44df5fad6251f6ad", - ) - with gr.Row(): - s2_submit_btn = gr.Button("Compute") - with gr.TabItem("ACL Anthology Link"): - acl_link = gr.Textbox( - label="ACL Anthology Link", - placeholder="Paste the ACL Anthology link here...", - ) - with gr.Row(): - acl_submit_btn = gr.Button("Compute") - with gr.TabItem("PDF File"): - pdf_file = gr.File( - file_types=[".pdf"], label="Upload your paper PDF" - ) - with gr.Row(): - file_submit_btn = gr.Button("Compute") - with gr.Row(): - title = gr.Textbox( - label="Title / Author Name / Venue Name:", lines=2 - ) # Can be either paper title, author name, or proceedings title - with gr.Row(): - num_ref = gr.Textbox(label="Number of references", lines=3) - top_field_list = gr.Textbox(label="Top 3 fields cited:", lines=3) - top_age_list = gr.Textbox(label="Top 3 oldest papers cited:", lines=3) - with gr.Row(): - cfdi = gr.Textbox(label="CFDI") - cadi = gr.Textbox(label="CADI") - with gr.Row(): - cfdi_plot = gr.Plot(label="Citation Field Diversity") - cadi_plot = gr.Plot(label="Citation Age Diversity") - with gr.Row(): - clear_btn = gr.Button("Clear") - - submit_args = dict( - inputs=[s2_id, pdf_file, acl_link], - outputs=[ - title, - num_ref, - top_field_list, - top_age_list, - cfdi, - cadi, - cfdi_plot, - cadi_plot, - ], - ) - - s2_submit_args = submit_args.copy() - s2_submit_args["fn"] = create_compute_stats(submit_type="s2_id") - - acl_submit_args = submit_args.copy() - acl_submit_args["fn"] = create_compute_stats(submit_type="acl_link") - - file_submit_args = submit_args.copy() - file_submit_args["fn"] = create_compute_stats(submit_type="pdf_file") - - s2_id.submit(**s2_submit_args) - acl_link.submit(**acl_submit_args) - - acl_submit_btn.click(**acl_submit_args) - s2_submit_btn.click(**s2_submit_args) - file_submit_btn.click(**file_submit_args) - - clear_btn.click( - fn=return_clear, - inputs=[], - outputs=[ - title, - num_ref, - top_field_list, - top_age_list, - cfdi, - cadi, - cfdi_plot, - cadi_plot, - s2_id, - acl_link, - pdf_file, - ], - ) - -demo.queue(concurrency_count=3) -demo.launch(server_port=7860, server_name="0.0.0.0") diff --git a/spaces/k2-fsa/generate-subtitles-for-videos/app.py b/spaces/k2-fsa/generate-subtitles-for-videos/app.py deleted file mode 100644 index 64563796254f3789281a398ba0558c4b3dc1a78e..0000000000000000000000000000000000000000 --- a/spaces/k2-fsa/generate-subtitles-for-videos/app.py +++ /dev/null @@ -1,240 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright 2022-2023 Xiaomi Corp. (authors: Fangjun Kuang) -# -# See LICENSE for clarification regarding multiple authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# References: -# https://gradio.app/docs/#dropdown - -import logging -import os -from pathlib import Path - -import gradio as gr - -from decode import decode -from model import get_pretrained_model, get_vad, language_to_models - -title = "# Next-gen Kaldi: Generate subtitles for videos" - -description = """ -This space shows how to generate subtitles/captions with Next-gen Kaldi. 
- -It is running on CPU within a docker container provided by Hugging Face. - -Please find test video files at -<https://huggingface.co/csukuangfj/vad/tree/main> - -See more information by visiting the following links: - -- <https://github.com/k2-fsa/sherpa-onnx> -- <https://github.com/k2-fsa/icefall> -- <https://github.com/k2-fsa/k2> -- <https://github.com/lhotse-speech/lhotse> - -If you want to deploy it locally, please see -<https://k2-fsa.github.io/sherpa/> -""" - -# css style is copied from -# https://huggingface.co/spaces/alphacep/asr/blob/main/app.py#L113 -css = """ -.result {display:flex;flex-direction:column} -.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%} -.result_item_success {background-color:mediumaquamarine;color:white;align-self:start} -.result_item_error {background-color:#ff7070;color:white;align-self:start} -""" - - -def update_model_dropdown(language: str): - if language in language_to_models: - choices = language_to_models[language] - return gr.Dropdown.update(choices=choices, value=choices[0]) - - raise ValueError(f"Unsupported language: {language}") - - -def build_html_output(s: str, style: str = "result_item_success"): - return f""" - <div class='result'> - <div class='result_item {style}'> - {s} - </div> - </div> - """ - - -def show_file_info(in_filename: str): - logging.info(f"Input file: {in_filename}") - _ = os.system(f"ffprobe -hide_banner -i '{in_filename}'") - - -def process_uploaded_video_file( - language: str, - repo_id: str, - in_filename: str, -): - if in_filename is None or in_filename == "": - return "", build_html_output( - "Please first upload a file and then click " - 'the button "submit for recognition"', - "result_item_error", - ) - - logging.info(f"Processing uploaded file: {in_filename}") - - ans = process(language, repo_id, in_filename) - return (in_filename, ans[0]), ans[0], ans[1], ans[2] - - -def process_uploaded_audio_file( - language: str, - repo_id: str, - in_filename: str, -): - if in_filename is None or in_filename == "": - return "", build_html_output( - "Please first upload a file and then click " - 'the button "submit for recognition"', - "result_item_error", - ) - - logging.info(f"Processing uploaded file: {in_filename}") - - return process(language, repo_id, in_filename) - - -def process(language: str, repo_id: str, in_filename: str): - recognizer = get_pretrained_model(repo_id) - vad = get_vad() - - result = decode(recognizer, vad, in_filename) - logging.info(result) - - srt_filename = Path(in_filename).with_suffix(".srt") - with open(srt_filename, "w", encoding="utf-8") as f: - f.write(result) - - show_file_info(in_filename) - logging.info("Done") - - return ( - srt_filename, - build_html_output("Done! 
Please download the SRT file", "result_item_success"), - result, - ) - - -demo = gr.Blocks(css=css) - - -with demo: - gr.Markdown(title) - language_choices = list(language_to_models.keys()) - - language_radio = gr.Radio( - label="Language", - choices=language_choices, - value=language_choices[0], - ) - - model_dropdown = gr.Dropdown( - choices=language_to_models[language_choices[0]], - label="Select a model", - value=language_to_models[language_choices[0]][0], - ) - - language_radio.change( - update_model_dropdown, - inputs=language_radio, - outputs=model_dropdown, - ) - - with gr.Tabs(): - with gr.TabItem("Upload video from disk"): - uploaded_video_file = gr.Video( - source="upload", - interactive=True, - label="Upload from disk", - show_share_button=True, - ) - upload_video_button = gr.Button("Submit for recognition") - - output_video = gr.Video(label="Output") - output_srt_file_video = gr.File( - label="Generated subtitles", show_label=True - ) - - output_info_video = gr.HTML(label="Info") - output_textbox_video = gr.Textbox( - label="Recognized speech from uploaded video file" - ) - - with gr.TabItem("Upload audio from disk"): - uploaded_audio_file = gr.Audio( - source="upload", # Choose between "microphone", "upload" - type="filepath", - optional=False, - label="Upload audio from disk", - ) - upload_audio_button = gr.Button("Submit for recognition") - - output_srt_file_audio = gr.File( - label="Generated subtitles", show_label=True - ) - - output_info_audio = gr.HTML(label="Info") - output_textbox_audio = gr.Textbox( - label="Recognized speech from uploaded audio file" - ) - - upload_video_button.click( - process_uploaded_video_file, - inputs=[ - language_radio, - model_dropdown, - uploaded_video_file, - ], - outputs=[ - output_video, - output_srt_file_video, - output_info_video, - output_textbox_video, - ], - ) - - upload_audio_button.click( - process_uploaded_audio_file, - inputs=[ - language_radio, - model_dropdown, - uploaded_audio_file, - ], - outputs=[ - output_srt_file_audio, - output_info_audio, - output_textbox_audio, - ], - ) - - gr.Markdown(description) - -if __name__ == "__main__": - formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s" - - logging.basicConfig(format=formatter, level=logging.INFO) - - demo.launch() diff --git a/spaces/kaustubh35/tax/README.md b/spaces/kaustubh35/tax/README.md deleted file mode 100644 index 8b063eaee9aed75e5ec7d06b2466222d24f6e514..0000000000000000000000000000000000000000 --- a/spaces/kaustubh35/tax/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: Shiny for Python template -emoji: 🌍 -colorFrom: yellow -colorTo: indigo -sdk: docker -pinned: false -license: openrail -duplicated_from: posit/shiny-for-python-template ---- - -This is a templated Space for [Shiny for Python](https://shiny.rstudio.com/py/). - -To get started with a new app do the following: - -1) Install Shiny with `pip install shiny` -2) Create a new app with `shiny create .` -3) Then run the app with `shiny run --reload` - -To learn more about this framework please see the [Documentation](https://shiny.rstudio.com/py/docs/overview.html). diff --git a/spaces/kcagle/AutoGPT/run.bat b/spaces/kcagle/AutoGPT/run.bat deleted file mode 100644 index afbab57a0603a126b04845ec754d1ecf3fdea18d..0000000000000000000000000000000000000000 --- a/spaces/kcagle/AutoGPT/run.bat +++ /dev/null @@ -1,8 +0,0 @@ -@echo off -python scripts/check_requirements.py requirements.txt -if errorlevel 1 ( - echo Installing missing packages... 
- pip install -r requirements.txt -) -python -m autogpt %* -pause diff --git a/spaces/keras-io/Human-Part-Segmentation/README.md b/spaces/keras-io/Human-Part-Segmentation/README.md deleted file mode 100644 index a204022c0cac0b5331da5182bebc6196750b2f6b..0000000000000000000000000000000000000000 --- a/spaces/keras-io/Human-Part-Segmentation/README.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: Human Part Segmentation -emoji: 👤 -colorFrom: gray -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false -tags: -- computer-vision -- image-segmentation -license: cc0-1.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker/src/test_audio2coeff.py b/spaces/kevinwang676/ChatGLM2-SadTalker/src/test_audio2coeff.py deleted file mode 100644 index bbf19f494e2127b4ae9d6074b172fddb694d6e34..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker/src/test_audio2coeff.py +++ /dev/null @@ -1,123 +0,0 @@ -import os -import torch -import numpy as np -from scipy.io import savemat, loadmat -from yacs.config import CfgNode as CN -from scipy.signal import savgol_filter - -import safetensors -import safetensors.torch - -from src.audio2pose_models.audio2pose import Audio2Pose -from src.audio2exp_models.networks import SimpleWrapperV2 -from src.audio2exp_models.audio2exp import Audio2Exp -from src.utils.safetensor_helper import load_x_from_safetensor - -def load_cpk(checkpoint_path, model=None, optimizer=None, device="cpu"): - checkpoint = torch.load(checkpoint_path, map_location=torch.device(device)) - if model is not None: - model.load_state_dict(checkpoint['model']) - if optimizer is not None: - optimizer.load_state_dict(checkpoint['optimizer']) - - return checkpoint['epoch'] - -class Audio2Coeff(): - - def __init__(self, sadtalker_path, device): - #load config - fcfg_pose = open(sadtalker_path['audio2pose_yaml_path']) - cfg_pose = CN.load_cfg(fcfg_pose) - cfg_pose.freeze() - fcfg_exp = open(sadtalker_path['audio2exp_yaml_path']) - cfg_exp = CN.load_cfg(fcfg_exp) - cfg_exp.freeze() - - # load audio2pose_model - self.audio2pose_model = Audio2Pose(cfg_pose, None, device=device) - self.audio2pose_model = self.audio2pose_model.to(device) - self.audio2pose_model.eval() - for param in self.audio2pose_model.parameters(): - param.requires_grad = False - - try: - if sadtalker_path['use_safetensor']: - checkpoints = 
safetensors.torch.load_file(sadtalker_path['checkpoint']) - self.audio2pose_model.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2pose')) - else: - load_cpk(sadtalker_path['audio2pose_checkpoint'], model=self.audio2pose_model, device=device) - except: - raise Exception("Failed in loading audio2pose_checkpoint") - - # load audio2exp_model - netG = SimpleWrapperV2() - netG = netG.to(device) - for param in netG.parameters(): - netG.requires_grad = False - netG.eval() - try: - if sadtalker_path['use_safetensor']: - checkpoints = safetensors.torch.load_file(sadtalker_path['checkpoint']) - netG.load_state_dict(load_x_from_safetensor(checkpoints, 'audio2exp')) - else: - load_cpk(sadtalker_path['audio2exp_checkpoint'], model=netG, device=device) - except: - raise Exception("Failed in loading audio2exp_checkpoint") - self.audio2exp_model = Audio2Exp(netG, cfg_exp, device=device, prepare_training_loss=False) - self.audio2exp_model = self.audio2exp_model.to(device) - for param in self.audio2exp_model.parameters(): - param.requires_grad = False - self.audio2exp_model.eval() - - self.device = device - - def generate(self, batch, coeff_save_dir, pose_style, ref_pose_coeff_path=None): - - with torch.no_grad(): - #test - results_dict_exp= self.audio2exp_model.test(batch) - exp_pred = results_dict_exp['exp_coeff_pred'] #bs T 64 - - #for class_id in range(1): - #class_id = 0#(i+10)%45 - #class_id = random.randint(0,46) #46 styles can be selected - batch['class'] = torch.LongTensor([pose_style]).to(self.device) - results_dict_pose = self.audio2pose_model.test(batch) - pose_pred = results_dict_pose['pose_pred'] #bs T 6 - - pose_len = pose_pred.shape[1] - if pose_len<13: - pose_len = int((pose_len-1)/2)*2+1 - pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), pose_len, 2, axis=1)).to(self.device) - else: - pose_pred = torch.Tensor(savgol_filter(np.array(pose_pred.cpu()), 13, 2, axis=1)).to(self.device) - - coeffs_pred = torch.cat((exp_pred, pose_pred), dim=-1) #bs T 70 - - coeffs_pred_numpy = coeffs_pred[0].clone().detach().cpu().numpy() - - if ref_pose_coeff_path is not None: - coeffs_pred_numpy = self.using_refpose(coeffs_pred_numpy, ref_pose_coeff_path) - - savemat(os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])), - {'coeff_3dmm': coeffs_pred_numpy}) - - return os.path.join(coeff_save_dir, '%s##%s.mat'%(batch['pic_name'], batch['audio_name'])) - - def using_refpose(self, coeffs_pred_numpy, ref_pose_coeff_path): - num_frames = coeffs_pred_numpy.shape[0] - refpose_coeff_dict = loadmat(ref_pose_coeff_path) - refpose_coeff = refpose_coeff_dict['coeff_3dmm'][:,64:70] - refpose_num_frames = refpose_coeff.shape[0] - if refpose_num_frames<num_frames: - div = num_frames//refpose_num_frames - re = num_frames%refpose_num_frames - refpose_coeff_list = [refpose_coeff for i in range(div)] - refpose_coeff_list.append(refpose_coeff[:re, :]) - refpose_coeff = np.concatenate(refpose_coeff_list, axis=0) - - #### relative head pose - coeffs_pred_numpy[:, 64:70] = coeffs_pred_numpy[:, 64:70] + ( refpose_coeff[:num_frames, :] - refpose_coeff[0:1, :] ) - return coeffs_pred_numpy - - diff --git a/spaces/kevinwang676/SadTalker/src/facerender/sync_batchnorm/replicate.py b/spaces/kevinwang676/SadTalker/src/facerender/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/facerender/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- 
coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. - - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. 
- - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/khxu/pegasus-text-summarizers/README.md b/spaces/khxu/pegasus-text-summarizers/README.md deleted file mode 100644 index 3ec88d4cc647e46bfd5c5a203c50d2cbb9270ef5..0000000000000000000000000000000000000000 --- a/spaces/khxu/pegasus-text-summarizers/README.md +++ /dev/null @@ -1,46 +0,0 @@ ---- -title: Pegasus Text Summarizers -emoji: 👀 -colorFrom: yellow -colorTo: gray -sdk: gradio -app_file: app.py -pinned: false -license: apache-2.0 ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`models`: _List[string]_ -HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space. -Will be parsed automatically from your code if not specified here. - -`datasets`: _List[string]_ -HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space. -Will be parsed automatically from your code if not specified here. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/fpn_r50.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/fpn_r50.py deleted file mode 100644 index 86ab327db92e44c14822d65f1c9277cb007f17c1..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/configs/_base_/models/fpn_r50.py +++ /dev/null @@ -1,36 +0,0 @@ -# model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) -model = dict( - type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', - backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 1, 1), - strides=(1, 2, 2, 2), - norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), - neck=dict( - type='FPN', - in_channels=[256, 512, 1024, 2048], - out_channels=256, - num_outs=4), - decode_head=dict( - type='FPNHead', - in_channels=[256, 256, 256, 256], - in_index=[0, 1, 2, 3], - feature_strides=[4, 8, 16, 32], - channels=128, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(mode='whole')) diff --git a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/__init__.py b/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/__init__.py deleted file mode 100644 index 8339983905fb5d20bae42ba6f76fea75d278b1aa..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/annotator/uniformer/mmseg/models/backbones/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -from .cgnet import CGNet -# from .fast_scnn import FastSCNN -from .hrnet import HRNet -from .mobilenet_v2 import MobileNetV2 -from .mobilenet_v3 import MobileNetV3 -from .resnest import ResNeSt -from .resnet import ResNet, ResNetV1c, ResNetV1d -from .resnext import ResNeXt -from .unet import UNet -from .vit import VisionTransformer -from .uniformer import UniFormer - -__all__ = [ - 'ResNet', 'ResNetV1c', 'ResNetV1d', 'ResNeXt', 'HRNet', - 'ResNeSt', 'MobileNetV2', 'UNet', 'CGNet', 'MobileNetV3', - 'VisionTransformer', 'UniFormer' -] diff --git a/spaces/kirch/Text2Video-Zero/app.py b/spaces/kirch/Text2Video-Zero/app.py deleted file mode 100644 index a1f11a10bf7e3a3a412259242c2e75fdd17e2c30..0000000000000000000000000000000000000000 --- a/spaces/kirch/Text2Video-Zero/app.py +++ /dev/null @@ -1,83 +0,0 @@ -import gradio as gr -import torch -import os -from model import Model, ModelType - -from app_canny import create_demo as create_demo_canny -from app_pose import create_demo as create_demo_pose -from app_text_to_video import create_demo as create_demo_text_to_video -from app_pix2pix_video import create_demo as create_demo_pix2pix_video -from app_canny_db import create_demo as create_demo_canny_db - - -model = Model(device='cuda', dtype=torch.float16) - -with gr.Blocks(css='style.css') as demo: - gr.HTML( - """ - <div style="text-align: center; max-width: 1200px; margin: 20px auto;"> - <h1 style="font-weight: 900; font-size: 3rem; margin: 0rem"> - Text2Video-Zero - </h1> - <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem"> - Levon Khachatryan<sup>1*</sup>, Andranik Movsisyan<sup>1*</sup>, Vahram Tadevosyan<sup>1*</sup>, Roberto Henschel<sup>1*</sup>, Zhangyang Wang<sup>1,2</sup>, Shant Navasardyan<sup>1</sup> - and <a href="https://www.humphreyshi.com/home">Humphrey Shi</a><sup>1,3,4</sup> - </h2> 
- <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem"> - <sup>1</sup>Picsart AI Research (PAIR), <sup>2</sup>UT Austin, <sup>3</sup>U of Oregon, <sup>4</sup>UIUC - </h2> - <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem"> - [<a href="https://arxiv.org/abs/2303.13439" style="color:blue;">arXiv</a>] - [<a href="https://github.com/Picsart-AI-Research/Text2Video-Zero" style="color:blue;">GitHub</a>] - </h2> - <h2 style="font-weight: 450; font-size: 1rem; margin-top: 0.5rem; margin-bottom: 0.5rem"> - We built <b>Text2Video-Zero</b>, the first zero-shot text-to-video synthesis diffusion framework, which enables low-cost yet high-quality and consistent video generation using only pre-trained text-to-image diffusion models, without any training on videos or optimization! - Text2Video-Zero also naturally supports extensions of pre-trained text-to-image models such as Instruct Pix2Pix, ControlNet and DreamBooth, based on which we present the Video Instruct Pix2Pix, Pose Conditional, Edge Conditional, and Edge Conditional and DreamBooth Specialized applications. - We hope Text2Video-Zero will further democratize AI and empower everyone's creativity by unleashing the zero-shot video generation and editing capacity of these amazing text-to-image models, and that it will encourage future research! - </h2> - </div> - """) - - - gr.HTML(""" - <p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. - <br/> - <a href="https://huggingface.co/spaces/PAIR/Text2Video-Zero?duplicate=true"> - <img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a> - </p>""") - - with gr.Tab('Zero-Shot Text2Video'): - create_demo_text_to_video(model) - with gr.Tab('Video Instruct Pix2Pix'): - create_demo_pix2pix_video(model) - with gr.Tab('Pose Conditional'): - create_demo_pose(model) - with gr.Tab('Edge Conditional'): - create_demo_canny(model) - with gr.Tab('Edge Conditional and Dreambooth Specialized'): - create_demo_canny_db(model) - - gr.HTML( - """ - <div style="text-align: justify; max-width: 1200px; margin: 20px auto;"> - <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem"> - <b>Version: v1.0</b> - </h3> - <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem"> - <b>Caution</b>: - We would like to raise users' awareness of this demo's potential issues and concerns. - Like previous large foundation models, Text2Video-Zero could be problematic in some cases; in particular, because it uses pretrained Stable Diffusion, it can inherit its imperfections. - So far, we keep all features available for research testing, both to show the great potential of the Text2Video-Zero framework and to collect important feedback to improve the model in the future. - We welcome researchers and users to report issues via the HuggingFace community discussion feature or by emailing the authors. - </h3> - <h3 style="font-weight: 450; font-size: 0.8rem; margin: 0rem"> - <b>Biases and content acknowledgement</b>: - Beware that Text2Video-Zero may output content that reinforces or exacerbates societal biases, as well as realistic faces, pornography, and violence. - Text2Video-Zero in this demo is meant only for research purposes.
- </h3> - </div> - """) - -demo.queue(max_size=20) -demo.launch(auth=(os.getenv("SECRET_USER"), os.getenv("SECRET_PASS"))) -# demo.queue(api_open=False).launch(file_directories=['temporal'], share=True) diff --git a/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/__init__.py b/spaces/kohrisatou-infinity/KIP_01_beta/vdecoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/konfuzio-com/PP-OCRv3-ch/README.md b/spaces/konfuzio-com/PP-OCRv3-ch/README.md deleted file mode 100644 index 3ea098fecaf806ba4bd802972cd9e75c7896b8d0..0000000000000000000000000000000000000000 --- a/spaces/konfuzio-com/PP-OCRv3-ch/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ch_PP-OCRv3 -emoji: 📈 -colorFrom: purple -colorTo: gray -sdk: gradio -sdk_version: 3.3 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: PaddlePaddle/PP-OCRv3-ch ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/expr/__init__.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/expr/__init__.py deleted file mode 100644 index 6ba7f8b8b96e28e4f0f7f143f29023d1bc0e58ba..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/altair/expr/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Tools for creating transform & filter expressions with a python syntax""" -# ruff: noqa -from typing import Any - -from .core import datum, Expression -from .funcs import * -from .consts import * -from ..vegalite.v5.schema.core import ExprRef as _ExprRef - - -class _ExprType: - def __init__(self, expr): - vars(self).update(expr) - - def __call__(self, expr, **kwargs): - return _ExprRef(expr, **kwargs) - - -expr: Any = _ExprType(globals()) diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/contourpy/util/renderer.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/contourpy/util/renderer.py deleted file mode 100644 index ef1d065ee1328728af04ab61525dad77a73e3d28..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/contourpy/util/renderer.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from typing import TYPE_CHECKING, Any - -import numpy as np - -if TYPE_CHECKING: - import io - - from numpy.typing import ArrayLike - - from contourpy._contourpy import CoordinateArray, FillReturn, FillType, LineReturn, LineType - - -class Renderer(ABC): - """Abstract base class for renderers, defining the interface that they must implement.""" - - def _grid_as_2d(self, x: ArrayLike, y: ArrayLike) -> tuple[CoordinateArray, CoordinateArray]: - x = np.asarray(x) - y = np.asarray(y) - if x.ndim == 1: - x, y = np.meshgrid(x, y) - return x, y - - x = np.asarray(x) - y = np.asarray(y) - if x.ndim == 1: - x, y = np.meshgrid(x, y) - return x, y - - @abstractmethod - def filled( - self, - filled: FillReturn, - fill_type: FillType, - ax: Any = 0, - color: str = "C0", - alpha: float = 0.7, - ) -> None: - pass - - @abstractmethod - def grid( - self, - x: ArrayLike, - y: ArrayLike, - ax: Any = 0, - color: str = "black", - alpha: float = 0.1, - point_color: str | None = None, - quad_as_tri_alpha: float = 0, - ) -> None: - pass - - @abstractmethod - def lines( - self, - lines: 
LineReturn, - line_type: LineType, - ax: Any = 0, - color: str = "C0", - alpha: float = 1.0, - linewidth: float = 1, - ) -> None: - pass - - @abstractmethod - def mask( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike | np.ma.MaskedArray[Any, Any], - ax: Any = 0, - color: str = "black", - ) -> None: - pass - - @abstractmethod - def save(self, filename: str, transparent: bool = False) -> None: - pass - - @abstractmethod - def save_to_buffer(self) -> io.BytesIO: - pass - - @abstractmethod - def show(self) -> None: - pass - - @abstractmethod - def title(self, title: str, ax: Any = 0, color: str | None = None) -> None: - pass - - @abstractmethod - def z_values( - self, - x: ArrayLike, - y: ArrayLike, - z: ArrayLike, - ax: Any = 0, - color: str = "green", - fmt: str = ".1f", - quad_as_tri: bool = False, - ) -> None: - pass diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/table.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/table.py deleted file mode 100644 index a431eaf618cacbdb2d4de5b73e5753ce16d6ad77..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/matplotlib/table.py +++ /dev/null @@ -1,830 +0,0 @@ -# Original code by: -# John Gill <jng@europe.renre.com> -# Copyright 2004 John Gill and John Hunter -# -# Subsequent changes: -# The Matplotlib development team -# Copyright The Matplotlib development team - -""" -Tables drawing. - -.. note:: - The table implementation in Matplotlib is lightly maintained. For a more - featureful table implementation, you may wish to try `blume - <https://github.com/swfiua/blume>`_. - -Use the factory function `~matplotlib.table.table` to create a ready-made -table from texts. If you need more control, use the `.Table` class and its -methods. - -The table consists of a grid of cells, which are indexed by (row, column). -The cell (0, 0) is positioned at the top left. - -Thanks to John Gill for providing the class and table. -""" - -from . import _api, _docstring -from .artist import Artist, allow_rasterization -from .patches import Rectangle -from .text import Text -from .transforms import Bbox -from .path import Path - - -class Cell(Rectangle): - """ - A cell is a `.Rectangle` with some associated `.Text`. - - As a user, you'll most likely not creates cells yourself. Instead, you - should use either the `~matplotlib.table.table` factory function or - `.Table.add_cell`. - """ - - PAD = 0.1 - """Padding between text and rectangle.""" - - _edges = 'BRTL' - _edge_aliases = {'open': '', - 'closed': _edges, # default - 'horizontal': 'BT', - 'vertical': 'RL' - } - - @_api.make_keyword_only("3.6", name="edgecolor") - def __init__(self, xy, width, height, - edgecolor='k', facecolor='w', - fill=True, - text='', - loc=None, - fontproperties=None, - *, - visible_edges='closed', - ): - """ - Parameters - ---------- - xy : 2-tuple - The position of the bottom left corner of the cell. - width : float - The cell width. - height : float - The cell height. - edgecolor : color - The color of the cell border. - facecolor : color - The cell facecolor. - fill : bool - Whether the cell background is filled. - text : str - The cell text. - loc : {'left', 'center', 'right'}, default: 'right' - The alignment of the text within the cell. - fontproperties : dict - A dict defining the font properties of the text. Supported keys and - values are the keyword arguments accepted by `.FontProperties`. 
- visible_edges : str, default: 'closed' - The cell edges to be drawn with a line: a substring of 'BRTL' - (bottom, right, top, left), or one of 'open' (no edges drawn), - 'closed' (all edges drawn), 'horizontal' (bottom and top), - 'vertical' (right and left). - """ - - # Call base - super().__init__(xy, width=width, height=height, fill=fill, - edgecolor=edgecolor, facecolor=facecolor) - self.set_clip_on(False) - self.visible_edges = visible_edges - - # Create text object - if loc is None: - loc = 'right' - self._loc = loc - self._text = Text(x=xy[0], y=xy[1], clip_on=False, - text=text, fontproperties=fontproperties, - horizontalalignment=loc, verticalalignment='center') - - def set_transform(self, trans): - super().set_transform(trans) - # the text does not get the transform! - self.stale = True - - def set_figure(self, fig): - super().set_figure(fig) - self._text.set_figure(fig) - - def get_text(self): - """Return the cell `.Text` instance.""" - return self._text - - def set_fontsize(self, size): - """Set the text fontsize.""" - self._text.set_fontsize(size) - self.stale = True - - def get_fontsize(self): - """Return the cell fontsize.""" - return self._text.get_fontsize() - - def auto_set_font_size(self, renderer): - """Shrink font size until the text fits into the cell width.""" - fontsize = self.get_fontsize() - required = self.get_required_width(renderer) - while fontsize > 1 and required > self.get_width(): - fontsize -= 1 - self.set_fontsize(fontsize) - required = self.get_required_width(renderer) - - return fontsize - - @allow_rasterization - def draw(self, renderer): - if not self.get_visible(): - return - # draw the rectangle - super().draw(renderer) - # position the text - self._set_text_position(renderer) - self._text.draw(renderer) - self.stale = False - - def _set_text_position(self, renderer): - """Set text up so it is drawn in the right place.""" - bbox = self.get_window_extent(renderer) - # center vertically - y = bbox.y0 + bbox.height / 2 - # position horizontally - loc = self._text.get_horizontalalignment() - if loc == 'center': - x = bbox.x0 + bbox.width / 2 - elif loc == 'left': - x = bbox.x0 + bbox.width * self.PAD - else: # right. - x = bbox.x0 + bbox.width * (1 - self.PAD) - self._text.set_position((x, y)) - - def get_text_bounds(self, renderer): - """ - Return the text bounds as *(x, y, width, height)* in table coordinates. - """ - return (self._text.get_window_extent(renderer) - .transformed(self.get_data_transform().inverted()) - .bounds) - - def get_required_width(self, renderer): - """Return the minimal required width for the cell.""" - l, b, w, h = self.get_text_bounds(renderer) - return w * (1.0 + (2.0 * self.PAD)) - - @_docstring.dedent_interpd - def set_text_props(self, **kwargs): - """ - Update the text properties. - - Valid keyword arguments are: - - %(Text:kwdoc)s - """ - self._text._internal_update(kwargs) - self.stale = True - - @property - def visible_edges(self): - """ - The cell edges to be drawn with a line. - - Reading this property returns a substring of 'BRTL' (bottom, right, - top, left'). - - When setting this property, you can use a substring of 'BRTL' or one - of {'open', 'closed', 'horizontal', 'vertical'}. 
- """ - return self._visible_edges - - @visible_edges.setter - def visible_edges(self, value): - if value is None: - self._visible_edges = self._edges - elif value in self._edge_aliases: - self._visible_edges = self._edge_aliases[value] - else: - if any(edge not in self._edges for edge in value): - raise ValueError('Invalid edge param {}, must only be one of ' - '{} or string of {}'.format( - value, - ", ".join(self._edge_aliases), - ", ".join(self._edges))) - self._visible_edges = value - self.stale = True - - def get_path(self): - """Return a `.Path` for the `.visible_edges`.""" - codes = [Path.MOVETO] - codes.extend( - Path.LINETO if edge in self._visible_edges else Path.MOVETO - for edge in self._edges) - if Path.MOVETO not in codes[1:]: # All sides are visible - codes[-1] = Path.CLOSEPOLY - return Path( - [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]], - codes, - readonly=True - ) - - -CustomCell = Cell # Backcompat. alias. - - -class Table(Artist): - """ - A table of cells. - - The table consists of a grid of cells, which are indexed by (row, column). - - For a simple table, you'll have a full grid of cells with indices from - (0, 0) to (num_rows-1, num_cols-1), in which the cell (0, 0) is positioned - at the top left. However, you can also add cells with negative indices. - You don't have to add a cell to every grid position, so you can create - tables that have holes. - - *Note*: You'll usually not create an empty table from scratch. Instead use - `~matplotlib.table.table` to create a table from data. - """ - codes = {'best': 0, - 'upper right': 1, # default - 'upper left': 2, - 'lower left': 3, - 'lower right': 4, - 'center left': 5, - 'center right': 6, - 'lower center': 7, - 'upper center': 8, - 'center': 9, - 'top right': 10, - 'top left': 11, - 'bottom left': 12, - 'bottom right': 13, - 'right': 14, - 'left': 15, - 'top': 16, - 'bottom': 17, - } - """Possible values where to place the table relative to the Axes.""" - - FONTSIZE = 10 - - AXESPAD = 0.02 - """The border between the Axes and the table edge in Axes units.""" - - def __init__(self, ax, loc=None, bbox=None, **kwargs): - """ - Parameters - ---------- - ax : `matplotlib.axes.Axes` - The `~.axes.Axes` to plot the table into. - loc : str - The position of the cell with respect to *ax*. This must be one of - the `~.Table.codes`. - bbox : `.Bbox` or [xmin, ymin, width, height], optional - A bounding box to draw the table into. If this is not *None*, this - overrides *loc*. - - Other Parameters - ---------------- - **kwargs - `.Artist` properties. - """ - - super().__init__() - - if isinstance(loc, str): - if loc not in self.codes: - raise ValueError( - "Unrecognized location {!r}. Valid locations are\n\t{}" - .format(loc, '\n\t'.join(self.codes))) - loc = self.codes[loc] - self.set_figure(ax.figure) - self._axes = ax - self._loc = loc - self._bbox = bbox - - # use axes coords - ax._unstale_viewLim() - self.set_transform(ax.transAxes) - - self._cells = {} - self._edges = None - self._autoColumns = [] - self._autoFontsize = True - self._internal_update(kwargs) - - self.set_clip_on(False) - - def add_cell(self, row, col, *args, **kwargs): - """ - Create a cell and add it to the table. - - Parameters - ---------- - row : int - Row index. - col : int - Column index. - *args, **kwargs - All other parameters are passed on to `Cell`. - - Returns - ------- - `.Cell` - The created cell. 
- - """ - xy = (0, 0) - cell = Cell(xy, visible_edges=self.edges, *args, **kwargs) - self[row, col] = cell - return cell - - def __setitem__(self, position, cell): - """ - Set a custom cell in a given position. - """ - _api.check_isinstance(Cell, cell=cell) - try: - row, col = position[0], position[1] - except Exception as err: - raise KeyError('Only tuples length 2 are accepted as ' - 'coordinates') from err - cell.set_figure(self.figure) - cell.set_transform(self.get_transform()) - cell.set_clip_on(False) - self._cells[row, col] = cell - self.stale = True - - def __getitem__(self, position): - """Retrieve a custom cell from a given position.""" - return self._cells[position] - - @property - def edges(self): - """ - The default value of `~.Cell.visible_edges` for newly added - cells using `.add_cell`. - - Notes - ----- - This setting does currently only affect newly created cells using - `.add_cell`. - - To change existing cells, you have to set their edges explicitly:: - - for c in tab.get_celld().values(): - c.visible_edges = 'horizontal' - - """ - return self._edges - - @edges.setter - def edges(self, value): - self._edges = value - self.stale = True - - def _approx_text_height(self): - return (self.FONTSIZE / 72.0 * self.figure.dpi / - self._axes.bbox.height * 1.2) - - @allow_rasterization - def draw(self, renderer): - # docstring inherited - - # Need a renderer to do hit tests on mouseevent; assume the last one - # will do - if renderer is None: - renderer = self.figure._get_renderer() - if renderer is None: - raise RuntimeError('No renderer defined') - - if not self.get_visible(): - return - renderer.open_group('table', gid=self.get_gid()) - self._update_positions(renderer) - - for key in sorted(self._cells): - self._cells[key].draw(renderer) - - renderer.close_group('table') - self.stale = False - - def _get_grid_bbox(self, renderer): - """ - Get a bbox, in axes coordinates for the cells. - - Only include those in the range (0, 0) to (maxRow, maxCol). - """ - boxes = [cell.get_window_extent(renderer) - for (row, col), cell in self._cells.items() - if row >= 0 and col >= 0] - bbox = Bbox.union(boxes) - return bbox.transformed(self.get_transform().inverted()) - - def contains(self, mouseevent): - # docstring inherited - inside, info = self._default_contains(mouseevent) - if inside is not None: - return inside, info - # TODO: Return index of the cell containing the cursor so that the user - # doesn't have to bind to each one individually. - renderer = self.figure._get_renderer() - if renderer is not None: - boxes = [cell.get_window_extent(renderer) - for (row, col), cell in self._cells.items() - if row >= 0 and col >= 0] - bbox = Bbox.union(boxes) - return bbox.contains(mouseevent.x, mouseevent.y), {} - else: - return False, {} - - def get_children(self): - """Return the Artists contained by the table.""" - return list(self._cells.values()) - - def get_window_extent(self, renderer=None): - # docstring inherited - if renderer is None: - renderer = self.figure._get_renderer() - self._update_positions(renderer) - boxes = [cell.get_window_extent(renderer) - for cell in self._cells.values()] - return Bbox.union(boxes) - - def _do_cell_alignment(self): - """ - Calculate row heights and column widths; position cells accordingly. 
- """ - # Calculate row/column widths - widths = {} - heights = {} - for (row, col), cell in self._cells.items(): - height = heights.setdefault(row, 0.0) - heights[row] = max(height, cell.get_height()) - width = widths.setdefault(col, 0.0) - widths[col] = max(width, cell.get_width()) - - # work out left position for each column - xpos = 0 - lefts = {} - for col in sorted(widths): - lefts[col] = xpos - xpos += widths[col] - - ypos = 0 - bottoms = {} - for row in sorted(heights, reverse=True): - bottoms[row] = ypos - ypos += heights[row] - - # set cell positions - for (row, col), cell in self._cells.items(): - cell.set_x(lefts[col]) - cell.set_y(bottoms[row]) - - def auto_set_column_width(self, col): - """ - Automatically set the widths of given columns to optimal sizes. - - Parameters - ---------- - col : int or sequence of ints - The indices of the columns to auto-scale. - """ - # check for col possibility on iteration - try: - iter(col) - except (TypeError, AttributeError): - self._autoColumns.append(col) - else: - for cell in col: - self._autoColumns.append(cell) - - self.stale = True - - def _auto_set_column_width(self, col, renderer): - """Automatically set width for column.""" - cells = [cell for key, cell in self._cells.items() if key[1] == col] - max_width = max((cell.get_required_width(renderer) for cell in cells), - default=0) - for cell in cells: - cell.set_width(max_width) - - def auto_set_font_size(self, value=True): - """Automatically set font size.""" - self._autoFontsize = value - self.stale = True - - def _auto_set_font_size(self, renderer): - - if len(self._cells) == 0: - return - fontsize = next(iter(self._cells.values())).get_fontsize() - cells = [] - for key, cell in self._cells.items(): - # ignore auto-sized columns - if key[1] in self._autoColumns: - continue - size = cell.auto_set_font_size(renderer) - fontsize = min(fontsize, size) - cells.append(cell) - - # now set all fontsizes equal - for cell in self._cells.values(): - cell.set_fontsize(fontsize) - - def scale(self, xscale, yscale): - """Scale column widths by *xscale* and row heights by *yscale*.""" - for c in self._cells.values(): - c.set_width(c.get_width() * xscale) - c.set_height(c.get_height() * yscale) - - def set_fontsize(self, size): - """ - Set the font size, in points, of the cell text. - - Parameters - ---------- - size : float - - Notes - ----- - As long as auto font size has not been disabled, the value will be - clipped such that the text fits horizontally into the cell. - - You can disable this behavior using `.auto_set_font_size`. - - >>> the_table.auto_set_font_size(False) - >>> the_table.set_fontsize(20) - - However, there is no automatic scaling of the row height so that the - text may exceed the cell boundary. 
- """ - for cell in self._cells.values(): - cell.set_fontsize(size) - self.stale = True - - def _offset(self, ox, oy): - """Move all the artists by ox, oy (axes coords).""" - for c in self._cells.values(): - x, y = c.get_x(), c.get_y() - c.set_x(x + ox) - c.set_y(y + oy) - - def _update_positions(self, renderer): - # called from renderer to allow more precise estimates of - # widths and heights with get_window_extent - - # Do any auto width setting - for col in self._autoColumns: - self._auto_set_column_width(col, renderer) - - if self._autoFontsize: - self._auto_set_font_size(renderer) - - # Align all the cells - self._do_cell_alignment() - - bbox = self._get_grid_bbox(renderer) - l, b, w, h = bbox.bounds - - if self._bbox is not None: - # Position according to bbox - if isinstance(self._bbox, Bbox): - rl, rb, rw, rh = self._bbox.bounds - else: - rl, rb, rw, rh = self._bbox - self.scale(rw / w, rh / h) - ox = rl - l - oy = rb - b - self._do_cell_alignment() - else: - # Position using loc - (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C, - TR, TL, BL, BR, R, L, T, B) = range(len(self.codes)) - # defaults for center - ox = (0.5 - w / 2) - l - oy = (0.5 - h / 2) - b - if self._loc in (UL, LL, CL): # left - ox = self.AXESPAD - l - if self._loc in (BEST, UR, LR, R, CR): # right - ox = 1 - (l + w + self.AXESPAD) - if self._loc in (BEST, UR, UL, UC): # upper - oy = 1 - (b + h + self.AXESPAD) - if self._loc in (LL, LR, LC): # lower - oy = self.AXESPAD - b - if self._loc in (LC, UC, C): # center x - ox = (0.5 - w / 2) - l - if self._loc in (CL, CR, C): # center y - oy = (0.5 - h / 2) - b - - if self._loc in (TL, BL, L): # out left - ox = - (l + w) - if self._loc in (TR, BR, R): # out right - ox = 1.0 - l - if self._loc in (TR, TL, T): # out top - oy = 1.0 - b - if self._loc in (BL, BR, B): # out bottom - oy = - (b + h) - - self._offset(ox, oy) - - def get_celld(self): - r""" - Return a dict of cells in the table mapping *(row, column)* to - `.Cell`\s. - - Notes - ----- - You can also directly index into the Table object to access individual - cells:: - - cell = table[row, col] - - """ - return self._cells - - -@_docstring.dedent_interpd -def table(ax, - cellText=None, cellColours=None, - cellLoc='right', colWidths=None, - rowLabels=None, rowColours=None, rowLoc='left', - colLabels=None, colColours=None, colLoc='center', - loc='bottom', bbox=None, edges='closed', - **kwargs): - """ - Add a table to an `~.axes.Axes`. - - At least one of *cellText* or *cellColours* must be specified. These - parameters must be 2D lists, in which the outer lists define the rows and - the inner list define the column values per row. Each row must have the - same number of elements. - - The table can optionally have row and column headers, which are configured - using *rowLabels*, *rowColours*, *rowLoc* and *colLabels*, *colColours*, - *colLoc* respectively. - - For finer grained control over tables, use the `.Table` class and add it to - the axes with `.Axes.add_table`. - - Parameters - ---------- - cellText : 2D list of str, optional - The texts to place into the table cells. - - *Note*: Line breaks in the strings are currently not accounted for and - will result in the text exceeding the cell boundaries. - - cellColours : 2D list of colors, optional - The background colors of the cells. - - cellLoc : {'left', 'center', 'right'}, default: 'right' - The alignment of the text within the cells. - - colWidths : list of float, optional - The column widths in units of the axes. 
If not given, all columns will - have a width of *1 / ncols*. - - rowLabels : list of str, optional - The text of the row header cells. - - rowColours : list of colors, optional - The colors of the row header cells. - - rowLoc : {'left', 'center', 'right'}, default: 'left' - The text alignment of the row header cells. - - colLabels : list of str, optional - The text of the column header cells. - - colColours : list of colors, optional - The colors of the column header cells. - - colLoc : {'left', 'center', 'right'}, default: 'left' - The text alignment of the column header cells. - - loc : str, optional - The position of the cell with respect to *ax*. This must be one of - the `~.Table.codes`. - - bbox : `.Bbox` or [xmin, ymin, width, height], optional - A bounding box to draw the table into. If this is not *None*, this - overrides *loc*. - - edges : substring of 'BRTL' or {'open', 'closed', 'horizontal', 'vertical'} - The cell edges to be drawn with a line. See also - `~.Cell.visible_edges`. - - Returns - ------- - `~matplotlib.table.Table` - The created table. - - Other Parameters - ---------------- - **kwargs - `.Table` properties. - - %(Table:kwdoc)s - """ - - if cellColours is None and cellText is None: - raise ValueError('At least one argument from "cellColours" or ' - '"cellText" must be provided to create a table.') - - # Check we have some cellText - if cellText is None: - # assume just colours are needed - rows = len(cellColours) - cols = len(cellColours[0]) - cellText = [[''] * cols] * rows - - rows = len(cellText) - cols = len(cellText[0]) - for row in cellText: - if len(row) != cols: - raise ValueError("Each row in 'cellText' must have {} columns" - .format(cols)) - - if cellColours is not None: - if len(cellColours) != rows: - raise ValueError("'cellColours' must have {} rows".format(rows)) - for row in cellColours: - if len(row) != cols: - raise ValueError("Each row in 'cellColours' must have {} " - "columns".format(cols)) - else: - cellColours = ['w' * cols] * rows - - # Set colwidths if not given - if colWidths is None: - colWidths = [1.0 / cols] * cols - - # Fill in missing information for column - # and row labels - rowLabelWidth = 0 - if rowLabels is None: - if rowColours is not None: - rowLabels = [''] * rows - rowLabelWidth = colWidths[0] - elif rowColours is None: - rowColours = 'w' * rows - - if rowLabels is not None: - if len(rowLabels) != rows: - raise ValueError("'rowLabels' must be of length {0}".format(rows)) - - # If we have column labels, need to shift - # the text and colour arrays down 1 row - offset = 1 - if colLabels is None: - if colColours is not None: - colLabels = [''] * cols - else: - offset = 0 - elif colColours is None: - colColours = 'w' * cols - - # Set up cell colours if not given - if cellColours is None: - cellColours = ['w' * cols] * rows - - # Now create the table - table = Table(ax, loc, bbox, **kwargs) - table.edges = edges - height = table._approx_text_height() - - # Add the cells - for row in range(rows): - for col in range(cols): - table.add_cell(row + offset, col, - width=colWidths[col], height=height, - text=cellText[row][col], - facecolor=cellColours[row][col], - loc=cellLoc) - # Do column labels - if colLabels is not None: - for col in range(cols): - table.add_cell(0, col, - width=colWidths[col], height=height, - text=colLabels[col], facecolor=colColours[col], - loc=colLoc) - - # Do row labels - if rowLabels is not None: - for row in range(rows): - table.add_cell(row + offset, -1, - width=rowLabelWidth or 1e-15, height=height, - 
text=rowLabels[row], facecolor=rowColours[row], - loc=rowLoc) - if rowLabelWidth == 0: - table.auto_set_column_width(-1) - - ax.add_table(table) - return table diff --git a/spaces/lanyi2023/QQsign/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat b/spaces/lanyi2023/QQsign/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat deleted file mode 100644 index b60d7ffdb4048e81142e738e59e2fb43b4e76ed3..0000000000000000000000000000000000000000 --- a/spaces/lanyi2023/QQsign/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat +++ /dev/null @@ -1,84 +0,0 @@ -@if "%DEBUG%" == "" @echo off -@rem ########################################################################## -@rem -@rem unidbg-fetch-qsign startup script for Windows -@rem -@rem ########################################################################## - -@rem Set local scope for the variables with windows NT shell -if "%OS%"=="Windows_NT" setlocal - -set DIRNAME=%~dp0 -if "%DIRNAME%" == "" set DIRNAME=. -set APP_BASE_NAME=%~n0 -set APP_HOME=%DIRNAME%.. - -@rem Add default JVM options here. You can also use JAVA_OPTS and UNIDBG_FETCH_QSIGN_OPTS to pass JVM options to this script. -set DEFAULT_JVM_OPTS= - -@rem Find java.exe -if defined JAVA_HOME goto findJavaFromJavaHome - -set JAVA_EXE=java.exe -%JAVA_EXE% -version >NUL 2>&1 -if "%ERRORLEVEL%" == "0" goto init - -echo. -echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:findJavaFromJavaHome -set JAVA_HOME=%JAVA_HOME:"=% -set JAVA_EXE=%JAVA_HOME%/bin/java.exe - -if exist "%JAVA_EXE%" goto init - -echo. -echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% -echo. -echo Please set the JAVA_HOME variable in your environment to match the -echo location of your Java installation. - -goto fail - -:init -@rem Get command-line arguments, handling Windows variants - -if not "%OS%" == "Windows_NT" goto win9xME_args - -:win9xME_args -@rem Slurp the command line arguments. -set CMD_LINE_ARGS= -set _SKIP=2 - -:win9xME_args_slurp -if "x%~1" == "x" goto execute - -set CMD_LINE_ARGS=%* - -:execute -@rem Setup the command line - -set CLASSPATH=%APP_HOME%\lib\unidbg-fetch-qsign-1.1.5-all.jar - -@rem Execute unidbg-fetch-qsign -"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %UNIDBG_FETCH_QSIGN_OPTS% -jar "%CLASSPATH%" %CMD_LINE_ARGS% - -:end -@rem End local scope for the variables with windows NT shell -if "%ERRORLEVEL%"=="0" goto mainEnd - -:fail -rem Set variable UNIDBG_FETCH_QSIGN_EXIT_CONSOLE if you need the _script_ return code instead of -rem the _cmd.exe /c_ return code! -if not "" == "%UNIDBG_FETCH_QSIGN_EXIT_CONSOLE%" exit 1 -exit /b 1 - -:mainEnd -if "%OS%"=="Windows_NT" endlocal - -:omega \ No newline at end of file diff --git a/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/chat_handler.py b/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/chat_handler.py deleted file mode 100644 index 215f7bdbc762ed759f1054d2cc56e26bdd358cf9..0000000000000000000000000000000000000000 --- a/spaces/leogabraneth/text-generation-webui-main/extensions/superboogav2/chat_handler.py +++ /dev/null @@ -1,138 +0,0 @@ -""" -This module is responsible for modifying the chat prompt and history. 
-""" -import json -import re - -import extensions.superboogav2.parameters as parameters - -from modules import chat -from modules.text_generation import get_encoded_length -from modules.logging_colors import logger -from extensions.superboogav2.utils import create_context_text, create_metadata_source - -from .data_processor import process_and_add_to_collector -from .chromadb import ChromaCollector - - -CHAT_METADATA = create_metadata_source('automatic-chat-insert') - -INSTRUCT_MODE = 'instruct' -CHAT_INSTRUCT_MODE = 'chat-instruct' - - -def _is_instruct_mode(state: dict): - mode = state.get('mode') - return mode == INSTRUCT_MODE or mode == CHAT_INSTRUCT_MODE - - -def _remove_tag_if_necessary(user_input: str): - if not parameters.get_is_manual(): - return user_input - - return re.sub(r'^\s*!c\s*|\s*!c\s*$', '', user_input) - - -def _should_query(input: str): - if not parameters.get_is_manual(): - return True - - if re.search(r'^\s*!c|!c\s*$', input, re.MULTILINE): - return True - - return False - - -def _format_single_exchange(name, text): - if re.search(r':\s*$', name): - return '{} {}\n'.format(name, text) - else: - return '{}: {}\n'.format(name, text) - - -def _get_names(state: dict): - if _is_instruct_mode(state): - user_name = state['name1_instruct'] - bot_name = state['name2_instruct'] - else: - user_name = state['name1'] - bot_name = state['name2'] - - if not user_name: - user_name = 'User' - if not bot_name: - bot_name = 'Assistant' - - return user_name, bot_name - - -def _concatinate_history(history: dict, state: dict): - full_history_text = '' - user_name, bot_name = _get_names(state) - - # Grab the internal history. - internal_history = history['internal'] - assert isinstance(internal_history, list) - - # Iterate through the history. - for exchange in internal_history: - assert isinstance(exchange, list) - - if len(exchange) >= 1: - full_history_text += _format_single_exchange(user_name, exchange[0]) - if len(exchange) >= 2: - full_history_text += _format_single_exchange(bot_name, exchange[1]) - - return full_history_text[:-1] # Remove the last new line. - - -def _hijack_last(context_text: str, history: dict, max_len: int, state: dict): - num_context_tokens = get_encoded_length(context_text) - - names = _get_names(state)[::-1] - - history_tokens = 0 - replace_position = None - for i, messages in enumerate(reversed(history['internal'])): - for j, message in enumerate(reversed(messages)): - num_message_tokens = get_encoded_length(_format_single_exchange(names[j], message)) - - # TODO: This is an extremely naive solution. A more robust implementation must be made. 
- if history_tokens + num_context_tokens <= max_len: - # This message can be replaced - replace_position = (i, j) - - history_tokens += num_message_tokens - - if replace_position is None: - logger.warn("The provided context_text is too long to replace any message in the history.") - else: - # replace the message at replace_position with context_text - i, j = replace_position - history['internal'][-i-1][-j-1] = context_text - - -def custom_generate_chat_prompt_internal(user_input: str, state: dict, collector: ChromaCollector, **kwargs): - if parameters.get_add_chat_to_data(): - # Get the whole history as one string - history_as_text = _concatinate_history(kwargs['history'], state) - - if history_as_text: - # Delete all documents that were auto-inserted - collector.delete(ids_to_delete=None, where=CHAT_METADATA) - # Insert the processed history - process_and_add_to_collector(history_as_text, collector, False, CHAT_METADATA) - - if _should_query(user_input): - user_input = _remove_tag_if_necessary(user_input) - results = collector.get_sorted_by_dist(user_input, n_results=parameters.get_chunk_count(), max_token_count=int(parameters.get_max_token_count())) - - # Check if the strategy is to modify the last message. If so, prepend or append to the user query. - if parameters.get_injection_strategy() == parameters.APPEND_TO_LAST: - user_input = user_input + create_context_text(results) - elif parameters.get_injection_strategy() == parameters.PREPEND_TO_LAST: - user_input = create_context_text(results) + user_input - elif parameters.get_injection_strategy() == parameters.HIJACK_LAST_IN_CONTEXT: - _hijack_last(create_context_text(results), kwargs['history'], state['truncation_length'], state) - - return chat.generate_chat_prompt(user_input, state, **kwargs) diff --git a/spaces/leurez/moss/service/src/utils/index.ts b/spaces/leurez/moss/service/src/utils/index.ts deleted file mode 100644 index 726f807eae004126c2c0f360a8026bfc442d0536..0000000000000000000000000000000000000000 --- a/spaces/leurez/moss/service/src/utils/index.ts +++ /dev/null @@ -1,22 +0,0 @@ -interface SendResponseOptions<T = any> { - type: 'Success' | 'Fail' - message?: string - data?: T -} - -export function sendResponse<T>(options: SendResponseOptions<T>) { - if (options.type === 'Success') { - return Promise.resolve({ - message: options.message ?? null, - data: options.data ?? null, - status: options.type, - }) - } - - // eslint-disable-next-line prefer-promise-reject-errors - return Promise.reject({ - message: options.message ?? 'Failed', - data: options.data ?? null, - status: options.type, - }) -} diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Acdsee15serialkeyfreedownload __HOT__.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Acdsee15serialkeyfreedownload __HOT__.md deleted file mode 100644 index 0b252556a5b1a8f7cf01fe9032d2ed35a0502ab2..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Acdsee15serialkeyfreedownload __HOT__.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>acdsee15serialkeyfreedownload</h2><br /><p><b><b>DOWNLOAD</b> ⚹⚹⚹ <a href="https://bytlly.com/2uGwyA">https://bytlly.com/2uGwyA</a></b></p><br /><br /> -<br /> -TeamViewer 15. ... 19 Cracked (Loader) Tool with Free Activation Keys Generator Keygen; It's entirely ... 36 (x86x64) Keygen Crack free download · FULL AutoCAD keygen · x force x32 ... ACDSee Photo Studio Ultimate 2021 v14 Rus + Crack. 
4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Aerosoft Bergamo 23.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Aerosoft Bergamo 23.md deleted file mode 100644 index acc8dd3eb9ced18959087556136a77c328b17176..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Aerosoft Bergamo 23.md +++ /dev/null @@ -1,10 +0,0 @@ - -<p>on the aerosoft forum, mathijs posted a new topic showcasing some previews for their upcoming release of milan malpensa. aerosoft malpensa is being developed by david rosenfeld, who is renowned for his previous sceneries such as ben gurion. enjoy the previews below and for more information, stay tuned!</p> -<h2>Aerosoft Bergamo 23</h2><br /><p><b><b>Download Zip</b> ↔ <a href="https://bytlly.com/2uGwFR">https://bytlly.com/2uGwFR</a></b></p><br /><br /> -<p>here are some of our favorite add-ons from the month of october, 2018! scenery short final designs launched their munich (eddm) package at the beginning of the month. thomson flies in here quite a bit and says it looks perfect! aerosoft released airport bergamo this month. as the third busiest airport in italy, this is []</p> -<p>this week's featured package is airport bergamo. this is a free airport for x-plane 11 and x-plane 12. it was created by aerosoft and contains a variety of different areas which have more realistic than the default scenery. i have the airport based on aerosoft's airport package for a long time and thought it's time to give it an update. i think the airport is still in need of improvement, like removing the billboards at the runway. the height differences with the runway aren't too nice. </p> -<p>i could not install airport bergamo on my configuration (ubuntu 18.04.3 lts) and i had a problem with the runway, which is not a problem for everybody. so i asked aerosoft for help. they told me to change the runway texture, which is wrong. when i asked them why they didn't change the runway texture, they didn't give me an answer. even though i asked them multiple times they ignored me. </p> -<p>the scenery includes the following: new terminal buildings for both the main terminal and the cargo terminal, including a huge new shopping center, f and g gates, airport car park, a huge new cargo terminal, terminal 2, and an expanded cargo terminal, carbi (cargo buiding). this airport is based on bergamo orio al serio airport, which is just north of milan in the lombardy region of italy.</p> -<p></p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Davinci Resolve Studio 14.3 Crack Serial Key (Win).md b/spaces/lincquiQcaudo/Top-20-Diffusion/Davinci Resolve Studio 14.3 Crack Serial Key (Win).md deleted file mode 100644 index 2755be4a724dc0a20bfbc1e3a08bd4e72acc17dd..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Davinci Resolve Studio 14.3 Crack Serial Key (Win).md +++ /dev/null @@ -1,11 +0,0 @@ -<h2>Davinci Resolve Studio 14.3 Crack Serial Key (Win)</h2><br /><p><b><b>Download Zip</b> ››› <a href="https://bytlly.com/2uGwf6">https://bytlly.com/2uGwf6</a></b></p><br /><br /> -<br /> -Resolve 14.3 Studio. GTX 970 with GeForce 390.77 driver. Desktop video 10.9.10. USB 3.0 shuttle intensity. Windows 10 Pro. Up. ----- -Mikhail Fayto -Regarding the video card driver. I go into its settings - and what do I see? 1. It doesn't work for me. 2. In the "Driver" section in the "Details" column, I see "No Drivers Have Been Released". 
So it's not just a hover. ------ -Of course, I understand that all the drivers that I have tried are not suitable. However, this is still at least some information, and I can try to put them. First, I'll try to install the driver from the "Downloads" - perhaps it will work. ------ 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Find My Font Pro Crack.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Find My Font Pro Crack.md deleted file mode 100644 index 91f9c289feabe61c16ad5129a9cce3c9c38b2ece..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Find My Font Pro Crack.md +++ /dev/null @@ -1,10 +0,0 @@ -<br /> -<p>there are a few different ways to do this. here are the most common methods for adding fonts to a premiere pro timeline. for mac os, open the font book app, click the add button (denoted by a + symbol), find the font you want and choose open to install them.</p> -<h2>find my font pro crack</h2><br /><p><b><b>Download</b> ⭐ <a href="https://bytlly.com/2uGyJ0">https://bytlly.com/2uGyJ0</a></b></p><br /><br /> -<p>now it happens all the time. i will use my macbook pro at night to watch netflix and sometimes play games. suddenly, my macbook pro will start to beep and shake, and the screen will crack. it will beep and shake for about 3 seconds and then the screen will crack. i have to force shut down my macbook pro. i am so disappointed to have to pay the apple store a service call to repair it. i had no idea that this could happen. </p> -<p>all in all, it becomes apparent that find my font is a powerful and handy piece of software for people interested in determining the name of a font. the ui is intuitive, the environment encompasses many options and the response time is good. even if an exact match cannot be made, multiple similar ones will be listed.</p> -<p>once youve chosen the letters from the image, select a template to apply. the types of templates you can use include arranged, first letter only, and text only. in the best match drop-down menu, you can select font names, image files, or a combination of both. you can also activate a gif animation to make the preview more entertaining.</p> -<p>you can also edit the text of the first letter of the text in the image, font color, and font size to adjust the results. to save a font, click the export button, select a location on your computer, and save it. once youve saved the font, you can use it in the program. to add the font to your system, open adobe photoshop, find the new fonts folder, and create a new folder to store your font. if you want to keep the font, click save or choose file (mac) or save as (windows) in the text window. this allows you to easily find the font later.</p> -<p></p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Jin Li Tong Software Multiviewer UPD.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Jin Li Tong Software Multiviewer UPD.md deleted file mode 100644 index 89a3c1fe293fd5add0a89a997c50f1a29dd448dc..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Jin Li Tong Software Multiviewer UPD.md +++ /dev/null @@ -1,14 +0,0 @@ -<br /> -<h1>Jin Li Tong Software Multiviewer: A Powerful Solution for Monitoring Multiple Video Sources</h1> -<p>Jin Li Tong Software Multiviewer is a software application that allows you to monitor multiple video sources on a single display. 
It supports various input types, such as SDI, HDMI, analog, NDI, and UDP[^1^]. You can configure each panel to display different information, such as audio meters, custom labels, and alarms for image freezing, black level, and white level[^1^]. You can also monitor up to 16 sound channels by simply clicking on the pair that you want to listen to[^1^].</p> -<p>Jin Li Tong Software Multiviewer is designed for professional environments such as broadcasting stations and production studios. It is suitable for 24/7 monitoring and it works under Windows 7 / 8 and 10 and equivalent Windows Server OS (64-bit only)[^1^]. It can handle SD, HD, and 4K resolutions[^1^]. You can customize the panels layout by using an easy to operate wizard. You can also manage the panels (loading, cloning or deleting) from a user interface that allows you to preview the panels layout[^1^]. Jin Li Tong Software Multiviewer has a watch-dog application that ensures the uninterrupted functionality of the software[^1^].</p> -<h2>Jin li tong software multiviewer</h2><br /><p><b><b>Download File</b> »»» <a href="https://bytlly.com/2uGwXp">https://bytlly.com/2uGwXp</a></b></p><br /><br /> -<p>Multiviewer software is a type of software that lets you monitor multiple video sources on a single display. It is useful for situations where you need to view more sources than you have monitors available. Multiviewer software can also provide additional features such as overlays, audio monitoring, and alarms[^2^] [^3^] [^4^]. Multiviewer software can be used for various purposes, such as live multi-camera production, control room monitoring, security surveillance, video conferencing, and more[^2^] [^3^] [^4^].</p> -<p>If you are looking for a reliable and flexible multiviewer software solution, you should consider Jin Li Tong Software Multiviewer. It offers a wide range of features and options to suit your needs and preferences. You can download it from <a href="https://byaresylog.blogspot.com/?d=2sVMtE">this link</a> and try it for yourself.</p> - -<p>Jin Li Tong Software Multiviewer is easy to install and use. You just need to download the software from the link provided and run the setup file. Then you can launch the software and start configuring your panels. You can choose the type of input, the info to be displayed, and the thresholds for alarms for each panel. You can also adjust the layout of the panels by using the wizard or the user interface. You can save your settings as presets and load them whenever you need them.</p> -<p>There are some alternatives to Jin Li Tong Software Multiviewer, such as MultiView by Blackmagic Design, MagicSoft Multiviewer, and MultiViewer by Apponic. These are some other multiviewer software applications that offer similar features and functions. However, they may differ in terms of price, compatibility, performance, and user interface . You should compare them carefully and choose the one that best suits your needs and budget.</p> -<p></p> -<p>To get a better idea of how Jin Li Tong Software Multiviewer works, you can see an image of it below. This image shows a sample layout with four panels displaying different video sources. You can also see the audio meters, custom labels, and alarms on each panel. 
This is just one example of how you can use Jin Li Tong Software Multiviewer to monitor multiple video sources on a single display.</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Los Campanilleros Partitura Pdf Download TOP.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Los Campanilleros Partitura Pdf Download TOP.md deleted file mode 100644 index 68594c3b83b35aaa2a4c63933edfa5eb7b6e3283..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Los Campanilleros Partitura Pdf Download TOP.md +++ /dev/null @@ -1,24 +0,0 @@ - -<h1>How to Download Los Campanilleros Partitura Pdf for Free</h1> -<p>Los Campanilleros is a popular Christmas carol from Andalusia, Spain. It tells the story of the gypsies who go through the fields singing and dancing with their bells and guitars, and waking up the shepherds with their songs. The melody is catchy and festive, and the lyrics are full of joy and devotion.</p> -<h2>Los Campanilleros Partitura Pdf Download</h2><br /><p><b><b>DOWNLOAD</b> — <a href="https://bytlly.com/2uGwwY">https://bytlly.com/2uGwwY</a></b></p><br /><br /> -<p>If you want to play this beautiful song on your piano, guitar, or choir, you might be looking for a sheet music pdf file that you can download for free. Luckily, there are some websites that offer this service, and we will show you how to find them in this article.</p> -<h2>Where to Find Los Campanilleros Partitura Pdf</h2> -<p>One of the best places to find Los Campanilleros partitura pdf is Musescore.com. This is a website where musicians can share their scores and arrangements of different songs, and also access a huge collection of official scores licensed from print music publishers. You can browse through different versions of Los Campanilleros for solo piano, mixed trio, choral, or sextet, and listen to how they sound before downloading them.</p> -<p>To download Los Campanilleros partitura pdf from Musescore.com, you need to create a free account or log in with your Facebook or Google account. Then, you can choose the score you like and click on the download button. You can select the format you prefer: pdf, midi, xml, or mscz. You can also print the score directly from the website.</p> -<p></p> -<p>Another website where you can find Los Campanilleros partitura pdf is Scribd.com. This is a platform where people can upload and read documents of various kinds, including sheet music. You can find a complete score of Los Campanilleros for soprano, alto, tenor, bass, and piano on Scribd.com.</p> -<p>To download Los Campanilleros partitura pdf from Scribd.com, you need to sign up for a free trial or a paid subscription. Then, you can open the document and click on the download icon. You can also save it to your online library or share it with others.</p> -<h2>How to Play Los Campanilleros Partitura Pdf</h2> -<p>Once you have downloaded Los Campanilleros partitura pdf, you can start practicing it on your instrument or with your choir. Here are some tips to help you play it well:</p> -<ul> -<li>Pay attention to the key signature and the time signature. Los Campanilleros is usually written in F major or D minor, and has a 3/4 meter.</li> -<li>Follow the tempo and the dynamics. Los Campanilleros is a lively and cheerful song, so it should be played at a moderate to fast speed and with expression.</li> -<li>Sing or play the melody with accuracy and clarity. 
The melody of Los Campanilleros is based on a pentatonic scale and has some syncopated rhythms. Make sure you hit the right notes and emphasize the accents.</li> -<li>Harmonize the melody with the chords. The harmony of Los Campanilleros is simple and follows a typical Andalusian cadence: i-VII-VI-V-i (or vi-IV-III-VI-vi in major). You can use your left hand on the piano or your guitar to play the chords along with the melody.</li> -<li>Enjoy the music and have fun! Los Campanilleros is a song that celebrates Christmas and life, so don't be afraid to express your emotions and enthusiasm while playing it.</li> -</ul> -<h2>Conclusion</h2> -<p>Los Campanilleros is a wonderful song that you can learn and play for yourself or for your friends and family during the holiday season. You can find Los Campanilleros partitura pdf online for free on websites like Musescore.com or Scribd.com, and download it to your device or print it out. Then, you can practice it on your piano, guitar, or choir, following some tips to play it well. We hope this article has helped you find what you were looking for and enjoy this beautiful carol.</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 32 Bit Download Crack _BEST_.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 32 Bit Download Crack _BEST_.md deleted file mode 100644 index 7c510d45f07acb63dcecdf9b99241fd649b25338..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Matlab 32 Bit Download Crack _BEST_.md +++ /dev/null @@ -1,62 +0,0 @@ -<h2>matlab 32 bit download crack</h2><br /><p><b><b>DOWNLOAD</b> ———>>> <a href="https://bytlly.com/2uGwkx">https://bytlly.com/2uGwkx</a></b></p><br /><br /> -<br /> ->> ip(1:3) - -ans = - - 1 2 3 - -Compare: - ->> ip = [1 2 3]; - ->> ip = [1 2; 2 3; 3 1]; - -IPs are represented by column vectors, since they have no third dimension. There are 2^3 = 8 possible combinations. - -Q: - -How to ensure one input is entered for all dropdown boxes - -I have two dropdown boxes, one for customers and one for inventory. I want to have both dropdowns require one input for selection. - -I have tried this - -function fillCalculator() { - - var $input = $('#name'); - - $input.bind('keyup blur', function () - - if (!this.value.trim()) - - $input.focus(); - - - - else - - $input.unbind(); - - ); - - if ($input.val().length > 0) - - $input.change(); - - - - - -This works fine as long as the customer name is entered, but if the customer name is removed, or no name is entered for customer selection, the blank input line doesn't get removed. How can I make sure both of these are populated, and if either are not populated the blank input line gets removed? - -A: - -You could check if there is any text or length in the input then it would be empty if not it would be something. 
You could also test if the values are empty like so: - - if (this.value.trim() && this.value.length > 0) { - - $input.focus(); 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/attention.py b/spaces/liuyuan-pal/SyncDreamer/ldm/modules/attention.py deleted file mode 100644 index e04837a20c8d97ef11786f08d4ddc477b0a1c35c..0000000000000000000000000000000000000000 --- a/spaces/liuyuan-pal/SyncDreamer/ldm/modules/attention.py +++ /dev/null @@ -1,336 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat - -from ldm.modules.diffusionmodules.util import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) -# feedforward -class ConvGEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Conv2d(dim_in, dim_out * 2, 1, 1, 0) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. 
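    In this file it is applied to the final output projections (proj_out), so blocks that add their input back (x + x_in) start out as an identity mapping.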
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) - k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if exists(mask): - mask = mask>0 - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - -class BasicSpatialTransformer(nn.Module): - def __init__(self, dim, n_heads, d_head, context_dim=None, checkpoint=True): - super().__init__() - inner_dim = n_heads * d_head - self.proj_in = nn.Sequential( - nn.GroupNorm(8, dim), - nn.Conv2d(dim, inner_dim, kernel_size=1, stride=1, padding=0), - nn.GroupNorm(8, inner_dim), - nn.ReLU(True), - ) - self.attn = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, context_dim=context_dim) # is a self-attention if not self.disable_self_attn - self.out_conv = nn.Sequential( - nn.GroupNorm(8, inner_dim), - nn.ReLU(True), - nn.Conv2d(inner_dim, inner_dim, 1, 1), - ) - self.proj_out = nn.Sequential( - nn.GroupNorm(8, inner_dim), - nn.ReLU(True), - zero_module(nn.Conv2d(inner_dim, dim, kernel_size=1, stride=1, padding=0)), - ) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context): - # input - b,_,h,w = x.shape - x_in = x - x = self.proj_in(x) - - # attention - x = rearrange(x, 'b c h w -> b (h w) c').contiguous() - context = rearrange(context, 'b c h w -> b (h w) c').contiguous() - x = self.attn(x, context) + x - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() - - # output - x = self.out_conv(x) + x - x = self.proj_out(x) + x_in - return x - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, disable_self_attn=False): - super().__init__() - self.disable_self_attn = disable_self_attn - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout, - context_dim=context_dim if self.disable_self_attn else None) # is a self-attention if not self.disable_self_attn - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - -class ConvFeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Conv2d(dim, inner_dim, 1, 1, 0), - nn.GELU() - ) if not glu else ConvGEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Conv2d(inner_dim, dim_out, 1, 1, 0) - ) - - def forward(self, x): - return self.net(x) - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. 
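    (each of the h*w spatial positions becomes a token; the self-/cross-attention and feed-forward stack lives in BasicTransformerBlock)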
- Finally, reshape to image - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None, - disable_self_attn=False): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim, - disable_self_attn=disable_self_attn) - for d in range(depth)] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c').contiguous() - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous() - x = self.proj_out(x) - return x + x_in diff --git a/spaces/lixq/bingo61/README.md b/spaces/lixq/bingo61/README.md deleted file mode 100644 index 218767d1d7debd26932ffddca2ec0f421c0171a9..0000000000000000000000000000000000000000 --- a/spaces/lixq/bingo61/README.md +++ /dev/null @@ -1,195 +0,0 @@ ---- -title: bingo -emoji: 📉 -colorFrom: red -colorTo: red -sdk: docker -pinned: true -license: mit -duplicated_from: hf4all/bingo ---- - -<div align="center"> - -# Bingo - -Bingo,一个让你呼吸顺畅 New Bing。 - -高度还原 New Bing 网页版的主要操作,国内可用,兼容绝大多数微软 Bing AI 的功能,可自行部署使用。 - -![Github stars](https://badgen.net/github/stars/weaigc/bingo?icon=github&label=stars) -![Gthub issues](https://img.shields.io/github/issues/weaigc/bingo) -[![docker build](https://github.com/weaigc/bingo/actions/workflows/docker.yml/badge.svg)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![docker hub](https://badgen.net/docker/size/weaigc/bingo?icon=docker&label=image%20size)](https://hub.docker.com/repository/docker/weaigc/bingo/) -[![MIT License](https://img.shields.io/badge/license-MIT-97c50f)](https://github.com/weaigc/bingo/blob/main/license) - -</div> - -## 演示站点 - -https://bing.github1s.tk - - - -[![img](./docs/images/demo.png)](https://bing.github1s.tk) - -## 功能和特点 - -- 完全基于 Next.js 重写,高度还原 New Bing Web 版 UI,使用体验和 Bing AI 基本一致。 -- 支持 Docker 构建,方便快捷地部署和访问。 -- Cookie 可全局配置,全局共享。 -- 支持持续语音对话 - -## RoadMap - - - [x] 支持 wss 转发 - - [x] 支持一键部署 - - [x] 优化移动端展示 - - [x] 支持画图 - - [x] 支持语音输入(支持语音指令,目前仅支持 PC 版 Edge 及 Chrome 浏览器) - - [x] 支持语音输出(需要手动开启) - - [x] 支持图片输入 - - [x] 支持自定义域名 - - [ ] 支持历史记录 - - [ ] 适配深色模式 - - [ ] 支持内置提示词 - - [ ] 支持离线访问 - - [ ] 国际化翻译 - -## 一键部署 -你也可以一键部署自己的 New Bing AI 到 🤗 HuggingFace 。 - -### 部署到 Huggingface -1. 点击此图标 -[![Deploy to HuggingFace](https://img.shields.io/badge/%E7%82%B9%E5%87%BB%E9%83%A8%E7%BD%B2-%F0%9F%A4%97-fff)](https://huggingface.co/login?next=%2Fspaces%2Fhf4all%2Fbingo%3Fduplicate%3Dtrue%26visibility%3Dpublic),配置可以不改。 - -2. 部署署完成后,点击“设置” 》“站点域名”,点一下,复制一下 HF 域名信息,然后分享给别人即可。 - -> Huggingface 不支持绑定自己的域名,不过我们可以使用曲线救国的方式来达到这个目的 -> 1. 方式二,借助 Cloudflare Workers [部署Cloudflare Workers](#使用Cloudflare-Workers自定义域名) -> 2. 
方式一,借助 Github Pages 及 iframe [如何绑定域名](https://github.com/weaigc/bingo/issues/4) - -### 使用Cloudflare Workers自定义域名 - -> 核心代码 [worker.js](./cloudflare/worker.js) - -- [注册 Cloudflare 账号](https://dash.cloudflare.com/sign-up) - -- 添加一个新的网站,需要你有自己的域名并且将域名`Name Server`托管给 Cloudflare 才行(更多信息可自行 Google) - -- 通过左侧菜单进入「Workers」,并点击「Create a Worker」。 - -- 创建 Worker 服务,复制 [worker.js](./cloudflare/worker.js) 全部代码,粘贴至创建的服务中,根据注释进行改动,保存并部署。 - -- 触发器 中自定义访问域名。 - -### 部署其它平台 -<details> -<summary> -由于其他平台目前遭到 New Bing 封杀,会遇到很多问题,不再做推荐,有需要的可以自行查看 -</summary> - -#### 部署到 Netlify -[![Deploy to Netlify Button](https://www.netlify.com/img/deploy/button.svg)](https://app.netlify.com/start/deploy?repository=https://github.com/weaigc/bingo) - -#### 部署到 Vercel -如果你是 Vercel 付费用户,可以点以下链接一键部署到 Vercel。免费版本有[接口超时限制](https://vercel.com/docs/concepts/limits/overview),不推荐使用 - -[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?demo-title=bingo&demo-description=bingo&demo-url=https%3A%2F%2Fbing.github1s.tk%2F&project-name=bingo&repository-name=bingo&repository-url=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo&from=templates&skippable-integrations=1&env=BING_HEADER&envDescription=%E5%A6%82%E6%9E%9C%E4%B8%8D%E7%9F%A5%E9%81%93%E6%80%8E%E4%B9%88%E9%85%8D%E7%BD%AE%E8%AF%B7%E7%82%B9%E5%8F%B3%E4%BE%A7Learn+More&envLink=https%3A%2F%2Fgithub.com%2Fweaigc%2Fbingo%2Fblob%2Fmain%2F.env.example) - -#### 部署到 Render - -[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://render.com/deploy?repo=https://github.com/weaigc/bingo) -</details> - -## 环境和依赖 - -- Node.js >= 18 -- Bing AI 的[身份信息](#如何获取-BING_HEADER)) - -## 安装和使用 - -* 使用 Node 启动 - -```bash -git clone https://github.com/weaigc/bingo.git -npm i # 推荐使用 pnpm i -npm run build -npm run start -``` - -* 使用 Docker 启动 -```bash -docker pull weaigc/bingo -docker run --rm -it -p 7860:7860 weaigc/bingo -# 或者 -docker run --rm -it -e BING_HEADER=xxxx -p 7860:7860 weaigc/bingo -``` - -## 如何获取 BING_HEADER -> 配置了 BING_HEADER 意味着你将自己的账号共享给所有使用此服务的人,如果不需要免登录画图的功能,不建议设置此变量 - -打开 https://www.bing.com 并登录,然后访问 https://www.bing.com/turing/captcha/challenge,通过人机校验,然后 - -![BING HEADER](./docs/images/curl.png) - -> 复制出来的内容应该如下所示。确认格式无误后,打开 https://effulgent-bubblegum-e2f5df.netlify.app/#dialog=%22settings%22 ,粘贴进去,点击“转成 BING_HEADER 并复制”,然后从剪切板粘贴即可得到。(你也可以先在网页上进行验证) - -以下是格式参考,需要注意的是,网页端保存的格式是以`curl`开头, 而服务端配置的 `BING_HEADER` 是 `base64` 格式,两者不能互通。 -<details> -<summary>正常格式/网页端保存的格式(格式仅供参考)</summary> - -``` -curl 'https://www.bing.com/turing/captcha/challenge' \ - -H 'authority: www.bing.com' \ - -H 'accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7' \ - -H 'accept-language: zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6' \ - -H 'cache-control: max-age=0' \ - -H 'cookie: MicrosoftApplicationsTelemetryDeviceId=3399c004-fd0e-48ec-bb92-d82a27b2bbd4; _EDGE_V=1; SRCHD=AF=NOFORM; SRCHUID=V=2&GUID=29EBDDA4E6674329ACCF1A0A423C3E98&dmnchg=1; _UR=QS=0&TQS=0; _HPVN=CS=eyJQbiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiUCJ9LCJTYyI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiSCJ9LCJReiI6eyJDbiI6MSwiU3QiOjAsIlFzIjowLCJQcm9kIjoiVCJ9LCJBcCI6dHJ1ZSwiTXV0ZSI6dHJ1ZSwiTGFkIjoiMjAyMy0wNy0yNVQwMDowMDowMFoiLCJJb3RkIjowLCJHd2IiOjAsIkRmdCI6bnVsbCwiTXZzIjowLCJGbHQiOjAsIkltcCI6Mn0=; 
_RwBf=ilt=1&ihpd=1&ispd=0&rc=0&rb=0&gb=0&rg=200&pc=0&mtu=0&rbb=0&g=0&cid=&clo=0&v=1&l=2023-07-25T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=2&p=&c=&t=0&s=0001-01-01T00:00:00.0000000+00:00&ts=2023-07-25T11:00:31.7111548+00:00&rwred=0&wls=&lka=0&lkt=0&TH=&dci=0; ANON=A=0043C6590EA808ED6E395059FFFFFFFF&E=1c8b&W=1; NAP=V=1.9&E=1c31&C=DnaMSbDN_4efZ_xXqBF3Daorjr53kYqYoaP8YHsupjmiXnysX7a37A&W=1; PPLState=1; KievRPSSecAuth=FABSBBRaTOJILtFsMkpLVWSG6AN6C/svRwNmAAAEgAAACMGUA7EGVSjGEAQBGHtNsc5sNL7unmJsfPJ2t6imfo4BeUJlAia3IpMTtMUy4PU/C5QAzRI5pODtsIee0+blgllXt/5IiWwGjwmdhivsFM597pRPkjARPfwsPhNLPNbJrCPNPHdje4Is78MnCADXw6/NBq2FL8V2/byw2fH6IuAMD2MvN/VvqpEa9ZxiDjZtENj4HEj0mO2SgzjfyEhVAkjvznJqU2rw/Q2tHmX94NAM2kzlzKF/hWPhCCUmu8IHLvCnHDS6mSptvJDDP/sp3ovtzOXkP1mlM/Xju5ftesUvccVEQGffXORa1dE5hEMbKIiKXz1tDdduSXE19g9/+mRMAjaQhpwhI8XmilCTx1adb1Ll5qK+VjC9GNfEZzcbsGBPVaOl+anG8rEMq+Xnhjo7J+NqTNolavHgcuV8kJsCeJZIged33UA8eOZeFo+wAECMguxMoSqgpGH+sthqynvD/FJD6r/tiU2N3uqVq8NE8V37asrN6T14Z0FGBJOe6ET1+PGApm3s11OY9/xhFEB9T5BEPUGEbvRcLcW2ncFQX0EU+xweiPqo1Q1hNUg/dCtSI+lZ7c2H8XheePZavZ0TJQ8oNCSAuKiTqJmI0fVGpwbXwfaADkEipuawz3fIuMJBNgMU0OtA7Hm59v2fGLIBuvi6YeKS6GgVk3BIPf+P/eKahwozrxQZaFnoHTSqMkvct7xCP4atBROfXKf5Ww0CcFKp+2WX9BIskTOo2jjk6bAyyYJ+ElUB1fgLKNk5m/YSMc9iYCLIBMIGN8F0Yvy3tZ7cvh7Ue5Klo98US/I+nW1G7ZJMHRgUO8h8lpneHqEMegKd8gynO4VF7RpCjJkunDmW0Ta+RkXAP619pg0dqHMFkoOgknN78oBbGTV6fJUKotv+vi61kLhAeXZGWoHGCRXh2wUC6YgfPgKA6ESRNHtFn7E5B3HHpLc5rVMDSNhKZYfdhupV4Ezf6+5DhMcZLZhi0kk+ivDiN1gdHlVtSN55xpvf+c+XZDzR0uhgcvgy0LAbmzgk6y4WbYH+LQsMpzNNj+aC72vMiWovWrKh9jY4MYCmdgxsS/skPtLdp18muiEIRXTbZQGUmhxFpJAIbBIsCscMpzL0BgeujxUwM5wr79Sd9r4xwbgSMwmBlBfUHRVBdNyg8feepeJbCS63nD6eHOuLqMRsPIio3w/ki/EAa92UUEiZeavLsMUD/y/qAvWUdzdP5Y+C/TM+CMGS/kGL4LEdY/28MQeTvU1qv1X21kQt2aiaj3pPVL36hAzxbcLgqcMo9oymDRy87kdCXW/+g4oKLtMh6fm/G6W6Y/B01JlxohyyvueHQIG557uzkEkTJ3FnOVODSKBKpb3WZ65rExfV71zSZa25F3GmpaIG6HiYrX2YYhQAkIE9pKEQBHbnwHuwNDGottZTXZw=; WLS=C=9df3f9d8518fae19&N=wen; WLID=pGY8HgWCu4p5XYCOk2oa0+DBdftkMUfmNIn8XtSjSTKsgv/Il7GUlYs0Jpjf/E12jZMgV7x44Dy3fXOgjjUoJx7Y/ClLrLhsk20THksJJoI=; _EDGE_S=F=1&SID=17CF6EE006426448213C7DB907436588&mkt=zh-CN; MUID=225621093D8A6C27301632413C0E6D08; MUIDB=225621093D8A6C27301632413C0E6D08; SUID=A; SNRHOP=I=&TS=; _U=nGyzKQruEsDwLiu65fZFIG6e12hf2lwTJmroW__k8joUJIKmG3OIjayXKGW9dCVR3sNhF76mEVxyW6yjUGPodOfjtSa3s3J_DxMOrEK1BqXCOBI9bC66spAIASV7prsYFlVAJz73jVNENp_tBubLHJy6EbT0BKRe4AjrYkH-9uMnmCKB8Zmyg; _SS=SID=17CF6EE006426448213C7DB907436588&R=0&RB=0&GB=0&RG=200&RP=0&PC=U531; SRCHS=PC=U531; USRLOC=HS=1&ELOC=LAT=22.501529693603516|LON=113.9263687133789|N=%E5%8D%97%E5%B1%B1%E5%8C%BA%EF%BC%8C%E5%B9%BF%E4%B8%9C%E7%9C%81|ELT=2|&CLOC=LAT=22.50153029046461|LON=113.92637070632928|A=733.4464586120832|TS=230726151034|SRC=W; SRCHUSR=DOB=20230725&T=1690384908000&POEX=W; ipv6=hit=1690388509974&t=6; SRCHHPGUSR=HV=1690384945&SRCHLANG=zh-Hans&PV=15.0.0&BRW=MW&BRH=MT&CW=410&CH=794&SCW=410&SCH=794&DPR=1.5&UTC=480&DM=0&WTS=63825879627&PRVCW=410&PRVCH=794&PR=1.5; cct=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpny6Y_CVyi_MSyM94VyMWnjdYkkccVtm3czoIAtXUGQA; GC=AjWIBYOoVP-Afq6gWwtx80If6yHn6iBuEVHA1XHdAKpR3Y_D9Ytcks4Ht6XhadXk75dvhzP4YOUS0UmoEyqyxw' \ - -H 'dnt: 1' \ - -H 'sec-ch-ua: "Chromium";v="116", "Not)A;Brand";v="24", "Microsoft Edge";v="116"' \ - -H 'sec-ch-ua-arch: "x86"' \ - -H 'sec-ch-ua-bitness: "64"' \ - -H 'sec-ch-ua-full-version: "116.0.1938.29"' \ - -H 'sec-ch-ua-full-version-list: "Chromium";v="116.0.5845.42", "Not)A;Brand";v="24.0.0.0", "Microsoft Edge";v="116.0.1938.29"' \ - -H 'sec-ch-ua-mobile: ?0' \ - -H 'sec-ch-ua-model: ""' \ - -H 
'sec-ch-ua-platform: "Windows"' \ - -H 'sec-ch-ua-platform-version: "15.0.0"' \ - -H 'sec-fetch-dest: document' \ - -H 'sec-fetch-mode: navigate' \ - -H 'sec-fetch-site: none' \ - -H 'sec-fetch-user: ?1' \ - -H 'sec-ms-gec: B3F47AD4A283CAB374C0451C46AAFD147C6A4DACAFF6A1C13F34B2C72B024494' \ - -H 'sec-ms-gec-version: 1-116.0.1938.29' \ - -H 'upgrade-insecure-requests: 1' \ - -H 'user-agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.0.0' \ - -H 'x-client-data: eyIxIjoiMiIsIjEwIjoiXCJTMGg3R05HOTF2aDQ1TUZSUnZ5NHN2akRmMWdlaVJKenNxNlA3aU1WbnF3PVwiIiwiMiI6IjEiLCIzIjoiMSIsIjQiOiIyMTU4ODQ5NTM4MjY4OTM5NTA3IiwiNSI6IlwiSm9GUWpPTDk3OS9MbkRRZnlCd2N1M2FsOUN3eTZTQmdaMGNYMXBtOWVMZz1cIiIsIjYiOiJiZXRhIiwiNyI6IjE4MDM4ODYyNjQzNSIsIjkiOiJkZXNrdG9wIn0=' \ - -H 'x-edge-shopping-flag: 1' \ - --compressed -``` -</details> - -<details> -<summary>转成base64之后的格式(BING_HEADER只能使用 base64 之后的格式)</summary> - -``` -Y3VybCAnaHR0cHM6Ly93d3cuYmluZy5jb20vdHVyaW5nL2NvbnZlcnNhdGlvbi9jcmVhdGUnIFwgICAtSCAnYXV0aG9yaXR5OiB3d3cuYmluZy5jb20nIFwgICAtSCAnYWNjZXB0OiB0ZXh0L2h0bWwsYXBwbGljYXRpb24veGh0bWwreG1sLGFwcGxpY2F0aW9uL3htbDtxPTAuOSxpbWFnZS93ZWJwLGltYWdlL2FwbmcsKi8qO3E9MC44LGFwcGxpY2F0aW9uL3NpZ25lZC1leGNoYW5nZTt2PWIzO3E9MC43JyBcICAgLUggJ2FjY2VwdC1sYW5ndWFnZTogemgtQ04semg7cT0wLjksZW47cT0wLjgsZW4tR0I7cT0wLjcsZW4tVVM7cT0wLjYnIFwgICAtSCAnY2FjaGUtY29udHJvbDogbWF4LWFnZT0wJyBcICAgLUggJ2Nvb2tpZTogTWljcm9zb2Z0QXBwbGljYXRpb25zVGVsZW1ldHJ5RGV2aWNlSWQ9MzM5OWMwMDQtZmQwZS00OGVjLWJiOTItZDgyYTI3YjJiYmQ0OyBfRURHRV9WPTE7IFNSQ0hEPUFGPU5PRk9STTsgU1JDSFVJRD1WPTImR1VJRD0yOUVCRERBNEU2Njc0MzI5QUNDRjFBMEE0MjNDM0U5OCZkbW5jaGc9MTsgX1VSPVFTPTAmVFFTPTA7IF9IUFZOPUNTPWV5SlFiaUk2ZXlKRGJpSTZNU3dpVTNRaU9qQXNJbEZ6SWpvd0xDSlFjbTlrSWpvaVVDSjlMQ0pUWXlJNmV5SkRiaUk2TVN3aVUzUWlPakFzSWxGeklqb3dMQ0pRY205a0lqb2lTQ0o5TENKUmVpSTZleUpEYmlJNk1Td2lVM1FpT2pBc0lsRnpJam93TENKUWNtOWtJam9pVkNKOUxDSkJjQ0k2ZEhKMVpTd2lUWFYwWlNJNmRISjFaU3dpVEdGa0lqb2lNakF5TXkwd055MHlOVlF3TURvd01Eb3dNRm9pTENKSmIzUmtJam93TENKSGQySWlPakFzSWtSbWRDSTZiblZzYkN3aVRYWnpJam93TENKR2JIUWlPakFzSWtsdGNDSTZNbjA9OyBfUndCZj1pbHQ9MSZpaHBkPTEmaXNwZD0wJnJjPTAmcmI9MCZnYj0wJnJnPTIwMCZwYz0wJm10dT0wJnJiYj0wJmc9MCZjaWQ9JmNsbz0wJnY9MSZsPTIwMjMtMDctMjVUMDc6MDA6MDAuMDAwMDAwMFombGZ0PTAwMDEtMDEtMDFUMDA6MDA6MDAuMDAwMDAwMCZhb2Y9MCZvPTImcD0mYz0mdD0wJnM9MDAwMS0wMS0wMVQwMDowMDowMC4wMDAwMDAwKzAwOjAwJnRzPTIwMjMtMDctMjVUMTE6MDA6MzEuNzExMTU0OCswMDowMCZyd3JlZD0wJndscz0mbGthPTAmbGt0PTAmVEg9JmRjaT0wOyBBTk9OPUE9MDA0M0M2NTkwRUE4MDhFRDZFMzk1MDU5RkZGRkZGRkYmRT0xYzhiJlc9MTsgTkFQPVY9MS45JkU9MWMzMSZDPURuYU1TYkROXzRlZlpfeFhxQkYzRGFvcmpyNTNrWXFZb2FQOFlIc3Vwam1pWG55c1g3YTM3QSZXPTE7IFBQTFN0YXRlPTE7IEtpZXZSUFNTZWNBdXRoPUZBQlNCQlJhVE9KSUx0RnNNa3BMVldTRzZBTjZDL3N2UndObUFBQUVnQUFBQ01HVUE3RUdWU2pHRUFRQkdIdE5zYzVzTkw3dW5tSnNmUEoydDZpbWZvNEJlVUpsQWlhM0lwTVR0TVV5NFBVL0M1UUF6Ukk1cE9EdHNJZWUwK2JsZ2xsWHQvNUlpV3dHandtZGhpdnNGTTU5N3BSUGtqQVJQZndzUGhOTFBOYkpyQ1BOUEhkamU0SXM3OE1uQ0FEWHc2L05CcTJGTDhWMi9ieXcyZkg2SXVBTUQyTXZOL1Z2cXBFYTlaeGlEalp0RU5qNEhFajBtTzJTZ3pqZnlFaFZBa2p2em5KcVUycncvUTJ0SG1YOTROQU0ya3psektGL2hXUGhDQ1VtdThJSEx2Q25IRFM2bVNwdHZKRERQL3NwM292dHpPWGtQMW1sTS9YanU1ZnRlc1V2Y2NWRVFHZmZYT1JhMWRFNWhFTWJLSWlLWHoxdERkZHVTWEUxOWc5LyttUk1BamFRaHB3aEk4WG1pbENUeDFhZGIxTGw1cUsrVmpDOUdOZkVaemNic0dCUFZhT2wrYW5HOHJFTXErWG5oam83SitOcVROb2xhdkhnY3VWOGtKc0NlSlpJZ2VkMzNVQThlT1plRm8rd0FFQ01ndXhNb1NxZ3BHSCtzdGhxeW52RC9GSkQ2ci90aVUyTjN1cVZxOE5FOFYzN2Fzck42VDE0WjBGR0JKT2U2RVQxK1BHQXBtM3MxMU9ZOS94aEZFQjlUNUJFUFVHRWJ2UmNMY1cybmNGUVgwRVUreHdlaVBxbzFRMWhOVWcvZEN0U0krbFo3YzJIOFhoZWVQWmF2WjBUSlE4b05DU0F1S2lUcUptST
BmVkdwd2JYd2ZhQURrRWlwdWF3ejNmSXVNSkJOZ01VME90QTdIbTU5djJmR0xJQnV2aTZZZUtTNkdnVmszQklQZitQL2VLYWh3b3pyeFFaYUZub0hUU3FNa3ZjdDd4Q1A0YXRCUk9mWEtmNVd3MENjRktwKzJXWDlCSXNrVE9vMmpqazZiQXl5WUorRWxVQjFmZ0xLTms1bS9ZU01jOWlZQ0xJQk1JR044RjBZdnkzdFo3Y3ZoN1VlNUtsbzk4VVMvSStuVzFHN1pKTUhSZ1VPOGg4bHBuZUhxRU1lZ0tkOGd5bk80VkY3UnBDakprdW5EbVcwVGErUmtYQVA2MTlwZzBkcUhNRmtvT2drbk43OG9CYkdUVjZmSlVLb3R2K3ZpNjFrTGhBZVhaR1dvSEdDUlhoMndVQzZZZ2ZQZ0tBNkVTUk5IdEZuN0U1QjNISHBMYzVyVk1EU05oS1pZZmRodXBWNEV6ZjYrNURoTWNaTFpoaTBraytpdkRpTjFnZEhsVnRTTjU1eHB2ZitjK1haRHpSMHVoZ2N2Z3kwTEFibXpnazZ5NFdiWUgrTFFzTXB6Tk5qK2FDNzJ2TWlXb3ZXcktoOWpZNE1ZQ21kZ3hzUy9za1B0TGRwMThtdWlFSVJYVGJaUUdVbWh4RnBKQUliQklzQ3NjTXB6TDBCZ2V1anhVd001d3I3OVNkOXI0eHdiZ1NNd21CbEJmVUhSVkJkTnlnOGZlZXBlSmJDUzYzbkQ2ZUhPdUxxTVJzUElpbzN3L2tpL0VBYTkyVVVFaVplYXZMc01VRC95L3FBdldVZHpkUDVZK0MvVE0rQ01HUy9rR0w0TEVkWS8yOE1RZVR2VTFxdjFYMjFrUXQyYWlhajNwUFZMMzZoQXp4YmNMZ3FjTW85b3ltRFJ5ODdrZENYVy8rZzRvS0x0TWg2Zm0vRzZXNlkvQjAxSmx4b2h5eXZ1ZUhRSUc1NTd1emtFa1RKM0ZuT1ZPRFNLQktwYjNXWjY1ckV4ZlY3MXpTWmEyNUYzR21wYUlHNkhpWXJYMllZaFFBa0lFOXBLRVFCSGJud0h1d05ER290dFpUWFp3PTsgV0xTPUM9OWRmM2Y5ZDg1MThmYWUxOSZOPXdlbjsgV0xJRD1wR1k4SGdXQ3U0cDVYWUNPazJvYTArREJkZnRrTVVmbU5JbjhYdFNqU1RLc2d2L0lsN0dVbFlzMEpwamYvRTEyalpNZ1Y3eDQ0RHkzZlhPZ2pqVW9KeDdZL0NsTHJMaHNrMjBUSGtzSkpvST07IF9FREdFX1M9Rj0xJlNJRD0xN0NGNkVFMDA2NDI2NDQ4MjEzQzdEQjkwNzQzNjU4OCZta3Q9emgtQ047IE1VSUQ9MjI1NjIxMDkzRDhBNkMyNzMwMTYzMjQxM0MwRTZEMDg7IE1VSURCPTIyNTYyMTA5M0Q4QTZDMjczMDE2MzI0MTNDMEU2RDA4OyBTVUlEPUE7IFNOUkhPUD1JPSZUUz07IF9VPW5HeXpLUXJ1RXNEd0xpdTY1ZlpGSUc2ZTEyaGYybHdUSm1yb1dfX2s4am9VSklLbUczT0lqYXlYS0dXOWRDVlIzc05oRjc2bUVWeHlXNnlqVUdQb2RPZmp0U2EzczNKX0R4TU9yRUsxQnFYQ09CSTliQzY2c3BBSUFTVjdwcnNZRmxWQUp6NzNqVk5FTnBfdEJ1YkxISnk2RWJUMEJLUmU0QWpyWWtILTl1TW5tQ0tCOFpteWc7IF9TUz1TSUQ9MTdDRjZFRTAwNjQyNjQ0ODIxM0M3REI5MDc0MzY1ODgmUj0wJlJCPTAmR0I9MCZSRz0yMDAmUlA9MCZQQz1VNTMxOyBTUkNIUz1QQz1VNTMxOyBVU1JMT0M9SFM9MSZFTE9DPUxBVD0yMi41MDE1Mjk2OTM2MDM1MTZ8TE9OPTExMy45MjYzNjg3MTMzNzg5fE49JUU1JThEJTk3JUU1JUIxJUIxJUU1JThDJUJBJUVGJUJDJThDJUU1JUI5JUJGJUU0JUI4JTlDJUU3JTlDJTgxfEVMVD0yfCZDTE9DPUxBVD0yMi41MDE1MzAyOTA0NjQ2MXxMT049MTEzLjkyNjM3MDcwNjMyOTI4fEE9NzMzLjQ0NjQ1ODYxMjA4MzJ8VFM9MjMwNzI2MTUxMDM0fFNSQz1XOyBTUkNIVVNSPURPQj0yMDIzMDcyNSZUPTE2OTAzODQ5MDgwMDAmUE9FWD1XOyBpcHY2PWhpdD0xNjkwMzg4NTA5OTc0JnQ9NjsgU1JDSEhQR1VTUj1IVj0xNjkwMzg0OTQ1JlNSQ0hMQU5HPXpoLUhhbnMmUFY9MTUuMC4wJkJSVz1NVyZCUkg9TVQmQ1c9NDEwJkNIPTc5NCZTQ1c9NDEwJlNDSD03OTQmRFBSPTEuNSZVVEM9NDgwJkRNPTAmV1RTPTYzODI1ODc5NjI3JlBSVkNXPTQxMCZQUlZDSD03OTQmUFI9MS41OyBjY3Q9QWpXSUJZT29WUC1BZnE2Z1d3dHg4MElmNnlIbjZpQnVFVkhBMVhIZEFLcG55NllfQ1Z5aV9NU3lNOTRWeU1XbmpkWWtrY2NWdG0zY3pvSUF0WFVHUUE7IEdDPUFqV0lCWU9vVlAtQWZxNmdXd3R4ODBJZjZ5SG42aUJ1RVZIQTFYSGRBS3BSM1lfRDlZdGNrczRIdDZYaGFkWGs3NWR2aHpQNFlPVVMwVW1vRXlxeXh3JyBcICAgLUggJ2RudDogMScgXCAgIC1IICdzZWMtY2gtdWE6ICJDaHJvbWl1bSI7dj0iMTE2IiwgIk5vdClBO0JyYW5kIjt2PSIyNCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2IicgXCAgIC1IICdzZWMtY2gtdWEtYXJjaDogIng4NiInIFwgICAtSCAnc2VjLWNoLXVhLWJpdG5lc3M6ICI2NCInIFwgICAtSCAnc2VjLWNoLXVhLWZ1bGwtdmVyc2lvbjogIjExNi4wLjE5MzguMjkiJyBcICAgLUggJ3NlYy1jaC11YS1mdWxsLXZlcnNpb24tbGlzdDogIkNocm9taXVtIjt2PSIxMTYuMC41ODQ1LjQyIiwgIk5vdClBO0JyYW5kIjt2PSIyNC4wLjAuMCIsICJNaWNyb3NvZnQgRWRnZSI7dj0iMTE2LjAuMTkzOC4yOSInIFwgICAtSCAnc2VjLWNoLXVhLW1vYmlsZTogPzAnIFwgICAtSCAnc2VjLWNoLXVhLW1vZGVsOiAiIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm06ICJXaW5kb3dzIicgXCAgIC1IICdzZWMtY2gtdWEtcGxhdGZvcm0tdmVyc2lvbjogIjE1LjAuMCInIFwgICAtSCAnc2VjLWZldGNoLWRlc3Q6IGRvY3VtZW50JyBcICAgLUggJ3NlYy1mZXRjaC1tb2RlOiBuYXZpZ2F0ZScgXCAgIC1IICdzZWMtZmV0Y2gtc2l0ZTogbm9uZScgXCAgIC1IICdzZWMtZmV0Y2gtdXNlcjogPzEnIFwgI
CAtSCAnc2VjLW1zLWdlYzogQjNGNDdBRDRBMjgzQ0FCMzc0QzA0NTFDNDZBQUZEMTQ3QzZBNERBQ0FGRjZBMUMxM0YzNEIyQzcyQjAyNDQ5NCcgXCAgIC1IICdzZWMtbXMtZ2VjLXZlcnNpb246IDEtMTE2LjAuMTkzOC4yOScgXCAgIC1IICd1cGdyYWRlLWluc2VjdXJlLXJlcXVlc3RzOiAxJyBcICAgLUggJ3VzZXItYWdlbnQ6IE1vemlsbGEvNS4wIChXaW5kb3dzIE5UIDEwLjA7IFdpbjY0OyB4NjQpIEFwcGxlV2ViS2l0LzUzNy4zNiAoS0hUTUwsIGxpa2UgR2Vja28pIENocm9tZS8xMTYuMC4wLjAgU2FmYXJpLzUzNy4zNiBFZGcvMTE2LjAuMC4wJyBcICAgLUggJ3gtY2xpZW50LWRhdGE6IGV5SXhJam9pTWlJc0lqRXdJam9pWENKVE1HZzNSMDVIT1RGMmFEUTFUVVpTVW5aNU5ITjJha1JtTVdkbGFWSktlbk54TmxBM2FVMVdibkYzUFZ3aUlpd2lNaUk2SWpFaUxDSXpJam9pTVNJc0lqUWlPaUl5TVRVNE9EUTVOVE00TWpZNE9UTTVOVEEzSWl3aU5TSTZJbHdpU205R1VXcFBURGszT1M5TWJrUlJabmxDZDJOMU0yRnNPVU4zZVRaVFFtZGFNR05ZTVhCdE9XVk1aejFjSWlJc0lqWWlPaUppWlhSaElpd2lOeUk2SWpFNE1ETTRPRFl5TmpRek5TSXNJamtpT2lKa1pYTnJkRzl3SW4wPScgXCAgIC1IICd4LWVkZ2Utc2hvcHBpbmctZmxhZzogMScgXCAgIC0tY29tcHJlc3NlZA== -``` -</details> - - -## 鸣谢 - - 感谢 [EdgeGPT](https://github.com/acheong08/EdgeGPT) 提供的代理 API 的方法。 - - 感谢 [Vercel AI](https://github.com/vercel-labs/ai-chatbot) 提供的基础脚手架和 [ChatHub](https://github.com/chathub-dev/chathub) [go-proxy-bingai](https://github.com/adams549659584/go-proxy-bingai) 提供的部分代码。 - - -## 答疑及交流 - -<image src="./docs/images/wechat.png" width=240 /> - -## License - -MIT © [LICENSE](https://github.com/weaigc/bingo/blob/main/LICENSE). - - diff --git a/spaces/ljrmary/UT_Hackathon/app.py b/spaces/ljrmary/UT_Hackathon/app.py deleted file mode 100644 index bd148ba25a3abd6497a8663111db37fdaade9516..0000000000000000000000000000000000000000 --- a/spaces/ljrmary/UT_Hackathon/app.py +++ /dev/null @@ -1,84 +0,0 @@ -import gradio as gr -import requests -import geopandas as gpd -from shapely.geometry import Point - - -def process_input(address, selected_option, additional_input): - transport_analysis_needed = selected_option in [ - "Residential", - "Office", - "Community Facility", - ] - - output_address = f"You entered the address:\n{address}" - output_option = f"Selected option:\n{selected_option}" - - response = requests.get( - f"https://geosearch.planninglabs.nyc/v2/autocomplete?text={address}" - ) - data = response.json() - x = data["features"][0]["geometry"]["coordinates"] - - # Load the GeoJSON file into a GeoDataFrame - geodata = gpd.read_file("/content/zone_data.geojson") - - # Create a Point for the given coordinates - location_point = Point(x[0], x[1]) - - # Find the zone that the location point is in - zone = geodata[geodata.geometry.contains(location_point)]["id"].values.item() - - if selected_option in ["Off-Street Parking Facility", "Residential"]: - output_additional = f"Number of Units/Spaces:\n{additional_input}" - else: - output_additional = f"Area (in 1000 GSF):\n{additional_input}" - - output_transport_analysis = ( - f"Transport Analysis Needed:\n{transport_analysis_needed}" - ) - - # Replace 'Your Zone Calculation Logic' with the actual zone calculation code - - output_zone = f"Zone:\n{zone}" - - return ( - output_address, - output_option, - output_additional, - output_transport_analysis, - output_zone, - ) - - -iface = gr.Interface( - fn=process_input, - inputs=[ - gr.inputs.Textbox(label="Enter your address"), - gr.inputs.Radio( - [ - "Residential", - "Office", - "Regional Retail", - "Local Retail", - "Sit Down/High Turnover Restaurant", - "Fast Food/without Drive Through", - "Community Facility", - "Off-Street Parking Facility", - ], - label="Select an option", - ), - gr.inputs.Number( - label="Number of Units/Spaces or Area (in 1000 GSF)", default=1 - ), # Default value is 1 - ], - outputs=[ - 
gr.outputs.Textbox(label="Address"), - gr.outputs.Textbox(label="Selected Option"), - gr.outputs.Textbox(label="Number of Units/Spaces or Area"), - gr.outputs.Textbox(label="Transport Analysis Needed"), - gr.outputs.Textbox(label="Zone"), - ], -) - -iface.launch() diff --git a/spaces/lojban/text-to-speech/vits/monotonic_align/__init__.py b/spaces/lojban/text-to-speech/vits/monotonic_align/__init__.py deleted file mode 100644 index 1d663e0fa02000ca7d79635d4a656fa24b1b6932..0000000000000000000000000000000000000000 --- a/spaces/lojban/text-to-speech/vits/monotonic_align/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -import numpy as np -import torch - -from vits.monotonic_align.core import maximum_path_c - -def maximum_path(neg_cent, mask): - """ Cython optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) - path = np.zeros(neg_cent.shape, dtype=np.int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) - maximum_path_c(path, neg_cent, t_t_max, t_s_max) - return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/spaces/lvwerra/in-the-stack-gr/README.md b/spaces/lvwerra/in-the-stack-gr/README.md deleted file mode 100644 index 7c17561f2e3ba49dcdf023934674465c0daa1837..0000000000000000000000000000000000000000 --- a/spaces/lvwerra/in-the-stack-gr/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: In The Stack Gr -emoji: 👁 -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.9.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/transform_scan.h b/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/transform_scan.h deleted file mode 100644 index 75b075b6b16f063a1c5cda8893911d3f3c533f2d..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/system/tbb/detail/transform_scan.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#pragma once - -#include <thrust/detail/config.h> - -// this system inherits transform_scan -#include <thrust/system/cpp/detail/transform_scan.h> - diff --git a/spaces/maha-vishnu/mahavishnu/app.py b/spaces/maha-vishnu/mahavishnu/app.py deleted file mode 100644 index a362dcc7d0ddd1eee86961f1bc3db6d894fbd3d5..0000000000000000000000000000000000000000 --- a/spaces/maha-vishnu/mahavishnu/app.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -import gradio as gr -from langchain.chat_models import ChatOpenAI -from langchain import LLMChain, PromptTemplate -from langchain.memory import ConversationBufferMemory - -OPENAI_API_KEY=os.getenv('OPENAI_API_KEY') - -template = """You are a helpful assistant to answer all user queries. 
-{chat_history} -User: {user_message} -Chatbot:""" - -prompt = PromptTemplate( - input_variables=["chat_history", "user_message"], template=template -) - -memory = ConversationBufferMemory(memory_key="chat_history") - -llm_chain = LLMChain( - llm=ChatOpenAI(temperature='0.5', model_name="gpt-3.5-turbo"), - prompt=prompt, - verbose=True, - memory=memory, -) - -def get_text_response(user_message,history): - response = llm_chain.predict(user_message = user_message) - return response - -demo = gr.ChatInterface(get_text_response) - -if __name__ == "__main__": - demo.launch() #To create a public link, set `share=True` in `launch()`. To enable errors and logs, set `debug=True` in `launch()`. diff --git a/spaces/majweldon/AIScribe/README.md b/spaces/majweldon/AIScribe/README.md deleted file mode 100644 index e34df9149aa36292888bb7a39908b499048cc913..0000000000000000000000000000000000000000 --- a/spaces/majweldon/AIScribe/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: AIScribe -emoji: ⚡ -colorFrom: red -colorTo: red -sdk: gradio -sdk_version: 3.23.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/manhkhanhUIT/BOPBTL/Global/detection_util/util.py b/spaces/manhkhanhUIT/BOPBTL/Global/detection_util/util.py deleted file mode 100644 index be10881fc4077015d12a28f5ae5b0a04021ad627..0000000000000000000000000000000000000000 --- a/spaces/manhkhanhUIT/BOPBTL/Global/detection_util/util.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. - -import os -import sys -import time -import shutil -import platform -import numpy as np -from datetime import datetime - -import torch -import torchvision as tv -import torch.backends.cudnn as cudnn - -# from torch.utils.tensorboard import SummaryWriter - -import yaml -import matplotlib.pyplot as plt -from easydict import EasyDict as edict -import torchvision.utils as vutils - - -##### option parsing ###### -def print_options(config_dict): - print("------------ Options -------------") - for k, v in sorted(config_dict.items()): - print("%s: %s" % (str(k), str(v))) - print("-------------- End ----------------") - - -def save_options(config_dict): - from time import gmtime, strftime - - file_dir = os.path.join(config_dict["checkpoint_dir"], config_dict["name"]) - mkdir_if_not(file_dir) - file_name = os.path.join(file_dir, "opt.txt") - with open(file_name, "wt") as opt_file: - opt_file.write(os.path.basename(sys.argv[0]) + " " + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "\n") - opt_file.write("------------ Options -------------\n") - for k, v in sorted(config_dict.items()): - opt_file.write("%s: %s\n" % (str(k), str(v))) - opt_file.write("-------------- End ----------------\n") - - -def config_parse(config_file, options, save=True): - with open(config_file, "r") as stream: - config_dict = yaml.safe_load(stream) - config = edict(config_dict) - - for option_key, option_value in vars(options).items(): - config_dict[option_key] = option_value - config[option_key] = option_value - - if config.debug_mode: - config_dict["num_workers"] = 0 - config.num_workers = 0 - config.batch_size = 2 - if isinstance(config.gpu_ids, str): - config.gpu_ids = [int(x) for x in config.gpu_ids.split(",")][0] - - print_options(config_dict) - if save: - save_options(config_dict) - - return config - - -###### utility ###### -def to_np(x): - return x.cpu().numpy() - - -def prepare_device(use_gpu, gpu_ids): - if use_gpu: - cudnn.benchmark 
= True - os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" - if isinstance(gpu_ids, str): - gpu_ids = [int(x) for x in gpu_ids.split(",")] - torch.cuda.set_device(gpu_ids[0]) - device = torch.device("cuda:" + str(gpu_ids[0])) - else: - torch.cuda.set_device(gpu_ids) - device = torch.device("cuda:" + str(gpu_ids)) - print("running on GPU {}".format(gpu_ids)) - else: - device = torch.device("cpu") - print("running on CPU") - - return device - - -###### file system ###### -def get_dir_size(start_path="."): - total_size = 0 - for dirpath, dirnames, filenames in os.walk(start_path): - for f in filenames: - fp = os.path.join(dirpath, f) - total_size += os.path.getsize(fp) - return total_size - - -def mkdir_if_not(dir_path): - if not os.path.exists(dir_path): - os.makedirs(dir_path) - - -##### System related ###### -class Timer: - def __init__(self, msg): - self.msg = msg - self.start_time = None - - def __enter__(self): - self.start_time = time.time() - - def __exit__(self, exc_type, exc_value, exc_tb): - elapse = time.time() - self.start_time - print(self.msg % elapse) - - -###### interactive ###### -def get_size(start_path="."): - total_size = 0 - for dirpath, dirnames, filenames in os.walk(start_path): - for f in filenames: - fp = os.path.join(dirpath, f) - total_size += os.path.getsize(fp) - return total_size - - -def clean_tensorboard(directory): - tensorboard_list = os.listdir(directory) - SIZE_THRESH = 100000 - for tensorboard in tensorboard_list: - tensorboard = os.path.join(directory, tensorboard) - if get_size(tensorboard) < SIZE_THRESH: - print("deleting the empty tensorboard: ", tensorboard) - # - if os.path.isdir(tensorboard): - shutil.rmtree(tensorboard) - else: - os.remove(tensorboard) - - -def prepare_tensorboard(config, experiment_name=datetime.now().strftime("%Y-%m-%d %H-%M-%S")): - tensorboard_directory = os.path.join(config.checkpoint_dir, config.name, "tensorboard_logs") - mkdir_if_not(tensorboard_directory) - clean_tensorboard(tensorboard_directory) - tb_writer = SummaryWriter(os.path.join(tensorboard_directory, experiment_name), flush_secs=10) - - # try: - # shutil.copy('outputs/opt.txt', tensorboard_directory) - # except: - # print('cannot find file opt.txt') - return tb_writer - - -def tb_loss_logger(tb_writer, iter_index, loss_logger): - for tag, value in loss_logger.items(): - tb_writer.add_scalar(tag, scalar_value=value.item(), global_step=iter_index) - - -def tb_image_logger(tb_writer, iter_index, images_info, config): - ### Save and write the output into the tensorboard - tb_logger_path = os.path.join(config.output_dir, config.name, config.train_mode) - mkdir_if_not(tb_logger_path) - for tag, image in images_info.items(): - if tag == "test_image_prediction" or tag == "image_prediction": - continue - image = tv.utils.make_grid(image.cpu()) - image = torch.clamp(image, 0, 1) - tb_writer.add_image(tag, img_tensor=image, global_step=iter_index) - tv.transforms.functional.to_pil_image(image).save( - os.path.join(tb_logger_path, "{:06d}_{}.jpg".format(iter_index, tag)) - ) - - -def tb_image_logger_test(epoch, iter, images_info, config): - - url = os.path.join(config.output_dir, config.name, config.train_mode, "val_" + str(epoch)) - if not os.path.exists(url): - os.makedirs(url) - scratch_img = images_info["test_scratch_image"].data.cpu() - if config.norm_input: - scratch_img = (scratch_img + 1.0) / 2.0 - scratch_img = torch.clamp(scratch_img, 0, 1) - gt_mask = images_info["test_mask_image"].data.cpu() - predict_mask = images_info["test_scratch_prediction"].data.cpu() - - 
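    # binarize the soft scratch prediction at 0.5 to get a hard mask for the saved comparison grid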
predict_hard_mask = (predict_mask.data.cpu() >= 0.5).float() - - imgs = torch.cat((scratch_img, predict_hard_mask, gt_mask), 0) - img_grid = vutils.save_image( - imgs, os.path.join(url, str(iter) + ".jpg"), nrow=len(scratch_img), padding=0, normalize=True - ) - - -def imshow(input_image, title=None, to_numpy=False): - inp = input_image - if to_numpy or type(input_image) is torch.Tensor: - inp = input_image.numpy() - - fig = plt.figure() - if inp.ndim == 2: - fig = plt.imshow(inp, cmap="gray", clim=[0, 255]) - else: - fig = plt.imshow(np.transpose(inp, [1, 2, 0]).astype(np.uint8)) - plt.axis("off") - fig.axes.get_xaxis().set_visible(False) - fig.axes.get_yaxis().set_visible(False) - plt.title(title) - - -###### vgg preprocessing ###### -def vgg_preprocess(tensor): - # input is RGB tensor which ranges in [0,1] - # output is BGR tensor which ranges in [0,255] - tensor_bgr = torch.cat((tensor[:, 2:3, :, :], tensor[:, 1:2, :, :], tensor[:, 0:1, :, :]), dim=1) - # tensor_bgr = tensor[:, [2, 1, 0], ...] - tensor_bgr_ml = tensor_bgr - torch.Tensor([0.40760392, 0.45795686, 0.48501961]).type_as(tensor_bgr).view( - 1, 3, 1, 1 - ) - tensor_rst = tensor_bgr_ml * 255 - return tensor_rst - - -def torch_vgg_preprocess(tensor): - # pytorch version normalization - # note that both input and output are RGB tensors; - # input and output ranges in [0,1] - # normalize the tensor with mean and variance - tensor_mc = tensor - torch.Tensor([0.485, 0.456, 0.406]).type_as(tensor).view(1, 3, 1, 1) - tensor_mc_norm = tensor_mc / torch.Tensor([0.229, 0.224, 0.225]).type_as(tensor_mc).view(1, 3, 1, 1) - return tensor_mc_norm - - -def network_gradient(net, gradient_on=True): - if gradient_on: - for param in net.parameters(): - param.requires_grad = True - else: - for param in net.parameters(): - param.requires_grad = False - return net diff --git a/spaces/marcusj83/MusicGenbruh/tests/data/__init__.py b/spaces/marcusj83/MusicGenbruh/tests/data/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/marcusj83/MusicGenbruh/tests/data/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
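The `detection_util/util.py` module deleted just above bundles a few small helpers (timing, directory creation, VGG-style preprocessing) that the surrounding training code leans on. As a quick orientation, here is a minimal, hypothetical usage sketch in Python; the import path `detection_util.util` and the dummy tensors are assumptions for illustration, while the `%`-style `Timer` message and the 0.5 mask threshold mirror the code above.

```python
import torch

# Assumed import path; adjust to wherever Global/detection_util/util.py sits on your PYTHONPATH.
from detection_util.util import Timer, mkdir_if_not, torch_vgg_preprocess

mkdir_if_not("outputs/demo")                        # creates the directory only if it is missing

# Timer prints its message with the elapsed seconds substituted into the %-placeholder on exit.
with Timer("preprocessing took %f s"):
    x = torch.rand(2, 3, 256, 256)                  # dummy RGB batch in [0, 1]
    x_norm = torch_vgg_preprocess(x)                # zero-centered RGB, ImageNet mean/std

# Hard mask from a soft prediction, mirroring tb_image_logger_test above.
soft_pred = torch.rand(2, 1, 256, 256)              # hypothetical scratch-probability map in [0, 1]
hard_mask = (soft_pred >= 0.5).float()
```

Note that the module ships two preprocessing variants: `vgg_preprocess` produces Caffe-style input (BGR, 0-255, mean-subtracted) while `torch_vgg_preprocess` produces torchvision-style input (RGB in [0, 1], ImageNet mean/std normalized); which one applies depends on how the VGG weights being fed were trained.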
diff --git a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_ect.py b/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_ect.py deleted file mode 100644 index a1e6365aeecdee40ba7040900f40212684cab17d..0000000000000000000000000000000000000000 --- a/spaces/marlenezw/audio-driven-animations/MakeItTalk/thirdparty/face_of_art/old/deep_heatmaps_model_ect.py +++ /dev/null @@ -1,544 +0,0 @@ -import scipy.io -import scipy.misc -from glob import glob -import os -import numpy as np -from image_utils import * -from ops import * -from sklearn.model_selection import train_test_split -import tensorflow as tf -from tensorflow import contrib - - -class DeepHeatmapsModel(object): - - """facial landmark localization Network""" - - def __init__(self, mode='TRAIN', train_iter=500000, learning_rate=0.000001, image_size=256, c_dim=3, batch_size=10, - num_landmarks=68, img_path='data', save_log_path='logs', save_sample_path='sample', - save_model_path='model',test_model_path='model/deep_heatmaps-1000'): - - self.mode = mode - self.train_iter=train_iter - self.learning_rate=learning_rate - - self.image_size = image_size - self.c_dim = c_dim - self.batch_size = batch_size - - self.num_landmarks = num_landmarks - - self.save_log_path=save_log_path - self.save_sample_path=save_sample_path - self.save_model_path=save_model_path - self.test_model_path=test_model_path - self.img_path=img_path - - self.momentum = 0.95 - self.step = 20000 # for lr decay - self.gamma = 0.05 # for lr decay - - self.weight_initializer = 'random_normal' # random_normal or xavier - self.weight_initializer_std = 0.01 - self.bias_initializer = 0.0 - - self.l_weight_primary = 100. - self.l_weight_fusion = 3.*self.l_weight_primary - - self.sigma = 6 # sigma for heatmap generation - self.scale = 'zero_center' # scale for image normalization '255' / '1' / 'zero_center' - - self.print_every=2 - self.save_every=100 - self.sample_every_epoch = False - self.sample_every=10 - self.sample_grid=4 - self.log_every_epoch=1 - self.log_histograms = True - - self.config = tf.ConfigProto() - self.config.gpu_options.allow_growth = True - - bb_dir = '/Users/arik/Desktop/DATA/face_data/300W/Bounding_Boxes/' - test_data='full' # if mode is TEST, this choose the set to use full/common/challenging/test - margin = 0.25 # for face crops - bb_type = 'gt' # gt/init - - self.bb_dictionary = load_bb_dictionary(bb_dir, mode, test_data=test_data) - - self.img_menpo_list = load_menpo_image_list(img_path, mode, self.bb_dictionary, image_size, - margin=margin, bb_type=bb_type, test_data=test_data) - - if mode is 'TRAIN': - train_params = locals() - print_training_params_to_file(train_params) - - def add_placeholders(self): - - if self.mode == 'TEST': - self.test_images = tf.placeholder( - tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'images') - # self.test_landmarks = tf.placeholder(tf.float32, [None, self.num_landmarks * 2], 'landmarks') - - self.test_heatmaps = tf.placeholder( - tf.float32, [None, self.image_size, self.image_size, self.num_landmarks], 'heatmaps') - - self.test_heatmaps_small = tf.placeholder( - tf.float32, [None, self.image_size/4, self.image_size/4, self.num_landmarks], 'heatmaps_small') - - elif self.mode == 'TRAIN': - self.train_images = tf.placeholder( - tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'train_images') - # self.train_landmarks = tf.placeholder(tf.float32, [None, self.num_landmarks*2], 'train_landmarks') - 
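            # ground-truth heatmaps at two scales: full resolution for the upsampled fusion output, 1/4 resolution for the primary branch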
- self.train_heatmaps = tf.placeholder( - tf.float32, [None, self.image_size, self.image_size, self.num_landmarks], 'train_heatmaps') - - self.train_heatmaps_small = tf.placeholder( - tf.float32, [None, self.image_size/4, self.image_size/4, self.num_landmarks], 'train_heatmaps_small') - - # self.valid_images = tf.placeholder( - # tf.float32, [None, self.image_size, self.image_size, self.c_dim], 'valid_images') - # # self.valid_landmarks = tf.placeholder(tf.float32, [None, self.num_landmarks * 2], 'valid_landmarks') - # - # self.valid_heatmaps = tf.placeholder( - # tf.float32, [None, self.image_size, self.image_size, self.num_landmarks], 'valid_heatmaps') - # - # self.valid_heatmaps_small = tf.placeholder( - # tf.float32,[None, self.image_size / 4, self.image_size / 4, self.num_landmarks], 'valid_heatmaps_small') - - def heatmaps_network(self, input_images, reuse=None, name='pred_heatmaps'): - - with tf.name_scope(name): - - # if training is None: - # if self.mode == 'train': - # training = True - # else: - # training = False - - if self.weight_initializer == 'xavier': - weight_initializer = contrib.layers.xavier_initializer() - else: - weight_initializer = tf.random_normal_initializer(stddev=self.weight_initializer_std) - - bias_init = tf.constant_initializer(self.bias_initializer) - - with tf.variable_scope('heatmaps_network'): - with tf.name_scope('primary_net'): - - l1 = conv_relu_pool(input_images, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init, - reuse=reuse, var_scope='conv_1') - l2 = conv_relu_pool(l1, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init, - reuse=reuse, var_scope='conv_2') - l3 = conv_relu(l2, 5, 128, conv_ker_init=weight_initializer, conv_bias_init=bias_init, - reuse=reuse, var_scope='conv_3') - - l4_1 = conv_relu(l3, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_1') - l4_2 = conv_relu(l3, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_2') - l4_3 = conv_relu(l3, 3, 128, conv_dilation=3, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_3') - l4_4 = conv_relu(l3, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_4_4') - - l4 = tf.concat([l4_1, l4_2, l4_3, l4_4], 3, name='conv_4') - - l5_1 = conv_relu(l4, 3, 256, conv_dilation=1, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_1') - l5_2 = conv_relu(l4, 3, 256, conv_dilation=2, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_2') - l5_3 = conv_relu(l4, 3, 256, conv_dilation=3, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_3') - l5_4 = conv_relu(l4, 3, 256, conv_dilation=4, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_5_4') - - l5 = tf.concat([l5_1, l5_2, l5_3, l5_4], 3, name='conv_5') - - l6 = conv_relu(l5, 1, 512, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_6') - l7 = conv_relu(l6, 1, 256, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_7') - primary_out = conv(l7, 1, self.num_landmarks, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_8') - - with tf.name_scope('fusion_net'): - - l_fsn_0 = tf.concat([l3, l7], 3, 
name='conv_3_7_fsn') - - l_fsn_1_1 = conv_relu(l_fsn_0, 3, 64, conv_dilation=1, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_1_1') - l_fsn_1_2 = conv_relu(l_fsn_0, 3, 64, conv_dilation=2, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_1_2') - l_fsn_1_3 = conv_relu(l_fsn_0, 3, 64, conv_dilation=3, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_1_3') - - l_fsn_1 = tf.concat([l_fsn_1_1, l_fsn_1_2, l_fsn_1_3], 3, name='conv_fsn_1') - - l_fsn_2_1 = conv_relu(l_fsn_1, 3, 64, conv_dilation=1, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_1') - l_fsn_2_2 = conv_relu(l_fsn_1, 3, 64, conv_dilation=2, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_2') - l_fsn_2_3 = conv_relu(l_fsn_1, 3, 64, conv_dilation=4, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_3') - l_fsn_2_4 = conv_relu(l_fsn_1, 5, 64, conv_dilation=3, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_2_4') - - l_fsn_2 = tf.concat([l_fsn_2_1, l_fsn_2_2, l_fsn_2_3, l_fsn_2_4], 3, name='conv_fsn_2') - - l_fsn_3_1 = conv_relu(l_fsn_2, 3, 128, conv_dilation=1, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_1') - l_fsn_3_2 = conv_relu(l_fsn_2, 3, 128, conv_dilation=2, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_2') - l_fsn_3_3 = conv_relu(l_fsn_2, 3, 128, conv_dilation=4, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_3') - l_fsn_3_4 = conv_relu(l_fsn_2, 5, 128, conv_dilation=3, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_3_4') - - l_fsn_3 = tf.concat([l_fsn_3_1, l_fsn_3_2, l_fsn_3_3, l_fsn_3_4], 3, name='conv_fsn_3') - - l_fsn_4 = conv_relu(l_fsn_3, 1, 256, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_4') - l_fsn_5 = conv(l_fsn_4, 1, self.num_landmarks, conv_ker_init=weight_initializer, - conv_bias_init=bias_init, reuse=reuse, var_scope='conv_fsn_5') - - with tf.name_scope('upsample_net'): - - out = deconv(l_fsn_5, 8, self.num_landmarks, conv_stride=4, - conv_ker_init=deconv2d_bilinear_upsampling_initializer( - [8, 8, self.num_landmarks, self.num_landmarks]), conv_bias_init=bias_init, - reuse=reuse, var_scope='deconv_1') - - self.all_layers = [l1, l2, l3, l4, l5, l6, l7, primary_out, l_fsn_1, l_fsn_2, l_fsn_3, l_fsn_4, - l_fsn_5, out] - - return primary_out, out - - def build_model(self): - if self.mode == 'TEST': - self.pred_hm_p, self.pred_hm_f = self.heatmaps_network(self.test_images) - elif self.mode == 'TRAIN': - self.pred_hm_p,self.pred_hm_f = self.heatmaps_network(self.train_images,name='pred_heatmaps_train') - # self.pred_landmarks_valid = self.landmarks_network(self.valid_images,name='pred_landmarks_valid') - # self.pred_landmarks_eval = self.landmarks_network(self.test_images,training=False,reuse=True,name='pred_landmarks_eval') - # self.pred_landmarks_train = self.landmarks_network(self.train_images, reuse=True, name='pred_landmarks_train') - - def create_loss_ops(self): - - def l2_loss_norm_eyes(pred_landmarks, real_landmarks, normalize=True, name='l2_loss'): - - with tf.name_scope(name): - with tf.name_scope('real_pred_landmarks_diff'): - 
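                    # raw landmark error; when normalize=True it is divided below by the inter-ocular distance (eye centers taken as midpoints of the inner/outer corner landmarks)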
landmarks_diff = pred_landmarks - real_landmarks - - if normalize: - with tf.name_scope('real_landmarks_eye_dist'): - with tf.name_scope('left_eye'): - p1_out = tf.slice(real_landmarks, [0, 72], [-1, 2]) - p1_in = tf.slice(real_landmarks, [0, 78], [-1, 2]) - p1 = (p1_in + p1_out) / 2 - with tf.name_scope('right_eye'): - p2_out = tf.slice(real_landmarks, [0, 90], [-1, 2]) - p2_in = tf.slice(real_landmarks, [0, 84], [-1, 2]) - p2 = (p2_in + p2_out) / 2 - eps = 1e-6 - eye_dist = tf.expand_dims(tf.sqrt(tf.reduce_sum(tf.square(p1 - p2), axis=1)) + eps, axis=1) - norm_landmarks_diff = landmarks_diff / eye_dist - l2_landmarks_norm = tf.reduce_mean(tf.square(norm_landmarks_diff)) - - out = l2_landmarks_norm - else: - l2_landmarks = tf.reduce_mean(tf.square(landmarks_diff)) - out = l2_landmarks - - return out - - if self.mode is 'TRAIN': - primary_maps_diff = self.pred_hm_p-self.train_heatmaps_small - fusion_maps_diff = self.pred_hm_f - self.train_heatmaps - - self.l2_primary = tf.reduce_mean(tf.square(primary_maps_diff)) - self.l2_fusion = tf.reduce_mean(tf.square(fusion_maps_diff)) - - self.total_loss = self.l_weight_primary * self.l2_primary + self.l_weight_fusion * self.l2_fusion - - # self.l2_loss_batch_train = l2_loss_norm_eyes(self.pred_landmarks_train, self.train_landmarks, - # self.normalize_loss_by_eyes, name='loss_train_batch') - # with tf.name_scope('losses_not_for_train_step'): - # self.l2_loss_train = l2_loss_norm_eyes(self.pred_landmarks_train, self.train_landmarks, - # self.normalize_loss_by_eyes, name='train') - # - # self.l2_loss_valid = l2_loss_norm_eyes(self.pred_landmarks_valid, self.valid_landmarks, - # self.normalize_loss_by_eyes, name='valid') - # else: - # self.l2_loss_test = l2_loss_norm_eyes(self.pred_landmarks_eval, self.test_landmarks, - # self.normalize_loss_by_eyes) - - # def predict_landmarks_in_batches(self,image_paths,session): - # - # num_batches = int(1.*len(image_paths)/self.batch_size) - # if num_batches == 0: - # batch_size = len(image_paths) - # num_batches = 1 - # else: - # batch_size = self.batch_size - # - # for i in range(num_batches): - # batch_image_paths = image_paths[i * batch_size:(i + 1) * batch_size] - # batch_images, _ = \ - # load_data(batch_image_paths, None, self.image_size, self.num_landmarks, conv=True) - # if i == 0: - # all_pred_landmarks = session.run(self.pred_landmarks_eval,{self.test_images:batch_images}) - # else: - # batch_pred = session.run(self.pred_landmarks_eval,{self.test_images:batch_images}) - # all_pred_landmarks = np.concatenate((all_pred_landmarks,batch_pred),0) - # - # reminder = len(image_paths)-num_batches*batch_size - # if reminder >0: - # reminder_paths = image_paths[-reminder:] - # batch_images, _ = \ - # load_data(reminder_paths, None, self.image_size, self.num_landmarks, conv=True) - # batch_pred = session.run(self.pred_landmarks_eval,{self.test_images:batch_images}) - # all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred), 0) - # - # return all_pred_landmarks - - def create_summary_ops(self): - - var_summary = [tf.summary.histogram(var.name,var) for var in tf.trainable_variables()] - grads = tf.gradients(self.total_loss, tf.trainable_variables()) - grads = list(zip(grads, tf.trainable_variables())) - grad_summary = [tf.summary.histogram(var.name+'/grads',grad) for grad,var in grads] - activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers] - l2_primary = tf.summary.scalar('l2_primary', self.l2_primary) - l2_fusion = tf.summary.scalar('l2_fusion', self.l2_fusion) - 
l_total = tf.summary.scalar('l_total', self.total_loss) - - if self.log_histograms: - self.batch_summary_op = tf.summary.merge([l2_primary, l2_fusion, l_total, var_summary, grad_summary, - activ_summary]) - else: - self.batch_summary_op = tf.summary.merge([l2_primary, l2_fusion, l_total]) - - # l2_train_loss_summary = tf.summary.scalar('l2_loss_train', self.l2_loss_train) - # l2_valid_loss_summary = tf.summary.scalar('l2_loss_valid', self.l2_loss_valid) - # - # self.epoch_summary_op = tf.summary.merge([l2_train_loss_summary, l2_valid_loss_summary]) - - def eval(self): - - self.add_placeholders() - # build model - self.build_model() - - num_images = len(self.img_menpo_list) - img_inds = np.arange(num_images) - - sample_iter = int(1. * len(num_images) / self.sample_grid) - - if self.max_test_sample is not None: - if self.max_test_sample < sample_iter: - sample_iter = self.max_test_sample - - with tf.Session(config=self.config) as sess: - - # load trained parameters - print ('loading test model...') - saver = tf.train.Saver() - saver.restore(sess, self.test_model_path) - - _, model_name = os.path.split(self.test_model_path) - - # if self.new_test_data is False: - # # create loss ops - # self.create_loss_ops() - # - # all_test_pred_landmarks = self.predict_landmarks_in_batches(test_data_paths, session=sess) - # _, all_test_real_landmarks = load_data(None, test_landmarks_paths, self.image_size, - # self.num_landmarks, conv=True) - # all_test_loss = sess.run(self.l2_loss_test, {self.pred_landmarks_eval: all_test_pred_landmarks, - # self.test_landmarks: all_test_real_landmarks}) - # with open(os.path.join(self.save_log_path, model_name+'-test_loss.txt'), 'w') as f: - # f.write(str(all_test_loss)) - - for i in range(sample_iter): - - batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid] - - batch_images, _, _, _ = \ - load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim, - num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale, - save_landmarks=False) - - batch_maps_pred, batch_maps_small_pred = sess.run([self.pred_hm_f, self.pred_hm_p], - {self.test_images: batch_images}) - - sample_path_imgs = os.path.join(self.save_sample_path, model_name + '-sample-%d-to-%d-1.png' % ( - i * self.sample_grid, (i + 1) * self.sample_grid)) - - sample_path_maps = os.path.join(self.save_sample_path, model_name + '-sample-%d-to-%d-2.png' % ( - i * self.sample_grid, (i + 1) * self.sample_grid)) - - merged_img = merge_images_landmarks_maps( - batch_images, batch_maps_pred, image_size=self.image_size, - num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale) - - merged_map = merge_compare_maps( - batch_maps_small_pred, batch_maps_pred, image_size=self.image_size/4, - num_landmarks=self.num_landmarks, num_samples=self.sample_grid) - - scipy.misc.imsave(sample_path_imgs, merged_img) - scipy.misc.imsave(sample_path_maps, merged_map) - - print ('saved %s' % sample_path_imgs) - - def train(self): - tf.set_random_seed(1234) - # build a graph - # add placeholders - self.add_placeholders() - # build model - self.build_model() - # create loss ops - self.create_loss_ops() - # create summary ops - self.create_summary_ops() - - # create optimizer and training op - global_step = tf.Variable(0, trainable=False) - lr = tf.train.exponential_decay(self.learning_rate,global_step, self.step, self.gamma, staircase=True) - optimizer = tf.train.MomentumOptimizer(lr,self.momentum) - - train_op = 
optimizer.minimize(self.total_loss, global_step=global_step) - - with tf.Session(config=self.config) as sess: - - tf.global_variables_initializer().run() - - # create model saver and file writer - summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph()) - saver = tf.train.Saver() - - print('') - print('*** Start Training ***') - - # set random seed - epoch = 0 - print_epoch = True - - num_train_images = len(self.img_menpo_list) - # num_train_images = 10  # debug override (tiny training subset), disabled - img_inds = np.arange(num_train_images) - np.random.shuffle(img_inds) - - for step in range(self.train_iter + 1): - - # get batch images - j = step % int(float(num_train_images) / float(self.batch_size)) - - if step > 0 and j == 0: - np.random.shuffle(img_inds) # shuffle data if finished epoch - epoch += 1 - print_epoch = True - - batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size] - - batch_images, batch_maps, batch_maps_small, _ =\ - load_data(self.img_menpo_list, batch_inds, image_size=self.image_size, c_dim=self.c_dim, - num_landmarks=self.num_landmarks, sigma=self.sigma, scale=self.scale, save_landmarks=False) - - feed_dict_train = {self.train_images: batch_images, self.train_heatmaps: batch_maps, - self.train_heatmaps_small: batch_maps_small} - - sess.run(train_op, feed_dict_train) - - # print loss every *log_every_epoch* epoch - # if step == 0 or (step+1) == self.train_iter or (epoch % self.log_every_epoch ==0 and print_epoch): - # if self.sample_every_epoch is not True: - # print_epoch=False - # all_train_pred_landmarks=self.predict_landmarks_in_batches(train_data_paths,session=sess) - # _,all_train_real_landmarks = load_data(None,train_landmarks_paths,self.image_size, - # self.num_landmarks, conv=True) - # all_train_loss = sess.run(self.l2_loss_train,{self.pred_landmarks_train:all_train_pred_landmarks, - # self.train_landmarks:all_train_real_landmarks}) - # - # all_valid_pred_landmarks = self.predict_landmarks_in_batches(valid_data_paths,session=sess) - # _, all_valid_real_landmarks = load_data(None, valid_landmarks_paths, self.image_size, - # self.num_landmarks, conv=True) - # all_valid_loss = sess.run(self.l2_loss_valid, {self.pred_landmarks_valid: all_valid_pred_landmarks, - # self.valid_landmarks: all_valid_real_landmarks}) - # print("--------- EPOCH %d ---------" % (epoch)) - # print ('step: [%d/%d] train loss: [%.6f] valid loss: [%.6f]' - # % (step + 1, self.train_iter, all_train_loss, all_valid_loss)) - # print("----------------------------") - # summary= sess.run(self.epoch_summary_op,{self.l2_loss_valid:all_valid_loss,self.l2_loss_train:all_train_loss}) - # summary_writer.add_summary(summary, epoch) - - # save to log and print status - if step == 0 or (step + 1) % self.print_every == 0: - - summary, l_p, l_f, l_t = sess.run( - [self.batch_summary_op, self.l2_primary, self.l2_fusion, self.total_loss], - feed_dict_train) - - summary_writer.add_summary(summary, step) - - print ('epoch: [%d] step: [%d/%d] primary loss: [%.6f] fusion loss: [%.6f] total loss: [%.6f]' - % (epoch, step + 1, self.train_iter, l_p, l_f, l_t)) - - # save model - if (step + 1) % self.save_every == 0: - saver.save(sess, os.path.join(self.save_model_path, 'deep_heatmaps'), global_step=step + 1) - print ('model/deep-heatmaps-%d saved' % (step + 1)) - - # save images with landmarks - if self.sample_every_epoch and (epoch % self.log_every_epoch == 0 and print_epoch): - print_epoch = False - - # train_pred = sess.run(self.pred_landmarks_eval, {self.test_images: batch_images}) - # valid_pred = 
sess.run(self.pred_landmarks_eval, {self.test_images: valid_images_sample}) - # - # train_sample_path = os.path.join(self.save_sample_path, 'train-epoch-%d.png' % (epoch)) - # valid_sample_path = os.path.join(self.save_sample_path, 'valid-epoch-%d.png' % (epoch)) - # - # merge_images_train = merge_images_with_landmarks(batch_images, train_pred, self.image_size, - # self.num_landmarks, self.sample_grid) - # merge_images_valid = merge_images_with_landmarks(valid_images_sample, valid_pred, - # self.image_size, self.num_landmarks, - # self.sample_grid) - # - # scipy.misc.imsave(train_sample_path, merge_images_train) - # scipy.misc.imsave(valid_sample_path, merge_images_valid) - - elif (self.sample_every_epoch is False) and (step == 0 or (step + 1) % self.sample_every == 0): - - batch_maps_pred, batch_maps_small_pred = sess.run([self.pred_hm_f, self.pred_hm_p], - {self.train_images: batch_images}) - - print('map vals: [%f, %f]' % (batch_maps_pred.min(), batch_maps_pred.max())) - print('small map vals: [%f, %f]' % (batch_maps_small_pred.min(), batch_maps_small_pred.max())) - - sample_path_imgs = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-1.png' % (epoch, step + 1)) - sample_path_maps = os.path.join(self.save_sample_path, 'epoch-%d-train-iter-%d-2.png' % (epoch, step + 1)) - - merged_img = merge_images_landmarks_maps( - batch_images, batch_maps_pred, image_size=self.image_size, - num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale) - - merged_map = merge_compare_maps( - batch_maps_small_pred, batch_maps_pred, image_size=self.image_size // 4, - num_landmarks=self.num_landmarks, num_samples=self.sample_grid) - - scipy.misc.imsave(sample_path_imgs, merged_img) - scipy.misc.imsave(sample_path_maps, merged_map) - - print('*** Finished Training ***') - # evaluate model on test set - # all_test_pred_landmarks = self.predict_landmarks_in_batches(test_data_paths,session=sess) - # _, all_test_real_landmarks = load_data(None, test_landmarks_paths, self.image_size, - # self.num_landmarks, conv=True) - # all_test_loss = sess.run(self.l2_loss_test, {self.pred_landmarks_test: all_test_pred_landmarks, - # self.test_landmarks: all_test_real_landmarks}) - # - # print ('step: [%d/%d] test loss: [%.6f]' % (step, self.train_iter, all_test_loss)) diff --git a/spaces/mascIT/AgeGuesser/yolov5/utils/augmentations.c b/spaces/mascIT/AgeGuesser/yolov5/utils/augmentations.c deleted file mode 100644 index 2646c5673ffc26d17269d60eb5dd1456947ebc3d..0000000000000000000000000000000000000000 --- a/spaces/mascIT/AgeGuesser/yolov5/utils/augmentations.c +++ /dev/null @@ -1,14380 +0,0 @@ -/* Generated by Cython 3.0.0a10 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "pdf_toolbox.lib.dia_yolov5.utils.augmentations", - "sources": [ - "pdf_toolbox\\lib\\dia_yolov5\\utils\\augmentations.py" - ] - }, - "module_name": "pdf_toolbox.lib.dia_yolov5.utils.augmentations" -} -END: Cython Metadata */ - -#ifndef PY_SSIZE_T_CLEAN -#define PY_SSIZE_T_CLEAN -#endif /* PY_SSIZE_T_CLEAN */ -#if defined(CYTHON_LIMITED_API) && 0 - #ifndef Py_LIMITED_API - #if CYTHON_LIMITED_API+0 > 0x03030000 - #define Py_LIMITED_API CYTHON_LIMITED_API - #else - #define Py_LIMITED_API 0x03030000 - #endif - #endif -#endif - -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.7+ or Python 3.3+. 
-#else -#define CYTHON_ABI "3_0_0a10" -#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI -#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." -#define CYTHON_HEX_VERSION 0x030000AA -#define CYTHON_FUTURE_DIVISION 1 -#include <stddef.h> -#ifndef offsetof - #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) -#endif -#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) - #ifndef __stdcall - #define __stdcall - #endif - #ifndef __cdecl - #define __cdecl - #endif - #ifndef __fastcall - #define __fastcall - #endif -#endif -#ifndef DL_IMPORT - #define DL_IMPORT(t) t -#endif -#ifndef DL_EXPORT - #define DL_EXPORT(t) t -#endif -#define __PYX_COMMA , -#ifndef HAVE_LONG_LONG - #define HAVE_LONG_LONG -#endif -#ifndef PY_LONG_LONG - #define PY_LONG_LONG LONG_LONG -#endif -#ifndef Py_HUGE_VAL - #define Py_HUGE_VAL HUGE_VAL -#endif -#if defined(GRAALVM_PYTHON) - /* For very preliminary testing purposes. Most variables are set the same as PyPy. - The existence of this section does not imply that anything works or is even tested */ - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 1 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(PYPY_VERSION) - #define CYTHON_COMPILING_IN_PYPY 1 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define 
CYTHON_USE_UNICODE_INTERNALS 0 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #undef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 1 - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#elif defined(CYTHON_LIMITED_API) - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 0 - #define CYTHON_COMPILING_IN_LIMITED_API 1 - #define CYTHON_COMPILING_IN_GRAAL 0 - #undef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 0 - #undef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 1 - #undef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 0 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #undef CYTHON_USE_PYLIST_INTERNALS - #define CYTHON_USE_PYLIST_INTERNALS 0 - #undef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 0 - #ifndef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #endif - #undef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 0 - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #undef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 0 - #undef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 0 - #undef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 0 - #undef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL 0 - #undef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL 0 - #undef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 0 - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #undef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 1 - #ifndef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 -#else - #define CYTHON_COMPILING_IN_PYPY 0 - #define CYTHON_COMPILING_IN_CPYTHON 1 - #define CYTHON_COMPILING_IN_LIMITED_API 0 - #define CYTHON_COMPILING_IN_GRAAL 0 - #ifndef CYTHON_USE_TYPE_SLOTS - #define CYTHON_USE_TYPE_SLOTS 1 - #endif - #ifndef CYTHON_USE_TYPE_SPECS - #define CYTHON_USE_TYPE_SPECS 0 - #endif - #ifndef CYTHON_USE_PYTYPE_LOOKUP - #define CYTHON_USE_PYTYPE_LOOKUP 1 - #endif - #if PY_MAJOR_VERSION < 3 - #undef CYTHON_USE_ASYNC_SLOTS - #define CYTHON_USE_ASYNC_SLOTS 0 - #elif !defined(CYTHON_USE_ASYNC_SLOTS) - #define CYTHON_USE_ASYNC_SLOTS 1 - #endif - #ifndef CYTHON_USE_PYLONG_INTERNALS - #define CYTHON_USE_PYLONG_INTERNALS 1 - #endif - #ifndef CYTHON_USE_PYLIST_INTERNALS - #define 
CYTHON_USE_PYLIST_INTERNALS 1 - #endif - #ifndef CYTHON_USE_UNICODE_INTERNALS - #define CYTHON_USE_UNICODE_INTERNALS 1 - #endif - #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 - #undef CYTHON_USE_UNICODE_WRITER - #define CYTHON_USE_UNICODE_WRITER 0 - #elif !defined(CYTHON_USE_UNICODE_WRITER) - #define CYTHON_USE_UNICODE_WRITER 1 - #endif - #ifndef CYTHON_AVOID_BORROWED_REFS - #define CYTHON_AVOID_BORROWED_REFS 0 - #endif - #ifndef CYTHON_ASSUME_SAFE_MACROS - #define CYTHON_ASSUME_SAFE_MACROS 1 - #endif - #ifndef CYTHON_UNPACK_METHODS - #define CYTHON_UNPACK_METHODS 1 - #endif - #ifndef CYTHON_FAST_THREAD_STATE - #define CYTHON_FAST_THREAD_STATE 1 - #endif - #ifndef CYTHON_FAST_GIL - #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000) - #endif - #ifndef CYTHON_METH_FASTCALL - #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) - #endif - #ifndef CYTHON_FAST_PYCALL - #define CYTHON_FAST_PYCALL 1 - #endif - #ifndef CYTHON_PEP487_INIT_SUBCLASS - #define CYTHON_PEP487_INIT_SUBCLASS 1 - #endif - #if PY_VERSION_HEX < 0x03050000 - #undef CYTHON_PEP489_MULTI_PHASE_INIT - #define CYTHON_PEP489_MULTI_PHASE_INIT 0 - #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) - #define CYTHON_PEP489_MULTI_PHASE_INIT 1 - #endif - #ifndef CYTHON_USE_MODULE_STATE - #define CYTHON_USE_MODULE_STATE 0 - #endif - #if PY_VERSION_HEX < 0x030400a1 - #undef CYTHON_USE_TP_FINALIZE - #define CYTHON_USE_TP_FINALIZE 0 - #elif !defined(CYTHON_USE_TP_FINALIZE) - #define CYTHON_USE_TP_FINALIZE 1 - #endif - #if PY_VERSION_HEX < 0x030600B1 - #undef CYTHON_USE_DICT_VERSIONS - #define CYTHON_USE_DICT_VERSIONS 0 - #elif !defined(CYTHON_USE_DICT_VERSIONS) - #define CYTHON_USE_DICT_VERSIONS 1 - #endif - #if PY_VERSION_HEX < 0x030700A3 - #undef CYTHON_USE_EXC_INFO_STACK - #define CYTHON_USE_EXC_INFO_STACK 0 - #elif !defined(CYTHON_USE_EXC_INFO_STACK) - #define CYTHON_USE_EXC_INFO_STACK 1 - #endif -#endif -#if !defined(CYTHON_FAST_PYCCALL) -#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) -#endif -#if !defined(CYTHON_VECTORCALL) -#define CYTHON_VECTORCALL (CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) -#endif -#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1) -#if CYTHON_USE_PYLONG_INTERNALS - #if PY_MAJOR_VERSION < 3 - #include "longintrepr.h" - #endif - #undef SHIFT - #undef BASE - #undef MASK - #ifdef SIZEOF_VOID_P - enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; - #endif -#endif -#ifndef __has_attribute - #define __has_attribute(x) 0 -#endif -#ifndef __has_cpp_attribute - #define __has_cpp_attribute(x) 0 -#endif -#ifndef CYTHON_RESTRICT - #if defined(__GNUC__) - #define CYTHON_RESTRICT __restrict__ - #elif defined(_MSC_VER) && _MSC_VER >= 1400 - #define CYTHON_RESTRICT __restrict - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_RESTRICT restrict - #else - #define CYTHON_RESTRICT - #endif -#endif -#ifndef CYTHON_UNUSED -# if defined(__GNUC__) -# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) -# define CYTHON_UNUSED __attribute__ ((__unused__)) -# else -# define CYTHON_UNUSED -# endif -#endif -#ifndef CYTHON_UNUSED_VAR -# if defined(__cplusplus) - template<class T> void CYTHON_UNUSED_VAR( const T& ) { } -# else -# define 
CYTHON_UNUSED_VAR(x) (void)(x) -# endif -#endif -#ifndef CYTHON_MAYBE_UNUSED_VAR - #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x) -#endif -#ifndef CYTHON_NCP_UNUSED -# if CYTHON_COMPILING_IN_CPYTHON -# define CYTHON_NCP_UNUSED -# else -# define CYTHON_NCP_UNUSED CYTHON_UNUSED -# endif -#endif -#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) -#ifdef _MSC_VER - #ifndef _MSC_STDINT_H_ - #if _MSC_VER < 1300 - typedef unsigned char uint8_t; - typedef unsigned short uint16_t; - typedef unsigned int uint32_t; - #else - typedef unsigned __int8 uint8_t; - typedef unsigned __int16 uint16_t; - typedef unsigned __int32 uint32_t; - #endif - #endif - #if _MSC_VER < 1300 - #ifdef _WIN64 - typedef unsigned long long __pyx_uintptr_t; - #else - typedef unsigned int __pyx_uintptr_t; - #endif - #else - #ifdef _WIN64 - typedef unsigned __int64 __pyx_uintptr_t; - #else - typedef unsigned __int32 __pyx_uintptr_t; - #endif - #endif -#else - #include <stdint.h> - typedef uintptr_t __pyx_uintptr_t; -#endif -#ifndef CYTHON_FALLTHROUGH - #if defined(__cplusplus) && __cplusplus >= 201103L - #if __has_cpp_attribute(fallthrough) - #define CYTHON_FALLTHROUGH [[fallthrough]] - #elif __has_cpp_attribute(clang::fallthrough) - #define CYTHON_FALLTHROUGH [[clang::fallthrough]] - #elif __has_cpp_attribute(gnu::fallthrough) - #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] - #endif - #endif - #ifndef CYTHON_FALLTHROUGH - #if __has_attribute(fallthrough) - #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) - #else - #define CYTHON_FALLTHROUGH - #endif - #endif - #if defined(__clang__ ) && defined(__apple_build_version__) - #if __apple_build_version__ < 7000000 - #undef CYTHON_FALLTHROUGH - #define CYTHON_FALLTHROUGH - #endif - #endif -#endif - -#ifndef CYTHON_INLINE - #if defined(__clang__) - #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) - #elif defined(__GNUC__) - #define CYTHON_INLINE __inline__ - #elif defined(_MSC_VER) - #define CYTHON_INLINE __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define CYTHON_INLINE inline - #else - #define CYTHON_INLINE - #endif -#endif - -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) - #define Py_OptimizeFlag 0 -#endif -#define __PYX_BUILD_PY_SSIZE_T "n" -#define CYTHON_FORMAT_SSIZE_T "z" -#if PY_MAJOR_VERSION < 3 - #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" - #define __Pyx_DefaultClassType PyClass_Type - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_BUILTIN_MODULE_NAME "builtins" - #define __Pyx_DefaultClassType PyType_Type -#if PY_VERSION_HEX >= 0x030B00A1 - static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, - PyObject *code, PyObject *c, PyObject* n, PyObject *v, - PyObject *fv, PyObject *cell, PyObject* fn, - PyObject *name, int fline, PyObject *lnos) { - PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; - PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL; - const char *fn_cstr=NULL; - const char *name_cstr=NULL; - PyCodeObject* co=NULL; - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (!(kwds=PyDict_New())) goto end; - if (!(argcount=PyLong_FromLong(a))) goto end; - if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; - if 
(!(posonlyargcount=PyLong_FromLong(p))) goto end; - if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; - if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; - if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; - if (!(nlocals=PyLong_FromLong(l))) goto end; - if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; - if (!(stacksize=PyLong_FromLong(s))) goto end; - if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; - if (!(flags=PyLong_FromLong(f))) goto end; - if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; - if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; - if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; - if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; - if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; - if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too; - if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here - if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too; - Py_XDECREF((PyObject*)co); - co = (PyCodeObject*)call_result; - call_result = NULL; - if (0) { - cleanup_code_too: - Py_XDECREF((PyObject*)co); - co = NULL; - } - end: - Py_XDECREF(kwds); - Py_XDECREF(argcount); - Py_XDECREF(posonlyargcount); - Py_XDECREF(kwonlyargcount); - Py_XDECREF(nlocals); - Py_XDECREF(stacksize); - Py_XDECREF(replace); - Py_XDECREF(call_result); - Py_XDECREF(empty); - if (type) { - PyErr_Restore(type, value, traceback); - } - return co; - } -#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#else - #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ - PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) -#endif -#endif -#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) - #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) -#else - #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) -#endif -#ifndef Py_TPFLAGS_CHECKTYPES - #define Py_TPFLAGS_CHECKTYPES 0 -#endif -#ifndef Py_TPFLAGS_HAVE_INDEX - #define Py_TPFLAGS_HAVE_INDEX 0 -#endif -#ifndef Py_TPFLAGS_HAVE_NEWBUFFER - #define Py_TPFLAGS_HAVE_NEWBUFFER 0 -#endif -#ifndef Py_TPFLAGS_HAVE_FINALIZE - #define Py_TPFLAGS_HAVE_FINALIZE 0 -#endif -#ifndef METH_STACKLESS - #define METH_STACKLESS 0 -#endif -#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) - #ifndef METH_FASTCALL - #define METH_FASTCALL 0x80 - #endif - typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); - typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, - Py_ssize_t nargs, PyObject *kwnames); -#else - #define __Pyx_PyCFunctionFast _PyCFunctionFast - #define __Pyx_PyCFunctionFastWithKeywords 
_PyCFunctionFastWithKeywords -#endif -#if CYTHON_METH_FASTCALL - #define __Pyx_METH_FASTCALL METH_FASTCALL - #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast - #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords -#else - #define __Pyx_METH_FASTCALL METH_VARARGS - #define __Pyx_PyCFunction_FastCall PyCFunction - #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords -#endif -#if CYTHON_VECTORCALL - #define __pyx_vectorcallfunc vectorcallfunc - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET - #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) -#elif CYTHON_BACKPORT_VECTORCALL - typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, - size_t nargsf, PyObject *kwnames); - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) -#else - #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 - #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) -#endif -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b)) - typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); -#else - #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b) - #define __Pyx_PyCMethod PyCMethod -#endif -#ifndef METH_METHOD - #define METH_METHOD 0x200 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyThreadState_Current PyThreadState_Get() -#elif !CYTHON_FAST_THREAD_STATE - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) -{ - void *result; - result = PyModule_GetState(op); - if (!result) - Py_FatalError("Couldn't find the module state"); - return result; -} -#endif -#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) -#if CYTHON_COMPILING_IN_LIMITED_API - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) -#else - #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return 
key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if PY_MAJOR_VERSION < 3 - #if CYTHON_COMPILING_IN_PYPY - #if PYPY_VERSION_NUM < 0x07030600 - #if defined(__cplusplus) && __cplusplus >= 201402L - [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] - #elif defined(__GNUC__) || defined(__clang__) - __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) - #elif defined(_MSC_VER) - __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) - #endif - static CYTHON_INLINE int PyGILState_Check(void) { - return 0; - } - #else // PYPY_VERSION_NUM < 0x07030600 - #endif // PYPY_VERSION_NUM < 0x07030600 - #else - static CYTHON_INLINE int PyGILState_Check(void) { - PyThreadState * tstate = _PyThreadState_Current; - return tstate && (tstate == PyGILState_GetThisThreadState()); - } - #endif -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { - PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); - if (res == NULL) PyErr_Clear(); - return res; -} -#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) -#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#else -static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { -#if CYTHON_COMPILING_IN_PYPY - return PyDict_GetItem(dict, name); -#else - PyDictEntry *ep; - PyDictObject *mp = (PyDictObject*) dict; - long hash = ((PyStringObject *) name)->ob_shash; - assert(hash != -1); - ep = (mp->ma_lookup)(mp, name, hash); - if (ep == NULL) { - return NULL; - } - return ep->me_value; -#endif -} -#define __Pyx_PyDict_GetItemStr PyDict_GetItem -#endif -#if CYTHON_USE_TYPE_SLOTS - #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) - #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) - #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) -#else - #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) - #define __Pyx_PyType_HasFeature(type, feature) 
PyType_HasFeature(type, feature) - #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next -#endif -#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 -#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\ - PyTypeObject *type = Py_TYPE(obj);\ - assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\ - PyObject_GC_Del(obj);\ - Py_DECREF(type);\ -} -#else -#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) -#endif -#if CYTHON_COMPILING_IN_LIMITED_API - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111) - #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) - #define __Pyx_PyUnicode_DATA(u) ((void*)u) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) -#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #if defined(PyUnicode_IS_READY) - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #else - #define __Pyx_PyUnicode_READY(op) (0) - #endif - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #endif - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #if !defined(PyUnicode_DecodeUnicodeEscape) - #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) - #endif - #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) - #undef PyUnicode_Contains - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) - #endif - #if !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) - #endif - #if !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) - #endif -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#if CYTHON_COMPILING_IN_CPYTHON - #define __Pyx_PySequence_ListKeepNew(obj)\ - (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? 
__Pyx_NewRef(obj) : PySequence_List(obj)) -#else - #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t -#endif -#if CYTHON_USE_ASYNC_SLOTS - #if PY_VERSION_HEX >= 0x030500B1 - #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods - #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) - #else - #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) - #endif -#else - #define __Pyx_PyType_AsAsync(obj) NULL -#endif -#ifndef __Pyx_PyAsyncMethodsStruct - typedef struct { - unaryfunc am_await; - unaryfunc am_aiter; - unaryfunc am_anext; - } __Pyx_PyAsyncMethodsStruct; -#endif - -#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) - #define _USE_MATH_DEFINES -#endif -#include <math.h> -#ifdef NAN -#define __PYX_NAN() ((float) NAN) -#else -static CYTHON_INLINE float __PYX_NAN() { - float value; - memset(&value, 0xFF, sizeof(value)); - return value; -} -#endif -#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) -#define __Pyx_truncl trunc -#else -#define __Pyx_truncl truncl -#endif - -#define __PYX_MARK_ERR_POS(f_index, lineno) \ - { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } -#define __PYX_ERR(f_index, lineno, Ln_error) \ - { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } - -#ifndef __PYX_EXTERN_C - #ifdef __cplusplus - #define __PYX_EXTERN_C extern "C" - #else - #define __PYX_EXTERN_C extern - #endif -#endif - -#define __PYX_HAVE__pdf_toolbox__lib__dia_yolov5__utils__augmentations -#define __PYX_HAVE_API__pdf_toolbox__lib__dia_yolov5__utils__augmentations -/* Early includes */ -#ifdef _OPENMP -#include <omp.h> -#endif /* _OPENMP */ - -#if defined(PYREX_WITHOUT_ASSERTIONS) && 
!defined(CYTHON_WITHOUT_ASSERTIONS) -#define CYTHON_WITHOUT_ASSERTIONS -#endif - -typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; - const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; - -#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 -#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) -#define __PYX_DEFAULT_STRING_ENCODING "" -#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString -#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#define __Pyx_uchar_cast(c) ((unsigned char)c) -#define __Pyx_long_cast(x) ((long)x) -#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ - (sizeof(type) < sizeof(Py_ssize_t)) ||\ - (sizeof(type) > sizeof(Py_ssize_t) &&\ - likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX) &&\ - (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ - v == (type)PY_SSIZE_T_MIN))) ||\ - (sizeof(type) == sizeof(Py_ssize_t) &&\ - (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ - v == (type)PY_SSIZE_T_MAX))) ) -static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { - return (size_t) i < (size_t) limit; -} -#if defined (__cplusplus) && __cplusplus >= 201103L - #include <cstdlib> - #define __Pyx_sst_abs(value) std::abs(value) -#elif SIZEOF_INT >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) abs(value) -#elif SIZEOF_LONG >= SIZEOF_SIZE_T - #define __Pyx_sst_abs(value) labs(value) -#elif defined (_MSC_VER) - #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) -#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define __Pyx_sst_abs(value) llabs(value) -#elif defined (__GNUC__) - #define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u) -{ - const wchar_t *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#else -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) -{ - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#endif -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || 
(__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ -static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } - -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_m = NULL; -static PyObject *__pyx_d; -static PyObject *__pyx_b; -static PyObject *__pyx_cython_runtime = NULL; -static PyObject *__pyx_empty_tuple; -static PyObject *__pyx_empty_bytes; -static PyObject *__pyx_empty_unicode; -#endif -static int __pyx_lineno; -static int __pyx_clineno = 0; -static const char * __pyx_cfilenm = __FILE__; -static const char *__pyx_filename; - -/* #### Code section: filename_table ### */ - -static const char *__pyx_f[] = { - "pdf_toolbox\\\\lib\\\\dia_yolov5\\\\utils\\\\augmentations.py", -}; -/* #### Code section: utility_code_proto_before_types ### */ -/* #### Code section: numeric_typedefs ### */ -/* #### Code section: complex_type_declarations ### */ -/* #### Code section: type_declarations ### */ - -/*--- Type declarations ---*/ -/* #### Code section: utility_code_proto ### */ - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, Py_ssize_t); - void (*DECREF)(void*, PyObject*, Py_ssize_t); - void (*GOTREF)(void*, PyObject*, Py_ssize_t); - void (*GIVEREF)(void*, PyObject*, Py_ssize_t); - void* (*SetupContext)(const char*, Py_ssize_t, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\ - } - #define __Pyx_RefNannyFinishContextNogil() {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __Pyx_RefNannyFinishContext();\ - PyGILState_Release(__pyx_gilstate_save);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__)) - #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext() -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) - #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - 
#define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContextNogil() - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_Py_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; Py_XDECREF(tmp);\ - } while (0) -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* 
PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* TupleAndListFromArray.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n); -static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n); -#endif - -/* IncludeStringH.proto */ -#include <string.h> - -/* BytesEquals.proto */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); - -/* UnicodeEquals.proto */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); - -/* fastcall.proto */ -#define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i) -#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) -#define __Pyx_KwValues_VARARGS(args, nargs) NULL -#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) -#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) -#if CYTHON_METH_FASTCALL - #define __Pyx_Arg_FASTCALL(args, i) args[i] - #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) - #define __Pyx_KwValues_FASTCALL(args, nargs) (&args[nargs]) - static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); - #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) -#else - #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS - #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS - #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS - #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS - #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS -#endif -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) -#else -#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) -#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) -#endif - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, - const char* function_name); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#if !CYTHON_VECTORCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif -#if !CYTHON_VECTORCALL - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ - (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - 
Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) - #define __Pyx_PyFrame_GetLocalsplus(frame)\ - (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) -#endif // !CYTHON_VECTORCALL -#endif - -/* PyObjectCallMethO.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); -#endif - -/* PyObjectFastCall.proto */ -#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs); - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* IterFinish.proto */ -static CYTHON_INLINE int __Pyx_IterFinish(void); - -/* UnpackItemEndCheck.proto */ -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_RemainderObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_RemainderObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceRemainder(op1, op2) : PyNumber_Remainder(op1, op2)) -#endif - -/* GetItemInt.proto */ -#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ - (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* PyObjectCallOneArg.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* SliceObject.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( - PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** py_start, PyObject** py_stop, PyObject** py_slice, - int has_cstart, int has_cstop, int wraparound); - -/* PyIntBinop.proto */ 
-#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_TrueDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_TrueDivideObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceTrueDivide(op1, op2) : PyNumber_TrueDivide(op1, op2)) -#endif - -/* PyFloatBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyFloat_SubtractObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyFloat_SubtractObjC(op1, op2, floatval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) -#endif - -/* PyFloatBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyFloat_AddObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyFloat_AddObjC(op1, op2, floatval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* ListExtend.proto */ -static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { -#if CYTHON_COMPILING_IN_CPYTHON - PyObject* none = _PyList_Extend((PyListObject*)L, v); - if (unlikely(!none)) - return -1; - Py_DECREF(none); - return 0; -#else - return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); -#endif -} - -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* PyObjectCall2Args.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); - -/* PyObjectGetMethod.proto */ -static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); - -/* PyObjectCallMethod1.proto */ -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); - -/* append.proto */ -static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x); - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_FloorDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_FloorDivideObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? PyNumber_InPlaceFloorDivide(op1, op2) : PyNumber_FloorDivide(op1, op2)) -#endif - -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractCObj(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_SubtractCObj(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? 
PyNumber_InPlaceSubtract(op1, op2) : PyNumber_Subtract(op1, op2)) -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* ImportDottedModule.proto */ -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* IncludeStructmemberH.proto */ -#include <structmember.h> - -/* FixUpExtensionType.proto */ -#if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type); -#endif - -/* FetchCommonType.proto */ -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); -#else -static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases); -#endif - -/* PyMethodNew.proto */ -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) { - CYTHON_UNUSED_VAR(typ); - if (!self) - return __Pyx_NewRef(func); - return PyMethod_New(func, self); -} -#else - #define __Pyx_PyMethod_New PyMethod_New -#endif - -/* PyVectorcallFastCallDict.proto */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw); -#endif - -/* CythonFunctionShared.proto */ -#define __Pyx_CyFunction_USED -#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 -#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 -#define __Pyx_CYFUNCTION_CCLASS 0x04 -#define __Pyx_CYFUNCTION_COROUTINE 0x08 -#define __Pyx_CyFunction_GetClosure(f)\ - (((__pyx_CyFunctionObject *) (f))->func_closure) -#if PY_VERSION_HEX < 0x030900B1 - #define __Pyx_CyFunction_GetClassObj(f)\ - (((__pyx_CyFunctionObject *) (f))->func_classobj) -#else - #define __Pyx_CyFunction_GetClassObj(f)\ - ((PyObject*) ((PyCMethodObject *) (f))->mm_class) -#endif -#define __Pyx_CyFunction_SetClassObj(f, classobj)\ - __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj)) -#define __Pyx_CyFunction_Defaults(type, f)\ - ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) -#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ - ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) -typedef struct { -#if PY_VERSION_HEX < 0x030900B1 - PyCFunctionObject func; -#else - PyCMethodObject func; -#endif -#if CYTHON_BACKPORT_VECTORCALL - __pyx_vectorcallfunc func_vectorcall; -#endif -#if PY_VERSION_HEX < 0x030500A0 - PyObject *func_weakreflist; -#endif - PyObject *func_dict; - PyObject *func_name; - PyObject *func_qualname; - PyObject *func_doc; - PyObject *func_globals; - PyObject *func_code; - PyObject *func_closure; -#if PY_VERSION_HEX < 0x030900B1 - PyObject *func_classobj; -#endif - void *defaults; - int defaults_pyobjects; - size_t defaults_size; // used by FusedFunction for copying defaults - int flags; - PyObject *defaults_tuple; - PyObject *defaults_kwdict; - PyObject *(*defaults_getter)(PyObject *); - PyObject *func_annotations; - PyObject *func_is_coroutine; -} __pyx_CyFunctionObject; -#if !CYTHON_USE_MODULE_STATE -static PyTypeObject *__pyx_CyFunctionType = 0; -#endif -#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) -#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) -#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, - int 
flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); -static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, - size_t size, - int pyobjects); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, - PyObject *tuple); -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, - PyObject *dict); -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, - PyObject *dict); -static int __pyx_CyFunction_init(PyObject *module); -#if CYTHON_METH_FASTCALL -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); -#if CYTHON_BACKPORT_VECTORCALL -#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) -#else -#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) -#endif -#endif - -/* CythonFunction.proto */ -static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, - int flags, PyObject* qualname, - PyObject *closure, - PyObject *module, PyObject *globals, - PyObject* code); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -#if !CYTHON_COMPILING_IN_LIMITED_API -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); -#endif - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -/* GCCDiagnostics.proto */ -#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) -#define __Pyx_HAS_GCC_DIAGNOSTIC -#endif - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* FormatTypeName.proto */ -#if CYTHON_COMPILING_IN_LIMITED_API -typedef PyObject *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%U" -static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); -#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) -#else -typedef const char *__Pyx_TypeName; -#define __Pyx_FMT_TYPENAME "%.200s" -#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) -#define __Pyx_DECREF_TypeName(obj) -#endif - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyErr_ExceptionMatches2(err1, err2) __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_Occurred(), err1, err2) -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -#if CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str); -#else -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); -#endif - -/* #### Code section: module_declarations ### */ - -/* Module declarations from "pdf_toolbox.lib.dia_yolov5.utils.augmentations" */ -#if !CYTHON_USE_MODULE_STATE -#endif -/* #### Code section: typeinfo ### */ -/* #### Code section: before_global_var ### */ -#define 
__Pyx_MODULE_NAME "pdf_toolbox.lib.dia_yolov5.utils.augmentations" -extern int __pyx_module_is_main_pdf_toolbox__lib__dia_yolov5__utils__augmentations; -int __pyx_module_is_main_pdf_toolbox__lib__dia_yolov5__utils__augmentations = 0; - -/* Implementation of "pdf_toolbox.lib.dia_yolov5.utils.augmentations" */ -/* #### Code section: global_var ### */ -static PyObject *__pyx_builtin_round; -static PyObject *__pyx_builtin_range; -/* #### Code section: string_decls ### */ -static const char __pyx_k_T[] = "T"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k_h[] = "h"; -static const char __pyx_k_i[] = "i"; -static const char __pyx_k_j[] = "j"; -static const char __pyx_k_k[] = "k"; -static const char __pyx_k_l[] = "l"; -static const char __pyx_k_n[] = "n"; -static const char __pyx_k_p[] = "p"; -static const char __pyx_k_r[] = "r"; -static const char __pyx_k_s[] = "s"; -static const char __pyx_k_w[] = "w"; -static const char __pyx_k_x[] = "x"; -static const char __pyx_k_ar[] = "ar"; -static const char __pyx_k_bh[] = "bh"; -static const char __pyx_k_bw[] = "bw"; -static const char __pyx_k_dh[] = "dh"; -static const char __pyx_k_dw[] = "dw"; -static const char __pyx_k_h1[] = "h1"; -static const char __pyx_k_h2[] = "h2"; -static const char __pyx_k_im[] = "im"; -static const char __pyx_k_np[] = "np"; -static const char __pyx_k_w1[] = "w1"; -static const char __pyx_k_w2[] = "w2"; -static const char __pyx_k_x1[] = "x1"; -static const char __pyx_k_x2[] = "x2"; -static const char __pyx_k_xc[] = "xc"; -static const char __pyx_k_y1[] = "y1"; -static const char __pyx_k_y2[] = "y2"; -static const char __pyx_k_yc[] = "yc"; -static const char __pyx_k_LUT[] = "LUT"; -static const char __pyx_k__21[] = "*"; -static const char __pyx_k__22[] = "."; -static const char __pyx_k__37[] = "_"; -static const char __pyx_k__46[] = "?"; -static const char __pyx_k_all[] = "all"; -static const char __pyx_k_bgr[] = "bgr"; -static const char __pyx_k_box[] = "box"; -static const char __pyx_k_cv2[] = "cv2"; -static const char __pyx_k_dst[] = "dst"; -static const char __pyx_k_eps[] = "eps"; -static const char __pyx_k_hue[] = "hue"; -static const char __pyx_k_im2[] = "im2"; -static const char __pyx_k_ioa[] = "ioa"; -static const char __pyx_k_mod[] = "mod"; -static const char __pyx_k_sat[] = "sat"; -static const char __pyx_k_top[] = "top"; -static const char __pyx_k_val[] = "val"; -static const char __pyx_k_x1a[] = "x1a"; -static const char __pyx_k_x1b[] = "x1b"; -static const char __pyx_k_x2a[] = "x2a"; -static const char __pyx_k_x2b[] = "x2b"; -static const char __pyx_k_y1a[] = "y1a"; -static const char __pyx_k_y1b[] = "y1b"; -static const char __pyx_k_y2a[] = "y2a"; -static const char __pyx_k_y2b[] = "y2b"; -static const char __pyx_k_yuv[] = "yuv"; -static const char __pyx_k_auto[] = "auto"; -static const char __pyx_k_axis[] = "axis"; -static const char __pyx_k_beta[] = "beta"; -static const char __pyx_k_box1[] = "box1"; -static const char __pyx_k_box2[] = "box2"; -static const char __pyx_k_clip[] = "clip"; -static const char __pyx_k_flip[] = "flip"; -static const char __pyx_k_left[] = "left"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_math[] = "math"; -static const char __pyx_k_name[] = "__name__"; -static const char __pyx_k_size[] = "size"; -static const char __pyx_k_spec[] = "__spec__"; -static const char __pyx_k_src1[] = "src1"; -static const char __pyx_k_src2[] = "src2"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_xmax[] = "xmax"; -static const char 
__pyx_k_xmin[] = "xmin"; -static const char __pyx_k_ymax[] = "ymax"; -static const char __pyx_k_ymin[] = "ymin"; -static const char __pyx_k_apply[] = "apply"; -static const char __pyx_k_array[] = "array"; -static const char __pyx_k_boxes[] = "boxes"; -static const char __pyx_k_clahe[] = "clahe"; -static const char __pyx_k_color[] = "color"; -static const char __pyx_k_dtype[] = "dtype"; -static const char __pyx_k_hgain[] = "hgain"; -static const char __pyx_k_int32[] = "int32"; -static const char __pyx_k_merge[] = "merge"; -static const char __pyx_k_mixup[] = "mixup"; -static const char __pyx_k_numpy[] = "numpy"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_ratio[] = "ratio"; -static const char __pyx_k_right[] = "right"; -static const char __pyx_k_round[] = "round"; -static const char __pyx_k_sgain[] = "sgain"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_split[] = "split"; -static const char __pyx_k_uint8[] = "uint8"; -static const char __pyx_k_value[] = "value"; -static const char __pyx_k_vgain[] = "vgain"; -static const char __pyx_k_zeros[] = "zeros"; -static const char __pyx_k_FILLED[] = "FILLED"; -static const char __pyx_k_LOGGER[] = "LOGGER"; -static const char __pyx_k_append[] = "append"; -static const char __pyx_k_ar_thr[] = "ar_thr"; -static const char __pyx_k_arange[] = "arange"; -static const char __pyx_k_astype[] = "astype"; -static const char __pyx_k_bottom[] = "bottom"; -static const char __pyx_k_cutout[] = "cutout"; -static const char __pyx_k_im_hsv[] = "im_hsv"; -static const char __pyx_k_im_new[] = "im_new"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_labels[] = "labels"; -static const char __pyx_k_mask_h[] = "mask_h"; -static const char __pyx_k_mask_w[] = "mask_w"; -static const char __pyx_k_random[] = "random"; -static const char __pyx_k_resize[] = "resize"; -static const char __pyx_k_result[] = "result"; -static const char __pyx_k_sample[] = "sample"; -static const char __pyx_k_scales[] = "scales"; -static const char __pyx_k_stride[] = "stride"; -static const char __pyx_k_wh_thr[] = "wh_thr"; -static const char __pyx_k_argsort[] = "argsort"; -static const char __pyx_k_float32[] = "float32"; -static const char __pyx_k_labels2[] = "labels2"; -static const char __pyx_k_lut_hue[] = "lut_hue"; -static const char __pyx_k_lut_sat[] = "lut_sat"; -static const char __pyx_k_lut_val[] = "lut_val"; -static const char __pyx_k_maximum[] = "maximum"; -static const char __pyx_k_randint[] = "randint"; -static const char __pyx_k_scaleup[] = "scaleup"; -static const char __pyx_k_uniform[] = "uniform"; -static const char __pyx_k_area_thr[] = "area_thr"; -static const char __pyx_k_bbox_ioa[] = "bbox_ioa"; -static const char __pyx_k_colorstr[] = "colorstr"; -static const char __pyx_k_cvtColor[] = "cvtColor"; -static const char __pyx_k_segments[] = "segments"; -static const char __pyx_k_clipLimit[] = "clipLimit"; -static const char __pyx_k_letterbox[] = "letterbox"; -static const char __pyx_k_new_shape[] = "new_shape"; -static const char __pyx_k_new_unpad[] = "new_unpad"; -static const char __pyx_k_replicate[] = "replicate"; -static const char __pyx_k_scaleFill[] = "scaleFill"; -static const char __pyx_k_copy_paste[] = "copy_paste"; -static const char __pyx_k_augment_hsv[] = "augment_hsv"; -static const char __pyx_k_bitwise_and[] = "bitwise_and"; -static const char __pyx_k_concatenate[] = "concatenate"; -static const char __pyx_k_createCLAHE[] = "createCLAHE"; -static const char __pyx_k_INTER_LINEAR[] = 
"INTER_LINEAR"; -static const char __pyx_k_drawContours[] = "drawContours"; -static const char __pyx_k_equalizeHist[] = "equalizeHist"; -static const char __pyx_k_initializing[] = "_initializing"; -static const char __pyx_k_is_coroutine[] = "_is_coroutine"; -static const char __pyx_k_tileGridSize[] = "tileGridSize"; -static const char __pyx_k_COLOR_BGR2HSV[] = "COLOR_BGR2HSV"; -static const char __pyx_k_COLOR_BGR2YUV[] = "COLOR_BGR2YUV"; -static const char __pyx_k_COLOR_HSV2BGR[] = "COLOR_HSV2BGR"; -static const char __pyx_k_COLOR_RGB2YUV[] = "COLOR_RGB2YUV"; -static const char __pyx_k_COLOR_YUV2BGR[] = "COLOR_YUV2BGR"; -static const char __pyx_k_COLOR_YUV2RGB[] = "COLOR_YUV2RGB"; -static const char __pyx_k_class_getitem[] = "__class_getitem__"; -static const char __pyx_k_hist_equalize[] = "hist_equalize"; -static const char __pyx_k_interpolation[] = "interpolation"; -static const char __pyx_k_box_candidates[] = "box_candidates"; -static const char __pyx_k_copyMakeBorder[] = "copyMakeBorder"; -static const char __pyx_k_BORDER_CONSTANT[] = "BORDER_CONSTANT"; -static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines"; -static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; -static const char __pyx_k_Image_augmentation_functions[] = "\nImage augmentation functions\n"; -static const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils[] = "pdf_toolbox.lib.dia_yolov5.utils.general"; -static const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2[] = "pdf_toolbox.lib.dia_yolov5.utils.metrics"; -static const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3[] = "pdf_toolbox\\lib\\dia_yolov5\\utils\\augmentations.py"; -static const char __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_4[] = "pdf_toolbox.lib.dia_yolov5.utils.augmentations"; -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_n_s_BORDER_CONSTANT; -static PyObject *__pyx_n_s_COLOR_BGR2HSV; -static PyObject *__pyx_n_s_COLOR_BGR2YUV; -static PyObject *__pyx_n_s_COLOR_HSV2BGR; -static PyObject *__pyx_n_s_COLOR_RGB2YUV; -static PyObject *__pyx_n_s_COLOR_YUV2BGR; -static PyObject *__pyx_n_s_COLOR_YUV2RGB; -static PyObject *__pyx_n_s_FILLED; -static PyObject *__pyx_n_s_INTER_LINEAR; -static PyObject *__pyx_n_s_LOGGER; -static PyObject *__pyx_n_s_LUT; -static PyObject *__pyx_n_s_T; -static PyObject *__pyx_n_s__21; -static PyObject *__pyx_kp_u__22; -static PyObject *__pyx_n_s__37; -static PyObject *__pyx_n_s__46; -static PyObject *__pyx_n_s_all; -static PyObject *__pyx_n_s_append; -static PyObject *__pyx_n_s_apply; -static PyObject *__pyx_n_s_ar; -static PyObject *__pyx_n_s_ar_thr; -static PyObject *__pyx_n_s_arange; -static PyObject *__pyx_n_s_area_thr; -static PyObject *__pyx_n_s_argsort; -static PyObject *__pyx_n_s_array; -static PyObject *__pyx_n_s_astype; -static PyObject *__pyx_n_s_asyncio_coroutines; -static PyObject *__pyx_n_s_augment_hsv; -static PyObject *__pyx_n_s_auto; -static PyObject *__pyx_n_s_axis; -static PyObject *__pyx_n_s_bbox_ioa; -static PyObject *__pyx_n_s_beta; -static PyObject *__pyx_n_s_bgr; -static PyObject *__pyx_n_s_bh; -static PyObject *__pyx_n_s_bitwise_and; -static PyObject *__pyx_n_s_bottom; -static PyObject *__pyx_n_s_box; -static PyObject *__pyx_n_s_box1; -static PyObject *__pyx_n_s_box2; -static PyObject *__pyx_n_s_box_candidates; -static PyObject *__pyx_n_s_boxes; -static PyObject *__pyx_n_s_bw; -static PyObject *__pyx_n_s_c; -static PyObject *__pyx_n_s_clahe; -static PyObject *__pyx_n_s_class_getitem; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_n_s_clip; -static 
PyObject *__pyx_n_s_clipLimit; -static PyObject *__pyx_n_s_color; -static PyObject *__pyx_n_s_colorstr; -static PyObject *__pyx_n_s_concatenate; -static PyObject *__pyx_n_s_copyMakeBorder; -static PyObject *__pyx_n_s_copy_paste; -static PyObject *__pyx_n_s_createCLAHE; -static PyObject *__pyx_n_s_cutout; -static PyObject *__pyx_n_s_cv2; -static PyObject *__pyx_n_s_cvtColor; -static PyObject *__pyx_n_s_dh; -static PyObject *__pyx_n_s_drawContours; -static PyObject *__pyx_n_s_dst; -static PyObject *__pyx_n_s_dtype; -static PyObject *__pyx_n_s_dw; -static PyObject *__pyx_n_s_eps; -static PyObject *__pyx_n_s_equalizeHist; -static PyObject *__pyx_n_s_flip; -static PyObject *__pyx_n_s_float32; -static PyObject *__pyx_n_s_h; -static PyObject *__pyx_n_s_h1; -static PyObject *__pyx_n_s_h2; -static PyObject *__pyx_n_s_hgain; -static PyObject *__pyx_n_s_hist_equalize; -static PyObject *__pyx_n_s_hue; -static PyObject *__pyx_n_s_i; -static PyObject *__pyx_n_s_im; -static PyObject *__pyx_n_s_im2; -static PyObject *__pyx_n_s_im_hsv; -static PyObject *__pyx_n_s_im_new; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_initializing; -static PyObject *__pyx_n_s_int32; -static PyObject *__pyx_n_s_interpolation; -static PyObject *__pyx_n_s_ioa; -static PyObject *__pyx_n_s_is_coroutine; -static PyObject *__pyx_n_s_j; -static PyObject *__pyx_n_s_k; -static PyObject *__pyx_n_s_l; -static PyObject *__pyx_n_s_labels; -static PyObject *__pyx_n_s_labels2; -static PyObject *__pyx_n_s_left; -static PyObject *__pyx_n_s_letterbox; -static PyObject *__pyx_n_s_lut_hue; -static PyObject *__pyx_n_s_lut_sat; -static PyObject *__pyx_n_s_lut_val; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_mask_h; -static PyObject *__pyx_n_s_mask_w; -static PyObject *__pyx_n_s_math; -static PyObject *__pyx_n_s_maximum; -static PyObject *__pyx_n_s_merge; -static PyObject *__pyx_n_s_mixup; -static PyObject *__pyx_n_s_mod; -static PyObject *__pyx_n_s_n; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_new_shape; -static PyObject *__pyx_n_s_new_unpad; -static PyObject *__pyx_n_s_np; -static PyObject *__pyx_n_s_numpy; -static PyObject *__pyx_n_s_p; -static PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils; -static PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2; -static PyObject *__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3; -static PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4; -static PyObject *__pyx_n_s_r; -static PyObject *__pyx_n_s_randint; -static PyObject *__pyx_n_s_random; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_ratio; -static PyObject *__pyx_n_s_replicate; -static PyObject *__pyx_n_s_resize; -static PyObject *__pyx_n_s_result; -static PyObject *__pyx_n_s_right; -static PyObject *__pyx_n_s_round; -static PyObject *__pyx_n_s_s; -static PyObject *__pyx_n_s_sample; -static PyObject *__pyx_n_s_sat; -static PyObject *__pyx_n_s_scaleFill; -static PyObject *__pyx_n_s_scales; -static PyObject *__pyx_n_s_scaleup; -static PyObject *__pyx_n_s_segments; -static PyObject *__pyx_n_s_sgain; -static PyObject *__pyx_n_s_shape; -static PyObject *__pyx_n_s_size; -static PyObject *__pyx_n_s_spec; -static PyObject *__pyx_n_s_split; -static PyObject *__pyx_n_s_src1; -static PyObject *__pyx_n_s_src2; -static PyObject *__pyx_n_s_stride; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_n_s_tileGridSize; -static PyObject *__pyx_n_s_top; -static PyObject *__pyx_n_s_uint8; -static PyObject *__pyx_n_s_uniform; -static PyObject *__pyx_n_s_val; -static PyObject 
*__pyx_n_s_value; -static PyObject *__pyx_n_s_vgain; -static PyObject *__pyx_n_s_w; -static PyObject *__pyx_n_s_w1; -static PyObject *__pyx_n_s_w2; -static PyObject *__pyx_n_s_wh_thr; -static PyObject *__pyx_n_s_x; -static PyObject *__pyx_n_s_x1; -static PyObject *__pyx_n_s_x1a; -static PyObject *__pyx_n_s_x1b; -static PyObject *__pyx_n_s_x2; -static PyObject *__pyx_n_s_x2a; -static PyObject *__pyx_n_s_x2b; -static PyObject *__pyx_n_s_xc; -static PyObject *__pyx_n_s_xmax; -static PyObject *__pyx_n_s_xmin; -static PyObject *__pyx_n_s_y1; -static PyObject *__pyx_n_s_y1a; -static PyObject *__pyx_n_s_y1b; -static PyObject *__pyx_n_s_y2; -static PyObject *__pyx_n_s_y2a; -static PyObject *__pyx_n_s_y2b; -static PyObject *__pyx_n_s_yc; -static PyObject *__pyx_n_s_ymax; -static PyObject *__pyx_n_s_ymin; -static PyObject *__pyx_n_s_yuv; -static PyObject *__pyx_n_s_zeros; -#endif -/* #### Code section: decls ### */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_augment_hsv(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_hgain, PyObject *__pyx_v_sgain, PyObject *__pyx_v_vgain); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_2hist_equalize(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_clahe, PyObject *__pyx_v_bgr); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_4replicate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_6letterbox(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_new_shape, PyObject *__pyx_v_color, PyObject *__pyx_v_auto, PyObject *__pyx_v_scaleFill, PyObject *__pyx_v_scaleup, PyObject *__pyx_v_stride); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_8copy_paste(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels, PyObject *__pyx_v_segments, PyObject *__pyx_v_p); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_10cutout(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels, PyObject *__pyx_v_p); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_12mixup(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels, PyObject *__pyx_v_im2, PyObject *__pyx_v_labels2); /* proto */ -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_14box_candidates(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_box1, PyObject *__pyx_v_box2, PyObject *__pyx_v_wh_thr, PyObject *__pyx_v_ar_thr, PyObject *__pyx_v_area_thr, PyObject *__pyx_v_eps); /* proto */ -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_float_0_0; -static PyObject *__pyx_float_0_1; -static PyObject *__pyx_float_0_5; -static PyObject *__pyx_float_2_0; -static PyObject *__pyx_float_0_03; -static PyObject *__pyx_float_0_25; -static PyObject *__pyx_float_0_30; -static PyObject *__pyx_float_0_60; -static PyObject *__pyx_float_32_0; -static PyObject *__pyx_float_0_125; -static PyObject *__pyx_float_1eneg_16; -static PyObject *__pyx_float_0_0625; -static PyObject *__pyx_float_0_03125; -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_2; -static PyObject *__pyx_int_3; -static PyObject *__pyx_int_5; -static PyObject 
*__pyx_int_8; -static PyObject *__pyx_int_32; -static PyObject *__pyx_int_64; -static PyObject *__pyx_int_100; -static PyObject *__pyx_int_114; -static PyObject *__pyx_int_180; -static PyObject *__pyx_int_191; -static PyObject *__pyx_int_255; -static PyObject *__pyx_int_256; -static PyObject *__pyx_int_640; -static PyObject *__pyx_int_neg_1; -#endif -#if !CYTHON_USE_MODULE_STATE -static PyObject *__pyx_tuple_; -static PyObject *__pyx_slice__4; -static PyObject *__pyx_slice__6; -static PyObject *__pyx_slice__7; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__3; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_tuple__9; -static PyObject *__pyx_slice__11; -static PyObject *__pyx_slice__12; -static PyObject *__pyx_slice__14; -static PyObject *__pyx_slice__16; -static PyObject *__pyx_tuple__10; -static PyObject *__pyx_tuple__13; -static PyObject *__pyx_tuple__15; -static PyObject *__pyx_tuple__17; -static PyObject *__pyx_tuple__18; -static PyObject *__pyx_tuple__19; -static PyObject *__pyx_tuple__20; -static PyObject *__pyx_tuple__23; -static PyObject *__pyx_tuple__25; -static PyObject *__pyx_tuple__26; -static PyObject *__pyx_tuple__28; -static PyObject *__pyx_tuple__29; -static PyObject *__pyx_tuple__31; -static PyObject *__pyx_tuple__33; -static PyObject *__pyx_tuple__34; -static PyObject *__pyx_tuple__36; -static PyObject *__pyx_tuple__38; -static PyObject *__pyx_tuple__40; -static PyObject *__pyx_tuple__41; -static PyObject *__pyx_tuple__43; -static PyObject *__pyx_tuple__45; -static PyObject *__pyx_codeobj__24; -static PyObject *__pyx_codeobj__27; -static PyObject *__pyx_codeobj__30; -static PyObject *__pyx_codeobj__32; -static PyObject *__pyx_codeobj__35; -static PyObject *__pyx_codeobj__39; -static PyObject *__pyx_codeobj__42; -static PyObject *__pyx_codeobj__44; -#endif -/* #### Code section: late_includes ### */ -/* #### Code section: module_state ### */ -#if CYTHON_USE_MODULE_STATE -typedef struct { - PyObject *__pyx_d; - PyObject *__pyx_b; - PyObject *__pyx_cython_runtime; - PyObject *__pyx_empty_tuple; - PyObject *__pyx_empty_bytes; - PyObject *__pyx_empty_unicode; - #ifdef __Pyx_CyFunction_USED - PyTypeObject *__pyx_CyFunctionType; - #endif - #ifdef __Pyx_FusedFunction_USED - PyTypeObject *__pyx_FusedFunctionType; - #endif - PyObject *__pyx_n_s_BORDER_CONSTANT; - PyObject *__pyx_n_s_COLOR_BGR2HSV; - PyObject *__pyx_n_s_COLOR_BGR2YUV; - PyObject *__pyx_n_s_COLOR_HSV2BGR; - PyObject *__pyx_n_s_COLOR_RGB2YUV; - PyObject *__pyx_n_s_COLOR_YUV2BGR; - PyObject *__pyx_n_s_COLOR_YUV2RGB; - PyObject *__pyx_n_s_FILLED; - PyObject *__pyx_n_s_INTER_LINEAR; - PyObject *__pyx_n_s_LOGGER; - PyObject *__pyx_n_s_LUT; - PyObject *__pyx_n_s_T; - PyObject *__pyx_n_s__21; - PyObject *__pyx_kp_u__22; - PyObject *__pyx_n_s__37; - PyObject *__pyx_n_s__46; - PyObject *__pyx_n_s_all; - PyObject *__pyx_n_s_append; - PyObject *__pyx_n_s_apply; - PyObject *__pyx_n_s_ar; - PyObject *__pyx_n_s_ar_thr; - PyObject *__pyx_n_s_arange; - PyObject *__pyx_n_s_area_thr; - PyObject *__pyx_n_s_argsort; - PyObject *__pyx_n_s_array; - PyObject *__pyx_n_s_astype; - PyObject *__pyx_n_s_asyncio_coroutines; - PyObject *__pyx_n_s_augment_hsv; - PyObject *__pyx_n_s_auto; - PyObject *__pyx_n_s_axis; - PyObject *__pyx_n_s_bbox_ioa; - PyObject *__pyx_n_s_beta; - PyObject *__pyx_n_s_bgr; - PyObject *__pyx_n_s_bh; - PyObject *__pyx_n_s_bitwise_and; - PyObject *__pyx_n_s_bottom; - PyObject *__pyx_n_s_box; - PyObject *__pyx_n_s_box1; - PyObject *__pyx_n_s_box2; - PyObject 
*__pyx_n_s_box_candidates; - PyObject *__pyx_n_s_boxes; - PyObject *__pyx_n_s_bw; - PyObject *__pyx_n_s_c; - PyObject *__pyx_n_s_clahe; - PyObject *__pyx_n_s_class_getitem; - PyObject *__pyx_n_s_cline_in_traceback; - PyObject *__pyx_n_s_clip; - PyObject *__pyx_n_s_clipLimit; - PyObject *__pyx_n_s_color; - PyObject *__pyx_n_s_colorstr; - PyObject *__pyx_n_s_concatenate; - PyObject *__pyx_n_s_copyMakeBorder; - PyObject *__pyx_n_s_copy_paste; - PyObject *__pyx_n_s_createCLAHE; - PyObject *__pyx_n_s_cutout; - PyObject *__pyx_n_s_cv2; - PyObject *__pyx_n_s_cvtColor; - PyObject *__pyx_n_s_dh; - PyObject *__pyx_n_s_drawContours; - PyObject *__pyx_n_s_dst; - PyObject *__pyx_n_s_dtype; - PyObject *__pyx_n_s_dw; - PyObject *__pyx_n_s_eps; - PyObject *__pyx_n_s_equalizeHist; - PyObject *__pyx_n_s_flip; - PyObject *__pyx_n_s_float32; - PyObject *__pyx_n_s_h; - PyObject *__pyx_n_s_h1; - PyObject *__pyx_n_s_h2; - PyObject *__pyx_n_s_hgain; - PyObject *__pyx_n_s_hist_equalize; - PyObject *__pyx_n_s_hue; - PyObject *__pyx_n_s_i; - PyObject *__pyx_n_s_im; - PyObject *__pyx_n_s_im2; - PyObject *__pyx_n_s_im_hsv; - PyObject *__pyx_n_s_im_new; - PyObject *__pyx_n_s_import; - PyObject *__pyx_n_s_initializing; - PyObject *__pyx_n_s_int32; - PyObject *__pyx_n_s_interpolation; - PyObject *__pyx_n_s_ioa; - PyObject *__pyx_n_s_is_coroutine; - PyObject *__pyx_n_s_j; - PyObject *__pyx_n_s_k; - PyObject *__pyx_n_s_l; - PyObject *__pyx_n_s_labels; - PyObject *__pyx_n_s_labels2; - PyObject *__pyx_n_s_left; - PyObject *__pyx_n_s_letterbox; - PyObject *__pyx_n_s_lut_hue; - PyObject *__pyx_n_s_lut_sat; - PyObject *__pyx_n_s_lut_val; - PyObject *__pyx_n_s_main; - PyObject *__pyx_n_s_mask_h; - PyObject *__pyx_n_s_mask_w; - PyObject *__pyx_n_s_math; - PyObject *__pyx_n_s_maximum; - PyObject *__pyx_n_s_merge; - PyObject *__pyx_n_s_mixup; - PyObject *__pyx_n_s_mod; - PyObject *__pyx_n_s_n; - PyObject *__pyx_n_s_name; - PyObject *__pyx_n_s_new_shape; - PyObject *__pyx_n_s_new_unpad; - PyObject *__pyx_n_s_np; - PyObject *__pyx_n_s_numpy; - PyObject *__pyx_n_s_p; - PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils; - PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2; - PyObject *__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3; - PyObject *__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4; - PyObject *__pyx_n_s_r; - PyObject *__pyx_n_s_randint; - PyObject *__pyx_n_s_random; - PyObject *__pyx_n_s_range; - PyObject *__pyx_n_s_ratio; - PyObject *__pyx_n_s_replicate; - PyObject *__pyx_n_s_resize; - PyObject *__pyx_n_s_result; - PyObject *__pyx_n_s_right; - PyObject *__pyx_n_s_round; - PyObject *__pyx_n_s_s; - PyObject *__pyx_n_s_sample; - PyObject *__pyx_n_s_sat; - PyObject *__pyx_n_s_scaleFill; - PyObject *__pyx_n_s_scales; - PyObject *__pyx_n_s_scaleup; - PyObject *__pyx_n_s_segments; - PyObject *__pyx_n_s_sgain; - PyObject *__pyx_n_s_shape; - PyObject *__pyx_n_s_size; - PyObject *__pyx_n_s_spec; - PyObject *__pyx_n_s_split; - PyObject *__pyx_n_s_src1; - PyObject *__pyx_n_s_src2; - PyObject *__pyx_n_s_stride; - PyObject *__pyx_n_s_test; - PyObject *__pyx_n_s_tileGridSize; - PyObject *__pyx_n_s_top; - PyObject *__pyx_n_s_uint8; - PyObject *__pyx_n_s_uniform; - PyObject *__pyx_n_s_val; - PyObject *__pyx_n_s_value; - PyObject *__pyx_n_s_vgain; - PyObject *__pyx_n_s_w; - PyObject *__pyx_n_s_w1; - PyObject *__pyx_n_s_w2; - PyObject *__pyx_n_s_wh_thr; - PyObject *__pyx_n_s_x; - PyObject *__pyx_n_s_x1; - PyObject *__pyx_n_s_x1a; - PyObject *__pyx_n_s_x1b; - PyObject *__pyx_n_s_x2; - PyObject *__pyx_n_s_x2a; - PyObject *__pyx_n_s_x2b; - 
PyObject *__pyx_n_s_xc; - PyObject *__pyx_n_s_xmax; - PyObject *__pyx_n_s_xmin; - PyObject *__pyx_n_s_y1; - PyObject *__pyx_n_s_y1a; - PyObject *__pyx_n_s_y1b; - PyObject *__pyx_n_s_y2; - PyObject *__pyx_n_s_y2a; - PyObject *__pyx_n_s_y2b; - PyObject *__pyx_n_s_yc; - PyObject *__pyx_n_s_ymax; - PyObject *__pyx_n_s_ymin; - PyObject *__pyx_n_s_yuv; - PyObject *__pyx_n_s_zeros; - PyObject *__pyx_float_0_0; - PyObject *__pyx_float_0_1; - PyObject *__pyx_float_0_5; - PyObject *__pyx_float_2_0; - PyObject *__pyx_float_0_03; - PyObject *__pyx_float_0_25; - PyObject *__pyx_float_0_30; - PyObject *__pyx_float_0_60; - PyObject *__pyx_float_32_0; - PyObject *__pyx_float_0_125; - PyObject *__pyx_float_1eneg_16; - PyObject *__pyx_float_0_0625; - PyObject *__pyx_float_0_03125; - PyObject *__pyx_int_0; - PyObject *__pyx_int_1; - PyObject *__pyx_int_2; - PyObject *__pyx_int_3; - PyObject *__pyx_int_5; - PyObject *__pyx_int_8; - PyObject *__pyx_int_32; - PyObject *__pyx_int_64; - PyObject *__pyx_int_100; - PyObject *__pyx_int_114; - PyObject *__pyx_int_180; - PyObject *__pyx_int_191; - PyObject *__pyx_int_255; - PyObject *__pyx_int_256; - PyObject *__pyx_int_640; - PyObject *__pyx_int_neg_1; - PyObject *__pyx_tuple_; - PyObject *__pyx_slice__4; - PyObject *__pyx_slice__6; - PyObject *__pyx_slice__7; - PyObject *__pyx_tuple__2; - PyObject *__pyx_tuple__3; - PyObject *__pyx_tuple__5; - PyObject *__pyx_tuple__8; - PyObject *__pyx_tuple__9; - PyObject *__pyx_slice__11; - PyObject *__pyx_slice__12; - PyObject *__pyx_slice__14; - PyObject *__pyx_slice__16; - PyObject *__pyx_tuple__10; - PyObject *__pyx_tuple__13; - PyObject *__pyx_tuple__15; - PyObject *__pyx_tuple__17; - PyObject *__pyx_tuple__18; - PyObject *__pyx_tuple__19; - PyObject *__pyx_tuple__20; - PyObject *__pyx_tuple__23; - PyObject *__pyx_tuple__25; - PyObject *__pyx_tuple__26; - PyObject *__pyx_tuple__28; - PyObject *__pyx_tuple__29; - PyObject *__pyx_tuple__31; - PyObject *__pyx_tuple__33; - PyObject *__pyx_tuple__34; - PyObject *__pyx_tuple__36; - PyObject *__pyx_tuple__38; - PyObject *__pyx_tuple__40; - PyObject *__pyx_tuple__41; - PyObject *__pyx_tuple__43; - PyObject *__pyx_tuple__45; - PyObject *__pyx_codeobj__24; - PyObject *__pyx_codeobj__27; - PyObject *__pyx_codeobj__30; - PyObject *__pyx_codeobj__32; - PyObject *__pyx_codeobj__35; - PyObject *__pyx_codeobj__39; - PyObject *__pyx_codeobj__42; - PyObject *__pyx_codeobj__44; -} __pyx_mstate; - -#ifdef __cplusplus -namespace { - extern struct PyModuleDef __pyx_moduledef; -} /* anonymous namespace */ -#else -static struct PyModuleDef __pyx_moduledef; -#endif - -#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o)) - -#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef))) - -#define __pyx_m (PyState_FindModule(&__pyx_moduledef)) -#endif -/* #### Code section: module_state_clear ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_clear(PyObject *m) { - __pyx_mstate *clear_module_state = __pyx_mstate(m); - if (!clear_module_state) return 0; - Py_CLEAR(clear_module_state->__pyx_d); - Py_CLEAR(clear_module_state->__pyx_b); - Py_CLEAR(clear_module_state->__pyx_cython_runtime); - Py_CLEAR(clear_module_state->__pyx_empty_tuple); - Py_CLEAR(clear_module_state->__pyx_empty_bytes); - Py_CLEAR(clear_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_CLEAR(clear_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_CLEAR(clear_module_state->__pyx_FusedFunctionType); - #endif - 
Py_CLEAR(clear_module_state->__pyx_n_s_BORDER_CONSTANT); - Py_CLEAR(clear_module_state->__pyx_n_s_COLOR_BGR2HSV); - Py_CLEAR(clear_module_state->__pyx_n_s_COLOR_BGR2YUV); - Py_CLEAR(clear_module_state->__pyx_n_s_COLOR_HSV2BGR); - Py_CLEAR(clear_module_state->__pyx_n_s_COLOR_RGB2YUV); - Py_CLEAR(clear_module_state->__pyx_n_s_COLOR_YUV2BGR); - Py_CLEAR(clear_module_state->__pyx_n_s_COLOR_YUV2RGB); - Py_CLEAR(clear_module_state->__pyx_n_s_FILLED); - Py_CLEAR(clear_module_state->__pyx_n_s_INTER_LINEAR); - Py_CLEAR(clear_module_state->__pyx_n_s_LOGGER); - Py_CLEAR(clear_module_state->__pyx_n_s_LUT); - Py_CLEAR(clear_module_state->__pyx_n_s_T); - Py_CLEAR(clear_module_state->__pyx_n_s__21); - Py_CLEAR(clear_module_state->__pyx_kp_u__22); - Py_CLEAR(clear_module_state->__pyx_n_s__37); - Py_CLEAR(clear_module_state->__pyx_n_s__46); - Py_CLEAR(clear_module_state->__pyx_n_s_all); - Py_CLEAR(clear_module_state->__pyx_n_s_append); - Py_CLEAR(clear_module_state->__pyx_n_s_apply); - Py_CLEAR(clear_module_state->__pyx_n_s_ar); - Py_CLEAR(clear_module_state->__pyx_n_s_ar_thr); - Py_CLEAR(clear_module_state->__pyx_n_s_arange); - Py_CLEAR(clear_module_state->__pyx_n_s_area_thr); - Py_CLEAR(clear_module_state->__pyx_n_s_argsort); - Py_CLEAR(clear_module_state->__pyx_n_s_array); - Py_CLEAR(clear_module_state->__pyx_n_s_astype); - Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines); - Py_CLEAR(clear_module_state->__pyx_n_s_augment_hsv); - Py_CLEAR(clear_module_state->__pyx_n_s_auto); - Py_CLEAR(clear_module_state->__pyx_n_s_axis); - Py_CLEAR(clear_module_state->__pyx_n_s_bbox_ioa); - Py_CLEAR(clear_module_state->__pyx_n_s_beta); - Py_CLEAR(clear_module_state->__pyx_n_s_bgr); - Py_CLEAR(clear_module_state->__pyx_n_s_bh); - Py_CLEAR(clear_module_state->__pyx_n_s_bitwise_and); - Py_CLEAR(clear_module_state->__pyx_n_s_bottom); - Py_CLEAR(clear_module_state->__pyx_n_s_box); - Py_CLEAR(clear_module_state->__pyx_n_s_box1); - Py_CLEAR(clear_module_state->__pyx_n_s_box2); - Py_CLEAR(clear_module_state->__pyx_n_s_box_candidates); - Py_CLEAR(clear_module_state->__pyx_n_s_boxes); - Py_CLEAR(clear_module_state->__pyx_n_s_bw); - Py_CLEAR(clear_module_state->__pyx_n_s_c); - Py_CLEAR(clear_module_state->__pyx_n_s_clahe); - Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem); - Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback); - Py_CLEAR(clear_module_state->__pyx_n_s_clip); - Py_CLEAR(clear_module_state->__pyx_n_s_clipLimit); - Py_CLEAR(clear_module_state->__pyx_n_s_color); - Py_CLEAR(clear_module_state->__pyx_n_s_colorstr); - Py_CLEAR(clear_module_state->__pyx_n_s_concatenate); - Py_CLEAR(clear_module_state->__pyx_n_s_copyMakeBorder); - Py_CLEAR(clear_module_state->__pyx_n_s_copy_paste); - Py_CLEAR(clear_module_state->__pyx_n_s_createCLAHE); - Py_CLEAR(clear_module_state->__pyx_n_s_cutout); - Py_CLEAR(clear_module_state->__pyx_n_s_cv2); - Py_CLEAR(clear_module_state->__pyx_n_s_cvtColor); - Py_CLEAR(clear_module_state->__pyx_n_s_dh); - Py_CLEAR(clear_module_state->__pyx_n_s_drawContours); - Py_CLEAR(clear_module_state->__pyx_n_s_dst); - Py_CLEAR(clear_module_state->__pyx_n_s_dtype); - Py_CLEAR(clear_module_state->__pyx_n_s_dw); - Py_CLEAR(clear_module_state->__pyx_n_s_eps); - Py_CLEAR(clear_module_state->__pyx_n_s_equalizeHist); - Py_CLEAR(clear_module_state->__pyx_n_s_flip); - Py_CLEAR(clear_module_state->__pyx_n_s_float32); - Py_CLEAR(clear_module_state->__pyx_n_s_h); - Py_CLEAR(clear_module_state->__pyx_n_s_h1); - Py_CLEAR(clear_module_state->__pyx_n_s_h2); - Py_CLEAR(clear_module_state->__pyx_n_s_hgain); 
- Py_CLEAR(clear_module_state->__pyx_n_s_hist_equalize); - Py_CLEAR(clear_module_state->__pyx_n_s_hue); - Py_CLEAR(clear_module_state->__pyx_n_s_i); - Py_CLEAR(clear_module_state->__pyx_n_s_im); - Py_CLEAR(clear_module_state->__pyx_n_s_im2); - Py_CLEAR(clear_module_state->__pyx_n_s_im_hsv); - Py_CLEAR(clear_module_state->__pyx_n_s_im_new); - Py_CLEAR(clear_module_state->__pyx_n_s_import); - Py_CLEAR(clear_module_state->__pyx_n_s_initializing); - Py_CLEAR(clear_module_state->__pyx_n_s_int32); - Py_CLEAR(clear_module_state->__pyx_n_s_interpolation); - Py_CLEAR(clear_module_state->__pyx_n_s_ioa); - Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine); - Py_CLEAR(clear_module_state->__pyx_n_s_j); - Py_CLEAR(clear_module_state->__pyx_n_s_k); - Py_CLEAR(clear_module_state->__pyx_n_s_l); - Py_CLEAR(clear_module_state->__pyx_n_s_labels); - Py_CLEAR(clear_module_state->__pyx_n_s_labels2); - Py_CLEAR(clear_module_state->__pyx_n_s_left); - Py_CLEAR(clear_module_state->__pyx_n_s_letterbox); - Py_CLEAR(clear_module_state->__pyx_n_s_lut_hue); - Py_CLEAR(clear_module_state->__pyx_n_s_lut_sat); - Py_CLEAR(clear_module_state->__pyx_n_s_lut_val); - Py_CLEAR(clear_module_state->__pyx_n_s_main); - Py_CLEAR(clear_module_state->__pyx_n_s_mask_h); - Py_CLEAR(clear_module_state->__pyx_n_s_mask_w); - Py_CLEAR(clear_module_state->__pyx_n_s_math); - Py_CLEAR(clear_module_state->__pyx_n_s_maximum); - Py_CLEAR(clear_module_state->__pyx_n_s_merge); - Py_CLEAR(clear_module_state->__pyx_n_s_mixup); - Py_CLEAR(clear_module_state->__pyx_n_s_mod); - Py_CLEAR(clear_module_state->__pyx_n_s_n); - Py_CLEAR(clear_module_state->__pyx_n_s_name); - Py_CLEAR(clear_module_state->__pyx_n_s_new_shape); - Py_CLEAR(clear_module_state->__pyx_n_s_new_unpad); - Py_CLEAR(clear_module_state->__pyx_n_s_np); - Py_CLEAR(clear_module_state->__pyx_n_s_numpy); - Py_CLEAR(clear_module_state->__pyx_n_s_p); - Py_CLEAR(clear_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils); - Py_CLEAR(clear_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); - Py_CLEAR(clear_module_state->__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3); - Py_CLEAR(clear_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4); - Py_CLEAR(clear_module_state->__pyx_n_s_r); - Py_CLEAR(clear_module_state->__pyx_n_s_randint); - Py_CLEAR(clear_module_state->__pyx_n_s_random); - Py_CLEAR(clear_module_state->__pyx_n_s_range); - Py_CLEAR(clear_module_state->__pyx_n_s_ratio); - Py_CLEAR(clear_module_state->__pyx_n_s_replicate); - Py_CLEAR(clear_module_state->__pyx_n_s_resize); - Py_CLEAR(clear_module_state->__pyx_n_s_result); - Py_CLEAR(clear_module_state->__pyx_n_s_right); - Py_CLEAR(clear_module_state->__pyx_n_s_round); - Py_CLEAR(clear_module_state->__pyx_n_s_s); - Py_CLEAR(clear_module_state->__pyx_n_s_sample); - Py_CLEAR(clear_module_state->__pyx_n_s_sat); - Py_CLEAR(clear_module_state->__pyx_n_s_scaleFill); - Py_CLEAR(clear_module_state->__pyx_n_s_scales); - Py_CLEAR(clear_module_state->__pyx_n_s_scaleup); - Py_CLEAR(clear_module_state->__pyx_n_s_segments); - Py_CLEAR(clear_module_state->__pyx_n_s_sgain); - Py_CLEAR(clear_module_state->__pyx_n_s_shape); - Py_CLEAR(clear_module_state->__pyx_n_s_size); - Py_CLEAR(clear_module_state->__pyx_n_s_spec); - Py_CLEAR(clear_module_state->__pyx_n_s_split); - Py_CLEAR(clear_module_state->__pyx_n_s_src1); - Py_CLEAR(clear_module_state->__pyx_n_s_src2); - Py_CLEAR(clear_module_state->__pyx_n_s_stride); - Py_CLEAR(clear_module_state->__pyx_n_s_test); - Py_CLEAR(clear_module_state->__pyx_n_s_tileGridSize); - 
Py_CLEAR(clear_module_state->__pyx_n_s_top); - Py_CLEAR(clear_module_state->__pyx_n_s_uint8); - Py_CLEAR(clear_module_state->__pyx_n_s_uniform); - Py_CLEAR(clear_module_state->__pyx_n_s_val); - Py_CLEAR(clear_module_state->__pyx_n_s_value); - Py_CLEAR(clear_module_state->__pyx_n_s_vgain); - Py_CLEAR(clear_module_state->__pyx_n_s_w); - Py_CLEAR(clear_module_state->__pyx_n_s_w1); - Py_CLEAR(clear_module_state->__pyx_n_s_w2); - Py_CLEAR(clear_module_state->__pyx_n_s_wh_thr); - Py_CLEAR(clear_module_state->__pyx_n_s_x); - Py_CLEAR(clear_module_state->__pyx_n_s_x1); - Py_CLEAR(clear_module_state->__pyx_n_s_x1a); - Py_CLEAR(clear_module_state->__pyx_n_s_x1b); - Py_CLEAR(clear_module_state->__pyx_n_s_x2); - Py_CLEAR(clear_module_state->__pyx_n_s_x2a); - Py_CLEAR(clear_module_state->__pyx_n_s_x2b); - Py_CLEAR(clear_module_state->__pyx_n_s_xc); - Py_CLEAR(clear_module_state->__pyx_n_s_xmax); - Py_CLEAR(clear_module_state->__pyx_n_s_xmin); - Py_CLEAR(clear_module_state->__pyx_n_s_y1); - Py_CLEAR(clear_module_state->__pyx_n_s_y1a); - Py_CLEAR(clear_module_state->__pyx_n_s_y1b); - Py_CLEAR(clear_module_state->__pyx_n_s_y2); - Py_CLEAR(clear_module_state->__pyx_n_s_y2a); - Py_CLEAR(clear_module_state->__pyx_n_s_y2b); - Py_CLEAR(clear_module_state->__pyx_n_s_yc); - Py_CLEAR(clear_module_state->__pyx_n_s_ymax); - Py_CLEAR(clear_module_state->__pyx_n_s_ymin); - Py_CLEAR(clear_module_state->__pyx_n_s_yuv); - Py_CLEAR(clear_module_state->__pyx_n_s_zeros); - Py_CLEAR(clear_module_state->__pyx_float_0_0); - Py_CLEAR(clear_module_state->__pyx_float_0_1); - Py_CLEAR(clear_module_state->__pyx_float_0_5); - Py_CLEAR(clear_module_state->__pyx_float_2_0); - Py_CLEAR(clear_module_state->__pyx_float_0_03); - Py_CLEAR(clear_module_state->__pyx_float_0_25); - Py_CLEAR(clear_module_state->__pyx_float_0_30); - Py_CLEAR(clear_module_state->__pyx_float_0_60); - Py_CLEAR(clear_module_state->__pyx_float_32_0); - Py_CLEAR(clear_module_state->__pyx_float_0_125); - Py_CLEAR(clear_module_state->__pyx_float_1eneg_16); - Py_CLEAR(clear_module_state->__pyx_float_0_0625); - Py_CLEAR(clear_module_state->__pyx_float_0_03125); - Py_CLEAR(clear_module_state->__pyx_int_0); - Py_CLEAR(clear_module_state->__pyx_int_1); - Py_CLEAR(clear_module_state->__pyx_int_2); - Py_CLEAR(clear_module_state->__pyx_int_3); - Py_CLEAR(clear_module_state->__pyx_int_5); - Py_CLEAR(clear_module_state->__pyx_int_8); - Py_CLEAR(clear_module_state->__pyx_int_32); - Py_CLEAR(clear_module_state->__pyx_int_64); - Py_CLEAR(clear_module_state->__pyx_int_100); - Py_CLEAR(clear_module_state->__pyx_int_114); - Py_CLEAR(clear_module_state->__pyx_int_180); - Py_CLEAR(clear_module_state->__pyx_int_191); - Py_CLEAR(clear_module_state->__pyx_int_255); - Py_CLEAR(clear_module_state->__pyx_int_256); - Py_CLEAR(clear_module_state->__pyx_int_640); - Py_CLEAR(clear_module_state->__pyx_int_neg_1); - Py_CLEAR(clear_module_state->__pyx_tuple_); - Py_CLEAR(clear_module_state->__pyx_slice__4); - Py_CLEAR(clear_module_state->__pyx_slice__6); - Py_CLEAR(clear_module_state->__pyx_slice__7); - Py_CLEAR(clear_module_state->__pyx_tuple__2); - Py_CLEAR(clear_module_state->__pyx_tuple__3); - Py_CLEAR(clear_module_state->__pyx_tuple__5); - Py_CLEAR(clear_module_state->__pyx_tuple__8); - Py_CLEAR(clear_module_state->__pyx_tuple__9); - Py_CLEAR(clear_module_state->__pyx_slice__11); - Py_CLEAR(clear_module_state->__pyx_slice__12); - Py_CLEAR(clear_module_state->__pyx_slice__14); - Py_CLEAR(clear_module_state->__pyx_slice__16); - Py_CLEAR(clear_module_state->__pyx_tuple__10); - 
Py_CLEAR(clear_module_state->__pyx_tuple__13); - Py_CLEAR(clear_module_state->__pyx_tuple__15); - Py_CLEAR(clear_module_state->__pyx_tuple__17); - Py_CLEAR(clear_module_state->__pyx_tuple__18); - Py_CLEAR(clear_module_state->__pyx_tuple__19); - Py_CLEAR(clear_module_state->__pyx_tuple__20); - Py_CLEAR(clear_module_state->__pyx_tuple__23); - Py_CLEAR(clear_module_state->__pyx_tuple__25); - Py_CLEAR(clear_module_state->__pyx_tuple__26); - Py_CLEAR(clear_module_state->__pyx_tuple__28); - Py_CLEAR(clear_module_state->__pyx_tuple__29); - Py_CLEAR(clear_module_state->__pyx_tuple__31); - Py_CLEAR(clear_module_state->__pyx_tuple__33); - Py_CLEAR(clear_module_state->__pyx_tuple__34); - Py_CLEAR(clear_module_state->__pyx_tuple__36); - Py_CLEAR(clear_module_state->__pyx_tuple__38); - Py_CLEAR(clear_module_state->__pyx_tuple__40); - Py_CLEAR(clear_module_state->__pyx_tuple__41); - Py_CLEAR(clear_module_state->__pyx_tuple__43); - Py_CLEAR(clear_module_state->__pyx_tuple__45); - Py_CLEAR(clear_module_state->__pyx_codeobj__24); - Py_CLEAR(clear_module_state->__pyx_codeobj__27); - Py_CLEAR(clear_module_state->__pyx_codeobj__30); - Py_CLEAR(clear_module_state->__pyx_codeobj__32); - Py_CLEAR(clear_module_state->__pyx_codeobj__35); - Py_CLEAR(clear_module_state->__pyx_codeobj__39); - Py_CLEAR(clear_module_state->__pyx_codeobj__42); - Py_CLEAR(clear_module_state->__pyx_codeobj__44); - return 0; -} -#endif -/* #### Code section: module_state_traverse ### */ -#if CYTHON_USE_MODULE_STATE -static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { - __pyx_mstate *traverse_module_state = __pyx_mstate(m); - if (!traverse_module_state) return 0; - Py_VISIT(traverse_module_state->__pyx_d); - Py_VISIT(traverse_module_state->__pyx_b); - Py_VISIT(traverse_module_state->__pyx_cython_runtime); - Py_VISIT(traverse_module_state->__pyx_empty_tuple); - Py_VISIT(traverse_module_state->__pyx_empty_bytes); - Py_VISIT(traverse_module_state->__pyx_empty_unicode); - #ifdef __Pyx_CyFunction_USED - Py_VISIT(traverse_module_state->__pyx_CyFunctionType); - #endif - #ifdef __Pyx_FusedFunction_USED - Py_VISIT(traverse_module_state->__pyx_FusedFunctionType); - #endif - Py_VISIT(traverse_module_state->__pyx_n_s_BORDER_CONSTANT); - Py_VISIT(traverse_module_state->__pyx_n_s_COLOR_BGR2HSV); - Py_VISIT(traverse_module_state->__pyx_n_s_COLOR_BGR2YUV); - Py_VISIT(traverse_module_state->__pyx_n_s_COLOR_HSV2BGR); - Py_VISIT(traverse_module_state->__pyx_n_s_COLOR_RGB2YUV); - Py_VISIT(traverse_module_state->__pyx_n_s_COLOR_YUV2BGR); - Py_VISIT(traverse_module_state->__pyx_n_s_COLOR_YUV2RGB); - Py_VISIT(traverse_module_state->__pyx_n_s_FILLED); - Py_VISIT(traverse_module_state->__pyx_n_s_INTER_LINEAR); - Py_VISIT(traverse_module_state->__pyx_n_s_LOGGER); - Py_VISIT(traverse_module_state->__pyx_n_s_LUT); - Py_VISIT(traverse_module_state->__pyx_n_s_T); - Py_VISIT(traverse_module_state->__pyx_n_s__21); - Py_VISIT(traverse_module_state->__pyx_kp_u__22); - Py_VISIT(traverse_module_state->__pyx_n_s__37); - Py_VISIT(traverse_module_state->__pyx_n_s__46); - Py_VISIT(traverse_module_state->__pyx_n_s_all); - Py_VISIT(traverse_module_state->__pyx_n_s_append); - Py_VISIT(traverse_module_state->__pyx_n_s_apply); - Py_VISIT(traverse_module_state->__pyx_n_s_ar); - Py_VISIT(traverse_module_state->__pyx_n_s_ar_thr); - Py_VISIT(traverse_module_state->__pyx_n_s_arange); - Py_VISIT(traverse_module_state->__pyx_n_s_area_thr); - Py_VISIT(traverse_module_state->__pyx_n_s_argsort); - Py_VISIT(traverse_module_state->__pyx_n_s_array); - 
Py_VISIT(traverse_module_state->__pyx_n_s_astype); - Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines); - Py_VISIT(traverse_module_state->__pyx_n_s_augment_hsv); - Py_VISIT(traverse_module_state->__pyx_n_s_auto); - Py_VISIT(traverse_module_state->__pyx_n_s_axis); - Py_VISIT(traverse_module_state->__pyx_n_s_bbox_ioa); - Py_VISIT(traverse_module_state->__pyx_n_s_beta); - Py_VISIT(traverse_module_state->__pyx_n_s_bgr); - Py_VISIT(traverse_module_state->__pyx_n_s_bh); - Py_VISIT(traverse_module_state->__pyx_n_s_bitwise_and); - Py_VISIT(traverse_module_state->__pyx_n_s_bottom); - Py_VISIT(traverse_module_state->__pyx_n_s_box); - Py_VISIT(traverse_module_state->__pyx_n_s_box1); - Py_VISIT(traverse_module_state->__pyx_n_s_box2); - Py_VISIT(traverse_module_state->__pyx_n_s_box_candidates); - Py_VISIT(traverse_module_state->__pyx_n_s_boxes); - Py_VISIT(traverse_module_state->__pyx_n_s_bw); - Py_VISIT(traverse_module_state->__pyx_n_s_c); - Py_VISIT(traverse_module_state->__pyx_n_s_clahe); - Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem); - Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback); - Py_VISIT(traverse_module_state->__pyx_n_s_clip); - Py_VISIT(traverse_module_state->__pyx_n_s_clipLimit); - Py_VISIT(traverse_module_state->__pyx_n_s_color); - Py_VISIT(traverse_module_state->__pyx_n_s_colorstr); - Py_VISIT(traverse_module_state->__pyx_n_s_concatenate); - Py_VISIT(traverse_module_state->__pyx_n_s_copyMakeBorder); - Py_VISIT(traverse_module_state->__pyx_n_s_copy_paste); - Py_VISIT(traverse_module_state->__pyx_n_s_createCLAHE); - Py_VISIT(traverse_module_state->__pyx_n_s_cutout); - Py_VISIT(traverse_module_state->__pyx_n_s_cv2); - Py_VISIT(traverse_module_state->__pyx_n_s_cvtColor); - Py_VISIT(traverse_module_state->__pyx_n_s_dh); - Py_VISIT(traverse_module_state->__pyx_n_s_drawContours); - Py_VISIT(traverse_module_state->__pyx_n_s_dst); - Py_VISIT(traverse_module_state->__pyx_n_s_dtype); - Py_VISIT(traverse_module_state->__pyx_n_s_dw); - Py_VISIT(traverse_module_state->__pyx_n_s_eps); - Py_VISIT(traverse_module_state->__pyx_n_s_equalizeHist); - Py_VISIT(traverse_module_state->__pyx_n_s_flip); - Py_VISIT(traverse_module_state->__pyx_n_s_float32); - Py_VISIT(traverse_module_state->__pyx_n_s_h); - Py_VISIT(traverse_module_state->__pyx_n_s_h1); - Py_VISIT(traverse_module_state->__pyx_n_s_h2); - Py_VISIT(traverse_module_state->__pyx_n_s_hgain); - Py_VISIT(traverse_module_state->__pyx_n_s_hist_equalize); - Py_VISIT(traverse_module_state->__pyx_n_s_hue); - Py_VISIT(traverse_module_state->__pyx_n_s_i); - Py_VISIT(traverse_module_state->__pyx_n_s_im); - Py_VISIT(traverse_module_state->__pyx_n_s_im2); - Py_VISIT(traverse_module_state->__pyx_n_s_im_hsv); - Py_VISIT(traverse_module_state->__pyx_n_s_im_new); - Py_VISIT(traverse_module_state->__pyx_n_s_import); - Py_VISIT(traverse_module_state->__pyx_n_s_initializing); - Py_VISIT(traverse_module_state->__pyx_n_s_int32); - Py_VISIT(traverse_module_state->__pyx_n_s_interpolation); - Py_VISIT(traverse_module_state->__pyx_n_s_ioa); - Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine); - Py_VISIT(traverse_module_state->__pyx_n_s_j); - Py_VISIT(traverse_module_state->__pyx_n_s_k); - Py_VISIT(traverse_module_state->__pyx_n_s_l); - Py_VISIT(traverse_module_state->__pyx_n_s_labels); - Py_VISIT(traverse_module_state->__pyx_n_s_labels2); - Py_VISIT(traverse_module_state->__pyx_n_s_left); - Py_VISIT(traverse_module_state->__pyx_n_s_letterbox); - Py_VISIT(traverse_module_state->__pyx_n_s_lut_hue); - 
Py_VISIT(traverse_module_state->__pyx_n_s_lut_sat); - Py_VISIT(traverse_module_state->__pyx_n_s_lut_val); - Py_VISIT(traverse_module_state->__pyx_n_s_main); - Py_VISIT(traverse_module_state->__pyx_n_s_mask_h); - Py_VISIT(traverse_module_state->__pyx_n_s_mask_w); - Py_VISIT(traverse_module_state->__pyx_n_s_math); - Py_VISIT(traverse_module_state->__pyx_n_s_maximum); - Py_VISIT(traverse_module_state->__pyx_n_s_merge); - Py_VISIT(traverse_module_state->__pyx_n_s_mixup); - Py_VISIT(traverse_module_state->__pyx_n_s_mod); - Py_VISIT(traverse_module_state->__pyx_n_s_n); - Py_VISIT(traverse_module_state->__pyx_n_s_name); - Py_VISIT(traverse_module_state->__pyx_n_s_new_shape); - Py_VISIT(traverse_module_state->__pyx_n_s_new_unpad); - Py_VISIT(traverse_module_state->__pyx_n_s_np); - Py_VISIT(traverse_module_state->__pyx_n_s_numpy); - Py_VISIT(traverse_module_state->__pyx_n_s_p); - Py_VISIT(traverse_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils); - Py_VISIT(traverse_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2); - Py_VISIT(traverse_module_state->__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3); - Py_VISIT(traverse_module_state->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4); - Py_VISIT(traverse_module_state->__pyx_n_s_r); - Py_VISIT(traverse_module_state->__pyx_n_s_randint); - Py_VISIT(traverse_module_state->__pyx_n_s_random); - Py_VISIT(traverse_module_state->__pyx_n_s_range); - Py_VISIT(traverse_module_state->__pyx_n_s_ratio); - Py_VISIT(traverse_module_state->__pyx_n_s_replicate); - Py_VISIT(traverse_module_state->__pyx_n_s_resize); - Py_VISIT(traverse_module_state->__pyx_n_s_result); - Py_VISIT(traverse_module_state->__pyx_n_s_right); - Py_VISIT(traverse_module_state->__pyx_n_s_round); - Py_VISIT(traverse_module_state->__pyx_n_s_s); - Py_VISIT(traverse_module_state->__pyx_n_s_sample); - Py_VISIT(traverse_module_state->__pyx_n_s_sat); - Py_VISIT(traverse_module_state->__pyx_n_s_scaleFill); - Py_VISIT(traverse_module_state->__pyx_n_s_scales); - Py_VISIT(traverse_module_state->__pyx_n_s_scaleup); - Py_VISIT(traverse_module_state->__pyx_n_s_segments); - Py_VISIT(traverse_module_state->__pyx_n_s_sgain); - Py_VISIT(traverse_module_state->__pyx_n_s_shape); - Py_VISIT(traverse_module_state->__pyx_n_s_size); - Py_VISIT(traverse_module_state->__pyx_n_s_spec); - Py_VISIT(traverse_module_state->__pyx_n_s_split); - Py_VISIT(traverse_module_state->__pyx_n_s_src1); - Py_VISIT(traverse_module_state->__pyx_n_s_src2); - Py_VISIT(traverse_module_state->__pyx_n_s_stride); - Py_VISIT(traverse_module_state->__pyx_n_s_test); - Py_VISIT(traverse_module_state->__pyx_n_s_tileGridSize); - Py_VISIT(traverse_module_state->__pyx_n_s_top); - Py_VISIT(traverse_module_state->__pyx_n_s_uint8); - Py_VISIT(traverse_module_state->__pyx_n_s_uniform); - Py_VISIT(traverse_module_state->__pyx_n_s_val); - Py_VISIT(traverse_module_state->__pyx_n_s_value); - Py_VISIT(traverse_module_state->__pyx_n_s_vgain); - Py_VISIT(traverse_module_state->__pyx_n_s_w); - Py_VISIT(traverse_module_state->__pyx_n_s_w1); - Py_VISIT(traverse_module_state->__pyx_n_s_w2); - Py_VISIT(traverse_module_state->__pyx_n_s_wh_thr); - Py_VISIT(traverse_module_state->__pyx_n_s_x); - Py_VISIT(traverse_module_state->__pyx_n_s_x1); - Py_VISIT(traverse_module_state->__pyx_n_s_x1a); - Py_VISIT(traverse_module_state->__pyx_n_s_x1b); - Py_VISIT(traverse_module_state->__pyx_n_s_x2); - Py_VISIT(traverse_module_state->__pyx_n_s_x2a); - Py_VISIT(traverse_module_state->__pyx_n_s_x2b); - Py_VISIT(traverse_module_state->__pyx_n_s_xc); - 
Py_VISIT(traverse_module_state->__pyx_n_s_xmax); - Py_VISIT(traverse_module_state->__pyx_n_s_xmin); - Py_VISIT(traverse_module_state->__pyx_n_s_y1); - Py_VISIT(traverse_module_state->__pyx_n_s_y1a); - Py_VISIT(traverse_module_state->__pyx_n_s_y1b); - Py_VISIT(traverse_module_state->__pyx_n_s_y2); - Py_VISIT(traverse_module_state->__pyx_n_s_y2a); - Py_VISIT(traverse_module_state->__pyx_n_s_y2b); - Py_VISIT(traverse_module_state->__pyx_n_s_yc); - Py_VISIT(traverse_module_state->__pyx_n_s_ymax); - Py_VISIT(traverse_module_state->__pyx_n_s_ymin); - Py_VISIT(traverse_module_state->__pyx_n_s_yuv); - Py_VISIT(traverse_module_state->__pyx_n_s_zeros); - Py_VISIT(traverse_module_state->__pyx_float_0_0); - Py_VISIT(traverse_module_state->__pyx_float_0_1); - Py_VISIT(traverse_module_state->__pyx_float_0_5); - Py_VISIT(traverse_module_state->__pyx_float_2_0); - Py_VISIT(traverse_module_state->__pyx_float_0_03); - Py_VISIT(traverse_module_state->__pyx_float_0_25); - Py_VISIT(traverse_module_state->__pyx_float_0_30); - Py_VISIT(traverse_module_state->__pyx_float_0_60); - Py_VISIT(traverse_module_state->__pyx_float_32_0); - Py_VISIT(traverse_module_state->__pyx_float_0_125); - Py_VISIT(traverse_module_state->__pyx_float_1eneg_16); - Py_VISIT(traverse_module_state->__pyx_float_0_0625); - Py_VISIT(traverse_module_state->__pyx_float_0_03125); - Py_VISIT(traverse_module_state->__pyx_int_0); - Py_VISIT(traverse_module_state->__pyx_int_1); - Py_VISIT(traverse_module_state->__pyx_int_2); - Py_VISIT(traverse_module_state->__pyx_int_3); - Py_VISIT(traverse_module_state->__pyx_int_5); - Py_VISIT(traverse_module_state->__pyx_int_8); - Py_VISIT(traverse_module_state->__pyx_int_32); - Py_VISIT(traverse_module_state->__pyx_int_64); - Py_VISIT(traverse_module_state->__pyx_int_100); - Py_VISIT(traverse_module_state->__pyx_int_114); - Py_VISIT(traverse_module_state->__pyx_int_180); - Py_VISIT(traverse_module_state->__pyx_int_191); - Py_VISIT(traverse_module_state->__pyx_int_255); - Py_VISIT(traverse_module_state->__pyx_int_256); - Py_VISIT(traverse_module_state->__pyx_int_640); - Py_VISIT(traverse_module_state->__pyx_int_neg_1); - Py_VISIT(traverse_module_state->__pyx_tuple_); - Py_VISIT(traverse_module_state->__pyx_slice__4); - Py_VISIT(traverse_module_state->__pyx_slice__6); - Py_VISIT(traverse_module_state->__pyx_slice__7); - Py_VISIT(traverse_module_state->__pyx_tuple__2); - Py_VISIT(traverse_module_state->__pyx_tuple__3); - Py_VISIT(traverse_module_state->__pyx_tuple__5); - Py_VISIT(traverse_module_state->__pyx_tuple__8); - Py_VISIT(traverse_module_state->__pyx_tuple__9); - Py_VISIT(traverse_module_state->__pyx_slice__11); - Py_VISIT(traverse_module_state->__pyx_slice__12); - Py_VISIT(traverse_module_state->__pyx_slice__14); - Py_VISIT(traverse_module_state->__pyx_slice__16); - Py_VISIT(traverse_module_state->__pyx_tuple__10); - Py_VISIT(traverse_module_state->__pyx_tuple__13); - Py_VISIT(traverse_module_state->__pyx_tuple__15); - Py_VISIT(traverse_module_state->__pyx_tuple__17); - Py_VISIT(traverse_module_state->__pyx_tuple__18); - Py_VISIT(traverse_module_state->__pyx_tuple__19); - Py_VISIT(traverse_module_state->__pyx_tuple__20); - Py_VISIT(traverse_module_state->__pyx_tuple__23); - Py_VISIT(traverse_module_state->__pyx_tuple__25); - Py_VISIT(traverse_module_state->__pyx_tuple__26); - Py_VISIT(traverse_module_state->__pyx_tuple__28); - Py_VISIT(traverse_module_state->__pyx_tuple__29); - Py_VISIT(traverse_module_state->__pyx_tuple__31); - Py_VISIT(traverse_module_state->__pyx_tuple__33); - 
Py_VISIT(traverse_module_state->__pyx_tuple__34); - Py_VISIT(traverse_module_state->__pyx_tuple__36); - Py_VISIT(traverse_module_state->__pyx_tuple__38); - Py_VISIT(traverse_module_state->__pyx_tuple__40); - Py_VISIT(traverse_module_state->__pyx_tuple__41); - Py_VISIT(traverse_module_state->__pyx_tuple__43); - Py_VISIT(traverse_module_state->__pyx_tuple__45); - Py_VISIT(traverse_module_state->__pyx_codeobj__24); - Py_VISIT(traverse_module_state->__pyx_codeobj__27); - Py_VISIT(traverse_module_state->__pyx_codeobj__30); - Py_VISIT(traverse_module_state->__pyx_codeobj__32); - Py_VISIT(traverse_module_state->__pyx_codeobj__35); - Py_VISIT(traverse_module_state->__pyx_codeobj__39); - Py_VISIT(traverse_module_state->__pyx_codeobj__42); - Py_VISIT(traverse_module_state->__pyx_codeobj__44); - return 0; -} -#endif -/* #### Code section: module_state_defines ### */ -#if CYTHON_USE_MODULE_STATE -#define __pyx_d __pyx_mstate_global->__pyx_d -#define __pyx_b __pyx_mstate_global->__pyx_b -#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime -#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple -#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes -#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode -#ifdef __Pyx_CyFunction_USED -#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType -#endif -#ifdef __Pyx_FusedFunction_USED -#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType -#endif -#define __pyx_n_s_BORDER_CONSTANT __pyx_mstate_global->__pyx_n_s_BORDER_CONSTANT -#define __pyx_n_s_COLOR_BGR2HSV __pyx_mstate_global->__pyx_n_s_COLOR_BGR2HSV -#define __pyx_n_s_COLOR_BGR2YUV __pyx_mstate_global->__pyx_n_s_COLOR_BGR2YUV -#define __pyx_n_s_COLOR_HSV2BGR __pyx_mstate_global->__pyx_n_s_COLOR_HSV2BGR -#define __pyx_n_s_COLOR_RGB2YUV __pyx_mstate_global->__pyx_n_s_COLOR_RGB2YUV -#define __pyx_n_s_COLOR_YUV2BGR __pyx_mstate_global->__pyx_n_s_COLOR_YUV2BGR -#define __pyx_n_s_COLOR_YUV2RGB __pyx_mstate_global->__pyx_n_s_COLOR_YUV2RGB -#define __pyx_n_s_FILLED __pyx_mstate_global->__pyx_n_s_FILLED -#define __pyx_n_s_INTER_LINEAR __pyx_mstate_global->__pyx_n_s_INTER_LINEAR -#define __pyx_n_s_LOGGER __pyx_mstate_global->__pyx_n_s_LOGGER -#define __pyx_n_s_LUT __pyx_mstate_global->__pyx_n_s_LUT -#define __pyx_n_s_T __pyx_mstate_global->__pyx_n_s_T -#define __pyx_n_s__21 __pyx_mstate_global->__pyx_n_s__21 -#define __pyx_kp_u__22 __pyx_mstate_global->__pyx_kp_u__22 -#define __pyx_n_s__37 __pyx_mstate_global->__pyx_n_s__37 -#define __pyx_n_s__46 __pyx_mstate_global->__pyx_n_s__46 -#define __pyx_n_s_all __pyx_mstate_global->__pyx_n_s_all -#define __pyx_n_s_append __pyx_mstate_global->__pyx_n_s_append -#define __pyx_n_s_apply __pyx_mstate_global->__pyx_n_s_apply -#define __pyx_n_s_ar __pyx_mstate_global->__pyx_n_s_ar -#define __pyx_n_s_ar_thr __pyx_mstate_global->__pyx_n_s_ar_thr -#define __pyx_n_s_arange __pyx_mstate_global->__pyx_n_s_arange -#define __pyx_n_s_area_thr __pyx_mstate_global->__pyx_n_s_area_thr -#define __pyx_n_s_argsort __pyx_mstate_global->__pyx_n_s_argsort -#define __pyx_n_s_array __pyx_mstate_global->__pyx_n_s_array -#define __pyx_n_s_astype __pyx_mstate_global->__pyx_n_s_astype -#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines -#define __pyx_n_s_augment_hsv __pyx_mstate_global->__pyx_n_s_augment_hsv -#define __pyx_n_s_auto __pyx_mstate_global->__pyx_n_s_auto -#define __pyx_n_s_axis __pyx_mstate_global->__pyx_n_s_axis -#define __pyx_n_s_bbox_ioa 
__pyx_mstate_global->__pyx_n_s_bbox_ioa -#define __pyx_n_s_beta __pyx_mstate_global->__pyx_n_s_beta -#define __pyx_n_s_bgr __pyx_mstate_global->__pyx_n_s_bgr -#define __pyx_n_s_bh __pyx_mstate_global->__pyx_n_s_bh -#define __pyx_n_s_bitwise_and __pyx_mstate_global->__pyx_n_s_bitwise_and -#define __pyx_n_s_bottom __pyx_mstate_global->__pyx_n_s_bottom -#define __pyx_n_s_box __pyx_mstate_global->__pyx_n_s_box -#define __pyx_n_s_box1 __pyx_mstate_global->__pyx_n_s_box1 -#define __pyx_n_s_box2 __pyx_mstate_global->__pyx_n_s_box2 -#define __pyx_n_s_box_candidates __pyx_mstate_global->__pyx_n_s_box_candidates -#define __pyx_n_s_boxes __pyx_mstate_global->__pyx_n_s_boxes -#define __pyx_n_s_bw __pyx_mstate_global->__pyx_n_s_bw -#define __pyx_n_s_c __pyx_mstate_global->__pyx_n_s_c -#define __pyx_n_s_clahe __pyx_mstate_global->__pyx_n_s_clahe -#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem -#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback -#define __pyx_n_s_clip __pyx_mstate_global->__pyx_n_s_clip -#define __pyx_n_s_clipLimit __pyx_mstate_global->__pyx_n_s_clipLimit -#define __pyx_n_s_color __pyx_mstate_global->__pyx_n_s_color -#define __pyx_n_s_colorstr __pyx_mstate_global->__pyx_n_s_colorstr -#define __pyx_n_s_concatenate __pyx_mstate_global->__pyx_n_s_concatenate -#define __pyx_n_s_copyMakeBorder __pyx_mstate_global->__pyx_n_s_copyMakeBorder -#define __pyx_n_s_copy_paste __pyx_mstate_global->__pyx_n_s_copy_paste -#define __pyx_n_s_createCLAHE __pyx_mstate_global->__pyx_n_s_createCLAHE -#define __pyx_n_s_cutout __pyx_mstate_global->__pyx_n_s_cutout -#define __pyx_n_s_cv2 __pyx_mstate_global->__pyx_n_s_cv2 -#define __pyx_n_s_cvtColor __pyx_mstate_global->__pyx_n_s_cvtColor -#define __pyx_n_s_dh __pyx_mstate_global->__pyx_n_s_dh -#define __pyx_n_s_drawContours __pyx_mstate_global->__pyx_n_s_drawContours -#define __pyx_n_s_dst __pyx_mstate_global->__pyx_n_s_dst -#define __pyx_n_s_dtype __pyx_mstate_global->__pyx_n_s_dtype -#define __pyx_n_s_dw __pyx_mstate_global->__pyx_n_s_dw -#define __pyx_n_s_eps __pyx_mstate_global->__pyx_n_s_eps -#define __pyx_n_s_equalizeHist __pyx_mstate_global->__pyx_n_s_equalizeHist -#define __pyx_n_s_flip __pyx_mstate_global->__pyx_n_s_flip -#define __pyx_n_s_float32 __pyx_mstate_global->__pyx_n_s_float32 -#define __pyx_n_s_h __pyx_mstate_global->__pyx_n_s_h -#define __pyx_n_s_h1 __pyx_mstate_global->__pyx_n_s_h1 -#define __pyx_n_s_h2 __pyx_mstate_global->__pyx_n_s_h2 -#define __pyx_n_s_hgain __pyx_mstate_global->__pyx_n_s_hgain -#define __pyx_n_s_hist_equalize __pyx_mstate_global->__pyx_n_s_hist_equalize -#define __pyx_n_s_hue __pyx_mstate_global->__pyx_n_s_hue -#define __pyx_n_s_i __pyx_mstate_global->__pyx_n_s_i -#define __pyx_n_s_im __pyx_mstate_global->__pyx_n_s_im -#define __pyx_n_s_im2 __pyx_mstate_global->__pyx_n_s_im2 -#define __pyx_n_s_im_hsv __pyx_mstate_global->__pyx_n_s_im_hsv -#define __pyx_n_s_im_new __pyx_mstate_global->__pyx_n_s_im_new -#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import -#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing -#define __pyx_n_s_int32 __pyx_mstate_global->__pyx_n_s_int32 -#define __pyx_n_s_interpolation __pyx_mstate_global->__pyx_n_s_interpolation -#define __pyx_n_s_ioa __pyx_mstate_global->__pyx_n_s_ioa -#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine -#define __pyx_n_s_j __pyx_mstate_global->__pyx_n_s_j -#define __pyx_n_s_k __pyx_mstate_global->__pyx_n_s_k -#define __pyx_n_s_l 
__pyx_mstate_global->__pyx_n_s_l -#define __pyx_n_s_labels __pyx_mstate_global->__pyx_n_s_labels -#define __pyx_n_s_labels2 __pyx_mstate_global->__pyx_n_s_labels2 -#define __pyx_n_s_left __pyx_mstate_global->__pyx_n_s_left -#define __pyx_n_s_letterbox __pyx_mstate_global->__pyx_n_s_letterbox -#define __pyx_n_s_lut_hue __pyx_mstate_global->__pyx_n_s_lut_hue -#define __pyx_n_s_lut_sat __pyx_mstate_global->__pyx_n_s_lut_sat -#define __pyx_n_s_lut_val __pyx_mstate_global->__pyx_n_s_lut_val -#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main -#define __pyx_n_s_mask_h __pyx_mstate_global->__pyx_n_s_mask_h -#define __pyx_n_s_mask_w __pyx_mstate_global->__pyx_n_s_mask_w -#define __pyx_n_s_math __pyx_mstate_global->__pyx_n_s_math -#define __pyx_n_s_maximum __pyx_mstate_global->__pyx_n_s_maximum -#define __pyx_n_s_merge __pyx_mstate_global->__pyx_n_s_merge -#define __pyx_n_s_mixup __pyx_mstate_global->__pyx_n_s_mixup -#define __pyx_n_s_mod __pyx_mstate_global->__pyx_n_s_mod -#define __pyx_n_s_n __pyx_mstate_global->__pyx_n_s_n -#define __pyx_n_s_name __pyx_mstate_global->__pyx_n_s_name -#define __pyx_n_s_new_shape __pyx_mstate_global->__pyx_n_s_new_shape -#define __pyx_n_s_new_unpad __pyx_mstate_global->__pyx_n_s_new_unpad -#define __pyx_n_s_np __pyx_mstate_global->__pyx_n_s_np -#define __pyx_n_s_numpy __pyx_mstate_global->__pyx_n_s_numpy -#define __pyx_n_s_p __pyx_mstate_global->__pyx_n_s_p -#define __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils __pyx_mstate_global->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils -#define __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2 __pyx_mstate_global->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2 -#define __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3 __pyx_mstate_global->__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3 -#define __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4 __pyx_mstate_global->__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4 -#define __pyx_n_s_r __pyx_mstate_global->__pyx_n_s_r -#define __pyx_n_s_randint __pyx_mstate_global->__pyx_n_s_randint -#define __pyx_n_s_random __pyx_mstate_global->__pyx_n_s_random -#define __pyx_n_s_range __pyx_mstate_global->__pyx_n_s_range -#define __pyx_n_s_ratio __pyx_mstate_global->__pyx_n_s_ratio -#define __pyx_n_s_replicate __pyx_mstate_global->__pyx_n_s_replicate -#define __pyx_n_s_resize __pyx_mstate_global->__pyx_n_s_resize -#define __pyx_n_s_result __pyx_mstate_global->__pyx_n_s_result -#define __pyx_n_s_right __pyx_mstate_global->__pyx_n_s_right -#define __pyx_n_s_round __pyx_mstate_global->__pyx_n_s_round -#define __pyx_n_s_s __pyx_mstate_global->__pyx_n_s_s -#define __pyx_n_s_sample __pyx_mstate_global->__pyx_n_s_sample -#define __pyx_n_s_sat __pyx_mstate_global->__pyx_n_s_sat -#define __pyx_n_s_scaleFill __pyx_mstate_global->__pyx_n_s_scaleFill -#define __pyx_n_s_scales __pyx_mstate_global->__pyx_n_s_scales -#define __pyx_n_s_scaleup __pyx_mstate_global->__pyx_n_s_scaleup -#define __pyx_n_s_segments __pyx_mstate_global->__pyx_n_s_segments -#define __pyx_n_s_sgain __pyx_mstate_global->__pyx_n_s_sgain -#define __pyx_n_s_shape __pyx_mstate_global->__pyx_n_s_shape -#define __pyx_n_s_size __pyx_mstate_global->__pyx_n_s_size -#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec -#define __pyx_n_s_split __pyx_mstate_global->__pyx_n_s_split -#define __pyx_n_s_src1 __pyx_mstate_global->__pyx_n_s_src1 -#define __pyx_n_s_src2 __pyx_mstate_global->__pyx_n_s_src2 -#define __pyx_n_s_stride __pyx_mstate_global->__pyx_n_s_stride -#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test -#define __pyx_n_s_tileGridSize 
__pyx_mstate_global->__pyx_n_s_tileGridSize -#define __pyx_n_s_top __pyx_mstate_global->__pyx_n_s_top -#define __pyx_n_s_uint8 __pyx_mstate_global->__pyx_n_s_uint8 -#define __pyx_n_s_uniform __pyx_mstate_global->__pyx_n_s_uniform -#define __pyx_n_s_val __pyx_mstate_global->__pyx_n_s_val -#define __pyx_n_s_value __pyx_mstate_global->__pyx_n_s_value -#define __pyx_n_s_vgain __pyx_mstate_global->__pyx_n_s_vgain -#define __pyx_n_s_w __pyx_mstate_global->__pyx_n_s_w -#define __pyx_n_s_w1 __pyx_mstate_global->__pyx_n_s_w1 -#define __pyx_n_s_w2 __pyx_mstate_global->__pyx_n_s_w2 -#define __pyx_n_s_wh_thr __pyx_mstate_global->__pyx_n_s_wh_thr -#define __pyx_n_s_x __pyx_mstate_global->__pyx_n_s_x -#define __pyx_n_s_x1 __pyx_mstate_global->__pyx_n_s_x1 -#define __pyx_n_s_x1a __pyx_mstate_global->__pyx_n_s_x1a -#define __pyx_n_s_x1b __pyx_mstate_global->__pyx_n_s_x1b -#define __pyx_n_s_x2 __pyx_mstate_global->__pyx_n_s_x2 -#define __pyx_n_s_x2a __pyx_mstate_global->__pyx_n_s_x2a -#define __pyx_n_s_x2b __pyx_mstate_global->__pyx_n_s_x2b -#define __pyx_n_s_xc __pyx_mstate_global->__pyx_n_s_xc -#define __pyx_n_s_xmax __pyx_mstate_global->__pyx_n_s_xmax -#define __pyx_n_s_xmin __pyx_mstate_global->__pyx_n_s_xmin -#define __pyx_n_s_y1 __pyx_mstate_global->__pyx_n_s_y1 -#define __pyx_n_s_y1a __pyx_mstate_global->__pyx_n_s_y1a -#define __pyx_n_s_y1b __pyx_mstate_global->__pyx_n_s_y1b -#define __pyx_n_s_y2 __pyx_mstate_global->__pyx_n_s_y2 -#define __pyx_n_s_y2a __pyx_mstate_global->__pyx_n_s_y2a -#define __pyx_n_s_y2b __pyx_mstate_global->__pyx_n_s_y2b -#define __pyx_n_s_yc __pyx_mstate_global->__pyx_n_s_yc -#define __pyx_n_s_ymax __pyx_mstate_global->__pyx_n_s_ymax -#define __pyx_n_s_ymin __pyx_mstate_global->__pyx_n_s_ymin -#define __pyx_n_s_yuv __pyx_mstate_global->__pyx_n_s_yuv -#define __pyx_n_s_zeros __pyx_mstate_global->__pyx_n_s_zeros -#define __pyx_float_0_0 __pyx_mstate_global->__pyx_float_0_0 -#define __pyx_float_0_1 __pyx_mstate_global->__pyx_float_0_1 -#define __pyx_float_0_5 __pyx_mstate_global->__pyx_float_0_5 -#define __pyx_float_2_0 __pyx_mstate_global->__pyx_float_2_0 -#define __pyx_float_0_03 __pyx_mstate_global->__pyx_float_0_03 -#define __pyx_float_0_25 __pyx_mstate_global->__pyx_float_0_25 -#define __pyx_float_0_30 __pyx_mstate_global->__pyx_float_0_30 -#define __pyx_float_0_60 __pyx_mstate_global->__pyx_float_0_60 -#define __pyx_float_32_0 __pyx_mstate_global->__pyx_float_32_0 -#define __pyx_float_0_125 __pyx_mstate_global->__pyx_float_0_125 -#define __pyx_float_1eneg_16 __pyx_mstate_global->__pyx_float_1eneg_16 -#define __pyx_float_0_0625 __pyx_mstate_global->__pyx_float_0_0625 -#define __pyx_float_0_03125 __pyx_mstate_global->__pyx_float_0_03125 -#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0 -#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1 -#define __pyx_int_2 __pyx_mstate_global->__pyx_int_2 -#define __pyx_int_3 __pyx_mstate_global->__pyx_int_3 -#define __pyx_int_5 __pyx_mstate_global->__pyx_int_5 -#define __pyx_int_8 __pyx_mstate_global->__pyx_int_8 -#define __pyx_int_32 __pyx_mstate_global->__pyx_int_32 -#define __pyx_int_64 __pyx_mstate_global->__pyx_int_64 -#define __pyx_int_100 __pyx_mstate_global->__pyx_int_100 -#define __pyx_int_114 __pyx_mstate_global->__pyx_int_114 -#define __pyx_int_180 __pyx_mstate_global->__pyx_int_180 -#define __pyx_int_191 __pyx_mstate_global->__pyx_int_191 -#define __pyx_int_255 __pyx_mstate_global->__pyx_int_255 -#define __pyx_int_256 __pyx_mstate_global->__pyx_int_256 -#define __pyx_int_640 __pyx_mstate_global->__pyx_int_640 -#define 
__pyx_int_neg_1 __pyx_mstate_global->__pyx_int_neg_1 -#define __pyx_tuple_ __pyx_mstate_global->__pyx_tuple_ -#define __pyx_slice__4 __pyx_mstate_global->__pyx_slice__4 -#define __pyx_slice__6 __pyx_mstate_global->__pyx_slice__6 -#define __pyx_slice__7 __pyx_mstate_global->__pyx_slice__7 -#define __pyx_tuple__2 __pyx_mstate_global->__pyx_tuple__2 -#define __pyx_tuple__3 __pyx_mstate_global->__pyx_tuple__3 -#define __pyx_tuple__5 __pyx_mstate_global->__pyx_tuple__5 -#define __pyx_tuple__8 __pyx_mstate_global->__pyx_tuple__8 -#define __pyx_tuple__9 __pyx_mstate_global->__pyx_tuple__9 -#define __pyx_slice__11 __pyx_mstate_global->__pyx_slice__11 -#define __pyx_slice__12 __pyx_mstate_global->__pyx_slice__12 -#define __pyx_slice__14 __pyx_mstate_global->__pyx_slice__14 -#define __pyx_slice__16 __pyx_mstate_global->__pyx_slice__16 -#define __pyx_tuple__10 __pyx_mstate_global->__pyx_tuple__10 -#define __pyx_tuple__13 __pyx_mstate_global->__pyx_tuple__13 -#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15 -#define __pyx_tuple__17 __pyx_mstate_global->__pyx_tuple__17 -#define __pyx_tuple__18 __pyx_mstate_global->__pyx_tuple__18 -#define __pyx_tuple__19 __pyx_mstate_global->__pyx_tuple__19 -#define __pyx_tuple__20 __pyx_mstate_global->__pyx_tuple__20 -#define __pyx_tuple__23 __pyx_mstate_global->__pyx_tuple__23 -#define __pyx_tuple__25 __pyx_mstate_global->__pyx_tuple__25 -#define __pyx_tuple__26 __pyx_mstate_global->__pyx_tuple__26 -#define __pyx_tuple__28 __pyx_mstate_global->__pyx_tuple__28 -#define __pyx_tuple__29 __pyx_mstate_global->__pyx_tuple__29 -#define __pyx_tuple__31 __pyx_mstate_global->__pyx_tuple__31 -#define __pyx_tuple__33 __pyx_mstate_global->__pyx_tuple__33 -#define __pyx_tuple__34 __pyx_mstate_global->__pyx_tuple__34 -#define __pyx_tuple__36 __pyx_mstate_global->__pyx_tuple__36 -#define __pyx_tuple__38 __pyx_mstate_global->__pyx_tuple__38 -#define __pyx_tuple__40 __pyx_mstate_global->__pyx_tuple__40 -#define __pyx_tuple__41 __pyx_mstate_global->__pyx_tuple__41 -#define __pyx_tuple__43 __pyx_mstate_global->__pyx_tuple__43 -#define __pyx_tuple__45 __pyx_mstate_global->__pyx_tuple__45 -#define __pyx_codeobj__24 __pyx_mstate_global->__pyx_codeobj__24 -#define __pyx_codeobj__27 __pyx_mstate_global->__pyx_codeobj__27 -#define __pyx_codeobj__30 __pyx_mstate_global->__pyx_codeobj__30 -#define __pyx_codeobj__32 __pyx_mstate_global->__pyx_codeobj__32 -#define __pyx_codeobj__35 __pyx_mstate_global->__pyx_codeobj__35 -#define __pyx_codeobj__39 __pyx_mstate_global->__pyx_codeobj__39 -#define __pyx_codeobj__42 __pyx_mstate_global->__pyx_codeobj__42 -#define __pyx_codeobj__44 __pyx_mstate_global->__pyx_codeobj__44 -#endif -/* #### Code section: module_code ### */ - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":16 - * - * - * def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # <<<<<<<<<<<<<< - * # HSV color-space augmentation - * if hgain or sgain or vgain: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_1augment_hsv(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_1augment_hsv = {"augment_hsv", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_1augment_hsv, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static 
PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_1augment_hsv(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_hgain = 0; - PyObject *__pyx_v_sgain = 0; - PyObject *__pyx_v_vgain = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("augment_hsv (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_hgain,&__pyx_n_s_sgain,&__pyx_n_s_vgain,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_hgain,&__pyx_n_s_sgain,&__pyx_n_s_vgain,0}; - #endif - PyObject* values[4] = {0,0,0,0}; - values[1] = ((PyObject *)((PyObject*)__pyx_float_0_5)); - values[2] = ((PyObject *)((PyObject*)__pyx_float_0_5)); - values[3] = ((PyObject *)((PyObject*)__pyx_float_0_5)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_hgain); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_sgain); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_vgain); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 16, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "augment_hsv") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_im = values[0]; - __pyx_v_hgain = values[1]; - __pyx_v_sgain 
= values[2]; - __pyx_v_vgain = values[3]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("augment_hsv", 0, 1, 4, __pyx_nargs); __PYX_ERR(0, 16, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.augment_hsv", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_augment_hsv(__pyx_self, __pyx_v_im, __pyx_v_hgain, __pyx_v_sgain, __pyx_v_vgain); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_augment_hsv(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_hgain, PyObject *__pyx_v_sgain, PyObject *__pyx_v_vgain) { - PyObject *__pyx_v_r = NULL; - PyObject *__pyx_v_hue = NULL; - PyObject *__pyx_v_sat = NULL; - PyObject *__pyx_v_val = NULL; - PyObject *__pyx_v_dtype = NULL; - PyObject *__pyx_v_x = NULL; - PyObject *__pyx_v_lut_hue = NULL; - PyObject *__pyx_v_lut_sat = NULL; - PyObject *__pyx_v_lut_val = NULL; - PyObject *__pyx_v_im_hsv = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - PyObject *(*__pyx_t_10)(PyObject *); - PyObject *__pyx_t_11 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("augment_hsv", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":18 - * def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - * # HSV color-space augmentation - * if hgain or sgain or vgain: # <<<<<<<<<<<<<< - * r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - * hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_hgain); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18, __pyx_L1_error) - if (!__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_sgain); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18, __pyx_L1_error) - if (!__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_vgain); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(0, 18, __pyx_L1_error) - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":19 - * # HSV color-space augmentation - * if hgain or sgain or vgain: - * r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains # <<<<<<<<<<<<<< - * hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - * dtype = im.dtype # uint8 - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_uniform); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); 
__pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyList_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_hgain); - __Pyx_GIVEREF(__pyx_v_hgain); - PyList_SET_ITEM(__pyx_t_3, 0, __pyx_v_hgain); - __Pyx_INCREF(__pyx_v_sgain); - __Pyx_GIVEREF(__pyx_v_sgain); - PyList_SET_ITEM(__pyx_t_3, 1, __pyx_v_sgain); - __Pyx_INCREF(__pyx_v_vgain); - __Pyx_GIVEREF(__pyx_v_vgain); - PyList_SET_ITEM(__pyx_t_3, 2, __pyx_v_vgain); - __pyx_t_5 = PyNumber_Multiply(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyInt_AddObjC(__pyx_t_5, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_r = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":20 - * if hgain or sgain or vgain: - * r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - * hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) # <<<<<<<<<<<<<< - * dtype = im.dtype # uint8 - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cv2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_split); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_cv2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_cvtColor); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_cv2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_COLOR_BGR2HSV); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_v_im, __pyx_t_8}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __pyx_t_7 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_9 = 1; - } - } - { - PyObject 
*__pyx_callargs[2] = {__pyx_t_7, __pyx_t_5}; - __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) { - PyObject* sequence = __pyx_t_3; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 3)) { - if (size > 3) __Pyx_RaiseTooManyValuesError(3); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 20, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_7 = PyTuple_GET_ITEM(sequence, 2); - } else { - __pyx_t_4 = PyList_GET_ITEM(sequence, 0); - __pyx_t_5 = PyList_GET_ITEM(sequence, 1); - __pyx_t_7 = PyList_GET_ITEM(sequence, 2); - } - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(__pyx_t_7); - #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_7 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_8 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 20, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_8); - index = 0; __pyx_t_4 = __pyx_t_10(__pyx_t_8); if (unlikely(!__pyx_t_4)) goto __pyx_L7_unpacking_failed; - __Pyx_GOTREF(__pyx_t_4); - index = 1; __pyx_t_5 = __pyx_t_10(__pyx_t_8); if (unlikely(!__pyx_t_5)) goto __pyx_L7_unpacking_failed; - __Pyx_GOTREF(__pyx_t_5); - index = 2; __pyx_t_7 = __pyx_t_10(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L7_unpacking_failed; - __Pyx_GOTREF(__pyx_t_7); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_10(__pyx_t_8), 3) < 0) __PYX_ERR(0, 20, __pyx_L1_error) - __pyx_t_10 = NULL; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L8_unpacking_done; - __pyx_L7_unpacking_failed:; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_10 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 20, __pyx_L1_error) - __pyx_L8_unpacking_done:; - } - __pyx_v_hue = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_sat = __pyx_t_5; - __pyx_t_5 = 0; - __pyx_v_val = __pyx_t_7; - __pyx_t_7 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":21 - * r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - * hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - * dtype = im.dtype # uint8 # <<<<<<<<<<<<<< - * - * x = np.arange(0, 256, dtype=r.dtype) - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_im, __pyx_n_s_dtype); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 21, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_dtype = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":23 - * dtype = im.dtype # uint8 - * - * x = np.arange(0, 256, dtype=r.dtype) # <<<<<<<<<<<<<< - * lut_hue = ((x * r[0]) % 180).astype(dtype) - * 
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_arange); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_r, __pyx_n_s_dtype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_tuple__2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_x = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":24 - * - * x = np.arange(0, 256, dtype=r.dtype) - * lut_hue = ((x * r[0]) % 180).astype(dtype) # <<<<<<<<<<<<<< - * lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - * lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - */ - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_r, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = PyNumber_Multiply(__pyx_v_x, __pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyInt_RemainderObjC(__pyx_t_7, __pyx_int_180, 0xB4, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_astype); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_v_dtype}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __pyx_v_lut_hue = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":25 - * x = np.arange(0, 256, dtype=r.dtype) - * lut_hue = ((x * r[0]) % 180).astype(dtype) - * lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) # <<<<<<<<<<<<<< - * lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_clip); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_r, 1, long, 1, 
__Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = PyNumber_Multiply(__pyx_v_x, __pyx_t_3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[4] = {__pyx_t_3, __pyx_t_8, __pyx_int_0, __pyx_int_255}; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_9, 3+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_astype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_v_dtype}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 25, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_v_lut_sat = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":26 - * lut_hue = ((x * r[0]) % 180).astype(dtype) - * lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - * lut_val = np.clip(x * r[2], 0, 255).astype(dtype) # <<<<<<<<<<<<<< - * - * im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - */ - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_clip); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_GetItemInt(__pyx_v_r, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_3 = PyNumber_Multiply(__pyx_v_x, __pyx_t_7); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[4] = {__pyx_t_7, __pyx_t_3, __pyx_int_0, __pyx_int_255}; - __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_9, 3+__pyx_t_9); - 
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_astype); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v_dtype}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 26, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __pyx_v_lut_val = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":28 - * lut_val = np.clip(x * r[2], 0, 255).astype(dtype) - * - * im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) # <<<<<<<<<<<<<< - * cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_cv2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_merge); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_cv2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_LUT); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_v_hue, __pyx_v_lut_hue}; - __pyx_t_8 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_cv2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_LUT); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_9 = 1; - } - } - { - PyObject 
*__pyx_callargs[3] = {__pyx_t_3, __pyx_v_sat, __pyx_v_lut_sat}; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_cv2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_LUT); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_11))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_11, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_v_val, __pyx_v_lut_val}; - __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_11, __pyx_callargs+1-__pyx_t_9, 2+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - } - __pyx_t_11 = PyTuple_New(3); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_11, 2, __pyx_t_6); - __pyx_t_8 = 0; - __pyx_t_7 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = NULL; - __pyx_t_9 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_9 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_11}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_9, 1+__pyx_t_9); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_v_im_hsv = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":29 - * - * im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) - * cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed # <<<<<<<<<<<<<< - * - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cv2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_cvtColor); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cv2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_COLOR_HSV2BGR); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_im_hsv); - __Pyx_GIVEREF(__pyx_v_im_hsv); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_im_hsv); - __Pyx_GIVEREF(__pyx_t_11); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_11); - __pyx_t_11 = 0; - __pyx_t_11 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - if (PyDict_SetItem(__pyx_t_11, __pyx_n_s_dst, __pyx_v_im) < 0) __PYX_ERR(0, 29, __pyx_L1_error) - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, __pyx_t_11); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 29, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":18 - * def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): - * # HSV color-space augmentation - * if hgain or sgain or vgain: # <<<<<<<<<<<<<< - * r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains - * hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":16 - * - * - * def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # <<<<<<<<<<<<<< - * # HSV color-space augmentation - * if hgain or sgain or vgain: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.augment_hsv", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_r); - __Pyx_XDECREF(__pyx_v_hue); - __Pyx_XDECREF(__pyx_v_sat); - __Pyx_XDECREF(__pyx_v_val); - __Pyx_XDECREF(__pyx_v_dtype); - __Pyx_XDECREF(__pyx_v_x); - __Pyx_XDECREF(__pyx_v_lut_hue); - __Pyx_XDECREF(__pyx_v_lut_sat); - __Pyx_XDECREF(__pyx_v_lut_val); - __Pyx_XDECREF(__pyx_v_im_hsv); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":32 - * - * - * def hist_equalize(im, clahe=True, bgr=False): # <<<<<<<<<<<<<< - * # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_3hist_equalize(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_3hist_equalize = {"hist_equalize", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_3hist_equalize, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_3hist_equalize(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_clahe = 0; - PyObject *__pyx_v_bgr = 0; - #if 
!CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("hist_equalize (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_clahe,&__pyx_n_s_bgr,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_clahe,&__pyx_n_s_bgr,0}; - #endif - PyObject* values[3] = {0,0,0}; - values[1] = ((PyObject *)((PyObject *)Py_True)); - values[2] = ((PyObject *)((PyObject *)Py_False)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_clahe); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_bgr); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "hist_equalize") < 0)) __PYX_ERR(0, 32, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_im = values[0]; - __pyx_v_clahe = values[1]; - __pyx_v_bgr = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("hist_equalize", 0, 1, 3, __pyx_nargs); __PYX_ERR(0, 32, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.hist_equalize", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_2hist_equalize(__pyx_self, __pyx_v_im, __pyx_v_clahe, __pyx_v_bgr); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_2hist_equalize(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_clahe, PyObject *__pyx_v_bgr) { - PyObject *__pyx_v_yuv = NULL; - PyObject *__pyx_v_c = NULL; - PyObject 
*__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - int __pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("hist_equalize", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":34 - * def hist_equalize(im, clahe=True, bgr=False): - * # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) # <<<<<<<<<<<<<< - * if clahe: - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cv2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_cvtColor); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_bgr); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 34, __pyx_L1_error) - if (__pyx_t_4) { - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cv2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_COLOR_BGR2YUV); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_2 = __pyx_t_6; - __pyx_t_6 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_cv2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_COLOR_RGB2YUV); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_2 = __pyx_t_5; - __pyx_t_5 = 0; - } - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_v_im, __pyx_t_2}; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 34, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_v_yuv = __pyx_t_1; - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":35 - * # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - * if clahe: # <<<<<<<<<<<<<< - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - */ - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_clahe); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 35, __pyx_L1_error) - if (__pyx_t_4) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":36 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - * if clahe: - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) # <<<<<<<<<<<<<< - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - * else: - */ - 
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_cv2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_createCLAHE); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_clipLimit, __pyx_float_2_0) < 0) __PYX_ERR(0, 36, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_tileGridSize, __pyx_tuple__3) < 0) __PYX_ERR(0, 36, __pyx_L1_error) - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_c = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":37 - * if clahe: - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) # <<<<<<<<<<<<<< - * else: - * yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_c, __pyx_n_s_apply); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_yuv, __pyx_tuple__5); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - if (unlikely((PyObject_SetItem(__pyx_v_yuv, __pyx_tuple__5, __pyx_t_2) < 0))) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":35 - * # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - * if clahe: # <<<<<<<<<<<<<< - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - */ - goto __pyx_L3; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":39 - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - * else: - * yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram # <<<<<<<<<<<<<< - * return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB - * - */ - /*else*/ { - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_cv2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_equalizeHist); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = 
__Pyx_PyObject_GetItem(__pyx_v_yuv, __pyx_tuple__5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_5, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - if (unlikely((PyObject_SetItem(__pyx_v_yuv, __pyx_tuple__5, __pyx_t_2) < 0))) __PYX_ERR(0, 39, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L3:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":40 - * else: - * yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - * return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_cv2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_cvtColor); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_bgr); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 40, __pyx_L1_error) - if (__pyx_t_4) { - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cv2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_COLOR_YUV2BGR); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_3 = __pyx_t_6; - __pyx_t_6 = 0; - } else { - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_cv2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_COLOR_YUV2RGB); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_3 = __pyx_t_5; - __pyx_t_5 = 0; - } - __pyx_t_5 = NULL; - __pyx_t_7 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_7 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_v_yuv, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_7, 2+__pyx_t_7); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":32 - * - * - * def hist_equalize(im, clahe=True, bgr=False): # <<<<<<<<<<<<<< - * # 
Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.hist_equalize", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_yuv); - __Pyx_XDECREF(__pyx_v_c); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":43 - * - * - * def replicate(im, labels): # <<<<<<<<<<<<<< - * # Replicate labels - * h, w = im.shape[:2] - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_5replicate(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_5replicate = {"replicate", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_5replicate, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_5replicate(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_labels = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("replicate (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,0}; - #endif - PyObject* values[2] = {0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 43, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("replicate", 1, 2, 2, 1); __PYX_ERR(0, 43, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "replicate") < 0)) __PYX_ERR(0, 43, 
__pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 2)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - } - __pyx_v_im = values[0]; - __pyx_v_labels = values[1]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("replicate", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 43, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.replicate", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_4replicate(__pyx_self, __pyx_v_im, __pyx_v_labels); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_4replicate(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels) { - PyObject *__pyx_v_h = NULL; - PyObject *__pyx_v_w = NULL; - PyObject *__pyx_v_boxes = NULL; - PyObject *__pyx_v_x1 = NULL; - PyObject *__pyx_v_y1 = NULL; - PyObject *__pyx_v_x2 = NULL; - PyObject *__pyx_v_y2 = NULL; - PyObject *__pyx_v_s = NULL; - PyObject *__pyx_v_i = NULL; - PyObject *__pyx_v_x1b = NULL; - PyObject *__pyx_v_y1b = NULL; - PyObject *__pyx_v_x2b = NULL; - PyObject *__pyx_v_y2b = NULL; - PyObject *__pyx_v_bh = NULL; - PyObject *__pyx_v_bw = NULL; - PyObject *__pyx_v_yc = NULL; - PyObject *__pyx_v_xc = NULL; - PyObject *__pyx_v_x1a = NULL; - PyObject *__pyx_v_y1a = NULL; - PyObject *__pyx_v_x2a = NULL; - PyObject *__pyx_v_y2a = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *(*__pyx_t_5)(PyObject *); - int __pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - Py_ssize_t __pyx_t_9; - PyObject *(*__pyx_t_10)(PyObject *); - PyObject *__pyx_t_11 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("replicate", 0); - __Pyx_INCREF(__pyx_v_labels); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":45 - * def replicate(im, labels): - * # Replicate labels - * h, w = im.shape[:2] # <<<<<<<<<<<<<< - * boxes = labels[:, 1:].astype(int) - * x1, y1, x2, y2 = boxes.T - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_im, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_t_1, 0, 2, NULL, NULL, &__pyx_slice__6, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 45, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - } else { - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_1); - 
__Pyx_INCREF(__pyx_t_3); - #else - __pyx_t_1 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_4 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_4); - index = 0; __pyx_t_1 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_1)) goto __pyx_L3_unpacking_failed; - __Pyx_GOTREF(__pyx_t_1); - index = 1; __pyx_t_3 = __pyx_t_5(__pyx_t_4); if (unlikely(!__pyx_t_3)) goto __pyx_L3_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_4), 2) < 0) __PYX_ERR(0, 45, __pyx_L1_error) - __pyx_t_5 = NULL; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - goto __pyx_L4_unpacking_done; - __pyx_L3_unpacking_failed:; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_5 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 45, __pyx_L1_error) - __pyx_L4_unpacking_done:; - } - __pyx_v_h = __pyx_t_1; - __pyx_t_1 = 0; - __pyx_v_w = __pyx_t_3; - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":46 - * # Replicate labels - * h, w = im.shape[:2] - * boxes = labels[:, 1:].astype(int) # <<<<<<<<<<<<<< - * x1, y1, x2, y2 = boxes.T - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - */ - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_tuple__8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_astype); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, ((PyObject *)(&PyInt_Type))}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_6, 1+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_v_boxes = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":47 - * h, w = im.shape[:2] - * boxes = labels[:, 1:].astype(int) - * x1, y1, x2, y2 = boxes.T # <<<<<<<<<<<<<< - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - * for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_boxes, __pyx_n_s_T); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 4)) { - if (size > 4) __Pyx_RaiseTooManyValuesError(4); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 47, __pyx_L1_error) - } - #if 
CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 2); - __pyx_t_7 = PyTuple_GET_ITEM(sequence, 3); - } else { - __pyx_t_1 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - __pyx_t_4 = PyList_GET_ITEM(sequence, 2); - __pyx_t_7 = PyList_GET_ITEM(sequence, 3); - } - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_7); - #else - { - Py_ssize_t i; - PyObject** temps[4] = {&__pyx_t_1,&__pyx_t_3,&__pyx_t_4,&__pyx_t_7}; - for (i=0; i < 4; i++) { - PyObject* item = PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(item); - *(temps[i]) = item; - } - } - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - Py_ssize_t index = -1; - PyObject** temps[4] = {&__pyx_t_1,&__pyx_t_3,&__pyx_t_4,&__pyx_t_7}; - __pyx_t_8 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 47, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_8); - for (index=0; index < 4; index++) { - PyObject* item = __pyx_t_5(__pyx_t_8); if (unlikely(!item)) goto __pyx_L5_unpacking_failed; - __Pyx_GOTREF(item); - *(temps[index]) = item; - } - if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_8), 4) < 0) __PYX_ERR(0, 47, __pyx_L1_error) - __pyx_t_5 = NULL; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L6_unpacking_done; - __pyx_L5_unpacking_failed:; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_5 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 47, __pyx_L1_error) - __pyx_L6_unpacking_done:; - } - __pyx_v_x1 = __pyx_t_1; - __pyx_t_1 = 0; - __pyx_v_y1 = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_v_x2 = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_y2 = __pyx_t_7; - __pyx_t_7 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":48 - * boxes = labels[:, 1:].astype(int) - * x1, y1, x2, y2 = boxes.T - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) # <<<<<<<<<<<<<< - * for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - * x1b, y1b, x2b, y2b = boxes[i] - */ - __pyx_t_2 = PyNumber_Subtract(__pyx_v_x2, __pyx_v_x1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = PyNumber_Subtract(__pyx_v_y2, __pyx_v_y1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_4 = PyNumber_Add(__pyx_t_2, __pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyInt_TrueDivideObjC(__pyx_t_4, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 48, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_s = __pyx_t_7; - __pyx_t_7 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":49 - * x1, y1, x2, y2 = boxes.T - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - * for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices # <<<<<<<<<<<<<< - * x1b, y1b, x2b, y2b = boxes[i] - * bh, bw = y2b - y1b, x2b - x1b - */ - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_s, __pyx_n_s_argsort); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - 
__pyx_t_2 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_2, }; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_6, 0+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_s, __pyx_n_s_size); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyNumber_Multiply(__pyx_t_4, __pyx_float_0_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_t_7, 0, 0, NULL, &__pyx_t_4, NULL, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { - __pyx_t_4 = __pyx_t_2; __Pyx_INCREF(__pyx_t_4); __pyx_t_9 = 0; - __pyx_t_10 = NULL; - } else { - __pyx_t_9 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_10 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_4); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 49, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - for (;;) { - if (likely(!__pyx_t_10)) { - if (likely(PyList_CheckExact(__pyx_t_4))) { - if (__pyx_t_9 >= PyList_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_9); __Pyx_INCREF(__pyx_t_2); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 49, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_4, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } else { - if (__pyx_t_9 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_9); __Pyx_INCREF(__pyx_t_2); __pyx_t_9++; if (unlikely((0 < 0))) __PYX_ERR(0, 49, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_4, __pyx_t_9); __pyx_t_9++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 49, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - } - } else { - __pyx_t_2 = __pyx_t_10(__pyx_t_4); - if (unlikely(!__pyx_t_2)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 49, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_2); - } - __Pyx_XDECREF_SET(__pyx_v_i, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":50 - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - * for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - * x1b, y1b, x2b, y2b = boxes[i] # <<<<<<<<<<<<<< - * bh, 
bw = y2b - y1b, x2b - x1b - * yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_boxes, __pyx_v_i); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 4)) { - if (size > 4) __Pyx_RaiseTooManyValuesError(4); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 50, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_7 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_1 = PyTuple_GET_ITEM(sequence, 2); - __pyx_t_8 = PyTuple_GET_ITEM(sequence, 3); - } else { - __pyx_t_7 = PyList_GET_ITEM(sequence, 0); - __pyx_t_3 = PyList_GET_ITEM(sequence, 1); - __pyx_t_1 = PyList_GET_ITEM(sequence, 2); - __pyx_t_8 = PyList_GET_ITEM(sequence, 3); - } - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(__pyx_t_8); - #else - { - Py_ssize_t i; - PyObject** temps[4] = {&__pyx_t_7,&__pyx_t_3,&__pyx_t_1,&__pyx_t_8}; - for (i=0; i < 4; i++) { - PyObject* item = PySequence_ITEM(sequence, i); if (unlikely(!item)) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_GOTREF(item); - *(temps[i]) = item; - } - } - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - Py_ssize_t index = -1; - PyObject** temps[4] = {&__pyx_t_7,&__pyx_t_3,&__pyx_t_1,&__pyx_t_8}; - __pyx_t_11 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 50, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_11); - for (index=0; index < 4; index++) { - PyObject* item = __pyx_t_5(__pyx_t_11); if (unlikely(!item)) goto __pyx_L9_unpacking_failed; - __Pyx_GOTREF(item); - *(temps[index]) = item; - } - if (__Pyx_IternextUnpackEndCheck(__pyx_t_5(__pyx_t_11), 4) < 0) __PYX_ERR(0, 50, __pyx_L1_error) - __pyx_t_5 = NULL; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - goto __pyx_L10_unpacking_done; - __pyx_L9_unpacking_failed:; - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __pyx_t_5 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 50, __pyx_L1_error) - __pyx_L10_unpacking_done:; - } - __Pyx_XDECREF_SET(__pyx_v_x1b, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_XDECREF_SET(__pyx_v_y1b, __pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF_SET(__pyx_v_x2b, __pyx_t_1); - __pyx_t_1 = 0; - __Pyx_XDECREF_SET(__pyx_v_y2b, __pyx_t_8); - __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":51 - * for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices - * x1b, y1b, x2b, y2b = boxes[i] - * bh, bw = y2b - y1b, x2b - x1b # <<<<<<<<<<<<<< - * yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - * x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - */ - __pyx_t_2 = PyNumber_Subtract(__pyx_v_y2b, __pyx_v_y1b); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 51, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = PyNumber_Subtract(__pyx_v_x2b, __pyx_v_x1b); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 51, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_XDECREF_SET(__pyx_v_bh, __pyx_t_2); - __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_bw, __pyx_t_8); - __pyx_t_8 = 0; - - /* 
"pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":52 - * x1b, y1b, x2b, y2b = boxes[i] - * bh, bw = y2b - y1b, x2b - x1b - * yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y # <<<<<<<<<<<<<< - * x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - * im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_uniform); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Subtract(__pyx_v_h, __pyx_v_bh); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_int_0, __pyx_t_2}; - __pyx_t_8 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_uniform); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_Subtract(__pyx_v_w, __pyx_v_bw); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_7, __pyx_int_0, __pyx_t_2}; - __pyx_t_8 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 52, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_XDECREF_SET(__pyx_v_yc, __pyx_t_1); - __pyx_t_1 = 0; - __Pyx_XDECREF_SET(__pyx_v_xc, __pyx_t_3); - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":53 - * bh, bw = y2b - y1b, x2b - x1b - * yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - * x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] # <<<<<<<<<<<<<< - * im[y1a:y2a, x1a:x2a] 
= im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - * labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - */ - __pyx_t_3 = __pyx_v_xc; - __Pyx_INCREF(__pyx_t_3); - __pyx_t_1 = __pyx_v_yc; - __Pyx_INCREF(__pyx_t_1); - __pyx_t_8 = PyNumber_Add(__pyx_v_xc, __pyx_v_bw); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_2 = PyNumber_Add(__pyx_v_yc, __pyx_v_bh); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 53, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_XDECREF_SET(__pyx_v_x1a, __pyx_t_3); - __pyx_t_3 = 0; - __Pyx_XDECREF_SET(__pyx_v_y1a, __pyx_t_1); - __pyx_t_1 = 0; - __Pyx_XDECREF_SET(__pyx_v_x2a, __pyx_t_8); - __pyx_t_8 = 0; - __Pyx_XDECREF_SET(__pyx_v_y2a, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":54 - * yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y - * x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - * im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] # <<<<<<<<<<<<<< - * labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - * - */ - __pyx_t_2 = PySlice_New(__pyx_v_y1b, __pyx_v_y2b, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = PySlice_New(__pyx_v_x1b, __pyx_v_x2b, Py_None); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_8); - __pyx_t_2 = 0; - __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_v_im, __pyx_t_1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PySlice_New(__pyx_v_y1a, __pyx_v_y2a, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PySlice_New(__pyx_v_x1a, __pyx_v_x2a, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_im, __pyx_t_3, __pyx_t_8) < 0))) __PYX_ERR(0, 54, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":55 - * x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] - * im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] - * labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) # <<<<<<<<<<<<<< - * - * return im, labels - */ - __Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_append); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_i); - __Pyx_GIVEREF(__pyx_v_i); - 
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_i); - __Pyx_INCREF(__pyx_int_0); - __Pyx_GIVEREF(__pyx_int_0); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_int_0); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_t_8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = PyList_New(5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GIVEREF(__pyx_t_2); - PyList_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); - __Pyx_INCREF(__pyx_v_x1a); - __Pyx_GIVEREF(__pyx_v_x1a); - PyList_SET_ITEM(__pyx_t_8, 1, __pyx_v_x1a); - __Pyx_INCREF(__pyx_v_y1a); - __Pyx_GIVEREF(__pyx_v_y1a); - PyList_SET_ITEM(__pyx_t_8, 2, __pyx_v_y1a); - __Pyx_INCREF(__pyx_v_x2a); - __Pyx_GIVEREF(__pyx_v_x2a); - PyList_SET_ITEM(__pyx_t_8, 3, __pyx_v_x2a); - __Pyx_INCREF(__pyx_v_y2a); - __Pyx_GIVEREF(__pyx_v_y2a); - PyList_SET_ITEM(__pyx_t_8, 4, __pyx_v_y2a); - __pyx_t_2 = 0; - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_8); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_8); - __pyx_t_8 = 0; - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_labels); - __Pyx_GIVEREF(__pyx_v_labels); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_labels); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_axis, __pyx_int_0) < 0) __PYX_ERR(0, 55, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_8, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_labels, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":49 - * x1, y1, x2, y2 = boxes.T - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - * for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices # <<<<<<<<<<<<<< - * x1b, y1b, x2b, y2b = boxes[i] - * bh, bw = y2b - y1b, x2b - x1b - */ - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":57 - * labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) - * - * return im, labels # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 57, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_INCREF(__pyx_v_im); - __Pyx_GIVEREF(__pyx_v_im); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_im); - __Pyx_INCREF(__pyx_v_labels); - __Pyx_GIVEREF(__pyx_v_labels); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_labels); - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":43 - * - * - * def replicate(im, labels): # <<<<<<<<<<<<<< - * # Replicate labels - * h, w = im.shape[:2] - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.replicate", __pyx_clineno, __pyx_lineno, 
__pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_h); - __Pyx_XDECREF(__pyx_v_w); - __Pyx_XDECREF(__pyx_v_boxes); - __Pyx_XDECREF(__pyx_v_x1); - __Pyx_XDECREF(__pyx_v_y1); - __Pyx_XDECREF(__pyx_v_x2); - __Pyx_XDECREF(__pyx_v_y2); - __Pyx_XDECREF(__pyx_v_s); - __Pyx_XDECREF(__pyx_v_i); - __Pyx_XDECREF(__pyx_v_x1b); - __Pyx_XDECREF(__pyx_v_y1b); - __Pyx_XDECREF(__pyx_v_x2b); - __Pyx_XDECREF(__pyx_v_y2b); - __Pyx_XDECREF(__pyx_v_bh); - __Pyx_XDECREF(__pyx_v_bw); - __Pyx_XDECREF(__pyx_v_yc); - __Pyx_XDECREF(__pyx_v_xc); - __Pyx_XDECREF(__pyx_v_x1a); - __Pyx_XDECREF(__pyx_v_y1a); - __Pyx_XDECREF(__pyx_v_x2a); - __Pyx_XDECREF(__pyx_v_y2a); - __Pyx_XDECREF(__pyx_v_labels); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":60 - * - * - * def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # <<<<<<<<<<<<<< - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_7letterbox(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_7letterbox = {"letterbox", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_7letterbox, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_7letterbox(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_new_shape = 0; - PyObject *__pyx_v_color = 0; - PyObject *__pyx_v_auto = 0; - PyObject *__pyx_v_scaleFill = 0; - PyObject *__pyx_v_scaleup = 0; - PyObject *__pyx_v_stride = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("letterbox (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_new_shape,&__pyx_n_s_color,&__pyx_n_s_auto,&__pyx_n_s_scaleFill,&__pyx_n_s_scaleup,&__pyx_n_s_stride,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_new_shape,&__pyx_n_s_color,&__pyx_n_s_auto,&__pyx_n_s_scaleFill,&__pyx_n_s_scaleup,&__pyx_n_s_stride,0}; - #endif - PyObject* values[7] = {0,0,0,0,0,0,0}; - values[1] = ((PyObject *)((PyObject*)__pyx_tuple__9)); - values[2] = ((PyObject *)((PyObject*)__pyx_tuple__10)); - values[3] = ((PyObject *)((PyObject *)Py_True)); - values[4] = ((PyObject *)((PyObject *)Py_False)); - values[5] = ((PyObject *)((PyObject *)Py_True)); - values[6] = ((PyObject *)((PyObject *)__pyx_int_32)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 7: values[6] = __Pyx_Arg_FASTCALL(__pyx_args, 6); - CYTHON_FALLTHROUGH; - case 6: values[5] = 
__Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_new_shape); - if (value) { values[1] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_color); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_auto); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_scaleFill); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 5: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_scaleup); - if (value) { values[5] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 6: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_stride); - if (value) { values[6] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "letterbox") < 0)) __PYX_ERR(0, 60, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 7: values[6] = __Pyx_Arg_FASTCALL(__pyx_args, 6); - CYTHON_FALLTHROUGH; - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_im = values[0]; - __pyx_v_new_shape = values[1]; - __pyx_v_color = values[2]; - __pyx_v_auto = values[3]; - __pyx_v_scaleFill = values[4]; - __pyx_v_scaleup = values[5]; - __pyx_v_stride = values[6]; - } - goto 
__pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("letterbox", 0, 1, 7, __pyx_nargs); __PYX_ERR(0, 60, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.letterbox", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_6letterbox(__pyx_self, __pyx_v_im, __pyx_v_new_shape, __pyx_v_color, __pyx_v_auto, __pyx_v_scaleFill, __pyx_v_scaleup, __pyx_v_stride); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_6letterbox(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_new_shape, PyObject *__pyx_v_color, PyObject *__pyx_v_auto, PyObject *__pyx_v_scaleFill, PyObject *__pyx_v_scaleup, PyObject *__pyx_v_stride) { - PyObject *__pyx_v_shape = NULL; - PyObject *__pyx_v_r = NULL; - PyObject *__pyx_v_ratio = NULL; - PyObject *__pyx_v_new_unpad = NULL; - PyObject *__pyx_v_dw = NULL; - PyObject *__pyx_v_dh = NULL; - PyObject *__pyx_v_top = NULL; - PyObject *__pyx_v_bottom = NULL; - PyObject *__pyx_v_left = NULL; - PyObject *__pyx_v_right = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - double __pyx_t_7; - int __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("letterbox", 0); - __Pyx_INCREF(__pyx_v_im); - __Pyx_INCREF(__pyx_v_new_shape); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":62 - * def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] # <<<<<<<<<<<<<< - * if isinstance(new_shape, int): - * new_shape = (new_shape, new_shape) - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_im, __pyx_n_s_shape); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetSlice(__pyx_t_1, 0, 2, NULL, NULL, &__pyx_slice__6, 0, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 62, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_shape = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":63 - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - * if isinstance(new_shape, int): # <<<<<<<<<<<<<< - * new_shape = (new_shape, new_shape) - * - */ - __pyx_t_3 = PyInt_Check(__pyx_v_new_shape); - __pyx_t_4 = (__pyx_t_3 != 0); - if (__pyx_t_4) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":64 - * shape = im.shape[:2] # current shape [height, width] - * if isinstance(new_shape, int): - * new_shape = (new_shape, new_shape) # <<<<<<<<<<<<<< - * - * # Scale ratio (new / old) - */ - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 64, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_new_shape); - __Pyx_GIVEREF(__pyx_v_new_shape); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_new_shape); - __Pyx_INCREF(__pyx_v_new_shape); - 
__Pyx_GIVEREF(__pyx_v_new_shape); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_new_shape); - __Pyx_DECREF_SET(__pyx_v_new_shape, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":63 - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - * if isinstance(new_shape, int): # <<<<<<<<<<<<<< - * new_shape = (new_shape, new_shape) - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":67 - * - * # Scale ratio (new / old) - * r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) # <<<<<<<<<<<<<< - * if not scaleup: # only scale down, do not scale up (for better val mAP) - * r = min(r, 1.0) - */ - __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_new_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_new_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_1 = PyObject_RichCompare(__pyx_t_5, __pyx_t_6, Py_LT); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 67, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_4) { - __Pyx_INCREF(__pyx_t_5); - __pyx_t_2 = __pyx_t_5; - } else { - __Pyx_INCREF(__pyx_t_6); - __pyx_t_2 = __pyx_t_6; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __pyx_t_2; - __Pyx_INCREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_r = __pyx_t_5; - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":68 - * # Scale ratio (new / old) - * r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - * if not scaleup: # only scale down, do not scale up (for better val mAP) # <<<<<<<<<<<<<< - * r = min(r, 1.0) - * - */ - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_scaleup); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 68, __pyx_L1_error) - __pyx_t_3 = ((!__pyx_t_4) != 0); - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":69 - * r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - * if not scaleup: # only scale down, do not scale up (for better val mAP) - * r = min(r, 1.0) # <<<<<<<<<<<<<< - * - * # Compute padding - */ - __pyx_t_7 = 1.0; - __Pyx_INCREF(__pyx_v_r); - __pyx_t_5 = __pyx_v_r; - __pyx_t_6 = PyFloat_FromDouble(__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_1 = PyObject_RichCompare(__pyx_t_6, __pyx_t_5, Py_LT); 
__Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (__pyx_t_3) { - __pyx_t_1 = PyFloat_FromDouble(__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __pyx_t_1; - __pyx_t_1 = 0; - } else { - __Pyx_INCREF(__pyx_t_5); - __pyx_t_2 = __pyx_t_5; - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __pyx_t_2; - __Pyx_INCREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_r, __pyx_t_5); - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":68 - * # Scale ratio (new / old) - * r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) - * if not scaleup: # only scale down, do not scale up (for better val mAP) # <<<<<<<<<<<<<< - * r = min(r, 1.0) - * - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":72 - * - * # Compute padding - * ratio = r, r # width, height ratios # <<<<<<<<<<<<<< - * new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - * dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - */ - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 72, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_r); - __Pyx_GIVEREF(__pyx_v_r); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_r); - __Pyx_INCREF(__pyx_v_r); - __Pyx_GIVEREF(__pyx_v_r); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_v_r); - __pyx_v_ratio = ((PyObject*)__pyx_t_5); - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":73 - * # Compute padding - * ratio = r, r # width, height ratios - * new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) # <<<<<<<<<<<<<< - * dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - * if auto: # minimum rectangle - */ - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_2 = PyNumber_Multiply(__pyx_t_5, __pyx_v_r); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Int(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyNumber_Multiply(__pyx_t_5, __pyx_v_r); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyNumber_Int(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = PyTuple_New(2); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(0, 73, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __pyx_t_2 = 0; - __pyx_t_1 = 0; - __pyx_v_new_unpad = ((PyObject*)__pyx_t_5); - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":74 - * ratio = r, r # width, height ratios - * new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - * dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding # <<<<<<<<<<<<<< - * if auto: # minimum rectangle - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - */ - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_new_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v_new_unpad, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyNumber_Subtract(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_new_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_GetItemInt_Tuple(__pyx_v_new_unpad, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyNumber_Subtract(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 74, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_dw = __pyx_t_2; - __pyx_t_2 = 0; - __pyx_v_dh = __pyx_t_6; - __pyx_t_6 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":75 - * new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - * dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - * if auto: # minimum rectangle # <<<<<<<<<<<<<< - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - * elif scaleFill: # stretch - */ - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_auto); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 75, __pyx_L1_error) - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":76 - * dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - * if auto: # minimum rectangle - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding # <<<<<<<<<<<<<< - * elif scaleFill: # stretch - * dw, dh = 0.0, 0.0 - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_mod); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_v_dw, __pyx_v_stride}; - __pyx_t_6 = 
__Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_mod); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_8 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_8 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_v_dh, __pyx_v_stride}; - __pyx_t_5 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 76, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_DECREF_SET(__pyx_v_dw, __pyx_t_6); - __pyx_t_6 = 0; - __Pyx_DECREF_SET(__pyx_v_dh, __pyx_t_5); - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":75 - * new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) - * dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding - * if auto: # minimum rectangle # <<<<<<<<<<<<<< - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - * elif scaleFill: # stretch - */ - goto __pyx_L5; - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":77 - * if auto: # minimum rectangle - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - * elif scaleFill: # stretch # <<<<<<<<<<<<<< - * dw, dh = 0.0, 0.0 - * new_unpad = (new_shape[1], new_shape[0]) - */ - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_scaleFill); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 77, __pyx_L1_error) - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":78 - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - * elif scaleFill: # stretch - * dw, dh = 0.0, 0.0 # <<<<<<<<<<<<<< - * new_unpad = (new_shape[1], new_shape[0]) - * ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - */ - __pyx_t_5 = __pyx_float_0_0; - __Pyx_INCREF(__pyx_t_5); - __pyx_t_6 = __pyx_float_0_0; - __Pyx_INCREF(__pyx_t_6); - __Pyx_DECREF_SET(__pyx_v_dw, __pyx_t_5); - __pyx_t_5 = 0; - __Pyx_DECREF_SET(__pyx_v_dh, __pyx_t_6); - __pyx_t_6 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":79 - * elif scaleFill: # stretch - * dw, dh = 0.0, 0.0 - * new_unpad = (new_shape[1], new_shape[0]) # <<<<<<<<<<<<<< - * ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - * - */ - __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_new_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_new_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 79, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_6); - 
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_5); - __pyx_t_6 = 0; - __pyx_t_5 = 0; - __Pyx_DECREF_SET(__pyx_v_new_unpad, ((PyObject*)__pyx_t_1)); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":80 - * dw, dh = 0.0, 0.0 - * new_unpad = (new_shape[1], new_shape[0]) - * ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios # <<<<<<<<<<<<<< - * - * dw /= 2 # divide padding into 2 sides - */ - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_new_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_shape, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = __Pyx_PyNumber_Divide(__pyx_t_1, __pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_new_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_shape, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_5, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 80, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2); - __pyx_t_6 = 0; - __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_ratio, ((PyObject*)__pyx_t_1)); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":77 - * if auto: # minimum rectangle - * dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding - * elif scaleFill: # stretch # <<<<<<<<<<<<<< - * dw, dh = 0.0, 0.0 - * new_unpad = (new_shape[1], new_shape[0]) - */ - } - __pyx_L5:; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":82 - * ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios - * - * dw /= 2 # divide padding into 2 sides # <<<<<<<<<<<<<< - * dh /= 2 - * - */ - __pyx_t_1 = __Pyx_PyInt_TrueDivideObjC(__pyx_v_dw, __pyx_int_2, 2, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF_SET(__pyx_v_dw, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":83 - * - * dw /= 2 # divide padding into 2 sides - * dh /= 2 # <<<<<<<<<<<<<< - * - * if shape[::-1] != new_unpad: # resize - */ - __pyx_t_1 = __Pyx_PyInt_TrueDivideObjC(__pyx_v_dh, __pyx_int_2, 2, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 83, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF_SET(__pyx_v_dh, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":85 - * dh /= 2 - * - * if shape[::-1] != new_unpad: # resize # <<<<<<<<<<<<<< - * im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - */ - __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_shape, __pyx_slice__11); 
if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_1, __pyx_v_new_unpad, Py_NE); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":86 - * - * if shape[::-1] != new_unpad: # resize - * im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) # <<<<<<<<<<<<<< - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - * left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cv2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_resize); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_im); - __Pyx_GIVEREF(__pyx_v_im); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_im); - __Pyx_INCREF(__pyx_v_new_unpad); - __Pyx_GIVEREF(__pyx_v_new_unpad); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_new_unpad); - __pyx_t_6 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_cv2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_INTER_LINEAR); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (PyDict_SetItem(__pyx_t_6, __pyx_n_s_interpolation, __pyx_t_9) < 0) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_2, __pyx_t_6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 86, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF_SET(__pyx_v_im, __pyx_t_9); - __pyx_t_9 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":85 - * dh /= 2 - * - * if shape[::-1] != new_unpad: # resize # <<<<<<<<<<<<<< - * im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":87 - * if shape[::-1] != new_unpad: # resize - * im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) # <<<<<<<<<<<<<< - * left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - * im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - */ - __pyx_t_9 = __Pyx_PyFloat_SubtractObjC(__pyx_v_dh, __pyx_float_0_1, 0.1, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyNumber_Int(__pyx_t_6); if 
(unlikely(!__pyx_t_9)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyFloat_AddObjC(__pyx_v_dh, __pyx_float_0_1, 0.1, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyNumber_Int(__pyx_t_2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 87, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_top = __pyx_t_9; - __pyx_t_9 = 0; - __pyx_v_bottom = __pyx_t_6; - __pyx_t_6 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":88 - * im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - * left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) # <<<<<<<<<<<<<< - * im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - * return im, ratio, (dw, dh) - */ - __pyx_t_6 = __Pyx_PyFloat_SubtractObjC(__pyx_v_dw, __pyx_float_0_1, 0.1, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyNumber_Int(__pyx_t_9); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyFloat_AddObjC(__pyx_v_dw, __pyx_float_0_1, 0.1, 0, 0); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_9); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __Pyx_PyNumber_Int(__pyx_t_2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 88, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_left = __pyx_t_6; - __pyx_t_6 = 0; - __pyx_v_right = __pyx_t_9; - __pyx_t_9 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":89 - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - * left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - * im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border # <<<<<<<<<<<<<< - * return im, ratio, (dw, dh) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_cv2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_copyMakeBorder); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_cv2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_BORDER_CONSTANT); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = PyTuple_New(6); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_INCREF(__pyx_v_im); - __Pyx_GIVEREF(__pyx_v_im); - 
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_v_im); - __Pyx_INCREF(__pyx_v_top); - __Pyx_GIVEREF(__pyx_v_top); - PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_top); - __Pyx_INCREF(__pyx_v_bottom); - __Pyx_GIVEREF(__pyx_v_bottom); - PyTuple_SET_ITEM(__pyx_t_9, 2, __pyx_v_bottom); - __Pyx_INCREF(__pyx_v_left); - __Pyx_GIVEREF(__pyx_v_left); - PyTuple_SET_ITEM(__pyx_t_9, 3, __pyx_v_left); - __Pyx_INCREF(__pyx_v_right); - __Pyx_GIVEREF(__pyx_v_right); - PyTuple_SET_ITEM(__pyx_t_9, 4, __pyx_v_right); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_9, 5, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_value, __pyx_v_color) < 0) __PYX_ERR(0, 89, __pyx_L1_error) - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_9, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF_SET(__pyx_v_im, __pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":90 - * left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) - * im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border - * return im, ratio, (dw, dh) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_dw); - __Pyx_GIVEREF(__pyx_v_dw); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_dw); - __Pyx_INCREF(__pyx_v_dh); - __Pyx_GIVEREF(__pyx_v_dh); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_dh); - __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 90, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_im); - __Pyx_GIVEREF(__pyx_v_im); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_im); - __Pyx_INCREF(__pyx_v_ratio); - __Pyx_GIVEREF(__pyx_v_ratio); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_ratio); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":60 - * - * - * def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # <<<<<<<<<<<<<< - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.letterbox", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_shape); - __Pyx_XDECREF(__pyx_v_r); - __Pyx_XDECREF(__pyx_v_ratio); - __Pyx_XDECREF(__pyx_v_new_unpad); - __Pyx_XDECREF(__pyx_v_dw); - __Pyx_XDECREF(__pyx_v_dh); - __Pyx_XDECREF(__pyx_v_top); - __Pyx_XDECREF(__pyx_v_bottom); - __Pyx_XDECREF(__pyx_v_left); - __Pyx_XDECREF(__pyx_v_right); - __Pyx_XDECREF(__pyx_v_im); - __Pyx_XDECREF(__pyx_v_new_shape); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":93 - * - * - * def copy_paste(im, labels, segments, p=0.5): # <<<<<<<<<<<<<< - * # 
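
The generated C up to this point compiles the letterbox() helper whose original Python source survives in the interleaved `*` comments: the image is scaled by r = min(new/old) per axis (capped at 1.0 unless scaleup), the leftover width/height is reduced modulo stride when auto is set so the padded result stays a stride multiple, the padding is split evenly between the two sides, and cv2.copyMakeBorder fills the border with color. The sketch below simply reassembles that quoted source; the cv2/numpy imports are assumed to match the module header, which is not part of this hunk.

# Reassembled from the augmentations.py source lines quoted in the generated C above (a sketch,
# not the shipped module: the import header of augmentations.py is not shown in this hunk).
import cv2
import numpy as np


def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle: pad only up to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch to new_shape exactly, no padding
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)
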
Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_9copy_paste(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_9copy_paste = {"copy_paste", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_9copy_paste, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_9copy_paste(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_labels = 0; - PyObject *__pyx_v_segments = 0; - PyObject *__pyx_v_p = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy_paste (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,&__pyx_n_s_segments,&__pyx_n_s_p,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,&__pyx_n_s_segments,&__pyx_n_s_p,0}; - #endif - PyObject* values[4] = {0,0,0,0}; - values[3] = ((PyObject *)((PyObject*)__pyx_float_0_5)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 93, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 93, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("copy_paste", 0, 3, 4, 1); __PYX_ERR(0, 93, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_segments)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 93, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("copy_paste", 0, 3, 4, 2); __PYX_ERR(0, 93, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_p); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 93, 
__pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "copy_paste") < 0)) __PYX_ERR(0, 93, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_im = values[0]; - __pyx_v_labels = values[1]; - __pyx_v_segments = values[2]; - __pyx_v_p = values[3]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("copy_paste", 0, 3, 4, __pyx_nargs); __PYX_ERR(0, 93, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.copy_paste", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_8copy_paste(__pyx_self, __pyx_v_im, __pyx_v_labels, __pyx_v_segments, __pyx_v_p); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_8copy_paste(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels, PyObject *__pyx_v_segments, PyObject *__pyx_v_p) { - PyObject *__pyx_v_n = NULL; - CYTHON_UNUSED PyObject *__pyx_v_h = NULL; - PyObject *__pyx_v_w = NULL; - CYTHON_UNUSED PyObject *__pyx_v_c = NULL; - PyObject *__pyx_v_im_new = NULL; - PyObject *__pyx_v_j = NULL; - PyObject *__pyx_v_l = NULL; - PyObject *__pyx_v_s = NULL; - PyObject *__pyx_v_box = NULL; - PyObject *__pyx_v_ioa = NULL; - PyObject *__pyx_v_result = NULL; - PyObject *__pyx_v_i = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *(*__pyx_t_9)(PyObject *); - int __pyx_t_10; - PyObject *(*__pyx_t_11)(PyObject *); - PyObject *__pyx_t_12 = NULL; - int __pyx_t_13; - PyObject *__pyx_t_14 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy_paste", 0); - __Pyx_INCREF(__pyx_v_labels); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":95 - * def copy_paste(im, labels, segments, p=0.5): - * # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) # <<<<<<<<<<<<<< - * if p and n: - * h, w, c = im.shape # height, width, channels - */ - __pyx_t_1 = PyObject_Length(__pyx_v_segments); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 95, __pyx_L1_error) - __pyx_t_2 = PyInt_FromSsize_t(__pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 95, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_n = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":96 - * # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) - * if p and n: # <<<<<<<<<<<<<< - * h, w, c = im.shape # height, width, channels - * im_new = np.zeros(im.shape, np.uint8) - */ - 
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_p); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 96, __pyx_L1_error) - if (__pyx_t_4) { - } else { - __pyx_t_3 = __pyx_t_4; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_n); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(0, 96, __pyx_L1_error) - __pyx_t_3 = __pyx_t_4; - __pyx_L4_bool_binop_done:; - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":97 - * n = len(segments) - * if p and n: - * h, w, c = im.shape # height, width, channels # <<<<<<<<<<<<<< - * im_new = np.zeros(im.shape, np.uint8) - * for j in random.sample(range(n), k=round(p * n)): - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_im, __pyx_n_s_shape); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 3)) { - if (size > 3) __Pyx_RaiseTooManyValuesError(3); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 97, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_6 = PyTuple_GET_ITEM(sequence, 1); - __pyx_t_7 = PyTuple_GET_ITEM(sequence, 2); - } else { - __pyx_t_5 = PyList_GET_ITEM(sequence, 0); - __pyx_t_6 = PyList_GET_ITEM(sequence, 1); - __pyx_t_7 = PyList_GET_ITEM(sequence, 2); - } - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(__pyx_t_7); - #else - __pyx_t_5 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_8 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 97, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_9 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_8); - index = 0; __pyx_t_5 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_5)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_5); - index = 1; __pyx_t_6 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_6)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_6); - index = 2; __pyx_t_7 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L6_unpacking_failed; - __Pyx_GOTREF(__pyx_t_7); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_8), 3) < 0) __PYX_ERR(0, 97, __pyx_L1_error) - __pyx_t_9 = NULL; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_unpacking_done; - __pyx_L6_unpacking_failed:; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_9 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 97, __pyx_L1_error) - __pyx_L7_unpacking_done:; - } - __pyx_v_h = __pyx_t_5; - __pyx_t_5 = 0; - __pyx_v_w = __pyx_t_6; - __pyx_t_6 = 0; - __pyx_v_c = __pyx_t_7; - __pyx_t_7 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":98 - * if p and n: - * h, w, c = im.shape # height, width, channels - * im_new = np.zeros(im.shape, np.uint8) # <<<<<<<<<<<<<< - * for j in random.sample(range(n), k=round(p * n)): - * l, s = labels[j], 
segments[j] - */ - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_zeros); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_im, __pyx_n_s_shape); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_uint8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_t_7, __pyx_t_8}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_10, 2+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 98, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_v_im_new = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":99 - * h, w, c = im.shape # height, width, channels - * im_new = np.zeros(im.shape, np.uint8) - * for j in random.sample(range(n), k=round(p * n)): # <<<<<<<<<<<<<< - * l, s = labels[j], segments[j] - * box = w - l[3], l[2], w - l[1], l[4] - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_sample); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_range, __pyx_v_n); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_2); - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_7 = PyNumber_Multiply(__pyx_v_p, __pyx_v_n); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_builtin_round, __pyx_t_7); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_k, __pyx_t_5) < 0) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (likely(PyList_CheckExact(__pyx_t_5)) || PyTuple_CheckExact(__pyx_t_5)) { - __pyx_t_2 = __pyx_t_5; __Pyx_INCREF(__pyx_t_2); __pyx_t_1 = 0; - __pyx_t_11 = NULL; - } else { - __pyx_t_1 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_11 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 99, __pyx_L1_error) - } - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - for (;;) { - if (likely(!__pyx_t_11)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely((0 < 0))) __PYX_ERR(0, 99, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } else { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely((0 < 0))) __PYX_ERR(0, 99, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 99, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } - } else { - __pyx_t_5 = __pyx_t_11(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(0, 99, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - __Pyx_XDECREF_SET(__pyx_v_j, __pyx_t_5); - __pyx_t_5 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":100 - * im_new = np.zeros(im.shape, np.uint8) - * for j in random.sample(range(n), k=round(p * n)): - * l, s = labels[j], segments[j] # <<<<<<<<<<<<<< - * box = w - l[3], l[2], w - l[1], l[4] - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - */ - __pyx_t_5 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_v_j); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_v_segments, __pyx_v_j); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 100, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_XDECREF_SET(__pyx_v_l, __pyx_t_5); - __pyx_t_5 = 0; - __Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_8); - __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":101 - * for j in random.sample(range(n), k=round(p * n)): - * l, s = labels[j], segments[j] - * box = w - l[3], l[2], w - l[1], l[4] # <<<<<<<<<<<<<< - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - */ - __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_l, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_w, __pyx_t_8); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_l, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_l, 1, long, 
1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = PyNumber_Subtract(__pyx_v_w, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_GetItemInt(__pyx_v_l, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_12 = PyTuple_New(4); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 101, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_8); - PyTuple_SET_ITEM(__pyx_t_12, 1, __pyx_t_8); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_12, 2, __pyx_t_7); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_12, 3, __pyx_t_6); - __pyx_t_5 = 0; - __pyx_t_8 = 0; - __pyx_t_7 = 0; - __pyx_t_6 = 0; - __Pyx_XDECREF_SET(__pyx_v_box, ((PyObject*)__pyx_t_12)); - __pyx_t_12 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":102 - * l, s = labels[j], segments[j] - * box = w - l[3], l[2], w - l[1], l[4] - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area # <<<<<<<<<<<<<< - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - */ - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_bbox_ioa); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_tuple__13); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_8, __pyx_v_box, __pyx_t_7}; - __pyx_t_12 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_10, 2+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_ioa, __pyx_t_12); - __pyx_t_12 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":103 - * box = w - l[3], l[2], w - l[1], l[4] - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels # <<<<<<<<<<<<<< - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - */ - __pyx_t_6 = PyObject_RichCompare(__pyx_v_ioa, __pyx_float_0_30, Py_LT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 103, __pyx_L1_error) - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_all); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - 
__Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_6, }; - __pyx_t_12 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_10, 0+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(0, 103, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - if (__pyx_t_3) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":104 - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - * labels = np.concatenate((labels, [[l[0], *box]]), 0) # <<<<<<<<<<<<<< - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - * cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - */ - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_concatenate); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_8 = __Pyx_GetItemInt(__pyx_v_l, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_8); - PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_8); - __pyx_t_8 = 0; - __pyx_t_7 = __pyx_t_5; - __pyx_t_5 = 0; - if (__Pyx_PyList_Extend(__pyx_t_7, __pyx_v_box) < 0) __PYX_ERR(0, 104, __pyx_L1_error) - __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_7); - PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_7); - __pyx_t_7 = 0; - __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_INCREF(__pyx_v_labels); - __Pyx_GIVEREF(__pyx_v_labels); - PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_labels); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_t_5 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_5, __pyx_t_7, __pyx_int_0}; - __pyx_t_12 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_10, 2+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 104, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_DECREF_SET(__pyx_v_labels, __pyx_t_12); - __pyx_t_12 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":105 - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) # <<<<<<<<<<<<<< - * cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), 
cv2.FILLED) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_concatenate); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_s, __pyx_tuple__15); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = PyNumber_Subtract(__pyx_v_w, __pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_s, __pyx_tuple__17); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = PyTuple_New(2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); - __pyx_t_5 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_t_8, __pyx_int_1}; - __pyx_t_12 = __Pyx_PyObject_FastCall(__pyx_t_7, __pyx_callargs+1-__pyx_t_10, 2+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - } - __pyx_t_13 = __Pyx_PyObject_Append(__pyx_v_segments, __pyx_t_12); if (unlikely(__pyx_t_13 == ((int)-1))) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":106 - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - * cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) # <<<<<<<<<<<<<< - * - * result = cv2.bitwise_and(src1=im, src2=im_new) - */ - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_cv2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_drawContours); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_6 = __Pyx_PyObject_GetItem(__pyx_v_segments, __pyx_v_j); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_astype); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_int32); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && 
likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_t_14}; - __pyx_t_7 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_10, 1+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - } - __pyx_t_5 = PyList_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_7); - PyList_SET_ITEM(__pyx_t_5, 0, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_cv2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_FILLED); if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_14); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __pyx_t_7 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) { - __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_8); - if (likely(__pyx_t_7)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); - __Pyx_INCREF(__pyx_t_7); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_8, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[6] = {__pyx_t_7, __pyx_v_im_new, __pyx_t_5, __pyx_int_neg_1, __pyx_tuple__18, __pyx_t_14}; - __pyx_t_12 = __Pyx_PyObject_FastCall(__pyx_t_8, __pyx_callargs+1-__pyx_t_10, 5+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; - if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - } - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":103 - * box = w - l[3], l[2], w - l[1], l[4] - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels # <<<<<<<<<<<<<< - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":99 - * h, w, c = im.shape # height, width, channels - * im_new = np.zeros(im.shape, np.uint8) - * for j in random.sample(range(n), k=round(p * n)): # <<<<<<<<<<<<<< - * l, s = labels[j], segments[j] - * box = w - l[3], l[2], w - l[1], l[4] - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":108 - * cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - * - * result = cv2.bitwise_and(src1=im, src2=im_new) # <<<<<<<<<<<<<< - * result = cv2.flip(result, 1) # augment segments (flip left-right) - * i = result > 0 # pixels to replace - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cv2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_bitwise_and); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 108, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = 
__Pyx_PyDict_NewPresized(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_src1, __pyx_v_im) < 0) __PYX_ERR(0, 108, __pyx_L1_error) - if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_src2, __pyx_v_im_new) < 0) __PYX_ERR(0, 108, __pyx_L1_error) - __pyx_t_8 = __Pyx_PyObject_Call(__pyx_t_12, __pyx_empty_tuple, __pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 108, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_result = __pyx_t_8; - __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":109 - * - * result = cv2.bitwise_and(src1=im, src2=im_new) - * result = cv2.flip(result, 1) # augment segments (flip left-right) # <<<<<<<<<<<<<< - * i = result > 0 # pixels to replace - * # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_cv2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_flip); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_12); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_10 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_12))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_12); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_12); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_12, function); - __pyx_t_10 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_2, __pyx_v_result, __pyx_int_1}; - __pyx_t_8 = __Pyx_PyObject_FastCall(__pyx_t_12, __pyx_callargs+1-__pyx_t_10, 2+__pyx_t_10); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 109, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0; - } - __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_8); - __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":110 - * result = cv2.bitwise_and(src1=im, src2=im_new) - * result = cv2.flip(result, 1) # augment segments (flip left-right) - * i = result > 0 # pixels to replace # <<<<<<<<<<<<<< - * # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - * im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug - */ - __pyx_t_8 = PyObject_RichCompare(__pyx_v_result, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 110, __pyx_L1_error) - __pyx_v_i = __pyx_t_8; - __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":112 - * i = result > 0 # pixels to replace - * # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch - * im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug # <<<<<<<<<<<<<< - * - * return im, labels, segments - */ - __pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_v_result, __pyx_v_i); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - if (unlikely((PyObject_SetItem(__pyx_v_im, __pyx_v_i, __pyx_t_8) < 0))) __PYX_ERR(0, 112, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":96 - * # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) - * if p and n: # <<<<<<<<<<<<<< - * h, w, c = im.shape # height, width, channels - * im_new = np.zeros(im.shape, np.uint8) - */ - } - - /* 
"pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":114 - * im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug - * - * return im, labels, segments # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 114, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_im); - __Pyx_GIVEREF(__pyx_v_im); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_im); - __Pyx_INCREF(__pyx_v_labels); - __Pyx_GIVEREF(__pyx_v_labels); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_v_labels); - __Pyx_INCREF(__pyx_v_segments); - __Pyx_GIVEREF(__pyx_v_segments); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_v_segments); - __pyx_r = __pyx_t_8; - __pyx_t_8 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":93 - * - * - * def copy_paste(im, labels, segments, p=0.5): # <<<<<<<<<<<<<< - * # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_XDECREF(__pyx_t_12); - __Pyx_XDECREF(__pyx_t_14); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.copy_paste", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_n); - __Pyx_XDECREF(__pyx_v_h); - __Pyx_XDECREF(__pyx_v_w); - __Pyx_XDECREF(__pyx_v_c); - __Pyx_XDECREF(__pyx_v_im_new); - __Pyx_XDECREF(__pyx_v_j); - __Pyx_XDECREF(__pyx_v_l); - __Pyx_XDECREF(__pyx_v_s); - __Pyx_XDECREF(__pyx_v_box); - __Pyx_XDECREF(__pyx_v_ioa); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_i); - __Pyx_XDECREF(__pyx_v_labels); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":117 - * - * - * def cutout(im, labels, p=0.5): # <<<<<<<<<<<<<< - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_11cutout(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_11cutout = {"cutout", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_11cutout, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_11cutout(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_labels = 0; - PyObject *__pyx_v_p = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("cutout (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = 
{&__pyx_n_s_im,&__pyx_n_s_labels,&__pyx_n_s_p,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,&__pyx_n_s_p,0}; - #endif - PyObject* values[3] = {0,0,0}; - values[2] = ((PyObject *)((PyObject*)__pyx_float_0_5)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("cutout", 0, 2, 3, 1); __PYX_ERR(0, 117, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_p); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "cutout") < 0)) __PYX_ERR(0, 117, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_im = values[0]; - __pyx_v_labels = values[1]; - __pyx_v_p = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("cutout", 0, 2, 3, __pyx_nargs); __PYX_ERR(0, 117, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.cutout", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_10cutout(__pyx_self, __pyx_v_im, __pyx_v_labels, __pyx_v_p); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_10cutout(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels, PyObject *__pyx_v_p) { - PyObject *__pyx_v_h = NULL; - PyObject *__pyx_v_w = NULL; - PyObject *__pyx_v_scales = NULL; - PyObject *__pyx_v_s = NULL; - PyObject *__pyx_v_mask_h = NULL; - PyObject *__pyx_v_mask_w = NULL; - PyObject *__pyx_v_xmin = NULL; - PyObject *__pyx_v_ymin = NULL; - PyObject *__pyx_v_xmax = NULL; - PyObject *__pyx_v_ymax = NULL; - PyObject *__pyx_v_box = NULL; - PyObject *__pyx_v_ioa = NULL; - CYTHON_UNUSED long __pyx_7genexpr__pyx_v__; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int 
__pyx_t_5; - PyObject *__pyx_t_6 = NULL; - PyObject *(*__pyx_t_7)(PyObject *); - Py_ssize_t __pyx_t_8; - PyObject *__pyx_t_9 = NULL; - long __pyx_t_10; - Py_ssize_t __pyx_t_11; - int __pyx_t_12; - PyObject *__pyx_t_13 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("cutout", 0); - __Pyx_INCREF(__pyx_v_labels); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":119 - * def cutout(im, labels, p=0.5): - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: # <<<<<<<<<<<<<< - * h, w = im.shape[:2] - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - */ - __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_random); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_2)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[1] = {__pyx_t_2, }; - __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 0+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_v_p, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 119, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":120 - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: - * h, w = im.shape[:2] # <<<<<<<<<<<<<< - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - * for s in scales: - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_im, __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetSlice(__pyx_t_3, 0, 2, NULL, NULL, &__pyx_slice__6, 0, 1, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 120, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if ((likely(PyTuple_CheckExact(__pyx_t_1))) || (PyList_CheckExact(__pyx_t_1))) { - PyObject* sequence = __pyx_t_1; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(0, 120, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - if (likely(PyTuple_CheckExact(sequence))) { - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_2 = PyTuple_GET_ITEM(sequence, 1); - } else { - __pyx_t_3 = PyList_GET_ITEM(sequence, 0); - __pyx_t_2 = PyList_GET_ITEM(sequence, 1); - } - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_2); - #else - 
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 120, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 120, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } else { - Py_ssize_t index = -1; - __pyx_t_6 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 120, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_7 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_6); - index = 0; __pyx_t_3 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_3)) goto __pyx_L4_unpacking_failed; - __Pyx_GOTREF(__pyx_t_3); - index = 1; __pyx_t_2 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_2)) goto __pyx_L4_unpacking_failed; - __Pyx_GOTREF(__pyx_t_2); - if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 2) < 0) __PYX_ERR(0, 120, __pyx_L1_error) - __pyx_t_7 = NULL; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - goto __pyx_L5_unpacking_done; - __pyx_L4_unpacking_failed:; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_7 = NULL; - if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); - __PYX_ERR(0, 120, __pyx_L1_error) - __pyx_L5_unpacking_done:; - } - __pyx_v_h = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_v_w = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":121 - * if random.random() < p: - * h, w = im.shape[:2] - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction # <<<<<<<<<<<<<< - * for s in scales: - * mask_h = random.randint(1, int(h * s)) # create random masks - */ - __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_float_0_5); - __Pyx_GIVEREF(__pyx_float_0_5); - PyList_SET_ITEM(__pyx_t_1, 0, __pyx_float_0_5); - __pyx_t_2 = PyList_New(1 * 2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < 2; __pyx_temp++) { - __Pyx_INCREF(__pyx_float_0_25); - __Pyx_GIVEREF(__pyx_float_0_25); - PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_float_0_25); - } - } - __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyList_New(1 * 4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < 4; __pyx_temp++) { - __Pyx_INCREF(__pyx_float_0_125); - __Pyx_GIVEREF(__pyx_float_0_125); - PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_float_0_125); - } - } - __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyList_New(1 * 8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < 8; __pyx_temp++) { - __Pyx_INCREF(__pyx_float_0_0625); - __Pyx_GIVEREF(__pyx_float_0_0625); - PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_float_0_0625); - } - } - __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - 
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyList_New(1 * 16); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < 16; __pyx_temp++) { - __Pyx_INCREF(__pyx_float_0_03125); - __Pyx_GIVEREF(__pyx_float_0_03125); - PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_float_0_03125); - } - } - __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 121, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v_scales = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":122 - * h, w = im.shape[:2] - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - * for s in scales: # <<<<<<<<<<<<<< - * mask_h = random.randint(1, int(h * s)) # create random masks - * mask_w = random.randint(1, int(w * s)) - */ - __pyx_t_1 = __pyx_v_scales; __Pyx_INCREF(__pyx_t_1); __pyx_t_8 = 0; - for (;;) { - if (__pyx_t_8 >= PyList_GET_SIZE(__pyx_t_1)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_2 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++; if (unlikely((0 < 0))) __PYX_ERR(0, 122, __pyx_L1_error) - #else - __pyx_t_2 = PySequence_ITEM(__pyx_t_1, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 122, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - #endif - __Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":123 - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - * for s in scales: - * mask_h = random.randint(1, int(h * s)) # create random masks # <<<<<<<<<<<<<< - * mask_w = random.randint(1, int(w * s)) - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 123, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_randint); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 123, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Multiply(__pyx_v_h, __pyx_v_s); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 123, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = __Pyx_PyNumber_Int(__pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 123, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_int_1, __pyx_t_9}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 123, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_mask_h, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":124 - * for s in scales: - * mask_h = random.randint(1, int(h * s)) # create random masks - * mask_w = random.randint(1, 
int(w * s)) # <<<<<<<<<<<<<< - * - * # box - */ - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_random); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_randint); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = PyNumber_Multiply(__pyx_v_w, __pyx_v_s); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = __Pyx_PyNumber_Int(__pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_9); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_9, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_6, __pyx_int_1, __pyx_t_3}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_9, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 124, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_mask_w, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":127 - * - * # box - * xmin = max(0, random.randint(0, w) - mask_w // 2) # <<<<<<<<<<<<<< - * ymin = max(0, random.randint(0, h) - mask_h // 2) - * xmax = min(w, xmin + mask_w) - */ - __Pyx_GetModuleGlobalName(__pyx_t_9, __pyx_n_s_random); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_9, __pyx_n_s_randint); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_9)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_9); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_9, __pyx_int_0, __pyx_v_w}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_t_3 = __Pyx_PyInt_FloorDivideObjC(__pyx_v_mask_w, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = PyNumber_Subtract(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_10 = 0; - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_10); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = PyObject_RichCompare(__pyx_t_9, __pyx_t_2, Py_GT); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_5 = 
__Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (__pyx_t_5) { - __Pyx_INCREF(__pyx_t_9); - __pyx_t_3 = __pyx_t_9; - } else { - __pyx_t_6 = __Pyx_PyInt_From_long(__pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 127, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = __pyx_t_6; - __pyx_t_6 = 0; - } - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_9 = __pyx_t_3; - __Pyx_INCREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF_SET(__pyx_v_xmin, __pyx_t_9); - __pyx_t_9 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":128 - * # box - * xmin = max(0, random.randint(0, w) - mask_w // 2) - * ymin = max(0, random.randint(0, h) - mask_h // 2) # <<<<<<<<<<<<<< - * xmax = min(w, xmin + mask_w) - * ymax = min(h, ymin + mask_h) - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_random); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_randint); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_6); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_6, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_int_0, __pyx_v_h}; - __pyx_t_9 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - __pyx_t_6 = __Pyx_PyInt_FloorDivideObjC(__pyx_v_mask_h, __pyx_int_2, 2, 0, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_9, __pyx_t_6); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_10 = 0; - __pyx_t_9 = __Pyx_PyInt_From_long(__pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_2 = PyObject_RichCompare(__pyx_t_3, __pyx_t_9, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (__pyx_t_5) { - __Pyx_INCREF(__pyx_t_3); - __pyx_t_6 = __pyx_t_3; - } else { - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_t_10); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 128, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_6 = __pyx_t_2; - __pyx_t_2 = 0; - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __pyx_t_6; - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF_SET(__pyx_v_ymin, __pyx_t_3); - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":129 - * xmin = max(0, random.randint(0, w) - mask_w // 2) - * ymin = max(0, random.randint(0, h) - mask_h // 2) - * xmax = min(w, xmin + mask_w) # <<<<<<<<<<<<<< - * ymax = min(h, ymin + mask_h) - * - */ - __pyx_t_3 = 
PyNumber_Add(__pyx_v_xmin, __pyx_v_mask_w); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_w); - __pyx_t_6 = __pyx_v_w; - __pyx_t_9 = PyObject_RichCompare(__pyx_t_3, __pyx_t_6, Py_LT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 129, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 129, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_5) { - __Pyx_INCREF(__pyx_t_3); - __pyx_t_2 = __pyx_t_3; - } else { - __Pyx_INCREF(__pyx_t_6); - __pyx_t_2 = __pyx_t_6; - } - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __pyx_t_2; - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_XDECREF_SET(__pyx_v_xmax, __pyx_t_3); - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":130 - * ymin = max(0, random.randint(0, h) - mask_h // 2) - * xmax = min(w, xmin + mask_w) - * ymax = min(h, ymin + mask_h) # <<<<<<<<<<<<<< - * - * # apply random color mask - */ - __pyx_t_3 = PyNumber_Add(__pyx_v_ymin, __pyx_v_mask_h); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_h); - __pyx_t_2 = __pyx_v_h; - __pyx_t_9 = PyObject_RichCompare(__pyx_t_3, __pyx_t_2, Py_LT); __Pyx_XGOTREF(__pyx_t_9); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 130, __pyx_L1_error) - __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely((__pyx_t_5 < 0))) __PYX_ERR(0, 130, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - if (__pyx_t_5) { - __Pyx_INCREF(__pyx_t_3); - __pyx_t_6 = __pyx_t_3; - } else { - __Pyx_INCREF(__pyx_t_2); - __pyx_t_6 = __pyx_t_2; - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __pyx_t_6; - __Pyx_INCREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF_SET(__pyx_v_ymax, __pyx_t_3); - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":133 - * - * # apply random color mask - * im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] # <<<<<<<<<<<<<< - * - * # return unobscured labels - */ - { /* enter inner scope */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - for (__pyx_t_10 = 0; __pyx_t_10 < 3; __pyx_t_10+=1) { - __pyx_7genexpr__pyx_v__ = __pyx_t_10; - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_random); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_randint); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - } - } /* exit inner scope */ - __pyx_t_6 = PySlice_New(__pyx_v_ymin, __pyx_v_ymax, Py_None); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_2 = PySlice_New(__pyx_v_xmin, __pyx_v_xmax, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 
133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_t_2); - __pyx_t_6 = 0; - __pyx_t_2 = 0; - if (unlikely((PyObject_SetItem(__pyx_v_im, __pyx_t_9, __pyx_t_3) < 0))) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":136 - * - * # return unobscured labels - * if len(labels) and s > 0.03: # <<<<<<<<<<<<<< - * box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - */ - __pyx_t_11 = PyObject_Length(__pyx_v_labels); if (unlikely(__pyx_t_11 == ((Py_ssize_t)-1))) __PYX_ERR(0, 136, __pyx_L1_error) - __pyx_t_12 = (__pyx_t_11 != 0); - if (__pyx_t_12) { - } else { - __pyx_t_5 = __pyx_t_12; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_3 = PyObject_RichCompare(__pyx_v_s, __pyx_float_0_03, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 136, __pyx_L1_error) - __pyx_t_12 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_12 < 0))) __PYX_ERR(0, 136, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_5 = __pyx_t_12; - __pyx_L11_bool_binop_done:; - if (__pyx_t_5) { - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":137 - * # return unobscured labels - * if len(labels) and s > 0.03: - * box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) # <<<<<<<<<<<<<< - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - * labels = labels[ioa < 0.60] # remove >60% obscured labels - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_array); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyList_New(4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_xmin); - __Pyx_GIVEREF(__pyx_v_xmin); - PyList_SET_ITEM(__pyx_t_3, 0, __pyx_v_xmin); - __Pyx_INCREF(__pyx_v_ymin); - __Pyx_GIVEREF(__pyx_v_ymin); - PyList_SET_ITEM(__pyx_t_3, 1, __pyx_v_ymin); - __Pyx_INCREF(__pyx_v_xmax); - __Pyx_GIVEREF(__pyx_v_xmax); - PyList_SET_ITEM(__pyx_t_3, 2, __pyx_v_xmax); - __Pyx_INCREF(__pyx_v_ymax); - __Pyx_GIVEREF(__pyx_v_ymax); - PyList_SET_ITEM(__pyx_t_3, 3, __pyx_v_ymax); - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3); - __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_float32); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_dtype, __pyx_t_13) < 0) __PYX_ERR(0, 137, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __pyx_t_13 = __Pyx_PyObject_Call(__pyx_t_9, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 137, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF_SET(__pyx_v_box, __pyx_t_13); - __pyx_t_13 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":138 - * if len(labels) and s > 0.03: - * box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area # <<<<<<<<<<<<<< - * labels = labels[ioa < 0.60] # remove >60% obscured labels - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_bbox_ioa); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_tuple__13); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_9 = NULL; - __pyx_t_4 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { - __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_3); - if (likely(__pyx_t_9)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); - __Pyx_INCREF(__pyx_t_9); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_3, function); - __pyx_t_4 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_9, __pyx_v_box, __pyx_t_2}; - __pyx_t_13 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_4, 2+__pyx_t_4); - __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 138, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_13); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __Pyx_XDECREF_SET(__pyx_v_ioa, __pyx_t_13); - __pyx_t_13 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":139 - * box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - * labels = labels[ioa < 0.60] # remove >60% obscured labels # <<<<<<<<<<<<<< - * - * return labels - */ - __pyx_t_13 = PyObject_RichCompare(__pyx_v_ioa, __pyx_float_0_60, Py_LT); __Pyx_XGOTREF(__pyx_t_13); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 139, __pyx_L1_error) - __pyx_t_3 = __Pyx_PyObject_GetItem(__pyx_v_labels, __pyx_t_13); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; - __Pyx_DECREF_SET(__pyx_v_labels, __pyx_t_3); - __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":136 - * - * # return unobscured labels - * if len(labels) and s > 0.03: # <<<<<<<<<<<<<< - * box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":122 - * h, w = im.shape[:2] - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - * for s in scales: # <<<<<<<<<<<<<< - * mask_h = random.randint(1, int(h * s)) # create random masks - * mask_w = random.randint(1, int(w * s)) - */ - } - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":119 - * def cutout(im, labels, p=0.5): - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: # <<<<<<<<<<<<<< - * h, w = im.shape[:2] - * scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction - */ - } - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":141 - * labels = labels[ioa < 0.60] # remove >60% obscured labels - * - * return labels # <<<<<<<<<<<<<< - * - 
* - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_labels); - __pyx_r = __pyx_v_labels; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":117 - * - * - * def cutout(im, labels, p=0.5): # <<<<<<<<<<<<<< - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_XDECREF(__pyx_t_13); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.cutout", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_h); - __Pyx_XDECREF(__pyx_v_w); - __Pyx_XDECREF(__pyx_v_scales); - __Pyx_XDECREF(__pyx_v_s); - __Pyx_XDECREF(__pyx_v_mask_h); - __Pyx_XDECREF(__pyx_v_mask_w); - __Pyx_XDECREF(__pyx_v_xmin); - __Pyx_XDECREF(__pyx_v_ymin); - __Pyx_XDECREF(__pyx_v_xmax); - __Pyx_XDECREF(__pyx_v_ymax); - __Pyx_XDECREF(__pyx_v_box); - __Pyx_XDECREF(__pyx_v_ioa); - __Pyx_XDECREF(__pyx_v_labels); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":144 - * - * - * def mixup(im, labels, im2, labels2): # <<<<<<<<<<<<<< - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_13mixup(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef __pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_13mixup = {"mixup", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_13mixup, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_13mixup(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_im = 0; - PyObject *__pyx_v_labels = 0; - PyObject *__pyx_v_im2 = 0; - PyObject *__pyx_v_labels2 = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("mixup (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,&__pyx_n_s_im2,&__pyx_n_s_labels2,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_im,&__pyx_n_s_labels,&__pyx_n_s_im2,&__pyx_n_s_labels2,0}; - #endif - PyObject* values[4] = {0,0,0,0}; - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: 
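The block above compiles cutout(). A Python-level sketch reconstructed from the embedded source comments (augmentations.py lines 117-141), reusing numpy, random and the bbox_ioa stand-in from the copy_paste sketch earlier: it paints a series of progressively smaller random rectangles with a random grey-ish colour and drops any label that ends up more than 60% covered.

def cutout(im, labels, p=0.5):
    # Cutout augmentation (https://arxiv.org/abs/1708.04552)
    if random.random() < p:
        h, w = im.shape[:2]
        scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # mask size fractions
        for s in scales:
            mask_h = random.randint(1, int(h * s))  # create random masks
            mask_w = random.randint(1, int(w * s))

            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)

            # apply random color mask
            im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

            # keep only unobscured labels for the larger masks
            if len(labels) and s > 0.03:
                box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
                ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
                labels = labels[ioa < 0.60]          # remove labels obscured by more than 60%
    return labels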
break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("mixup", 1, 4, 4, 1); __PYX_ERR(0, 144, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_im2)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("mixup", 1, 4, 4, 2); __PYX_ERR(0, 144, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (likely((values[3] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_labels2)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 144, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("mixup", 1, 4, 4, 3); __PYX_ERR(0, 144, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "mixup") < 0)) __PYX_ERR(0, 144, __pyx_L3_error) - } - } else if (unlikely(__pyx_nargs != 4)) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - } - __pyx_v_im = values[0]; - __pyx_v_labels = values[1]; - __pyx_v_im2 = values[2]; - __pyx_v_labels2 = values[3]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("mixup", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 144, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.mixup", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_12mixup(__pyx_self, __pyx_v_im, __pyx_v_labels, __pyx_v_im2, __pyx_v_labels2); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_12mixup(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_im, PyObject *__pyx_v_labels, PyObject *__pyx_v_im2, PyObject *__pyx_v_labels2) { - PyObject *__pyx_v_r = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_t_5; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("mixup", 0); - __Pyx_INCREF(__pyx_v_im); - __Pyx_INCREF(__pyx_v_labels); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":146 - * def mixup(im, labels, im2, labels2): - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 # <<<<<<<<<<<<<< - * im = (im * r + im2 * (1 - r)).astype(np.uint8) - 
* labels = np.concatenate((labels, labels2), 0) - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_random); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_beta); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_r = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":147 - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - * im = (im * r + im2 * (1 - r)).astype(np.uint8) # <<<<<<<<<<<<<< - * labels = np.concatenate((labels, labels2), 0) - * return im, labels - */ - __pyx_t_1 = PyNumber_Multiply(__pyx_v_im, __pyx_v_r); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyInt_SubtractCObj(__pyx_int_1, __pyx_v_r, 1, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PyNumber_Multiply(__pyx_v_im2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyNumber_Add(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_astype); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_uint8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[2] = {__pyx_t_3, __pyx_t_1}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 147, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __Pyx_DECREF_SET(__pyx_v_im, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":148 - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - * im = (im * r + im2 * (1 - r)).astype(np.uint8) - * labels = np.concatenate((labels, labels2), 0) # <<<<<<<<<<<<<< - * return im, labels - * - */ - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_np); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_concatenate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_INCREF(__pyx_v_labels); - __Pyx_GIVEREF(__pyx_v_labels); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_labels); - __Pyx_INCREF(__pyx_v_labels2); - __Pyx_GIVEREF(__pyx_v_labels2); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_labels2); - __pyx_t_3 = NULL; - __pyx_t_5 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_1))) { - __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); - if (likely(__pyx_t_3)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_1, function); - __pyx_t_5 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_3, __pyx_t_4, __pyx_int_0}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_5, 2+__pyx_t_5); - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - } - __Pyx_DECREF_SET(__pyx_v_labels, __pyx_t_2); - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":149 - * im = (im * r + im2 * (1 - r)).astype(np.uint8) - * labels = np.concatenate((labels, labels2), 0) - * return im, labels # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_im); - __Pyx_GIVEREF(__pyx_v_im); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_im); - __Pyx_INCREF(__pyx_v_labels); - __Pyx_GIVEREF(__pyx_v_labels); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_labels); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":144 - * - * - * def mixup(im, labels, im2, labels2): # <<<<<<<<<<<<<< - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.mixup", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_r); - __Pyx_XDECREF(__pyx_v_im); - __Pyx_XDECREF(__pyx_v_labels); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":152 - * - * - * def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # <<<<<<<<<<<<<< - * # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_15box_candidates(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -); /*proto*/ -static PyMethodDef 
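The section above is the generated code for mixup(); the embedded comments give the full source (augmentations.py lines 144-149), restated here as a sketch. np.random.beta(32.0, 32.0) draws a ratio tightly concentrated around 0.5, so the two images contribute roughly equally while their label sets are simply concatenated.

def mixup(im, labels, im2, labels2):
    # MixUp augmentation (https://arxiv.org/pdf/1710.09412.pdf)
    r = np.random.beta(32.0, 32.0)  # mixup ratio, alpha = beta = 32.0
    im = (im * r + im2 * (1 - r)).astype(np.uint8)
    labels = np.concatenate((labels, labels2), 0)
    return im, labels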
__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_15box_candidates = {"box_candidates", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_15box_candidates, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_15box_candidates(PyObject *__pyx_self, -#if CYTHON_METH_FASTCALL -PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds -#else -PyObject *__pyx_args, PyObject *__pyx_kwds -#endif -) { - PyObject *__pyx_v_box1 = 0; - PyObject *__pyx_v_box2 = 0; - PyObject *__pyx_v_wh_thr = 0; - PyObject *__pyx_v_ar_thr = 0; - PyObject *__pyx_v_area_thr = 0; - PyObject *__pyx_v_eps = 0; - #if !CYTHON_METH_FASTCALL - CYTHON_UNUSED const Py_ssize_t __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); - #endif - CYTHON_UNUSED PyObject *const *__pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("box_candidates (wrapper)", 0); - { - #if CYTHON_USE_MODULE_STATE - PyObject **__pyx_pyargnames[] = {&__pyx_n_s_box1,&__pyx_n_s_box2,&__pyx_n_s_wh_thr,&__pyx_n_s_ar_thr,&__pyx_n_s_area_thr,&__pyx_n_s_eps,0}; - #else - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_box1,&__pyx_n_s_box2,&__pyx_n_s_wh_thr,&__pyx_n_s_ar_thr,&__pyx_n_s_area_thr,&__pyx_n_s_eps,0}; - #endif - PyObject* values[6] = {0,0,0,0,0,0}; - values[2] = ((PyObject *)((PyObject *)__pyx_int_2)); - values[3] = ((PyObject *)((PyObject *)__pyx_int_100)); - values[4] = ((PyObject *)((PyObject*)__pyx_float_0_1)); - values[5] = ((PyObject *)((PyObject*)__pyx_float_1eneg_16)); - if (__pyx_kwds) { - Py_ssize_t kw_args; - switch (__pyx_nargs) { - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); - switch (__pyx_nargs) { - case 0: - if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_box1)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 152, __pyx_L3_error) - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_box2)) != 0)) kw_args--; - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 152, __pyx_L3_error) - else { - __Pyx_RaiseArgtupleInvalid("box_candidates", 0, 2, 6, 1); __PYX_ERR(0, 152, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_wh_thr); - if (value) { values[2] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 152, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_ar_thr); - if (value) { values[3] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 152, 
__pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_area_thr); - if (value) { values[4] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 152, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 5: - if (kw_args > 0) { - PyObject* value = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_eps); - if (value) { values[5] = value; kw_args--; } - else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 152, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - const Py_ssize_t kwd_pos_args = __pyx_nargs; - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "box_candidates") < 0)) __PYX_ERR(0, 152, __pyx_L3_error) - } - } else { - switch (__pyx_nargs) { - case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); - CYTHON_FALLTHROUGH; - case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); - values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_box1 = values[0]; - __pyx_v_box2 = values[1]; - __pyx_v_wh_thr = values[2]; - __pyx_v_ar_thr = values[3]; - __pyx_v_area_thr = values[4]; - __pyx_v_eps = values[5]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("box_candidates", 0, 2, 6, __pyx_nargs); __PYX_ERR(0, 152, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.box_candidates", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_14box_candidates(__pyx_self, __pyx_v_box1, __pyx_v_box2, __pyx_v_wh_thr, __pyx_v_ar_thr, __pyx_v_area_thr, __pyx_v_eps); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_14box_candidates(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_box1, PyObject *__pyx_v_box2, PyObject *__pyx_v_wh_thr, PyObject *__pyx_v_ar_thr, PyObject *__pyx_v_area_thr, PyObject *__pyx_v_eps) { - PyObject *__pyx_v_w1 = NULL; - PyObject *__pyx_v_h1 = NULL; - PyObject *__pyx_v_w2 = NULL; - PyObject *__pyx_v_h2 = NULL; - PyObject *__pyx_v_ar = NULL; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("box_candidates", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":154 - * def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) - * # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] # <<<<<<<<<<<<<< - * w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - * ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - */ - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_box1, 2, long, 1, 
__Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_box1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_box1, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_box1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = PyNumber_Subtract(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 154, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_v_w1 = __pyx_t_3; - __pyx_t_3 = 0; - __pyx_v_h1 = __pyx_t_4; - __pyx_t_4 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":155 - * # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - * w2, h2 = box2[2] - box2[0], box2[3] - box2[1] # <<<<<<<<<<<<<< - * ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - * return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - */ - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_box2, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_box2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_1 = PyNumber_Subtract(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_box2, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_box2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyNumber_Subtract(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 155, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_v_w2 = __pyx_t_1; - __pyx_t_1 = 0; - __pyx_v_h2 = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":156 - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - * w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - * ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio # <<<<<<<<<<<<<< - * return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates - */ - __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_maximum); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 156, __pyx_L1_error) 
- __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_v_h2, __pyx_v_eps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_3 = __Pyx_PyNumber_Divide(__pyx_v_w2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = PyNumber_Add(__pyx_v_w2, __pyx_v_eps); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_5 = __Pyx_PyNumber_Divide(__pyx_v_h2, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = NULL; - __pyx_t_6 = 0; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { - __pyx_t_1 = PyMethod_GET_SELF(__pyx_t_4); - if (likely(__pyx_t_1)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); - __Pyx_INCREF(__pyx_t_1); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_4, function); - __pyx_t_6 = 1; - } - } - { - PyObject *__pyx_callargs[3] = {__pyx_t_1, __pyx_t_3, __pyx_t_5}; - __pyx_t_2 = __Pyx_PyObject_FastCall(__pyx_t_4, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6); - __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 156, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - } - __pyx_v_ar = __pyx_t_2; - __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":157 - * w2, h2 = box2[2] - box2[0], box2[3] - box2[1] - * ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio - * return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates # <<<<<<<<<<<<<< - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = PyObject_RichCompare(__pyx_v_w2, __pyx_v_wh_thr, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 157, __pyx_L1_error) - __pyx_t_4 = PyObject_RichCompare(__pyx_v_h2, __pyx_v_wh_thr, Py_GT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 157, __pyx_L1_error) - __pyx_t_5 = PyNumber_And(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __pyx_t_4 = PyNumber_Multiply(__pyx_v_w2, __pyx_v_h2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_2 = PyNumber_Multiply(__pyx_v_w1, __pyx_v_h1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_v_eps); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyNumber_Divide(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = PyObject_RichCompare(__pyx_t_2, __pyx_v_area_thr, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyNumber_And(__pyx_t_5, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - 
__pyx_t_3 = PyObject_RichCompare(__pyx_v_ar, __pyx_v_ar_thr, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 157, __pyx_L1_error) - __pyx_t_5 = PyNumber_And(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 157, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":152 - * - * - * def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # <<<<<<<<<<<<<< - * # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("pdf_toolbox.lib.dia_yolov5.utils.augmentations.box_candidates", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_w1); - __Pyx_XDECREF(__pyx_v_h1); - __Pyx_XDECREF(__pyx_v_w2); - __Pyx_XDECREF(__pyx_v_h2); - __Pyx_XDECREF(__pyx_v_ar); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyMethodDef __pyx_methods[] = { - {0, 0, 0, 0} -}; -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif -/* #### Code section: pystring_table ### */ - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - #if CYTHON_USE_MODULE_STATE - {0, __pyx_k_BORDER_CONSTANT, sizeof(__pyx_k_BORDER_CONSTANT), 0, 0, 1, 1}, - {0, __pyx_k_COLOR_BGR2HSV, sizeof(__pyx_k_COLOR_BGR2HSV), 0, 0, 1, 1}, - {0, __pyx_k_COLOR_BGR2YUV, sizeof(__pyx_k_COLOR_BGR2YUV), 0, 0, 1, 1}, - {0, __pyx_k_COLOR_HSV2BGR, sizeof(__pyx_k_COLOR_HSV2BGR), 0, 0, 1, 1}, - {0, __pyx_k_COLOR_RGB2YUV, sizeof(__pyx_k_COLOR_RGB2YUV), 0, 0, 1, 1}, - {0, __pyx_k_COLOR_YUV2BGR, sizeof(__pyx_k_COLOR_YUV2BGR), 0, 0, 1, 1}, - {0, __pyx_k_COLOR_YUV2RGB, sizeof(__pyx_k_COLOR_YUV2RGB), 0, 0, 1, 1}, - {0, __pyx_k_FILLED, sizeof(__pyx_k_FILLED), 0, 0, 1, 1}, - {0, __pyx_k_INTER_LINEAR, sizeof(__pyx_k_INTER_LINEAR), 0, 0, 1, 1}, - {0, __pyx_k_LOGGER, sizeof(__pyx_k_LOGGER), 0, 0, 1, 1}, - {0, __pyx_k_LUT, sizeof(__pyx_k_LUT), 0, 0, 1, 1}, - {0, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, - {0, __pyx_k__21, sizeof(__pyx_k__21), 0, 0, 1, 1}, - {0, __pyx_k__22, sizeof(__pyx_k__22), 0, 1, 0, 0}, - {0, __pyx_k__37, sizeof(__pyx_k__37), 0, 0, 1, 1}, - {0, __pyx_k__46, sizeof(__pyx_k__46), 0, 0, 1, 1}, - {0, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, - {0, __pyx_k_append, sizeof(__pyx_k_append), 0, 0, 1, 1}, - {0, __pyx_k_apply, sizeof(__pyx_k_apply), 0, 0, 1, 1}, - {0, __pyx_k_ar, sizeof(__pyx_k_ar), 0, 0, 1, 1}, - {0, __pyx_k_ar_thr, sizeof(__pyx_k_ar_thr), 0, 0, 1, 1}, - {0, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, - {0, __pyx_k_area_thr, sizeof(__pyx_k_area_thr), 0, 0, 1, 1}, - {0, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, - {0, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, - {0, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1}, - {0, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, - {0, __pyx_k_augment_hsv, 
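For reference, the Python function that this generated wrapper compiles can be read back from the embedded augmentations.py source comments above. A minimal sketch of that logic, assuming only NumPy (a readability reconstruction, not the compiled module itself):

import numpy as np

def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
    # box1(4,n) before augmentation, box2(4,n) after augmentation
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    # keep boxes that are wide/tall enough, retain enough area, and are not too elongated
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)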
sizeof(__pyx_k_augment_hsv), 0, 0, 1, 1}, - {0, __pyx_k_auto, sizeof(__pyx_k_auto), 0, 0, 1, 1}, - {0, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1}, - {0, __pyx_k_bbox_ioa, sizeof(__pyx_k_bbox_ioa), 0, 0, 1, 1}, - {0, __pyx_k_beta, sizeof(__pyx_k_beta), 0, 0, 1, 1}, - {0, __pyx_k_bgr, sizeof(__pyx_k_bgr), 0, 0, 1, 1}, - {0, __pyx_k_bh, sizeof(__pyx_k_bh), 0, 0, 1, 1}, - {0, __pyx_k_bitwise_and, sizeof(__pyx_k_bitwise_and), 0, 0, 1, 1}, - {0, __pyx_k_bottom, sizeof(__pyx_k_bottom), 0, 0, 1, 1}, - {0, __pyx_k_box, sizeof(__pyx_k_box), 0, 0, 1, 1}, - {0, __pyx_k_box1, sizeof(__pyx_k_box1), 0, 0, 1, 1}, - {0, __pyx_k_box2, sizeof(__pyx_k_box2), 0, 0, 1, 1}, - {0, __pyx_k_box_candidates, sizeof(__pyx_k_box_candidates), 0, 0, 1, 1}, - {0, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, - {0, __pyx_k_bw, sizeof(__pyx_k_bw), 0, 0, 1, 1}, - {0, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {0, __pyx_k_clahe, sizeof(__pyx_k_clahe), 0, 0, 1, 1}, - {0, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, - {0, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {0, __pyx_k_clip, sizeof(__pyx_k_clip), 0, 0, 1, 1}, - {0, __pyx_k_clipLimit, sizeof(__pyx_k_clipLimit), 0, 0, 1, 1}, - {0, __pyx_k_color, sizeof(__pyx_k_color), 0, 0, 1, 1}, - {0, __pyx_k_colorstr, sizeof(__pyx_k_colorstr), 0, 0, 1, 1}, - {0, __pyx_k_concatenate, sizeof(__pyx_k_concatenate), 0, 0, 1, 1}, - {0, __pyx_k_copyMakeBorder, sizeof(__pyx_k_copyMakeBorder), 0, 0, 1, 1}, - {0, __pyx_k_copy_paste, sizeof(__pyx_k_copy_paste), 0, 0, 1, 1}, - {0, __pyx_k_createCLAHE, sizeof(__pyx_k_createCLAHE), 0, 0, 1, 1}, - {0, __pyx_k_cutout, sizeof(__pyx_k_cutout), 0, 0, 1, 1}, - {0, __pyx_k_cv2, sizeof(__pyx_k_cv2), 0, 0, 1, 1}, - {0, __pyx_k_cvtColor, sizeof(__pyx_k_cvtColor), 0, 0, 1, 1}, - {0, __pyx_k_dh, sizeof(__pyx_k_dh), 0, 0, 1, 1}, - {0, __pyx_k_drawContours, sizeof(__pyx_k_drawContours), 0, 0, 1, 1}, - {0, __pyx_k_dst, sizeof(__pyx_k_dst), 0, 0, 1, 1}, - {0, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, - {0, __pyx_k_dw, sizeof(__pyx_k_dw), 0, 0, 1, 1}, - {0, __pyx_k_eps, sizeof(__pyx_k_eps), 0, 0, 1, 1}, - {0, __pyx_k_equalizeHist, sizeof(__pyx_k_equalizeHist), 0, 0, 1, 1}, - {0, __pyx_k_flip, sizeof(__pyx_k_flip), 0, 0, 1, 1}, - {0, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1}, - {0, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, - {0, __pyx_k_h1, sizeof(__pyx_k_h1), 0, 0, 1, 1}, - {0, __pyx_k_h2, sizeof(__pyx_k_h2), 0, 0, 1, 1}, - {0, __pyx_k_hgain, sizeof(__pyx_k_hgain), 0, 0, 1, 1}, - {0, __pyx_k_hist_equalize, sizeof(__pyx_k_hist_equalize), 0, 0, 1, 1}, - {0, __pyx_k_hue, sizeof(__pyx_k_hue), 0, 0, 1, 1}, - {0, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, - {0, __pyx_k_im, sizeof(__pyx_k_im), 0, 0, 1, 1}, - {0, __pyx_k_im2, sizeof(__pyx_k_im2), 0, 0, 1, 1}, - {0, __pyx_k_im_hsv, sizeof(__pyx_k_im_hsv), 0, 0, 1, 1}, - {0, __pyx_k_im_new, sizeof(__pyx_k_im_new), 0, 0, 1, 1}, - {0, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {0, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, - {0, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, - {0, __pyx_k_interpolation, sizeof(__pyx_k_interpolation), 0, 0, 1, 1}, - {0, __pyx_k_ioa, sizeof(__pyx_k_ioa), 0, 0, 1, 1}, - {0, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, - {0, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, - {0, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, - {0, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, - {0, __pyx_k_labels, sizeof(__pyx_k_labels), 0, 0, 1, 1}, - {0, __pyx_k_labels2, 
sizeof(__pyx_k_labels2), 0, 0, 1, 1}, - {0, __pyx_k_left, sizeof(__pyx_k_left), 0, 0, 1, 1}, - {0, __pyx_k_letterbox, sizeof(__pyx_k_letterbox), 0, 0, 1, 1}, - {0, __pyx_k_lut_hue, sizeof(__pyx_k_lut_hue), 0, 0, 1, 1}, - {0, __pyx_k_lut_sat, sizeof(__pyx_k_lut_sat), 0, 0, 1, 1}, - {0, __pyx_k_lut_val, sizeof(__pyx_k_lut_val), 0, 0, 1, 1}, - {0, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {0, __pyx_k_mask_h, sizeof(__pyx_k_mask_h), 0, 0, 1, 1}, - {0, __pyx_k_mask_w, sizeof(__pyx_k_mask_w), 0, 0, 1, 1}, - {0, __pyx_k_math, sizeof(__pyx_k_math), 0, 0, 1, 1}, - {0, __pyx_k_maximum, sizeof(__pyx_k_maximum), 0, 0, 1, 1}, - {0, __pyx_k_merge, sizeof(__pyx_k_merge), 0, 0, 1, 1}, - {0, __pyx_k_mixup, sizeof(__pyx_k_mixup), 0, 0, 1, 1}, - {0, __pyx_k_mod, sizeof(__pyx_k_mod), 0, 0, 1, 1}, - {0, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, - {0, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {0, __pyx_k_new_shape, sizeof(__pyx_k_new_shape), 0, 0, 1, 1}, - {0, __pyx_k_new_unpad, sizeof(__pyx_k_new_unpad), 0, 0, 1, 1}, - {0, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {0, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {0, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils), 0, 0, 1, 1}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2), 0, 0, 1, 1}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3), 0, 0, 1, 0}, - {0, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_4, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_4), 0, 0, 1, 1}, - {0, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1}, - {0, __pyx_k_randint, sizeof(__pyx_k_randint), 0, 0, 1, 1}, - {0, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1}, - {0, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {0, __pyx_k_ratio, sizeof(__pyx_k_ratio), 0, 0, 1, 1}, - {0, __pyx_k_replicate, sizeof(__pyx_k_replicate), 0, 0, 1, 1}, - {0, __pyx_k_resize, sizeof(__pyx_k_resize), 0, 0, 1, 1}, - {0, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1}, - {0, __pyx_k_right, sizeof(__pyx_k_right), 0, 0, 1, 1}, - {0, __pyx_k_round, sizeof(__pyx_k_round), 0, 0, 1, 1}, - {0, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, - {0, __pyx_k_sample, sizeof(__pyx_k_sample), 0, 0, 1, 1}, - {0, __pyx_k_sat, sizeof(__pyx_k_sat), 0, 0, 1, 1}, - {0, __pyx_k_scaleFill, sizeof(__pyx_k_scaleFill), 0, 0, 1, 1}, - {0, __pyx_k_scales, sizeof(__pyx_k_scales), 0, 0, 1, 1}, - {0, __pyx_k_scaleup, sizeof(__pyx_k_scaleup), 0, 0, 1, 1}, - {0, __pyx_k_segments, sizeof(__pyx_k_segments), 0, 0, 1, 1}, - {0, __pyx_k_sgain, sizeof(__pyx_k_sgain), 0, 0, 1, 1}, - {0, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {0, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, - {0, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, - {0, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, - {0, __pyx_k_src1, sizeof(__pyx_k_src1), 0, 0, 1, 1}, - {0, __pyx_k_src2, sizeof(__pyx_k_src2), 0, 0, 1, 1}, - {0, __pyx_k_stride, sizeof(__pyx_k_stride), 0, 0, 1, 1}, - {0, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {0, __pyx_k_tileGridSize, sizeof(__pyx_k_tileGridSize), 0, 0, 1, 1}, - {0, __pyx_k_top, sizeof(__pyx_k_top), 0, 0, 1, 1}, - {0, __pyx_k_uint8, sizeof(__pyx_k_uint8), 0, 0, 1, 1}, - {0, __pyx_k_uniform, sizeof(__pyx_k_uniform), 0, 0, 1, 1}, - {0, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, - {0, __pyx_k_value, sizeof(__pyx_k_value), 0, 0, 1, 1}, - {0, __pyx_k_vgain, sizeof(__pyx_k_vgain), 0, 0, 1, 1}, - {0, 
__pyx_k_w, sizeof(__pyx_k_w), 0, 0, 1, 1}, - {0, __pyx_k_w1, sizeof(__pyx_k_w1), 0, 0, 1, 1}, - {0, __pyx_k_w2, sizeof(__pyx_k_w2), 0, 0, 1, 1}, - {0, __pyx_k_wh_thr, sizeof(__pyx_k_wh_thr), 0, 0, 1, 1}, - {0, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, - {0, __pyx_k_x1, sizeof(__pyx_k_x1), 0, 0, 1, 1}, - {0, __pyx_k_x1a, sizeof(__pyx_k_x1a), 0, 0, 1, 1}, - {0, __pyx_k_x1b, sizeof(__pyx_k_x1b), 0, 0, 1, 1}, - {0, __pyx_k_x2, sizeof(__pyx_k_x2), 0, 0, 1, 1}, - {0, __pyx_k_x2a, sizeof(__pyx_k_x2a), 0, 0, 1, 1}, - {0, __pyx_k_x2b, sizeof(__pyx_k_x2b), 0, 0, 1, 1}, - {0, __pyx_k_xc, sizeof(__pyx_k_xc), 0, 0, 1, 1}, - {0, __pyx_k_xmax, sizeof(__pyx_k_xmax), 0, 0, 1, 1}, - {0, __pyx_k_xmin, sizeof(__pyx_k_xmin), 0, 0, 1, 1}, - {0, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1}, - {0, __pyx_k_y1a, sizeof(__pyx_k_y1a), 0, 0, 1, 1}, - {0, __pyx_k_y1b, sizeof(__pyx_k_y1b), 0, 0, 1, 1}, - {0, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, - {0, __pyx_k_y2a, sizeof(__pyx_k_y2a), 0, 0, 1, 1}, - {0, __pyx_k_y2b, sizeof(__pyx_k_y2b), 0, 0, 1, 1}, - {0, __pyx_k_yc, sizeof(__pyx_k_yc), 0, 0, 1, 1}, - {0, __pyx_k_ymax, sizeof(__pyx_k_ymax), 0, 0, 1, 1}, - {0, __pyx_k_ymin, sizeof(__pyx_k_ymin), 0, 0, 1, 1}, - {0, __pyx_k_yuv, sizeof(__pyx_k_yuv), 0, 0, 1, 1}, - {0, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, - #else - {&__pyx_n_s_BORDER_CONSTANT, __pyx_k_BORDER_CONSTANT, sizeof(__pyx_k_BORDER_CONSTANT), 0, 0, 1, 1}, - {&__pyx_n_s_COLOR_BGR2HSV, __pyx_k_COLOR_BGR2HSV, sizeof(__pyx_k_COLOR_BGR2HSV), 0, 0, 1, 1}, - {&__pyx_n_s_COLOR_BGR2YUV, __pyx_k_COLOR_BGR2YUV, sizeof(__pyx_k_COLOR_BGR2YUV), 0, 0, 1, 1}, - {&__pyx_n_s_COLOR_HSV2BGR, __pyx_k_COLOR_HSV2BGR, sizeof(__pyx_k_COLOR_HSV2BGR), 0, 0, 1, 1}, - {&__pyx_n_s_COLOR_RGB2YUV, __pyx_k_COLOR_RGB2YUV, sizeof(__pyx_k_COLOR_RGB2YUV), 0, 0, 1, 1}, - {&__pyx_n_s_COLOR_YUV2BGR, __pyx_k_COLOR_YUV2BGR, sizeof(__pyx_k_COLOR_YUV2BGR), 0, 0, 1, 1}, - {&__pyx_n_s_COLOR_YUV2RGB, __pyx_k_COLOR_YUV2RGB, sizeof(__pyx_k_COLOR_YUV2RGB), 0, 0, 1, 1}, - {&__pyx_n_s_FILLED, __pyx_k_FILLED, sizeof(__pyx_k_FILLED), 0, 0, 1, 1}, - {&__pyx_n_s_INTER_LINEAR, __pyx_k_INTER_LINEAR, sizeof(__pyx_k_INTER_LINEAR), 0, 0, 1, 1}, - {&__pyx_n_s_LOGGER, __pyx_k_LOGGER, sizeof(__pyx_k_LOGGER), 0, 0, 1, 1}, - {&__pyx_n_s_LUT, __pyx_k_LUT, sizeof(__pyx_k_LUT), 0, 0, 1, 1}, - {&__pyx_n_s_T, __pyx_k_T, sizeof(__pyx_k_T), 0, 0, 1, 1}, - {&__pyx_n_s__21, __pyx_k__21, sizeof(__pyx_k__21), 0, 0, 1, 1}, - {&__pyx_kp_u__22, __pyx_k__22, sizeof(__pyx_k__22), 0, 1, 0, 0}, - {&__pyx_n_s__37, __pyx_k__37, sizeof(__pyx_k__37), 0, 0, 1, 1}, - {&__pyx_n_s__46, __pyx_k__46, sizeof(__pyx_k__46), 0, 0, 1, 1}, - {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1}, - {&__pyx_n_s_append, __pyx_k_append, sizeof(__pyx_k_append), 0, 0, 1, 1}, - {&__pyx_n_s_apply, __pyx_k_apply, sizeof(__pyx_k_apply), 0, 0, 1, 1}, - {&__pyx_n_s_ar, __pyx_k_ar, sizeof(__pyx_k_ar), 0, 0, 1, 1}, - {&__pyx_n_s_ar_thr, __pyx_k_ar_thr, sizeof(__pyx_k_ar_thr), 0, 0, 1, 1}, - {&__pyx_n_s_arange, __pyx_k_arange, sizeof(__pyx_k_arange), 0, 0, 1, 1}, - {&__pyx_n_s_area_thr, __pyx_k_area_thr, sizeof(__pyx_k_area_thr), 0, 0, 1, 1}, - {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, - {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, - {&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1}, - {&__pyx_n_s_asyncio_coroutines, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, - {&__pyx_n_s_augment_hsv, __pyx_k_augment_hsv, sizeof(__pyx_k_augment_hsv), 
0, 0, 1, 1}, - {&__pyx_n_s_auto, __pyx_k_auto, sizeof(__pyx_k_auto), 0, 0, 1, 1}, - {&__pyx_n_s_axis, __pyx_k_axis, sizeof(__pyx_k_axis), 0, 0, 1, 1}, - {&__pyx_n_s_bbox_ioa, __pyx_k_bbox_ioa, sizeof(__pyx_k_bbox_ioa), 0, 0, 1, 1}, - {&__pyx_n_s_beta, __pyx_k_beta, sizeof(__pyx_k_beta), 0, 0, 1, 1}, - {&__pyx_n_s_bgr, __pyx_k_bgr, sizeof(__pyx_k_bgr), 0, 0, 1, 1}, - {&__pyx_n_s_bh, __pyx_k_bh, sizeof(__pyx_k_bh), 0, 0, 1, 1}, - {&__pyx_n_s_bitwise_and, __pyx_k_bitwise_and, sizeof(__pyx_k_bitwise_and), 0, 0, 1, 1}, - {&__pyx_n_s_bottom, __pyx_k_bottom, sizeof(__pyx_k_bottom), 0, 0, 1, 1}, - {&__pyx_n_s_box, __pyx_k_box, sizeof(__pyx_k_box), 0, 0, 1, 1}, - {&__pyx_n_s_box1, __pyx_k_box1, sizeof(__pyx_k_box1), 0, 0, 1, 1}, - {&__pyx_n_s_box2, __pyx_k_box2, sizeof(__pyx_k_box2), 0, 0, 1, 1}, - {&__pyx_n_s_box_candidates, __pyx_k_box_candidates, sizeof(__pyx_k_box_candidates), 0, 0, 1, 1}, - {&__pyx_n_s_boxes, __pyx_k_boxes, sizeof(__pyx_k_boxes), 0, 0, 1, 1}, - {&__pyx_n_s_bw, __pyx_k_bw, sizeof(__pyx_k_bw), 0, 0, 1, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_s_clahe, __pyx_k_clahe, sizeof(__pyx_k_clahe), 0, 0, 1, 1}, - {&__pyx_n_s_class_getitem, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_n_s_clip, __pyx_k_clip, sizeof(__pyx_k_clip), 0, 0, 1, 1}, - {&__pyx_n_s_clipLimit, __pyx_k_clipLimit, sizeof(__pyx_k_clipLimit), 0, 0, 1, 1}, - {&__pyx_n_s_color, __pyx_k_color, sizeof(__pyx_k_color), 0, 0, 1, 1}, - {&__pyx_n_s_colorstr, __pyx_k_colorstr, sizeof(__pyx_k_colorstr), 0, 0, 1, 1}, - {&__pyx_n_s_concatenate, __pyx_k_concatenate, sizeof(__pyx_k_concatenate), 0, 0, 1, 1}, - {&__pyx_n_s_copyMakeBorder, __pyx_k_copyMakeBorder, sizeof(__pyx_k_copyMakeBorder), 0, 0, 1, 1}, - {&__pyx_n_s_copy_paste, __pyx_k_copy_paste, sizeof(__pyx_k_copy_paste), 0, 0, 1, 1}, - {&__pyx_n_s_createCLAHE, __pyx_k_createCLAHE, sizeof(__pyx_k_createCLAHE), 0, 0, 1, 1}, - {&__pyx_n_s_cutout, __pyx_k_cutout, sizeof(__pyx_k_cutout), 0, 0, 1, 1}, - {&__pyx_n_s_cv2, __pyx_k_cv2, sizeof(__pyx_k_cv2), 0, 0, 1, 1}, - {&__pyx_n_s_cvtColor, __pyx_k_cvtColor, sizeof(__pyx_k_cvtColor), 0, 0, 1, 1}, - {&__pyx_n_s_dh, __pyx_k_dh, sizeof(__pyx_k_dh), 0, 0, 1, 1}, - {&__pyx_n_s_drawContours, __pyx_k_drawContours, sizeof(__pyx_k_drawContours), 0, 0, 1, 1}, - {&__pyx_n_s_dst, __pyx_k_dst, sizeof(__pyx_k_dst), 0, 0, 1, 1}, - {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, - {&__pyx_n_s_dw, __pyx_k_dw, sizeof(__pyx_k_dw), 0, 0, 1, 1}, - {&__pyx_n_s_eps, __pyx_k_eps, sizeof(__pyx_k_eps), 0, 0, 1, 1}, - {&__pyx_n_s_equalizeHist, __pyx_k_equalizeHist, sizeof(__pyx_k_equalizeHist), 0, 0, 1, 1}, - {&__pyx_n_s_flip, __pyx_k_flip, sizeof(__pyx_k_flip), 0, 0, 1, 1}, - {&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1}, - {&__pyx_n_s_h, __pyx_k_h, sizeof(__pyx_k_h), 0, 0, 1, 1}, - {&__pyx_n_s_h1, __pyx_k_h1, sizeof(__pyx_k_h1), 0, 0, 1, 1}, - {&__pyx_n_s_h2, __pyx_k_h2, sizeof(__pyx_k_h2), 0, 0, 1, 1}, - {&__pyx_n_s_hgain, __pyx_k_hgain, sizeof(__pyx_k_hgain), 0, 0, 1, 1}, - {&__pyx_n_s_hist_equalize, __pyx_k_hist_equalize, sizeof(__pyx_k_hist_equalize), 0, 0, 1, 1}, - {&__pyx_n_s_hue, __pyx_k_hue, sizeof(__pyx_k_hue), 0, 0, 1, 1}, - {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, - {&__pyx_n_s_im, __pyx_k_im, sizeof(__pyx_k_im), 0, 0, 1, 1}, - {&__pyx_n_s_im2, __pyx_k_im2, sizeof(__pyx_k_im2), 0, 0, 1, 1}, - 
{&__pyx_n_s_im_hsv, __pyx_k_im_hsv, sizeof(__pyx_k_im_hsv), 0, 0, 1, 1}, - {&__pyx_n_s_im_new, __pyx_k_im_new, sizeof(__pyx_k_im_new), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_initializing, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, - {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, - {&__pyx_n_s_interpolation, __pyx_k_interpolation, sizeof(__pyx_k_interpolation), 0, 0, 1, 1}, - {&__pyx_n_s_ioa, __pyx_k_ioa, sizeof(__pyx_k_ioa), 0, 0, 1, 1}, - {&__pyx_n_s_is_coroutine, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, - {&__pyx_n_s_j, __pyx_k_j, sizeof(__pyx_k_j), 0, 0, 1, 1}, - {&__pyx_n_s_k, __pyx_k_k, sizeof(__pyx_k_k), 0, 0, 1, 1}, - {&__pyx_n_s_l, __pyx_k_l, sizeof(__pyx_k_l), 0, 0, 1, 1}, - {&__pyx_n_s_labels, __pyx_k_labels, sizeof(__pyx_k_labels), 0, 0, 1, 1}, - {&__pyx_n_s_labels2, __pyx_k_labels2, sizeof(__pyx_k_labels2), 0, 0, 1, 1}, - {&__pyx_n_s_left, __pyx_k_left, sizeof(__pyx_k_left), 0, 0, 1, 1}, - {&__pyx_n_s_letterbox, __pyx_k_letterbox, sizeof(__pyx_k_letterbox), 0, 0, 1, 1}, - {&__pyx_n_s_lut_hue, __pyx_k_lut_hue, sizeof(__pyx_k_lut_hue), 0, 0, 1, 1}, - {&__pyx_n_s_lut_sat, __pyx_k_lut_sat, sizeof(__pyx_k_lut_sat), 0, 0, 1, 1}, - {&__pyx_n_s_lut_val, __pyx_k_lut_val, sizeof(__pyx_k_lut_val), 0, 0, 1, 1}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_mask_h, __pyx_k_mask_h, sizeof(__pyx_k_mask_h), 0, 0, 1, 1}, - {&__pyx_n_s_mask_w, __pyx_k_mask_w, sizeof(__pyx_k_mask_w), 0, 0, 1, 1}, - {&__pyx_n_s_math, __pyx_k_math, sizeof(__pyx_k_math), 0, 0, 1, 1}, - {&__pyx_n_s_maximum, __pyx_k_maximum, sizeof(__pyx_k_maximum), 0, 0, 1, 1}, - {&__pyx_n_s_merge, __pyx_k_merge, sizeof(__pyx_k_merge), 0, 0, 1, 1}, - {&__pyx_n_s_mixup, __pyx_k_mixup, sizeof(__pyx_k_mixup), 0, 0, 1, 1}, - {&__pyx_n_s_mod, __pyx_k_mod, sizeof(__pyx_k_mod), 0, 0, 1, 1}, - {&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_new_shape, __pyx_k_new_shape, sizeof(__pyx_k_new_shape), 0, 0, 1, 1}, - {&__pyx_n_s_new_unpad, __pyx_k_new_unpad, sizeof(__pyx_k_new_unpad), 0, 0, 1, 1}, - {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, - {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, - {&__pyx_n_s_p, __pyx_k_p, sizeof(__pyx_k_p), 0, 0, 1, 1}, - {&__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils), 0, 0, 1, 1}, - {&__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_2), 0, 0, 1, 1}, - {&__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_3), 0, 0, 1, 0}, - {&__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_k_pdf_toolbox_lib_dia_yolov5_utils_4, sizeof(__pyx_k_pdf_toolbox_lib_dia_yolov5_utils_4), 0, 0, 1, 1}, - {&__pyx_n_s_r, __pyx_k_r, sizeof(__pyx_k_r), 0, 0, 1, 1}, - {&__pyx_n_s_randint, __pyx_k_randint, sizeof(__pyx_k_randint), 0, 0, 1, 1}, - {&__pyx_n_s_random, __pyx_k_random, sizeof(__pyx_k_random), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_ratio, __pyx_k_ratio, sizeof(__pyx_k_ratio), 0, 0, 1, 1}, - {&__pyx_n_s_replicate, __pyx_k_replicate, sizeof(__pyx_k_replicate), 0, 0, 1, 1}, - {&__pyx_n_s_resize, __pyx_k_resize, 
sizeof(__pyx_k_resize), 0, 0, 1, 1}, - {&__pyx_n_s_result, __pyx_k_result, sizeof(__pyx_k_result), 0, 0, 1, 1}, - {&__pyx_n_s_right, __pyx_k_right, sizeof(__pyx_k_right), 0, 0, 1, 1}, - {&__pyx_n_s_round, __pyx_k_round, sizeof(__pyx_k_round), 0, 0, 1, 1}, - {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, - {&__pyx_n_s_sample, __pyx_k_sample, sizeof(__pyx_k_sample), 0, 0, 1, 1}, - {&__pyx_n_s_sat, __pyx_k_sat, sizeof(__pyx_k_sat), 0, 0, 1, 1}, - {&__pyx_n_s_scaleFill, __pyx_k_scaleFill, sizeof(__pyx_k_scaleFill), 0, 0, 1, 1}, - {&__pyx_n_s_scales, __pyx_k_scales, sizeof(__pyx_k_scales), 0, 0, 1, 1}, - {&__pyx_n_s_scaleup, __pyx_k_scaleup, sizeof(__pyx_k_scaleup), 0, 0, 1, 1}, - {&__pyx_n_s_segments, __pyx_k_segments, sizeof(__pyx_k_segments), 0, 0, 1, 1}, - {&__pyx_n_s_sgain, __pyx_k_sgain, sizeof(__pyx_k_sgain), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, - {&__pyx_n_s_spec, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, - {&__pyx_n_s_split, __pyx_k_split, sizeof(__pyx_k_split), 0, 0, 1, 1}, - {&__pyx_n_s_src1, __pyx_k_src1, sizeof(__pyx_k_src1), 0, 0, 1, 1}, - {&__pyx_n_s_src2, __pyx_k_src2, sizeof(__pyx_k_src2), 0, 0, 1, 1}, - {&__pyx_n_s_stride, __pyx_k_stride, sizeof(__pyx_k_stride), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_n_s_tileGridSize, __pyx_k_tileGridSize, sizeof(__pyx_k_tileGridSize), 0, 0, 1, 1}, - {&__pyx_n_s_top, __pyx_k_top, sizeof(__pyx_k_top), 0, 0, 1, 1}, - {&__pyx_n_s_uint8, __pyx_k_uint8, sizeof(__pyx_k_uint8), 0, 0, 1, 1}, - {&__pyx_n_s_uniform, __pyx_k_uniform, sizeof(__pyx_k_uniform), 0, 0, 1, 1}, - {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1}, - {&__pyx_n_s_value, __pyx_k_value, sizeof(__pyx_k_value), 0, 0, 1, 1}, - {&__pyx_n_s_vgain, __pyx_k_vgain, sizeof(__pyx_k_vgain), 0, 0, 1, 1}, - {&__pyx_n_s_w, __pyx_k_w, sizeof(__pyx_k_w), 0, 0, 1, 1}, - {&__pyx_n_s_w1, __pyx_k_w1, sizeof(__pyx_k_w1), 0, 0, 1, 1}, - {&__pyx_n_s_w2, __pyx_k_w2, sizeof(__pyx_k_w2), 0, 0, 1, 1}, - {&__pyx_n_s_wh_thr, __pyx_k_wh_thr, sizeof(__pyx_k_wh_thr), 0, 0, 1, 1}, - {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, - {&__pyx_n_s_x1, __pyx_k_x1, sizeof(__pyx_k_x1), 0, 0, 1, 1}, - {&__pyx_n_s_x1a, __pyx_k_x1a, sizeof(__pyx_k_x1a), 0, 0, 1, 1}, - {&__pyx_n_s_x1b, __pyx_k_x1b, sizeof(__pyx_k_x1b), 0, 0, 1, 1}, - {&__pyx_n_s_x2, __pyx_k_x2, sizeof(__pyx_k_x2), 0, 0, 1, 1}, - {&__pyx_n_s_x2a, __pyx_k_x2a, sizeof(__pyx_k_x2a), 0, 0, 1, 1}, - {&__pyx_n_s_x2b, __pyx_k_x2b, sizeof(__pyx_k_x2b), 0, 0, 1, 1}, - {&__pyx_n_s_xc, __pyx_k_xc, sizeof(__pyx_k_xc), 0, 0, 1, 1}, - {&__pyx_n_s_xmax, __pyx_k_xmax, sizeof(__pyx_k_xmax), 0, 0, 1, 1}, - {&__pyx_n_s_xmin, __pyx_k_xmin, sizeof(__pyx_k_xmin), 0, 0, 1, 1}, - {&__pyx_n_s_y1, __pyx_k_y1, sizeof(__pyx_k_y1), 0, 0, 1, 1}, - {&__pyx_n_s_y1a, __pyx_k_y1a, sizeof(__pyx_k_y1a), 0, 0, 1, 1}, - {&__pyx_n_s_y1b, __pyx_k_y1b, sizeof(__pyx_k_y1b), 0, 0, 1, 1}, - {&__pyx_n_s_y2, __pyx_k_y2, sizeof(__pyx_k_y2), 0, 0, 1, 1}, - {&__pyx_n_s_y2a, __pyx_k_y2a, sizeof(__pyx_k_y2a), 0, 0, 1, 1}, - {&__pyx_n_s_y2b, __pyx_k_y2b, sizeof(__pyx_k_y2b), 0, 0, 1, 1}, - {&__pyx_n_s_yc, __pyx_k_yc, sizeof(__pyx_k_yc), 0, 0, 1, 1}, - {&__pyx_n_s_ymax, __pyx_k_ymax, sizeof(__pyx_k_ymax), 0, 0, 1, 1}, - {&__pyx_n_s_ymin, __pyx_k_ymin, sizeof(__pyx_k_ymin), 0, 0, 1, 1}, - {&__pyx_n_s_yuv, __pyx_k_yuv, sizeof(__pyx_k_yuv), 0, 0, 1, 1}, - {&__pyx_n_s_zeros, __pyx_k_zeros, 
sizeof(__pyx_k_zeros), 0, 0, 1, 1}, - #endif - {0, 0, 0, 0, 0, 0, 0} -}; -/* #### Code section: cached_builtins ### */ -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_round = __Pyx_GetBuiltinName(__pyx_n_s_round); if (!__pyx_builtin_round) __PYX_ERR(0, 49, __pyx_L1_error) - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 99, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: cached_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":19 - * # HSV color-space augmentation - * if hgain or sgain or vgain: - * r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains # <<<<<<<<<<<<<< - * hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) - * dtype = im.dtype # uint8 - */ - __pyx_tuple_ = PyTuple_Pack(3, __pyx_int_neg_1, __pyx_int_1, __pyx_int_3); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 19, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple_); - __Pyx_GIVEREF(__pyx_tuple_); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":23 - * dtype = im.dtype # uint8 - * - * x = np.arange(0, 256, dtype=r.dtype) # <<<<<<<<<<<<<< - * lut_hue = ((x * r[0]) % 180).astype(dtype) - * lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) - */ - __pyx_tuple__2 = PyTuple_Pack(2, __pyx_int_0, __pyx_int_256); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 23, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":36 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - * if clahe: - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) # <<<<<<<<<<<<<< - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) - * else: - */ - __pyx_tuple__3 = PyTuple_Pack(2, __pyx_int_8, __pyx_int_8); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 36, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__3); - __Pyx_GIVEREF(__pyx_tuple__3); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":37 - * if clahe: - * c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) - * yuv[:, :, 0] = c.apply(yuv[:, :, 0]) # <<<<<<<<<<<<<< - * else: - * yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram - */ - __pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__4); - __Pyx_GIVEREF(__pyx_slice__4); - __pyx_tuple__5 = PyTuple_Pack(3, __pyx_slice__4, __pyx_slice__4, __pyx_int_0); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 37, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":45 - * def replicate(im, labels): - * # Replicate labels - * h, w = im.shape[:2] # <<<<<<<<<<<<<< - * boxes = labels[:, 1:].astype(int) - * x1, y1, x2, y2 = boxes.T - */ - __pyx_slice__6 = PySlice_New(Py_None, __pyx_int_2, Py_None); if (unlikely(!__pyx_slice__6)) __PYX_ERR(0, 45, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__6); - __Pyx_GIVEREF(__pyx_slice__6); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":46 - * # Replicate labels - * h, w = im.shape[:2] - * boxes = labels[:, 1:].astype(int) # <<<<<<<<<<<<<< - * x1, y1, x2, y2 = boxes.T - * s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) - */ - __pyx_slice__7 = PySlice_New(__pyx_int_1, Py_None, 
Py_None); if (unlikely(!__pyx_slice__7)) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__7); - __Pyx_GIVEREF(__pyx_slice__7); - __pyx_tuple__8 = PyTuple_Pack(2, __pyx_slice__4, __pyx_slice__7); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 46, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":60 - * - * - * def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # <<<<<<<<<<<<<< - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - */ - __pyx_tuple__9 = PyTuple_Pack(2, __pyx_int_640, __pyx_int_640); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__9); - __Pyx_GIVEREF(__pyx_tuple__9); - __pyx_tuple__10 = PyTuple_Pack(3, __pyx_int_114, __pyx_int_114, __pyx_int_114); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":85 - * dh /= 2 - * - * if shape[::-1] != new_unpad: # resize # <<<<<<<<<<<<<< - * im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) - * top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) - */ - __pyx_slice__11 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__11)) __PYX_ERR(0, 85, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__11); - __Pyx_GIVEREF(__pyx_slice__11); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":102 - * l, s = labels[j], segments[j] - * box = w - l[3], l[2], w - l[1], l[4] - * ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area # <<<<<<<<<<<<<< - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - */ - __pyx_slice__12 = PySlice_New(__pyx_int_1, __pyx_int_5, Py_None); if (unlikely(!__pyx_slice__12)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__12); - __Pyx_GIVEREF(__pyx_slice__12); - __pyx_tuple__13 = PyTuple_Pack(2, __pyx_slice__4, __pyx_slice__12); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 102, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":105 - * if (ioa < 0.30).all(): # allow 30% obscuration of existing labels - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) # <<<<<<<<<<<<<< - * cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) - * - */ - __pyx_slice__14 = PySlice_New(__pyx_int_0, __pyx_int_1, Py_None); if (unlikely(!__pyx_slice__14)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__14); - __Pyx_GIVEREF(__pyx_slice__14); - __pyx_tuple__15 = PyTuple_Pack(2, __pyx_slice__4, __pyx_slice__14); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_GIVEREF(__pyx_tuple__15); - __pyx_slice__16 = PySlice_New(__pyx_int_1, __pyx_int_2, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - __pyx_tuple__17 = PyTuple_Pack(2, __pyx_slice__4, __pyx_slice__16); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(0, 105, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__17); - __Pyx_GIVEREF(__pyx_tuple__17); - - /* 
"pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":106 - * labels = np.concatenate((labels, [[l[0], *box]]), 0) - * segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) - * cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) # <<<<<<<<<<<<<< - * - * result = cv2.bitwise_and(src1=im, src2=im_new) - */ - __pyx_tuple__18 = PyTuple_Pack(3, __pyx_int_255, __pyx_int_255, __pyx_int_255); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(0, 106, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__18); - __Pyx_GIVEREF(__pyx_tuple__18); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":133 - * - * # apply random color mask - * im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] # <<<<<<<<<<<<<< - * - * # return unobscured labels - */ - __pyx_tuple__19 = PyTuple_Pack(2, __pyx_int_64, __pyx_int_191); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(0, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":146 - * def mixup(im, labels, im2, labels2): - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 # <<<<<<<<<<<<<< - * im = (im * r + im2 * (1 - r)).astype(np.uint8) - * labels = np.concatenate((labels, labels2), 0) - */ - __pyx_tuple__20 = PyTuple_Pack(2, __pyx_float_32_0, __pyx_float_32_0); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 146, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__20); - __Pyx_GIVEREF(__pyx_tuple__20); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":16 - * - * - * def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # <<<<<<<<<<<<<< - * # HSV color-space augmentation - * if hgain or sgain or vgain: - */ - __pyx_tuple__23 = PyTuple_Pack(14, __pyx_n_s_im, __pyx_n_s_hgain, __pyx_n_s_sgain, __pyx_n_s_vgain, __pyx_n_s_r, __pyx_n_s_hue, __pyx_n_s_sat, __pyx_n_s_val, __pyx_n_s_dtype, __pyx_n_s_x, __pyx_n_s_lut_hue, __pyx_n_s_lut_sat, __pyx_n_s_lut_val, __pyx_n_s_im_hsv); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__23); - __Pyx_GIVEREF(__pyx_tuple__23); - __pyx_codeobj__24 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 14, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__23, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_augment_hsv, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__24)) __PYX_ERR(0, 16, __pyx_L1_error) - __pyx_tuple__25 = PyTuple_Pack(3, ((PyObject*)__pyx_float_0_5), ((PyObject*)__pyx_float_0_5), ((PyObject*)__pyx_float_0_5)); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__25); - __Pyx_GIVEREF(__pyx_tuple__25); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":32 - * - * - * def hist_equalize(im, clahe=True, bgr=False): # <<<<<<<<<<<<<< - * # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - */ - __pyx_tuple__26 = PyTuple_Pack(5, __pyx_n_s_im, __pyx_n_s_clahe, __pyx_n_s_bgr, __pyx_n_s_yuv, __pyx_n_s_c); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 32, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__26); - __Pyx_GIVEREF(__pyx_tuple__26); - __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_hist_equalize, 32, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 32, __pyx_L1_error) - __pyx_tuple__28 = PyTuple_Pack(2, ((PyObject *)Py_True), ((PyObject *)Py_False)); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 32, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__28); - __Pyx_GIVEREF(__pyx_tuple__28); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":43 - * - * - * def replicate(im, labels): # <<<<<<<<<<<<<< - * # Replicate labels - * h, w = im.shape[:2] - */ - __pyx_tuple__29 = PyTuple_Pack(23, __pyx_n_s_im, __pyx_n_s_labels, __pyx_n_s_h, __pyx_n_s_w, __pyx_n_s_boxes, __pyx_n_s_x1, __pyx_n_s_y1, __pyx_n_s_x2, __pyx_n_s_y2, __pyx_n_s_s, __pyx_n_s_i, __pyx_n_s_x1b, __pyx_n_s_y1b, __pyx_n_s_x2b, __pyx_n_s_y2b, __pyx_n_s_bh, __pyx_n_s_bw, __pyx_n_s_yc, __pyx_n_s_xc, __pyx_n_s_x1a, __pyx_n_s_y1a, __pyx_n_s_x2a, __pyx_n_s_y2a); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__29); - __Pyx_GIVEREF(__pyx_tuple__29); - __pyx_codeobj__30 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 23, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__29, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_replicate, 43, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__30)) __PYX_ERR(0, 43, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":60 - * - * - * def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # <<<<<<<<<<<<<< - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - */ - __pyx_tuple__31 = PyTuple_Pack(17, __pyx_n_s_im, __pyx_n_s_new_shape, __pyx_n_s_color, __pyx_n_s_auto, __pyx_n_s_scaleFill, __pyx_n_s_scaleup, __pyx_n_s_stride, __pyx_n_s_shape, __pyx_n_s_r, __pyx_n_s_ratio, __pyx_n_s_new_unpad, __pyx_n_s_dw, __pyx_n_s_dh, __pyx_n_s_top, __pyx_n_s_bottom, __pyx_n_s_left, __pyx_n_s_right); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__31); - __Pyx_GIVEREF(__pyx_tuple__31); - __pyx_codeobj__32 = (PyObject*)__Pyx_PyCode_New(7, 0, 0, 17, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__31, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_letterbox, 60, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__32)) __PYX_ERR(0, 60, __pyx_L1_error) - __pyx_tuple__33 = PyTuple_Pack(6, ((PyObject*)__pyx_tuple__9), ((PyObject*)__pyx_tuple__10), ((PyObject *)Py_True), ((PyObject *)Py_False), ((PyObject *)Py_True), ((PyObject *)__pyx_int_32)); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__33); - __Pyx_GIVEREF(__pyx_tuple__33); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":93 - * - * - * def copy_paste(im, labels, segments, p=0.5): # <<<<<<<<<<<<<< - * # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) - */ - __pyx_tuple__34 = PyTuple_Pack(16, __pyx_n_s_im, __pyx_n_s_labels, __pyx_n_s_segments, __pyx_n_s_p, __pyx_n_s_n, __pyx_n_s_h, __pyx_n_s_w, __pyx_n_s_c, __pyx_n_s_im_new, __pyx_n_s_j, __pyx_n_s_l, __pyx_n_s_s, __pyx_n_s_box, __pyx_n_s_ioa, __pyx_n_s_result, __pyx_n_s_i); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__34); - 
__Pyx_GIVEREF(__pyx_tuple__34); - __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 16, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_copy_paste, 93, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(0, 93, __pyx_L1_error) - __pyx_tuple__36 = PyTuple_Pack(1, ((PyObject*)__pyx_float_0_5)); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__36); - __Pyx_GIVEREF(__pyx_tuple__36); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":117 - * - * - * def cutout(im, labels, p=0.5): # <<<<<<<<<<<<<< - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: - */ - __pyx_tuple__38 = PyTuple_Pack(16, __pyx_n_s_im, __pyx_n_s_labels, __pyx_n_s_p, __pyx_n_s_h, __pyx_n_s_w, __pyx_n_s_scales, __pyx_n_s_s, __pyx_n_s_mask_h, __pyx_n_s_mask_w, __pyx_n_s_xmin, __pyx_n_s_ymin, __pyx_n_s_xmax, __pyx_n_s_ymax, __pyx_n_s_box, __pyx_n_s_ioa, __pyx_n_s__37); if (unlikely(!__pyx_tuple__38)) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__38); - __Pyx_GIVEREF(__pyx_tuple__38); - __pyx_codeobj__39 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 16, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__38, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_cutout, 117, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__39)) __PYX_ERR(0, 117, __pyx_L1_error) - __pyx_tuple__40 = PyTuple_Pack(1, ((PyObject*)__pyx_float_0_5)); if (unlikely(!__pyx_tuple__40)) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__40); - __Pyx_GIVEREF(__pyx_tuple__40); - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":144 - * - * - * def mixup(im, labels, im2, labels2): # <<<<<<<<<<<<<< - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - */ - __pyx_tuple__41 = PyTuple_Pack(5, __pyx_n_s_im, __pyx_n_s_labels, __pyx_n_s_im2, __pyx_n_s_labels2, __pyx_n_s_r); if (unlikely(!__pyx_tuple__41)) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__41); - __Pyx_GIVEREF(__pyx_tuple__41); - __pyx_codeobj__42 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__41, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_mixup, 144, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__42)) __PYX_ERR(0, 144, __pyx_L1_error) - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":152 - * - * - * def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # <<<<<<<<<<<<<< - * # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - */ - __pyx_tuple__43 = PyTuple_Pack(11, __pyx_n_s_box1, __pyx_n_s_box2, __pyx_n_s_wh_thr, __pyx_n_s_ar_thr, __pyx_n_s_area_thr, __pyx_n_s_eps, __pyx_n_s_w1, __pyx_n_s_h1, __pyx_n_s_w2, __pyx_n_s_h2, __pyx_n_s_ar); if (unlikely(!__pyx_tuple__43)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__43); - __Pyx_GIVEREF(__pyx_tuple__43); - __pyx_codeobj__44 = (PyObject*)__Pyx_PyCode_New(6, 0, 0, 11, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, 
__pyx_tuple__43, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3, __pyx_n_s_box_candidates, 152, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__44)) __PYX_ERR(0, 152, __pyx_L1_error) - __pyx_tuple__45 = PyTuple_Pack(4, ((PyObject *)__pyx_int_2), ((PyObject *)__pyx_int_100), ((PyObject*)__pyx_float_0_1), ((PyObject*)__pyx_float_1eneg_16)); if (unlikely(!__pyx_tuple__45)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__45); - __Pyx_GIVEREF(__pyx_tuple__45); - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} -/* #### Code section: init_constants ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) { - #if CYTHON_USE_MODULE_STATE - if (__Pyx_InitString(__pyx_string_tab[0], &__pyx_n_s_BORDER_CONSTANT) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[1], &__pyx_n_s_COLOR_BGR2HSV) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[2], &__pyx_n_s_COLOR_BGR2YUV) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[3], &__pyx_n_s_COLOR_HSV2BGR) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[4], &__pyx_n_s_COLOR_RGB2YUV) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[5], &__pyx_n_s_COLOR_YUV2BGR) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[6], &__pyx_n_s_COLOR_YUV2RGB) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[7], &__pyx_n_s_FILLED) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[8], &__pyx_n_s_INTER_LINEAR) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[9], &__pyx_n_s_LOGGER) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[10], &__pyx_n_s_LUT) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[11], &__pyx_n_s_T) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[12], &__pyx_n_s__21) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[13], &__pyx_kp_u__22) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[14], &__pyx_n_s__37) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[15], &__pyx_n_s__46) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[16], &__pyx_n_s_all) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[17], &__pyx_n_s_append) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[18], &__pyx_n_s_apply) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[19], &__pyx_n_s_ar) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[20], &__pyx_n_s_ar_thr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[21], &__pyx_n_s_arange) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[22], &__pyx_n_s_area_thr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[23], &__pyx_n_s_argsort) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[24], &__pyx_n_s_array) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[25], &__pyx_n_s_astype) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[26], &__pyx_n_s_asyncio_coroutines) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if 
(__Pyx_InitString(__pyx_string_tab[27], &__pyx_n_s_augment_hsv) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[28], &__pyx_n_s_auto) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[29], &__pyx_n_s_axis) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[30], &__pyx_n_s_bbox_ioa) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[31], &__pyx_n_s_beta) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[32], &__pyx_n_s_bgr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[33], &__pyx_n_s_bh) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[34], &__pyx_n_s_bitwise_and) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[35], &__pyx_n_s_bottom) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[36], &__pyx_n_s_box) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[37], &__pyx_n_s_box1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[38], &__pyx_n_s_box2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[39], &__pyx_n_s_box_candidates) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[40], &__pyx_n_s_boxes) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[41], &__pyx_n_s_bw) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[42], &__pyx_n_s_c) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[43], &__pyx_n_s_clahe) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[44], &__pyx_n_s_class_getitem) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[45], &__pyx_n_s_cline_in_traceback) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[46], &__pyx_n_s_clip) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[47], &__pyx_n_s_clipLimit) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[48], &__pyx_n_s_color) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[49], &__pyx_n_s_colorstr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[50], &__pyx_n_s_concatenate) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[51], &__pyx_n_s_copyMakeBorder) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[52], &__pyx_n_s_copy_paste) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[53], &__pyx_n_s_createCLAHE) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[54], &__pyx_n_s_cutout) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[55], &__pyx_n_s_cv2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[56], &__pyx_n_s_cvtColor) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[57], &__pyx_n_s_dh) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[58], &__pyx_n_s_drawContours) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[59], &__pyx_n_s_dst) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[60], &__pyx_n_s_dtype) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[61], &__pyx_n_s_dw) < 
0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[62], &__pyx_n_s_eps) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[63], &__pyx_n_s_equalizeHist) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[64], &__pyx_n_s_flip) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[65], &__pyx_n_s_float32) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[66], &__pyx_n_s_h) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[67], &__pyx_n_s_h1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[68], &__pyx_n_s_h2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[69], &__pyx_n_s_hgain) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[70], &__pyx_n_s_hist_equalize) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[71], &__pyx_n_s_hue) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[72], &__pyx_n_s_i) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[73], &__pyx_n_s_im) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[74], &__pyx_n_s_im2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[75], &__pyx_n_s_im_hsv) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[76], &__pyx_n_s_im_new) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[77], &__pyx_n_s_import) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[78], &__pyx_n_s_initializing) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[79], &__pyx_n_s_int32) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[80], &__pyx_n_s_interpolation) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[81], &__pyx_n_s_ioa) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[82], &__pyx_n_s_is_coroutine) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[83], &__pyx_n_s_j) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[84], &__pyx_n_s_k) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[85], &__pyx_n_s_l) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[86], &__pyx_n_s_labels) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[87], &__pyx_n_s_labels2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[88], &__pyx_n_s_left) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[89], &__pyx_n_s_letterbox) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[90], &__pyx_n_s_lut_hue) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[91], &__pyx_n_s_lut_sat) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[92], &__pyx_n_s_lut_val) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[93], &__pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[94], &__pyx_n_s_mask_h) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[95], &__pyx_n_s_mask_w) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[96], &__pyx_n_s_math) < 0) 
__PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[97], &__pyx_n_s_maximum) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[98], &__pyx_n_s_merge) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[99], &__pyx_n_s_mixup) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[100], &__pyx_n_s_mod) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[101], &__pyx_n_s_n) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[102], &__pyx_n_s_name) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[103], &__pyx_n_s_new_shape) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[104], &__pyx_n_s_new_unpad) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[105], &__pyx_n_s_np) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[106], &__pyx_n_s_numpy) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[107], &__pyx_n_s_p) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[108], &__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[109], &__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[110], &__pyx_kp_s_pdf_toolbox_lib_dia_yolov5_utils_3) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[111], &__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[112], &__pyx_n_s_r) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[113], &__pyx_n_s_randint) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[114], &__pyx_n_s_random) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[115], &__pyx_n_s_range) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[116], &__pyx_n_s_ratio) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[117], &__pyx_n_s_replicate) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[118], &__pyx_n_s_resize) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[119], &__pyx_n_s_result) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[120], &__pyx_n_s_right) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[121], &__pyx_n_s_round) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[122], &__pyx_n_s_s) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[123], &__pyx_n_s_sample) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[124], &__pyx_n_s_sat) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[125], &__pyx_n_s_scaleFill) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[126], &__pyx_n_s_scales) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[127], &__pyx_n_s_scaleup) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[128], &__pyx_n_s_segments) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[129], &__pyx_n_s_sgain) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if 
(__Pyx_InitString(__pyx_string_tab[130], &__pyx_n_s_shape) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[131], &__pyx_n_s_size) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[132], &__pyx_n_s_spec) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[133], &__pyx_n_s_split) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[134], &__pyx_n_s_src1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[135], &__pyx_n_s_src2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[136], &__pyx_n_s_stride) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[137], &__pyx_n_s_test) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[138], &__pyx_n_s_tileGridSize) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[139], &__pyx_n_s_top) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[140], &__pyx_n_s_uint8) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[141], &__pyx_n_s_uniform) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[142], &__pyx_n_s_val) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[143], &__pyx_n_s_value) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[144], &__pyx_n_s_vgain) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[145], &__pyx_n_s_w) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[146], &__pyx_n_s_w1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[147], &__pyx_n_s_w2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[148], &__pyx_n_s_wh_thr) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[149], &__pyx_n_s_x) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[150], &__pyx_n_s_x1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[151], &__pyx_n_s_x1a) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[152], &__pyx_n_s_x1b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[153], &__pyx_n_s_x2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[154], &__pyx_n_s_x2a) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[155], &__pyx_n_s_x2b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[156], &__pyx_n_s_xc) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[157], &__pyx_n_s_xmax) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[158], &__pyx_n_s_xmin) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[159], &__pyx_n_s_y1) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[160], &__pyx_n_s_y1a) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[161], &__pyx_n_s_y1b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[162], &__pyx_n_s_y2) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[163], &__pyx_n_s_y2a) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[164], &__pyx_n_s_y2b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if 
(__Pyx_InitString(__pyx_string_tab[165], &__pyx_n_s_yc) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[166], &__pyx_n_s_ymax) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[167], &__pyx_n_s_ymin) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[168], &__pyx_n_s_yuv) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - if (__Pyx_InitString(__pyx_string_tab[169], &__pyx_n_s_zeros) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - #endif - #if !CYTHON_USE_MODULE_STATE - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - #endif - __pyx_float_0_0 = PyFloat_FromDouble(0.0); if (unlikely(!__pyx_float_0_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_1 = PyFloat_FromDouble(0.1); if (unlikely(!__pyx_float_0_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_float_0_5)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_2_0 = PyFloat_FromDouble(2.0); if (unlikely(!__pyx_float_2_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_03 = PyFloat_FromDouble(0.03); if (unlikely(!__pyx_float_0_03)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_25 = PyFloat_FromDouble(0.25); if (unlikely(!__pyx_float_0_25)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_30 = PyFloat_FromDouble(0.30); if (unlikely(!__pyx_float_0_30)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_60 = PyFloat_FromDouble(0.60); if (unlikely(!__pyx_float_0_60)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_32_0 = PyFloat_FromDouble(32.0); if (unlikely(!__pyx_float_32_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_125 = PyFloat_FromDouble(0.125); if (unlikely(!__pyx_float_0_125)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_1eneg_16 = PyFloat_FromDouble(1e-16); if (unlikely(!__pyx_float_1eneg_16)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_0625 = PyFloat_FromDouble(0.0625); if (unlikely(!__pyx_float_0_0625)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_float_0_03125 = PyFloat_FromDouble(0.03125); if (unlikely(!__pyx_float_0_03125)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_8 = PyInt_FromLong(8); if (unlikely(!__pyx_int_8)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_32 = PyInt_FromLong(32); if (unlikely(!__pyx_int_32)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_64 = PyInt_FromLong(64); if (unlikely(!__pyx_int_64)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_100 = PyInt_FromLong(100); if (unlikely(!__pyx_int_100)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_114 = PyInt_FromLong(114); if (unlikely(!__pyx_int_114)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_180 = PyInt_FromLong(180); if (unlikely(!__pyx_int_180)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_191 = PyInt_FromLong(191); if (unlikely(!__pyx_int_191)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_255 = PyInt_FromLong(255); if (unlikely(!__pyx_int_255)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_256 = PyInt_FromLong(256); if (unlikely(!__pyx_int_256)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_640 = PyInt_FromLong(640); if (unlikely(!__pyx_int_640)) __PYX_ERR(0, 1, 
__pyx_L1_error) - __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} -/* #### Code section: init_globals ### */ - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - return 0; -} -/* #### Code section: init_module ### */ - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_augmentations(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_augmentations}, - {0, NULL} -}; -#endif - -#ifdef __cplusplus -namespace { - struct PyModuleDef __pyx_moduledef = - #else - static struct PyModuleDef __pyx_moduledef = - #endif - { - PyModuleDef_HEAD_INIT, - "augmentations", - __pyx_k_Image_augmentation_functions, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #elif CYTHON_USE_MODULE_STATE - sizeof(__pyx_mstate), /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - #if CYTHON_USE_MODULE_STATE - __pyx_m_traverse, /* m_traverse */ - __pyx_m_clear, 
/* m_clear */ - NULL /* m_free */ - #else - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ - #endif - }; - #ifdef __cplusplus -} /* anonymous namespace */ -#endif -#endif - -#ifndef CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initaugmentations(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initaugmentations(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_augmentations(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_augmentations(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -#if CYTHON_COMPILING_IN_LIMITED_API -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) -#else -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) -#endif -{ - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { -#if CYTHON_COMPILING_IN_LIMITED_API - result = PyModule_AddObject(module, to_name, value); -#else - result = PyDict_SetItemString(moddict, to_name, value); -#endif - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - CYTHON_UNUSED_VAR(def); - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; -#if CYTHON_COMPILING_IN_LIMITED_API - moddict = module; -#else - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; -#endif - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, 
"submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_augmentations(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - int stringtab_initialized = 0; - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'augmentations' has already been imported. Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("augmentations", __pyx_methods, __pyx_k_Image_augmentation_functions, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #elif CYTHON_COMPILING_IN_LIMITED_API - __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - { - int add_module_result = PyState_AddModule(__pyx_t_1, &__pyx_moduledef); - Py_DECREF(__pyx_t_1); __pyx_t_1 = 0; - if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - } - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #endif - CYTHON_UNUSED_VAR(__pyx_t_1); - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_augmentations(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef 
__Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - PyEval_InitThreads(); - #endif - /*--- Initialize various global constants etc. ---*/ - if (__Pyx_InitConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - stringtab_initialized = 1; - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_pdf_toolbox__lib__dia_yolov5__utils__augmentations) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "pdf_toolbox.lib.dia_yolov5.utils.augmentations")) { - if (unlikely((PyDict_SetItemString(modules, "pdf_toolbox.lib.dia_yolov5.utils.augmentations", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - (void)__Pyx_modinit_type_init_code(); - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":6 - * """ - * - * import math # <<<<<<<<<<<<<< - * import random - * - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_math, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_math, __pyx_t_2) < 0) __PYX_ERR(0, 6, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":7 - * - * import math - * import random # <<<<<<<<<<<<<< - * - * import cv2 - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_random, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_random, __pyx_t_2) < 0) __PYX_ERR(0, 7, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":9 - * import random - * - * import cv2 # <<<<<<<<<<<<<< - * import numpy as np - * - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_cv2, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_cv2, __pyx_t_2) < 0) __PYX_ERR(0, 9, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":10 - * - * import cv2 - * 
import numpy as np # <<<<<<<<<<<<<< - * - * from pdf_toolbox.lib.dia_yolov5.utils.general import LOGGER, colorstr - */ - __pyx_t_2 = __Pyx_ImportDottedModule(__pyx_n_s_numpy, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_2) < 0) __PYX_ERR(0, 10, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":12 - * import numpy as np - * - * from pdf_toolbox.lib.dia_yolov5.utils.general import LOGGER, colorstr # <<<<<<<<<<<<<< - * from pdf_toolbox.lib.dia_yolov5.utils.metrics import bbox_ioa - * - */ - __pyx_t_2 = PyList_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_LOGGER); - __Pyx_GIVEREF(__pyx_n_s_LOGGER); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_LOGGER); - __Pyx_INCREF(__pyx_n_s_colorstr); - __Pyx_GIVEREF(__pyx_n_s_colorstr); - PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_colorstr); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_LOGGER); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_LOGGER, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_colorstr); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_colorstr, __pyx_t_2) < 0) __PYX_ERR(0, 12, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":13 - * - * from pdf_toolbox.lib.dia_yolov5.utils.general import LOGGER, colorstr - * from pdf_toolbox.lib.dia_yolov5.utils.metrics import bbox_ioa # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = PyList_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_n_s_bbox_ioa); - __Pyx_GIVEREF(__pyx_n_s_bbox_ioa); - PyList_SET_ITEM(__pyx_t_3, 0, __pyx_n_s_bbox_ioa); - __pyx_t_2 = __Pyx_Import(__pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_2, __pyx_t_3, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_bbox_ioa); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_bbox_ioa, __pyx_t_3) < 0) __PYX_ERR(0, 13, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":16 - * - * - * def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): # <<<<<<<<<<<<<< - * # HSV color-space augmentation - * if hgain or sgain or vgain: - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_1augment_hsv, 0, __pyx_n_s_augment_hsv, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__24)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__25); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_augment_hsv, __pyx_t_2) < 0) 
__PYX_ERR(0, 16, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":32 - * - * - * def hist_equalize(im, clahe=True, bgr=False): # <<<<<<<<<<<<<< - * # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 - * yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_3hist_equalize, 0, __pyx_n_s_hist_equalize, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__28); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_hist_equalize, __pyx_t_2) < 0) __PYX_ERR(0, 32, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":43 - * - * - * def replicate(im, labels): # <<<<<<<<<<<<<< - * # Replicate labels - * h, w = im.shape[:2] - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_5replicate, 0, __pyx_n_s_replicate, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__30)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_replicate, __pyx_t_2) < 0) __PYX_ERR(0, 43, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":60 - * - * - * def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): # <<<<<<<<<<<<<< - * # Resize and pad image while meeting stride-multiple constraints - * shape = im.shape[:2] # current shape [height, width] - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_7letterbox, 0, __pyx_n_s_letterbox, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__32)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__33); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_letterbox, __pyx_t_2) < 0) __PYX_ERR(0, 60, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":93 - * - * - * def copy_paste(im, labels, segments, p=0.5): # <<<<<<<<<<<<<< - * # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) - * n = len(segments) - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_9copy_paste, 0, __pyx_n_s_copy_paste, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__35)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__36); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_copy_paste, __pyx_t_2) < 0) __PYX_ERR(0, 93, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":117 - * - * - * def cutout(im, labels, p=0.5): # <<<<<<<<<<<<<< - * # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 - * if random.random() < p: - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_11cutout, 0, 
__pyx_n_s_cutout, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__39)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__40); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_cutout, __pyx_t_2) < 0) __PYX_ERR(0, 117, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":144 - * - * - * def mixup(im, labels, im2, labels2): # <<<<<<<<<<<<<< - * # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf - * r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_13mixup, 0, __pyx_n_s_mixup, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__42)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_mixup, __pyx_t_2) < 0) __PYX_ERR(0, 144, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":152 - * - * - * def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) # <<<<<<<<<<<<<< - * # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio - * w1, h1 = box1[2] - box1[0], box1[3] - box1[1] - */ - __pyx_t_2 = __Pyx_CyFunction_New(&__pyx_mdef_11pdf_toolbox_3lib_10dia_yolov5_5utils_13augmentations_15box_candidates, 0, __pyx_n_s_box_candidates, NULL, __pyx_n_s_pdf_toolbox_lib_dia_yolov5_utils_4, __pyx_d, ((PyObject *)__pyx_codeobj__44)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_CyFunction_SetDefaultsTuple(__pyx_t_2, __pyx_tuple__45); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_box_candidates, __pyx_t_2) < 0) __PYX_ERR(0, 152, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "pdf_toolbox/lib/dia_yolov5/utils/augmentations.py":1 - * # YOLOv5 by Ultralytics, GPL-3.0 license # <<<<<<<<<<<<<< - * """ - * Image augmentation functions - */ - __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - if (__pyx_m) { - if (__pyx_d && stringtab_initialized) { - __Pyx_AddTraceback("init pdf_toolbox.lib.dia_yolov5.utils.augmentations", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - #if !CYTHON_USE_MODULE_STATE - Py_CLEAR(__pyx_m); - #endif - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init pdf_toolbox.lib.dia_yolov5.utils.augmentations"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 
0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} -/* #### Code section: cleanup_globals ### */ -/* #### Code section: cleanup_module ### */ -/* #### Code section: main_method ### */ -/* #### Code section: utility_code_pragmas ### */ -#if _MSC_VER -#pragma warning( push ) -/* Warning 4127: conditional expression is constant - * Cython uses constant conditional expressions to allow in inline functions to be optimized at - * compile-time, so this warning is not useful - */ -#pragma warning( disable : 4127 ) -#endif - - - -/* #### Code section: utility_code_def ### */ - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - PyObject *exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && 
CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_b, name); - if (unlikely(!result) && !PyErr_Occurred()) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* TupleAndListFromArray */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { - PyObject *v; - Py_ssize_t i; - for (i = 0; i < length; i++) { - v = dest[i] = src[i]; - Py_INCREF(v); - } -} -static CYTHON_INLINE PyObject * -__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - Py_INCREF(__pyx_empty_tuple); - return __pyx_empty_tuple; - } - res = PyTuple_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); - return res; -} -static CYTHON_INLINE PyObject * -__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) -{ - PyObject *res; - if (n <= 0) { - return PyList_New(0); - } - res = PyList_New(n); - if (unlikely(res == NULL)) return NULL; - __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); - return res; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* fastcall */ -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) -{ - Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); - for (i = 0; i < n; i++) - { - if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; - } - for (i = 0; i < n; i++) - { - int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); - if (unlikely(eq != 0)) { - if (unlikely(eq < 0)) return NULL; // error - return kwvalues[i]; - } - } - return NULL; // not found (no exception set) -} -#endif - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject *const *kwvalues, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); - while (1) { - if (kwds_is_tuple) { - if (pos >= PyTuple_GET_SIZE(kwds)) break; - key = PyTuple_GET_ITEM(kwds, pos); - value = kwvalues[pos]; - pos++; - } - else - { - if (!PyDict_Next(kwds, &pos, &key, &value)) break; - } - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = ( - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key) - ); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 
0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - #if PY_MAJOR_VERSION < 3 - PyErr_Format(PyExc_TypeError, - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - PyErr_Format(PyExc_TypeError, - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - x = (long)((unsigned long)a + b); - if (likely((x^a) >= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - return __Pyx_NewRef(op2); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - 
double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("add", return NULL) - result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#elif CYTHON_COMPILING_IN_LIMITED_API - if (unlikely(!__pyx_m)) { - return NULL; - } - result = PyObject_GetAttr(__pyx_m, name); - if (likely(result)) { - return result; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = Py_TYPE(func)->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - 
Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? PyDict_Size(kwargs) : 0; - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectFastCall */ -static PyObject* 
__Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { - PyObject *argstuple; - PyObject *result; - size_t i; - argstuple = PyTuple_New((Py_ssize_t)nargs); - if (unlikely(!argstuple)) return NULL; - for (i = 0; i < nargs; i++) { - Py_INCREF(args[i]); - PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]); - } - result = __Pyx_PyObject_Call(func, argstuple, kwargs); - Py_DECREF(argstuple); - return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { - Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); -#if CYTHON_COMPILING_IN_CPYTHON - if (nargs == 0 && kwargs == NULL) { -#ifdef __Pyx_CyFunction_USED - if (__Pyx_IsCyOrPyCFunction(func)) -#else - if (PyCFunction_Check(func)) -#endif - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { - return __Pyx_PyObject_CallMethO(func, NULL); - } - } - } - else if (nargs == 1 && kwargs == NULL) { - if (PyCFunction_Check(func)) - { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, args[0]); - } - } - } -#endif - #if PY_VERSION_HEX < 0x030800B1 - #if CYTHON_FAST_PYCCALL - if (PyCFunction_Check(func)) { - if (kwargs) { - return _PyCFunction_FastCallDict(func, args, nargs, kwargs); - } else { - return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); - } - } - #if PY_VERSION_HEX >= 0x030700A1 - if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { - return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); - } - #endif - #endif - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); - } - #endif - #endif - #if CYTHON_VECTORCALL - vectorcallfunc f = _PyVectorcall_Function(func); - if (f) { - return f(func, args, (size_t)nargs, kwargs); - } - #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL - if (__Pyx_CyFunction_CheckExact(func)) { - __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); - if (f) return f(func, args, (size_t)nargs, kwargs); - } - #endif - if (nargs == 0) { - return __Pyx_PyObject_Call(func, __pyx_empty_tuple, kwargs); - } - return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/* IterFinish */ -static CYTHON_INLINE int __Pyx_IterFinish(void) { -#if CYTHON_FAST_THREAD_STATE - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* exc_type = tstate->curexc_type; - if (unlikely(exc_type)) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { - PyObject *exc_value, *exc_tb; - exc_value = tstate->curexc_value; - exc_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; - Py_DECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_tb); - return 0; - } else { - return -1; - } - } - return 0; -#else - if (unlikely(PyErr_Occurred())) { - if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { - PyErr_Clear(); - return 0; - } else { - return -1; - } - } - return 0; -#endif -} - -/* UnpackItemEndCheck */ -static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { - if (unlikely(retval)) { - Py_DECREF(retval); - __Pyx_RaiseTooManyValuesError(expected); - return -1; - } - return __Pyx_IterFinish(); -} - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_RemainderObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - x = a % b; - x += ((x != 0) & ((x ^ b) < 0)) * b; - return PyInt_FromLong(x); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - return __Pyx_NewRef(op1); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_remainder(op1, op2); - } - } - x = a % b; - x += ((x != 0) & ((x ^ b) < 0)) * b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla % llb; - llx += ((llx != 0) & ((llx ^ llb) < 0)) * llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - return (inplace ? 
PyNumber_InPlaceRemainder : PyNumber_Remainder)(op1, op2); -} -#endif - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (unlikely(!j)) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; - PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; - if (mm && mm->mp_subscript) { - PyObject *r, *key = PyInt_FromSsize_t(i); - if (unlikely(!key)) return NULL; - r = mm->mp_subscript(o, key); - Py_DECREF(key); - return r; - } - if (likely(sm && sm->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { - Py_ssize_t l = sm->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return sm->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* PyObjectCallOneArg */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *args[2] = {NULL, arg}; - return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { - PyObject *runerr; - Py_ssize_t key_value; - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, - "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); - __Pyx_DECREF_TypeName(index_type_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { - __Pyx_TypeName obj_type_name; - if (likely(PyType_Check(obj))) { - PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_n_s_class_getitem); - if (meth) { - PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); - Py_DECREF(meth); - return result; - } - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { - PyTypeObject *tp = Py_TYPE(obj); - PyMappingMethods *mm = tp->tp_as_mapping; - PySequenceMethods *sm = tp->tp_as_sequence; - if (likely(mm && mm->mp_subscript)) { - return mm->mp_subscript(obj, key); - } - if (likely(sm && sm->sq_item)) { - return __Pyx_PyObject_GetIndex(obj, key); - } - return __Pyx_PyObject_GetItem_Slow(obj, key); -} -#endif - -/* SliceObject */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, - Py_ssize_t cstart, Py_ssize_t cstop, - PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, - int has_cstart, int has_cstop, int wraparound) { - __Pyx_TypeName obj_type_name; -#if CYTHON_USE_TYPE_SLOTS - PyMappingMethods* mp; -#if PY_MAJOR_VERSION < 3 - PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; - if (likely(ms && ms->sq_slice)) { - if (!has_cstart) { - if (_py_start && (*_py_start != Py_None)) { - cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); - if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstart = 0; - } - if (!has_cstop) { - if (_py_stop && 
(*_py_stop != Py_None)) { - cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); - if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; - } else - cstop = PY_SSIZE_T_MAX; - } - if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { - Py_ssize_t l = ms->sq_length(obj); - if (likely(l >= 0)) { - if (cstop < 0) { - cstop += l; - if (cstop < 0) cstop = 0; - } - if (cstart < 0) { - cstart += l; - if (cstart < 0) cstart = 0; - } - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - goto bad; - PyErr_Clear(); - } - } - return ms->sq_slice(obj, cstart, cstop); - } -#else - CYTHON_UNUSED_VAR(wraparound); -#endif - mp = Py_TYPE(obj)->tp_as_mapping; - if (likely(mp && mp->mp_subscript)) -#else - CYTHON_UNUSED_VAR(wraparound); -#endif - { - PyObject* result; - PyObject *py_slice, *py_start, *py_stop; - if (_py_slice) { - py_slice = *_py_slice; - } else { - PyObject* owned_start = NULL; - PyObject* owned_stop = NULL; - if (_py_start) { - py_start = *_py_start; - } else { - if (has_cstart) { - owned_start = py_start = PyInt_FromSsize_t(cstart); - if (unlikely(!py_start)) goto bad; - } else - py_start = Py_None; - } - if (_py_stop) { - py_stop = *_py_stop; - } else { - if (has_cstop) { - owned_stop = py_stop = PyInt_FromSsize_t(cstop); - if (unlikely(!py_stop)) { - Py_XDECREF(owned_start); - goto bad; - } - } else - py_stop = Py_None; - } - py_slice = PySlice_New(py_start, py_stop, Py_None); - Py_XDECREF(owned_start); - Py_XDECREF(owned_stop); - if (unlikely(!py_slice)) goto bad; - } -#if CYTHON_USE_TYPE_SLOTS - result = mp->mp_subscript(obj, py_slice); -#else - result = PyObject_GetItem(obj, py_slice); -#endif - if (!_py_slice) { - Py_DECREF(py_slice); - } - return result; - } - obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); - PyErr_Format(PyExc_TypeError, - "'" __Pyx_FMT_TYPENAME "' object is unsliceable", obj_type_name); - __Pyx_DECREF_TypeName(obj_type_name); -bad: - return NULL; -} - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_TrueDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long a = PyInt_AS_LONG(op1); - - if (8 * sizeof(long) <= 53 || likely(labs(a) <= ((PY_LONG_LONG)1 << 53))) { - return PyFloat_FromDouble((double)a / (double)b); - } - return PyInt_Type.tp_as_number->nb_true_divide(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT && 1 * PyLong_SHIFT < 53) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT && 1 * PyLong_SHIFT < 53) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT && 2 * PyLong_SHIFT < 53) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT && 2 * PyLong_SHIFT < 53) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT && 3 * PyLong_SHIFT < 53) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT && 3 * PyLong_SHIFT < 53) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_true_divide(op1, op2); - } - } - if ((8 * sizeof(long) <= 53 || likely(labs(a) <= ((PY_LONG_LONG)1 << 53))) - || __Pyx_sst_abs(size) <= 52 / PyLong_SHIFT) { - return PyFloat_FromDouble((double)a / (double)b); - } - return PyLong_Type.tp_as_number->nb_true_divide(op1, op2); - return PyLong_FromLong(x); - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double a = __pyx_PyFloat_AsDouble(op1); -#else - double a = PyFloat_AS_DOUBLE(op1); -#endif - double result; - - PyFPE_START_PROTECT("divide", return NULL) - result = ((double)a) / (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? 
PyNumber_InPlaceTrueDivide : PyNumber_TrueDivide)(op1, op2); -} -#endif - -/* PyFloatBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyFloat_SubtractObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { - const double b = floatval; - double a, result; - (void)inplace; (void)zerodivision_check; - if (likely(PyFloat_CheckExact(op1))) { -#if CYTHON_COMPILING_IN_LIMITED_API - a = __pyx_PyFloat_AsDouble(op1); -#else - a = PyFloat_AS_DOUBLE(op1); -#endif - - } else - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - a = (double) PyInt_AS_LONG(op1); - - } else - #endif - if (likely(PyLong_CheckExact(op1))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - switch (size) { - case 0: a = 0.0; break; - case -1: a = -(double) digits[0]; break; - case 1: a = (double) digits[0]; break; - case -2: - case 2: - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { - a = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -2) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - case -3: - case 3: - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { - a = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -3) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - case -4: - case 4: - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { - a = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -4) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - default: - #else - { - #endif - a = PyLong_AsDouble(op1); - if (unlikely(a == -1.0 && PyErr_Occurred())) return NULL; - } - } else { - return (inplace ? 
PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); - } - PyFPE_START_PROTECT("subtract", return NULL) - result = a - b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); -} -#endif - -/* PyFloatBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyFloat_AddObjC(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { - const double b = floatval; - double a, result; - (void)inplace; (void)zerodivision_check; - if (likely(PyFloat_CheckExact(op1))) { -#if CYTHON_COMPILING_IN_LIMITED_API - a = __pyx_PyFloat_AsDouble(op1); -#else - a = PyFloat_AS_DOUBLE(op1); -#endif - - } else - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - a = (double) PyInt_AS_LONG(op1); - - } else - #endif - if (likely(PyLong_CheckExact(op1))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - switch (size) { - case 0: a = 0.0; break; - case -1: a = -(double) digits[0]; break; - case 1: a = (double) digits[0]; break; - case -2: - case 2: - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { - a = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -2) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - case -3: - case 3: - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { - a = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -3) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - case -4: - case 4: - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { - a = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (a < (double) ((PY_LONG_LONG)1 << 53))) { - if (size == -4) - a = -a; - break; - } - } - CYTHON_FALLTHROUGH; - default: - #else - { - #endif - a = PyLong_AsDouble(op1); - if (unlikely(a == -1.0 && PyErr_Occurred())) return NULL; - } - } else { - return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); - } - PyFPE_START_PROTECT("add", return NULL) - result = a + b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); -} -#endif - -/* PyObjectCall2Args */ - static CYTHON_INLINE PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args[3] = {NULL, arg1, arg2}; - return __Pyx_PyObject_FastCall(function, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); -} - -/* PyObjectGetMethod */ - static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { - PyObject *attr; -#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP - __Pyx_TypeName type_name; - PyTypeObject *tp = Py_TYPE(obj); - PyObject *descr; - descrgetfunc f = NULL; - PyObject **dictptr, *dict; - int meth_found = 0; - assert (*method == NULL); - if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; - } - if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { - return 0; - } - descr = _PyType_Lookup(tp, name); - if (likely(descr != NULL)) { - Py_INCREF(descr); -#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR - if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) -#elif PY_MAJOR_VERSION >= 3 - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) - #endif -#else - #ifdef __Pyx_CyFunction_USED - if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) - #else - if (likely(PyFunction_Check(descr))) - #endif -#endif - { - meth_found = 1; - } else { - f = Py_TYPE(descr)->tp_descr_get; - if (f != NULL && PyDescr_IsData(descr)) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - } - } - dictptr = _PyObject_GetDictPtr(obj); - if (dictptr != NULL && (dict = *dictptr) != NULL) { - Py_INCREF(dict); - attr = __Pyx_PyDict_GetItemStr(dict, name); - if (attr != NULL) { - Py_INCREF(attr); - Py_DECREF(dict); - Py_XDECREF(descr); - goto try_unpack; - } - Py_DECREF(dict); - } - if (meth_found) { - *method = descr; - return 1; - } - if (f != NULL) { - attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); - Py_DECREF(descr); - goto try_unpack; - } - if (likely(descr != NULL)) { - *method = descr; - return 0; - } - type_name = __Pyx_PyType_GetName(tp); - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", - type_name, name); -#else - "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", - type_name, PyString_AS_STRING(name)); -#endif - __Pyx_DECREF_TypeName(type_name); - return 0; -#else - attr = __Pyx_PyObject_GetAttrStr(obj, name); - goto try_unpack; -#endif -try_unpack: -#if CYTHON_UNPACK_METHODS - if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { - PyObject *function = PyMethod_GET_FUNCTION(attr); - Py_INCREF(function); - Py_DECREF(attr); - *method = function; - return 1; - } -#endif - *method = attr; - return 0; -} - -/* PyObjectCallMethod1 */ - static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) { - PyObject *result = __Pyx_PyObject_CallOneArg(method, arg); - Py_DECREF(method); - return result; -} -static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* 
arg) { - PyObject *method = NULL, *result; - int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); - if (likely(is_method)) { - result = __Pyx_PyObject_Call2Args(method, obj, arg); - Py_DECREF(method); - return result; - } - if (unlikely(!method)) return NULL; - return __Pyx__PyObject_CallMethod1(method, arg); -} - -/* append */ - static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) { - if (likely(PyList_CheckExact(L))) { - if (unlikely(__Pyx_PyList_Append(L, x) < 0)) return -1; - } else { - PyObject* retval = __Pyx_PyObject_CallMethod1(L, __pyx_n_s_append, x); - if (unlikely(!retval)) - return -1; - Py_DECREF(retval); - } - return 0; -} - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_FloorDivideObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - - if (unlikely(b == -1 && ((unsigned long)a) == 0-(unsigned long)a)) - return PyInt_Type.tp_as_number->nb_floor_divide(op1, op2); - else { - long q, r; - q = a / b; - r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - x = q; - } - return PyInt_FromLong(x); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (unlikely(size == 0)) { - return __Pyx_NewRef(op1); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ? 
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_floor_divide(op1, op2); - } - } - { - long q, r; - q = a / b; - r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - x = q; - } - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - { - PY_LONG_LONG q, r; - q = lla / llb; - r = lla - q*llb; - q -= ((r != 0) & ((r ^ llb) < 0)); - llx = q; - } - return 
PyLong_FromLongLong(llx); -#endif - - - } - #endif - return (inplace ? PyNumber_InPlaceFloorDivide : PyNumber_FloorDivide)(op1, op2); -} -#endif - -/* PyIntBinop */ - #if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_SubtractCObj(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check) { - CYTHON_MAYBE_UNUSED_VAR(intval); - CYTHON_MAYBE_UNUSED_VAR(inplace); - CYTHON_UNUSED_VAR(zerodivision_check); - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op2))) { - const long a = intval; - long x; - long b = PyInt_AS_LONG(op2); - - x = (long)((unsigned long)a - b); - if (likely((x^a) >= 0 || (x^~b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op2))) { - const long a = intval; - long b, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG lla = intval; - PY_LONG_LONG llb, llx; -#endif - const digit* digits = ((PyLongObject*)op2)->ob_digit; - const Py_ssize_t size = Py_SIZE(op2); - if (unlikely(size == 0)) { - return __Pyx_NewRef(op1); - } - if (likely(__Pyx_sst_abs(size) <= 1)) { - b = likely(size) ? digits[0] : 0; - if (size == -1) b = -b; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - b = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - llb = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - b = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - llb = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - b = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - llb = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - b = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - llb = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - b = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - llb = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned 
PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - b = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; - #ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - llb = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; - #endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_subtract(op1, op2); - } - } - x = a - b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla - llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op2)) { - const long a = intval; -#if CYTHON_COMPILING_IN_LIMITED_API - double b = __pyx_PyFloat_AsDouble(op2); -#else - double b = PyFloat_AS_DOUBLE(op2); -#endif - double result; - - PyFPE_START_PROTECT("subtract", return NULL) - result = ((double)a) - (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceSubtract : PyNumber_Subtract)(op1, op2); -} -#endif - -/* Import */ - static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *module = 0; - PyObject *empty_dict = 0; - PyObject *empty_list = 0; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (unlikely(!py_import)) - goto bad; - if (!from_list) { - empty_list = PyList_New(0); - if (unlikely(!empty_list)) - goto bad; - from_list = empty_list; - } - #endif - empty_dict = PyDict_New(); - if (unlikely(!empty_dict)) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, 1); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, 1); - #endif - if (unlikely(!module)) { - if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (unlikely(!py_level)) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, __pyx_d, empty_dict, from_list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - #if CYTHON_COMPILING_IN_LIMITED_API - module = PyImport_ImportModuleLevelObject( - name, empty_dict, empty_dict, from_list, level); - #else - module = PyImport_ImportModuleLevelObject( - name, __pyx_d, empty_dict, from_list, level); - #endif - #endif - } - } -bad: - Py_XDECREF(empty_dict); - Py_XDECREF(empty_list); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - return module; -} - -/* ImportDottedModule */ - #if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { - PyObject *partial_name = NULL, *slice = NULL, *sep = NULL; - if (unlikely(PyErr_Occurred())) { - PyErr_Clear(); - } - if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { - partial_name = name; - } 
else { - slice = PySequence_GetSlice(parts_tuple, 0, count); - if (unlikely(!slice)) - goto bad; - sep = PyUnicode_FromStringAndSize(".", 1); - if (unlikely(!sep)) - goto bad; - partial_name = PyUnicode_Join(sep, slice); - } - PyErr_Format( -#if PY_MAJOR_VERSION < 3 - PyExc_ImportError, - "No module named '%s'", PyString_AS_STRING(partial_name)); -#else -#if PY_VERSION_HEX >= 0x030600B1 - PyExc_ModuleNotFoundError, -#else - PyExc_ImportError, -#endif - "No module named '%U'", partial_name); -#endif -bad: - Py_XDECREF(sep); - Py_XDECREF(slice); - Py_XDECREF(partial_name); - return NULL; -} -#endif -#if PY_MAJOR_VERSION >= 3 -static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { - PyObject *imported_module; -#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - return NULL; - imported_module = __Pyx_PyDict_GetItemStr(modules, name); - Py_XINCREF(imported_module); -#else - imported_module = PyImport_GetModule(name); -#endif - return imported_module; -} -#endif -static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if PY_MAJOR_VERSION < 3 - PyObject *module, *from_list, *star = __pyx_n_s__21; - CYTHON_UNUSED_VAR(parts_tuple); - from_list = PyList_New(1); - if (unlikely(!from_list)) - return NULL; - Py_INCREF(star); - PyList_SET_ITEM(from_list, 0, star); - module = __Pyx_Import(name, from_list, 0); - Py_DECREF(from_list); - return module; -#else - Py_ssize_t i, nparts; - PyObject *imported_module; - PyObject *module = __Pyx_Import(name, NULL, 0); - if (!parts_tuple || unlikely(!module)) - return module; - imported_module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(imported_module)) { - Py_DECREF(module); - return imported_module; - } - PyErr_Clear(); - nparts = PyTuple_GET_SIZE(parts_tuple); - for (i=1; i < nparts && module; i++) { - PyObject *part, *submodule; -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - part = PyTuple_GET_ITEM(parts_tuple, i); -#else - part = PySequence_ITEM(parts_tuple, i); -#endif - submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); -#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(part); -#endif - Py_DECREF(module); - module = submodule; - } - if (likely(module)) - return module; - return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); -#endif -} -static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 - PyObject *module = __Pyx__ImportDottedModule_Lookup(name); - if (likely(module)) { - PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, __pyx_n_s_spec); - if (likely(spec)) { - PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, __pyx_n_s_initializing); - if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { - Py_DECREF(spec); - spec = NULL; - } - Py_XDECREF(unsafe); - } - if (likely(!spec)) { - PyErr_Clear(); - return module; - } - Py_DECREF(spec); - Py_DECREF(module); - } else if (PyErr_Occurred()) { - PyErr_Clear(); - } -#endif - return __Pyx__ImportDottedModule(name, parts_tuple); -} - -/* ImportFrom */ - static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - const char* module_name_str = 0; - PyObject* module_name = 0; - PyObject* module_dot = 0; - PyObject* full_name = 0; - 
PyErr_Clear(); - module_name_str = PyModule_GetName(module); - if (unlikely(!module_name_str)) { goto modbad; } - module_name = PyUnicode_FromString(module_name_str); - if (unlikely(!module_name)) { goto modbad; } - module_dot = PyUnicode_Concat(module_name, __pyx_kp_u__22); - if (unlikely(!module_dot)) { goto modbad; } - full_name = PyUnicode_Concat(module_dot, name); - if (unlikely(!full_name)) { goto modbad; } - #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) - { - PyObject *modules = PyImport_GetModuleDict(); - if (unlikely(!modules)) - goto modbad; - value = PyObject_GetItem(modules, full_name); - } - #else - value = PyImport_GetModule(full_name); - #endif - modbad: - Py_XDECREF(full_name); - Py_XDECREF(module_dot); - Py_XDECREF(module_name); - } - if (unlikely(!value)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* FixUpExtensionType */ - #if CYTHON_USE_TYPE_SPECS -static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { -#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API - (void) spec; - (void) type; -#else - const PyType_Slot *slot = spec->slots; - while (slot && slot->slot && slot->slot != Py_tp_members) - slot++; - if (slot && slot->slot == Py_tp_members) { - int changed = 0; -#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) - const -#endif - PyMemberDef *memb = (PyMemberDef*) slot->pfunc; - while (memb && memb->name) { - if (memb->name[0] == '_' && memb->name[1] == '_') { -#if PY_VERSION_HEX < 0x030900b1 - if (strcmp(memb->name, "__weaklistoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_weaklistoffset = memb->offset; - changed = 1; - } - else if (strcmp(memb->name, "__dictoffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); - type->tp_dictoffset = memb->offset; - changed = 1; - } -#if CYTHON_METH_FASTCALL - else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { - assert(memb->type == T_PYSSIZET); - assert(memb->flags == READONLY); -#if PY_VERSION_HEX >= 0x030800b4 - type->tp_vectorcall_offset = memb->offset; -#else - type->tp_print = (printfunc) memb->offset; -#endif - changed = 1; - } -#endif -#else - if ((0)); -#endif -#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON - else if (strcmp(memb->name, "__module__") == 0) { - PyObject *descr; - assert(memb->type == T_OBJECT); - assert(memb->flags == 0 || memb->flags == READONLY); - descr = PyDescr_NewMember(type, memb); - if (unlikely(!descr)) - return -1; - if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { - Py_DECREF(descr); - return -1; - } - Py_DECREF(descr); - changed = 1; - } -#endif - } - memb++; - } - if (changed) - PyType_Modified(type); - } -#endif - return 0; -} -#endif - -/* FetchCommonType */ - static PyObject *__Pyx_FetchSharedCythonABIModule(void) { - PyObject *abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME); - if (!abi_module) return NULL; - Py_INCREF(abi_module); - return abi_module; -} -static int __Pyx_VerifyCachedType(PyObject *cached_type, - const char *name, - Py_ssize_t basicsize, - Py_ssize_t expected_basicsize) { - if (!PyType_Check(cached_type)) { - PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s is not a type object", name); - return -1; - } - if (basicsize != expected_basicsize) { 
- PyErr_Format(PyExc_TypeError, - "Shared Cython type %.200s has the wrong size, try recompiling", - name); - return -1; - } - return 0; -} -#if !CYTHON_USE_TYPE_SPECS -static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { - PyObject* abi_module; - const char* object_name; - PyTypeObject *cached_type = NULL; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - object_name = strrchr(type->tp_name, '.'); - object_name = object_name ? object_name+1 : type->tp_name; - cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - if (__Pyx_VerifyCachedType( - (PyObject *)cached_type, - object_name, - cached_type->tp_basicsize, - type->tp_basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - if (PyType_Ready(type) < 0) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) - goto bad; - Py_INCREF(type); - cached_type = type; -done: - Py_DECREF(abi_module); - return cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#else -static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { - PyObject *abi_module, *cached_type = NULL; - const char* object_name = strrchr(spec->name, '.'); - object_name = object_name ? object_name+1 : spec->name; - abi_module = __Pyx_FetchSharedCythonABIModule(); - if (!abi_module) return NULL; - cached_type = PyObject_GetAttrString(abi_module, object_name); - if (cached_type) { - Py_ssize_t basicsize; -#if CYTHON_COMPILING_IN_LIMITED_API - PyObject *py_basicsize; - py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); - if (unlikely(!py_basicsize)) goto bad; - basicsize = PyLong_AsSsize_t(py_basicsize); - Py_DECREF(py_basicsize); - py_basicsize = 0; - if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; -#else - basicsize = likely(PyType_Check(cached_type)) ? 
((PyTypeObject*) cached_type)->tp_basicsize : -1; -#endif - if (__Pyx_VerifyCachedType( - cached_type, - object_name, - basicsize, - spec->basicsize) < 0) { - goto bad; - } - goto done; - } - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; - PyErr_Clear(); - (void) module; - cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); - if (unlikely(!cached_type)) goto bad; - if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; - if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; -done: - Py_DECREF(abi_module); - assert(cached_type == NULL || PyType_Check(cached_type)); - return (PyTypeObject *) cached_type; -bad: - Py_XDECREF(cached_type); - cached_type = NULL; - goto done; -} -#endif - -/* PyVectorcallFastCallDict */ - #if CYTHON_METH_FASTCALL -static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - PyObject *res = NULL; - PyObject *kwnames; - PyObject **newargs; - PyObject **kwvalues; - Py_ssize_t i, pos; - size_t j; - PyObject *key, *value; - unsigned long keys_are_strings; - Py_ssize_t nkw = PyDict_GET_SIZE(kw); - newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); - if (unlikely(newargs == NULL)) { - PyErr_NoMemory(); - return NULL; - } - for (j = 0; j < nargs; j++) newargs[j] = args[j]; - kwnames = PyTuple_New(nkw); - if (unlikely(kwnames == NULL)) { - PyMem_Free(newargs); - return NULL; - } - kwvalues = newargs + nargs; - pos = i = 0; - keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; - while (PyDict_Next(kw, &pos, &key, &value)) { - keys_are_strings &= Py_TYPE(key)->tp_flags; - Py_INCREF(key); - Py_INCREF(value); - PyTuple_SET_ITEM(kwnames, i, key); - kwvalues[i] = value; - i++; - } - if (unlikely(!keys_are_strings)) { - PyErr_SetString(PyExc_TypeError, "keywords must be strings"); - goto cleanup; - } - res = vc(func, newargs, nargs, kwnames); -cleanup: - Py_DECREF(kwnames); - for (i = 0; i < nkw; i++) - Py_DECREF(kwvalues[i]); - PyMem_Free(newargs); - return res; -} -static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) -{ - if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { - return vc(func, args, nargs, NULL); - } - return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); -} -#endif - -/* CythonFunctionShared */ - static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { -#if PY_VERSION_HEX < 0x030900B1 - __Pyx_Py_XDECREF_SET( - __Pyx_CyFunction_GetClassObj(f), - ((classobj) ? __Pyx_NewRef(classobj) : NULL)); -#else - __Pyx_Py_XDECREF_SET( - ((PyCMethodObject *) (f))->mm_class, - (PyTypeObject*)((classobj) ? 
__Pyx_NewRef(classobj) : NULL)); -#endif -} -static PyObject * -__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) -{ - CYTHON_UNUSED_VAR(closure); - if (unlikely(op->func_doc == NULL)) { - if (((PyCFunctionObject*)op)->m_ml->ml_doc) { -#if PY_MAJOR_VERSION >= 3 - op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#else - op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); -#endif - if (unlikely(op->func_doc == NULL)) - return NULL; - } else { - Py_INCREF(Py_None); - return Py_None; - } - } - Py_INCREF(op->func_doc); - return op->func_doc; -} -static int -__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (value == NULL) { - value = Py_None; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_doc, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_name == NULL)) { -#if PY_MAJOR_VERSION >= 3 - op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#else - op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); -#endif - if (unlikely(op->func_name == NULL)) - return NULL; - } - Py_INCREF(op->func_name); - return op->func_name; -} -static int -__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__name__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_name, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_qualname); - return op->func_qualname; -} -static int -__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); -#if PY_MAJOR_VERSION >= 3 - if (unlikely(value == NULL || !PyUnicode_Check(value))) -#else - if (unlikely(value == NULL || !PyString_Check(value))) -#endif - { - PyErr_SetString(PyExc_TypeError, - "__qualname__ must be set to a string object"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_qualname, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(op->func_dict == NULL)) { - op->func_dict = PyDict_New(); - if (unlikely(op->func_dict == NULL)) - return NULL; - } - Py_INCREF(op->func_dict); - return op->func_dict; -} -static int -__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) -{ - CYTHON_UNUSED_VAR(context); - if (unlikely(value == NULL)) { - PyErr_SetString(PyExc_TypeError, - "function's dictionary may not be deleted"); - return -1; - } - if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "setting function's dictionary to a non-dict"); - return -1; - } - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->func_dict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(context); - Py_INCREF(op->func_globals); - return op->func_globals; -} -static PyObject * 
-__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) -{ - CYTHON_UNUSED_VAR(op); - CYTHON_UNUSED_VAR(context); - Py_INCREF(Py_None); - return Py_None; -} -static PyObject * -__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) -{ - PyObject* result = (op->func_code) ? op->func_code : Py_None; - CYTHON_UNUSED_VAR(context); - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { - int result = 0; - PyObject *res = op->defaults_getter((PyObject *) op); - if (unlikely(!res)) - return -1; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - op->defaults_tuple = PyTuple_GET_ITEM(res, 0); - Py_INCREF(op->defaults_tuple); - op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); - Py_INCREF(op->defaults_kwdict); - #else - op->defaults_tuple = PySequence_ITEM(res, 0); - if (unlikely(!op->defaults_tuple)) result = -1; - else { - op->defaults_kwdict = PySequence_ITEM(res, 1); - if (unlikely(!op->defaults_kwdict)) result = -1; - } - #endif - Py_DECREF(res); - return result; -} -static int -__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__defaults__ must be set to a tuple object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_tuple; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_tuple; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value) { - value = Py_None; - } else if (unlikely(value != Py_None && !PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__kwdefaults__ must be set to a dict object"); - return -1; - } - PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__kwdefaults__ will not " - "currently affect the values used in function calls", 1); - Py_INCREF(value); - __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { - PyObject* result = op->defaults_kwdict; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - if (op->defaults_getter) { - if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; - result = op->defaults_kwdict; - } else { - result = Py_None; - } - } - Py_INCREF(result); - return result; -} -static int -__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { - CYTHON_UNUSED_VAR(context); - if (!value || value == Py_None) { - value = NULL; - } else if (unlikely(!PyDict_Check(value))) { - PyErr_SetString(PyExc_TypeError, - "__annotations__ must be set to a dict object"); - return -1; - } - Py_XINCREF(value); - __Pyx_Py_XDECREF_SET(op->func_annotations, value); - return 0; -} -static PyObject * -__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) 
{ - PyObject* result = op->func_annotations; - CYTHON_UNUSED_VAR(context); - if (unlikely(!result)) { - result = PyDict_New(); - if (unlikely(!result)) return NULL; - op->func_annotations = result; - } - Py_INCREF(result); - return result; -} -static PyObject * -__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { - int is_coroutine; - CYTHON_UNUSED_VAR(context); - if (op->func_is_coroutine) { - return __Pyx_NewRef(op->func_is_coroutine); - } - is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; -#if PY_VERSION_HEX >= 0x03050000 - if (is_coroutine) { - PyObject *module, *fromlist, *marker = __pyx_n_s_is_coroutine; - fromlist = PyList_New(1); - if (unlikely(!fromlist)) return NULL; - Py_INCREF(marker); - PyList_SET_ITEM(fromlist, 0, marker); - module = PyImport_ImportModuleLevelObject(__pyx_n_s_asyncio_coroutines, NULL, NULL, fromlist, 0); - Py_DECREF(fromlist); - if (unlikely(!module)) goto ignore; - op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); - Py_DECREF(module); - if (likely(op->func_is_coroutine)) { - return __Pyx_NewRef(op->func_is_coroutine); - } -ignore: - PyErr_Clear(); - } -#endif - op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); - return __Pyx_NewRef(op->func_is_coroutine); -} -static PyGetSetDef __pyx_CyFunction_getsets[] = { - {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, - {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, - {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, - {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, - {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, - {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, - {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, - {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, - {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, - {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, - {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, - {0, 0, 0, 0, 0} -}; -static PyMemberDef __pyx_CyFunction_members[] = { - {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, -#if CYTHON_USE_TYPE_SPECS - {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, -#if CYTHON_METH_FASTCALL -#if CYTHON_BACKPORT_VECTORCALL - {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, -#else - {(char *) "__vectorcalloffset__", T_PYSSIZET, 
offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, -#endif -#endif -#if PY_VERSION_HEX < 0x030500A0 - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, -#else - {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, -#endif -#endif - {0, 0, 0, 0, 0} -}; -static PyObject * -__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) -{ - CYTHON_UNUSED_VAR(args); -#if PY_MAJOR_VERSION >= 3 - Py_INCREF(m->func_qualname); - return m->func_qualname; -#else - return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); -#endif -} -static PyMethodDef __pyx_CyFunction_methods[] = { - {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, - {0, 0, 0, 0} -}; -#if PY_VERSION_HEX < 0x030500A0 -#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) -#else -#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) -#endif -static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyCFunctionObject *cf = (PyCFunctionObject*) op; - if (unlikely(op == NULL)) - return NULL; - op->flags = flags; - __Pyx_CyFunction_weakreflist(op) = NULL; - cf->m_ml = ml; - cf->m_self = (PyObject *) op; - Py_XINCREF(closure); - op->func_closure = closure; - Py_XINCREF(module); - cf->m_module = module; - op->func_dict = NULL; - op->func_name = NULL; - Py_INCREF(qualname); - op->func_qualname = qualname; - op->func_doc = NULL; -#if PY_VERSION_HEX < 0x030900B1 - op->func_classobj = NULL; -#else - ((PyCMethodObject*)op)->mm_class = NULL; -#endif - op->func_globals = globals; - Py_INCREF(op->func_globals); - Py_XINCREF(code); - op->func_code = code; - op->defaults_pyobjects = 0; - op->defaults_size = 0; - op->defaults = NULL; - op->defaults_tuple = NULL; - op->defaults_kwdict = NULL; - op->defaults_getter = NULL; - op->func_annotations = NULL; - op->func_is_coroutine = NULL; -#if CYTHON_METH_FASTCALL - switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { - case METH_NOARGS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; - break; - case METH_O: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; - break; - case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; - break; - case METH_FASTCALL | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; - break; - case METH_VARARGS | METH_KEYWORDS: - __Pyx_CyFunction_func_vectorcall(op) = NULL; - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - Py_DECREF(op); - return NULL; - } -#endif - return (PyObject *) op; -} -static int -__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) -{ - Py_CLEAR(m->func_closure); - Py_CLEAR(((PyCFunctionObject*)m)->m_module); - Py_CLEAR(m->func_dict); - Py_CLEAR(m->func_name); - Py_CLEAR(m->func_qualname); - Py_CLEAR(m->func_doc); - Py_CLEAR(m->func_globals); - Py_CLEAR(m->func_code); -#if PY_VERSION_HEX < 0x030900B1 - Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); -#else - { - PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; - ((PyCMethodObject *) (m))->mm_class = NULL; - Py_XDECREF(cls); - } -#endif - Py_CLEAR(m->defaults_tuple); - 
Py_CLEAR(m->defaults_kwdict); - Py_CLEAR(m->func_annotations); - Py_CLEAR(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_XDECREF(pydefaults[i]); - PyObject_Free(m->defaults); - m->defaults = NULL; - } - return 0; -} -static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - if (__Pyx_CyFunction_weakreflist(m) != NULL) - PyObject_ClearWeakRefs((PyObject *) m); - __Pyx_CyFunction_clear(m); - __Pyx_PyHeapTypeObject_GC_Del(m); -} -static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) -{ - PyObject_GC_UnTrack(m); - __Pyx__CyFunction_dealloc(m); -} -static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) -{ - Py_VISIT(m->func_closure); - Py_VISIT(((PyCFunctionObject*)m)->m_module); - Py_VISIT(m->func_dict); - Py_VISIT(m->func_name); - Py_VISIT(m->func_qualname); - Py_VISIT(m->func_doc); - Py_VISIT(m->func_globals); - Py_VISIT(m->func_code); - Py_VISIT(__Pyx_CyFunction_GetClassObj(m)); - Py_VISIT(m->defaults_tuple); - Py_VISIT(m->defaults_kwdict); - Py_VISIT(m->func_is_coroutine); - if (m->defaults) { - PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); - int i; - for (i = 0; i < m->defaults_pyobjects; i++) - Py_VISIT(pydefaults[i]); - } - return 0; -} -static PyObject* -__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) -{ -#if PY_MAJOR_VERSION >= 3 - return PyUnicode_FromFormat("<cyfunction %U at %p>", - op->func_qualname, (void *)op); -#else - return PyString_FromFormat("<cyfunction %s at %p>", - PyString_AsString(op->func_qualname), (void *)op); -#endif -} -static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { - PyCFunctionObject* f = (PyCFunctionObject*)func; - PyCFunction meth = f->m_ml->ml_meth; - Py_ssize_t size; - switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { - case METH_VARARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) - return (*meth)(self, arg); - break; - case METH_VARARGS | METH_KEYWORDS: - return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); - case METH_NOARGS: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 0)) - return (*meth)(self, NULL); - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - case METH_O: - if (likely(kw == NULL || PyDict_Size(kw) == 0)) { - size = PyTuple_GET_SIZE(arg); - if (likely(size == 1)) { - PyObject *result, *arg0; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - arg0 = PyTuple_GET_ITEM(arg, 0); - #else - arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; - #endif - result = (*meth)(self, arg0); - #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) - Py_DECREF(arg0); - #endif - return result; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - f->m_ml->ml_name, size); - return NULL; - } - break; - default: - PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); - return NULL; - } - PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", - f->m_ml->ml_name); - return NULL; -} -static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { - return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); 
-} -static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { - PyObject *result; - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; -#if CYTHON_METH_FASTCALL - __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); - if (vc) { -#if CYTHON_ASSUME_SAFE_MACROS - return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); -#else - (void) &__Pyx_PyVectorcall_FastCallDict; - return PyVectorcall_Call(func, args, kw); -#endif - } -#endif - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - Py_ssize_t argc; - PyObject *new_args; - PyObject *self; - argc = PyTuple_GET_SIZE(args); - new_args = PyTuple_GetSlice(args, 1, argc); - if (unlikely(!new_args)) - return NULL; - self = PyTuple_GetItem(args, 0); - if (unlikely(!self)) { - Py_DECREF(new_args); - return NULL; - } - result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); - Py_DECREF(new_args); - } else { - result = __Pyx_CyFunction_Call(func, args, kw); - } - return result; -} -#if CYTHON_METH_FASTCALL -static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) -{ - int ret = 0; - if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { - if (unlikely(nargs < 1)) { - PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", - ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - ret = 1; - } - if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); - return -1; - } - return ret; -} -static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 0)) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, NULL); -} -static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - if (unlikely(nargs != 1)) { - PyErr_Format(PyExc_TypeError, - "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", - def->ml_name, nargs); - return NULL; - } - return def->ml_meth(self, args[0]); -} -static PyObject * 
__Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); -} -static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) -{ - __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; - PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; - PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); -#if CYTHON_BACKPORT_VECTORCALL - Py_ssize_t nargs = (Py_ssize_t)nargsf; -#else - Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); -#endif - PyObject *self; - switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { - case 1: - self = args[0]; - args += 1; - nargs -= 1; - break; - case 0: - self = ((PyCFunctionObject*)cyfunc)->m_self; - break; - default: - return NULL; - } - return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, nargs, kwnames); -} -#endif -#if CYTHON_USE_TYPE_SPECS -static PyType_Slot __pyx_CyFunctionType_slots[] = { - {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, - {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, - {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, - {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, - {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, - {Py_tp_methods, (void *)__pyx_CyFunction_methods}, - {Py_tp_members, (void *)__pyx_CyFunction_members}, - {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, - {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, - {0, 0}, -}; -static PyType_Spec __pyx_CyFunctionType_spec = { - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - __pyx_CyFunctionType_slots -}; -#else -static PyTypeObject __pyx_CyFunctionType_type = { - PyVarObject_HEAD_INIT(0, 0) - __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", - sizeof(__pyx_CyFunctionObject), - 0, - (destructor) __Pyx_CyFunction_dealloc, -#if !CYTHON_METH_FASTCALL - 0, -#elif CYTHON_BACKPORT_VECTORCALL - (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), -#else - offsetof(PyCFunctionObject, vectorcall), -#endif - 0, - 0, -#if PY_MAJOR_VERSION < 3 - 0, -#else - 0, -#endif - (reprfunc) __Pyx_CyFunction_repr, - 0, - 0, - 0, - 0, - __Pyx_CyFunction_CallAsMethod, - 0, - 0, - 0, - 0, -#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR - Py_TPFLAGS_METHOD_DESCRIPTOR | -#endif -#ifdef _Py_TPFLAGS_HAVE_VECTORCALL - _Py_TPFLAGS_HAVE_VECTORCALL | -#endif - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, - 0, - (traverseproc) __Pyx_CyFunction_traverse, - (inquiry) __Pyx_CyFunction_clear, - 0, -#if PY_VERSION_HEX < 0x030500A0 - offsetof(__pyx_CyFunctionObject, func_weakreflist), -#else - 
offsetof(PyCFunctionObject, m_weakreflist), -#endif - 0, - 0, - __pyx_CyFunction_methods, - __pyx_CyFunction_members, - __pyx_CyFunction_getsets, - 0, - 0, - __Pyx_PyMethod_New, - 0, - offsetof(__pyx_CyFunctionObject, func_dict), - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, -#if PY_VERSION_HEX >= 0x030400a1 - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) - 0, -#endif -#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, -#endif -#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 - 0, -#endif -}; -#endif -static int __pyx_CyFunction_init(PyObject *module) { -#if CYTHON_USE_TYPE_SPECS - __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); -#else - (void) module; - __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); -#endif - if (unlikely(__pyx_CyFunctionType == NULL)) { - return -1; - } - return 0; -} -static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults = PyObject_Malloc(size); - if (unlikely(!m->defaults)) - return PyErr_NoMemory(); - memset(m->defaults, 0, size); - m->defaults_pyobjects = pyobjects; - m->defaults_size = size; - return m->defaults; -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_tuple = tuple; - Py_INCREF(tuple); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->defaults_kwdict = dict; - Py_INCREF(dict); -} -static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { - __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; - m->func_annotations = dict; - Py_INCREF(dict); -} - -/* CythonFunction */ - static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, - PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { - PyObject *op = __Pyx_CyFunction_Init( - PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), - ml, flags, qualname, closure, module, globals, code - ); - if (likely(op)) { - PyObject_GC_Track(op); - } - return op; -} - -/* CLineInTraceback */ - #ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ - #if !CYTHON_COMPILING_IN_LIMITED_API -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} -#endif - -/* AddTraceback */ - #include "compile.h" -#include "frameobject.h" -#include "traceback.h" -#if CYTHON_COMPILING_IN_LIMITED_API -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - if (c_line) { - (void) __pyx_cfilenm; - c_line = __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); - } - _PyTraceback_Add(funcname, filename, c_line ? 
-c_line : py_line); -} -#else -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = NULL; - PyObject *py_funcname = NULL; - #if PY_MAJOR_VERSION < 3 - PyObject *py_srcfile = NULL; - py_srcfile = PyString_FromString(filename); - if (!py_srcfile) goto bad; - #endif - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - if (!py_funcname) goto bad; - funcname = PyUnicode_AsUTF8(py_funcname); - if (!funcname) goto bad; - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - if (!py_funcname) goto bad; - #endif - } - #if PY_MAJOR_VERSION < 3 - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - #else - py_code = PyCode_NewEmpty(filename, funcname, py_line); - #endif - Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline - return py_code; -bad: - Py_XDECREF(py_funcname); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_srcfile); - #endif - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} -#endif - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const long neg_one = (long) -1, const_zero = (long) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(long) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 2 * 
PyLong_SHIFT)) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(long) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << 
PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if ((sizeof(long) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if (CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* FormatTypeName */ - #if CYTHON_COMPILING_IN_LIMITED_API -static __Pyx_TypeName -__Pyx_PyType_GetName(PyTypeObject* tp) -{ - PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, - __pyx_n_s_name); - if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { - PyErr_Clear(); - Py_XSETREF(name, __Pyx_NewRef(__pyx_n_s__46)); - } - return name; -} -#endif - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wconversion" -#endif - const int neg_one = (int) -1, const_zero = (int) 0; -#ifdef __Pyx_HAS_GCC_DIAGNOSTIC -#pragma GCC diagnostic pop -#endif - const int is_unsigned = neg_one > const_zero; -#if 
PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if ((sizeof(int) < sizeof(long))) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if ((sizeof(int) <= sizeof(unsigned long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - return (int) ((((((int)digits[1]) 
<< PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { - if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if ((sizeof(int) <= sizeof(long))) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if (CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available, cannot convert large numbers"); -#else - int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* FastTypeChecks 
*/ - #if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (cls == a || cls == b) return 1; - mro = cls->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - PyObject *base = PyTuple_GET_ITEM(mro, i); - if (base == (PyObject *)a || base == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - if (exc_type1) { - return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); - } else { - return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - PyObject *t = PyTuple_GET_ITEM(tuple, i); - #if PY_MAJOR_VERSION < 3 - if (likely(exc_type == t)) return 1; - #endif - if (likely(PyExceptionClass_Check(t))) { - if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; - } else { - } - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { - if (likely(err == exc_type)) return 1; - if (likely(PyExceptionClass_Check(err))) { - if (likely(PyExceptionClass_Check(exc_type))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); - } else if (likely(PyTuple_Check(exc_type))) { - return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); - } else { - } - } - return PyErr_GivenExceptionMatches(err, exc_type); -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { - assert(PyExceptionClass_Check(exc_type1)); - assert(PyExceptionClass_Check(exc_type2)); - if (likely(err == exc_type1 || err == exc_type2)) return 1; - if (likely(PyExceptionClass_Check(err))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); - } - return (PyErr_GivenExceptionMatches(err, 
exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); -} -#endif - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compile time version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - #if PY_MAJOR_VERSION >= 3 -static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { - if (t.is_unicode | t.is_str) { - if (t.intern) { - *str = PyUnicode_InternFromString(t.s); - } else if (t.encoding) { - *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); - } else { - *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); - } - } else { - *str = PyBytes_FromStringAndSize(t.s, t.n - 1); - } - if (!*str) - return -1; - if (PyObject_Hash(*str) == -1) - return -1; - return 0; -} -#endif -#if !CYTHON_COMPILING_IN_LIMITED_API -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION >= 3 - __Pyx_InitString(*t, t->p); - #else - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - #endif - ++t; - } - return 0; -} -#endif - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY && 
!CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { - __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). " - "The ability to return an instance of a strict subclass of int is deprecated, " - "and may be removed in a future version of Python.", - result_type_name)) { - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; - } - __Pyx_DECREF_TypeName(result_type_name); - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", - type_name, type_name, result_type_name); - __Pyx_DECREF_TypeName(result_type_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? 
digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { - if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { - return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); -#if PY_MAJOR_VERSION < 3 - } else if (likely(PyInt_CheckExact(o))) { - return PyInt_AS_LONG(o); -#endif - } else { - Py_ssize_t ival; - PyObject *x; - x = PyNumber_Index(o); - if (!x) return -1; - ival = PyInt_AsLong(x); - Py_DECREF(x); - return ival; - } -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -/* #### Code section: utility_code_pragmas_end ### */ -#if _MSV_VER -#pragma warning( pop ) -#endif - - - -/* #### Code section: end ### */ -#endif /* Py_PYTHON_H */ diff --git a/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/zip.py b/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/zip.py deleted file mode 100644 index f0b17849d36991e7def35a14d3d518b9d867ce36..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/AudioCraft_Plus/audiocraft/data/zip.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -"""Utility for reading some info from inside a zip file. -""" - -import typing -import zipfile - -from dataclasses import dataclass -from functools import lru_cache -from typing_extensions import Literal - - -DEFAULT_SIZE = 32 -MODE = Literal['r', 'w', 'x', 'a'] - - -@dataclass(order=True) -class PathInZip: - """Hold a path of file within a zip file. - - Args: - path (str): The convention is <path_to_zip>:<relative_path_inside_zip>. - Let's assume there is a zip file /some/location/foo.zip - and inside of it is a json file located at /data/file1.json, - Then we expect path = "/some/location/foo.zip:/data/file1.json". 
- """ - - INFO_PATH_SEP = ':' - zip_path: str - file_path: str - - def __init__(self, path: str) -> None: - split_path = path.split(self.INFO_PATH_SEP) - assert len(split_path) == 2 - self.zip_path, self.file_path = split_path - - @classmethod - def from_paths(cls, zip_path: str, file_path: str): - return cls(zip_path + cls.INFO_PATH_SEP + file_path) - - def __str__(self) -> str: - return self.zip_path + self.INFO_PATH_SEP + self.file_path - - -def _open_zip(path: str, mode: MODE = 'r'): - return zipfile.ZipFile(path, mode) - - -_cached_open_zip = lru_cache(DEFAULT_SIZE)(_open_zip) - - -def set_zip_cache_size(max_size: int): - """Sets the maximal LRU caching for zip file opening. - - Args: - max_size (int): the maximal LRU cache. - """ - global _cached_open_zip - _cached_open_zip = lru_cache(max_size)(_open_zip) - - -def open_file_in_zip(path_in_zip: PathInZip, mode: str = 'r') -> typing.IO: - """Opens a file stored inside a zip and returns a file-like object. - - Args: - path_in_zip (PathInZip): A PathInZip object representing the file to return a file-like object of. - mode (str): The mode in which to open the file with. - Returns: - A file-like object for PathInZip. - """ - zf = _cached_open_zip(path_in_zip.zip_path) - return zf.open(path_in_zip.file_path) diff --git a/spaces/matthoffner/starchat-ui/components/Promptbar/Promptbar.state.tsx b/spaces/matthoffner/starchat-ui/components/Promptbar/Promptbar.state.tsx deleted file mode 100644 index fec0eefba323fc025f8f5a69df7c8f807c142479..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/components/Promptbar/Promptbar.state.tsx +++ /dev/null @@ -1,11 +0,0 @@ -import { Prompt } from '@/types/prompt'; - -export interface PromptbarInitialState { - searchTerm: string; - filteredPrompts: Prompt[]; -} - -export const initialState: PromptbarInitialState = { - searchTerm: '', - filteredPrompts: [], -}; diff --git a/spaces/matthoffner/starchat-ui/components/Spinner/Spinner.tsx b/spaces/matthoffner/starchat-ui/components/Spinner/Spinner.tsx deleted file mode 100644 index f0cf09fca8da7c8479319670d0736db2ce84cad2..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/starchat-ui/components/Spinner/Spinner.tsx +++ /dev/null @@ -1,34 +0,0 @@ -import { FC } from 'react'; - -interface Props { - size?: string; - className?: string; -} - -const Spinner = ({ size = '1em', className = '' }: Props) => { - return ( - <svg - stroke="currentColor" - fill="none" - strokeWidth="2" - viewBox="0 0 24 24" - strokeLinecap="round" - strokeLinejoin="round" - className={`animate-spin ${className}`} - height={size} - width={size} - xmlns="http://www.w3.org/2000/svg" - > - <line x1="12" y1="2" x2="12" y2="6"></line> - <line x1="12" y1="18" x2="12" y2="22"></line> - <line x1="4.93" y1="4.93" x2="7.76" y2="7.76"></line> - <line x1="16.24" y1="16.24" x2="19.07" y2="19.07"></line> - <line x1="2" y1="12" x2="6" y2="12"></line> - <line x1="18" y1="12" x2="22" y2="12"></line> - <line x1="4.93" y1="19.07" x2="7.76" y2="16.24"></line> - <line x1="16.24" y1="7.76" x2="19.07" y2="4.93"></line> - </svg> - ); -}; - -export default Spinner; diff --git a/spaces/maxmax20160403/sovits5.0/vits_decoder/__init__.py b/spaces/maxmax20160403/sovits5.0/vits_decoder/__init__.py deleted file mode 100644 index 986a0cfe522626f45f6c2d4dede44374c86bbe71..0000000000000000000000000000000000000000 --- a/spaces/maxmax20160403/sovits5.0/vits_decoder/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .alias.act import SnakeAlias \ No newline at end of file diff --git 
a/spaces/maxmax20160403/vits_chinese/attentions.py b/spaces/maxmax20160403/vits_chinese/attentions.py deleted file mode 100644 index 84759e83a75dccbf4d9e84c7d4c4141725ba462a..0000000000000000000000000000000000000000 --- a/spaces/maxmax20160403/vits_chinese/attentions.py +++ /dev/null @@ -1,417 +0,0 @@ -import copy -import math -import numpy as np -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - window_size=4, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - window_size=window_size, - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention( - hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout - ) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN( - hidden_channels, - hidden_channels, - filter_channels, - kernel_size, - p_dropout=p_dropout, - causal=True, - ) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to( - device=x.device, dtype=x.dtype - 
) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__( - self, - channels, - out_channels, - n_heads, - p_dropout=0.0, - window_size=None, - heads_share=True, - block_length=None, - proximal_bias=False, - proximal_init=False, - ): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - self.emb_rel_v = nn.Parameter( - torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) - * rel_stddev - ) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert ( - t_s == t_t - ), "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys( - query / math.sqrt(self.k_channels), key_relative_embeddings - ) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to( - device=scores.device, dtype=scores.dtype - ) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert ( - t_s == t_t - ), "Local attention is only available for self-attention." 
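            # Keep only a band of width 2*block_length+1 around the diagonal (via triu/tril below),
            # so each query attends to a local window; scores outside the band get -1e4 before the softmax.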
- block_mask = ( - torch.ones_like(scores) - .triu(-self.block_length) - .tril(self.block_length) - ) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings( - self.emb_rel_v, t_s - ) - output = output + self._matmul_with_relative_values( - relative_weights, value_relative_embeddings - ) - output = ( - output.transpose(2, 3).contiguous().view(b, d, t_t) - ) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]), - ) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[ - :, slice_start_position:slice_end_position - ] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad( - x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]) - ) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[ - :, :, :length, length - 1 : - ] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad( - x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]) - ) - x_flat = x.view([batch, heads, length**2 + length * (length - 1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__( - self, - in_channels, - out_channels, - filter_channels, - kernel_size, - p_dropout=0.0, - activation=None, - causal=False, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/fuse_modules.py b/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/fuse_modules.py deleted file mode 100644 index 2753b3ddee43c7a9fe28d1824db5d786e7e1ad59..0000000000000000000000000000000000000000 --- a/spaces/merve/Grounding_DINO_demo/groundingdino/models/GroundingDINO/fuse_modules.py +++ /dev/null @@ -1,297 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ - -import torch -import torch.nn as nn -import torch.nn.functional as F -from timm.models.layers import DropPath - - -class FeatureResizer(nn.Module): - """ - This class takes as input a set of embeddings of dimension C1 and outputs a set of - embedding of dimension C2, after a linear transformation, dropout and normalization (LN). 
- """ - - def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True): - super().__init__() - self.do_ln = do_ln - # Object feature encoding - self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True) - self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12) - self.dropout = nn.Dropout(dropout) - - def forward(self, encoder_features): - x = self.fc(encoder_features) - if self.do_ln: - x = self.layer_norm(x) - output = self.dropout(x) - return output - - -def l1norm(X, dim, eps=1e-8): - """L1-normalize columns of X""" - norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps - X = torch.div(X, norm) - return X - - -def l2norm(X, dim, eps=1e-8): - """L2-normalize columns of X""" - norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps - X = torch.div(X, norm) - return X - - -def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8): - """ - query: (n_context, queryL, d) - context: (n_context, sourceL, d) - """ - batch_size_q, queryL = query.size(0), query.size(1) - batch_size, sourceL = context.size(0), context.size(1) - - # Get attention - # --> (batch, d, queryL) - queryT = torch.transpose(query, 1, 2) - - # (batch, sourceL, d)(batch, d, queryL) - # --> (batch, sourceL, queryL) - attn = torch.bmm(context, queryT) - if raw_feature_norm == "softmax": - # --> (batch*sourceL, queryL) - attn = attn.view(batch_size * sourceL, queryL) - attn = nn.Softmax()(attn) - # --> (batch, sourceL, queryL) - attn = attn.view(batch_size, sourceL, queryL) - elif raw_feature_norm == "l2norm": - attn = l2norm(attn, 2) - elif raw_feature_norm == "clipped_l2norm": - attn = nn.LeakyReLU(0.1)(attn) - attn = l2norm(attn, 2) - else: - raise ValueError("unknown first norm type:", raw_feature_norm) - # --> (batch, queryL, sourceL) - attn = torch.transpose(attn, 1, 2).contiguous() - # --> (batch*queryL, sourceL) - attn = attn.view(batch_size * queryL, sourceL) - attn = nn.Softmax()(attn * smooth) - # --> (batch, queryL, sourceL) - attn = attn.view(batch_size, queryL, sourceL) - # --> (batch, sourceL, queryL) - attnT = torch.transpose(attn, 1, 2).contiguous() - - # --> (batch, d, sourceL) - contextT = torch.transpose(context, 1, 2) - # (batch x d x sourceL)(batch x sourceL x queryL) - # --> (batch, d, queryL) - weightedContext = torch.bmm(contextT, attnT) - # --> (batch, queryL, d) - weightedContext = torch.transpose(weightedContext, 1, 2) - - return weightedContext, attnT - - -class BiMultiHeadAttention(nn.Module): - def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None): - super(BiMultiHeadAttention, self).__init__() - - self.embed_dim = embed_dim - self.num_heads = num_heads - self.head_dim = embed_dim // num_heads - self.v_dim = v_dim - self.l_dim = l_dim - - assert ( - self.head_dim * self.num_heads == self.embed_dim - ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})." 
- self.scale = self.head_dim ** (-0.5) - self.dropout = dropout - - self.v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.l_proj = nn.Linear(self.l_dim, self.embed_dim) - self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim) - self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim) - - self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim) - self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim) - - self.stable_softmax_2d = True - self.clamp_min_for_underflow = True - self.clamp_max_for_overflow = True - - self._reset_parameters() - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def _reset_parameters(self): - nn.init.xavier_uniform_(self.v_proj.weight) - self.v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.l_proj.weight) - self.l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_v_proj.weight) - self.values_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.values_l_proj.weight) - self.values_l_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_v_proj.weight) - self.out_v_proj.bias.data.fill_(0) - nn.init.xavier_uniform_(self.out_l_proj.weight) - self.out_l_proj.bias.data.fill_(0) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - """_summary_ - - Args: - v (_type_): bs, n_img, dim - l (_type_): bs, n_text, dim - attention_mask_v (_type_, optional): _description_. bs, n_img - attention_mask_l (_type_, optional): _description_. bs, n_text - - Returns: - _type_: _description_ - """ - # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO': - # import ipdb; ipdb.set_trace() - bsz, tgt_len, _ = v.size() - - query_states = self.v_proj(v) * self.scale - key_states = self._shape(self.l_proj(l), -1, bsz) - value_v_states = self._shape(self.values_v_proj(v), -1, bsz) - value_l_states = self._shape(self.values_l_proj(l), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_v_states = value_v_states.view(*proj_shape) - value_l_states = value_l_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" - ) - - if self.stable_softmax_2d: - attn_weights = attn_weights - attn_weights.max() - - if self.clamp_min_for_underflow: - attn_weights = torch.clamp( - attn_weights, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights = torch.clamp( - attn_weights, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - attn_weights_T = attn_weights.transpose(1, 2) - attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0] - if self.clamp_min_for_underflow: - attn_weights_l = torch.clamp( - attn_weights_l, min=-50000 - ) # Do not increase -50000, data type half has quite limited range - if self.clamp_max_for_overflow: - attn_weights_l = torch.clamp( - attn_weights_l, max=50000 - ) # Do not increase 50000, data type half has quite limited range - - # mask vison for language - if attention_mask_v is not None: - attention_mask_v = ( - attention_mask_v[:, None, None, 
:].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights_l.masked_fill_(attention_mask_v, float("-inf")) - - attn_weights_l = attn_weights_l.softmax(dim=-1) - - # mask language for vision - if attention_mask_l is not None: - attention_mask_l = ( - attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1) - ) - attn_weights.masked_fill_(attention_mask_l, float("-inf")) - attn_weights_v = attn_weights.softmax(dim=-1) - - attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training) - attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training) - - attn_output_v = torch.bmm(attn_probs_v, value_l_states) - attn_output_l = torch.bmm(attn_probs_l, value_v_states) - - if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}" - ) - - if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim): - raise ValueError( - f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}" - ) - - attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output_v = attn_output_v.transpose(1, 2) - attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim) - - attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim) - attn_output_l = attn_output_l.transpose(1, 2) - attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim) - - attn_output_v = self.out_v_proj(attn_output_v) - attn_output_l = self.out_l_proj(attn_output_l) - - return attn_output_v, attn_output_l - - -# Bi-Direction MHA (text->image, image->text) -class BiAttentionBlock(nn.Module): - def __init__( - self, - v_dim, - l_dim, - embed_dim, - num_heads, - dropout=0.1, - drop_path=0.0, - init_values=1e-4, - cfg=None, - ): - """ - Inputs: - embed_dim - Dimensionality of input and attention feature vectors - hidden_dim - Dimensionality of hidden layer in feed-forward network - (usually 2-4x larger than embed_dim) - num_heads - Number of heads to use in the Multi-Head Attention block - dropout - Amount of dropout to apply in the feed-forward network - """ - super(BiAttentionBlock, self).__init__() - - # pre layer norm - self.layer_norm_v = nn.LayerNorm(v_dim) - self.layer_norm_l = nn.LayerNorm(l_dim) - self.attn = BiMultiHeadAttention( - v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout - ) - - # add layer scale for training stability - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True) - self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True) - - def forward(self, v, l, attention_mask_v=None, attention_mask_l=None): - v = self.layer_norm_v(v) - l = self.layer_norm_l(l) - delta_v, delta_l = self.attn( - v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l - ) - # v, l = v + delta_v, l + delta_l - v = v + self.drop_path(self.gamma_v * delta_v) - l = l + self.drop_path(self.gamma_l * delta_l) - return v, l - - # def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None) diff --git a/spaces/merve/Grounding_DINO_demo/groundingdino/models/registry.py b/spaces/merve/Grounding_DINO_demo/groundingdino/models/registry.py deleted file mode 100644 index 
2d22a59eec79a2a19b83fa1779f2adaf5753aec6..0000000000000000000000000000000000000000 --- a/spaces/merve/Grounding_DINO_demo/groundingdino/models/registry.py +++ /dev/null @@ -1,66 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# -*- coding: utf-8 -*- -# @Author: Yihao Chen -# @Date: 2021-08-16 16:03:17 -# @Last Modified by: Shilong Liu -# @Last Modified time: 2022-01-23 15:26 -# modified from mmcv - -import inspect -from functools import partial - - -class Registry(object): - def __init__(self, name): - self._name = name - self._module_dict = dict() - - def __repr__(self): - format_str = self.__class__.__name__ + "(name={}, items={})".format( - self._name, list(self._module_dict.keys()) - ) - return format_str - - def __len__(self): - return len(self._module_dict) - - @property - def name(self): - return self._name - - @property - def module_dict(self): - return self._module_dict - - def get(self, key): - return self._module_dict.get(key, None) - - def registe_with_name(self, module_name=None, force=False): - return partial(self.register, module_name=module_name, force=force) - - def register(self, module_build_function, module_name=None, force=False): - """Register a module build function. - Args: - module (:obj:`nn.Module`): Module to be registered. - """ - if not inspect.isfunction(module_build_function): - raise TypeError( - "module_build_function must be a function, but got {}".format( - type(module_build_function) - ) - ) - if module_name is None: - module_name = module_build_function.__name__ - if not force and module_name in self._module_dict: - raise KeyError("{} is already registered in {}".format(module_name, self.name)) - self._module_dict[module_name] = module_build_function - - return module_build_function - - -MODULE_BUILD_FUNCS = Registry("model build functions") diff --git a/spaces/merve/data-leak/source/measuring-fairness/graph-scroll.css b/spaces/merve/data-leak/source/measuring-fairness/graph-scroll.css deleted file mode 100644 index e3757d99ca305478165c6f7e4781ec0ce95b6291..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/source/measuring-fairness/graph-scroll.css +++ /dev/null @@ -1,119 +0,0 @@ -#container{ - position: relative; - width: auto; -} - -#sections{ - width: 340px; -} - -#sections > div{ - background: white; - opacity: .2; - margin-bottom: 400px; - line-height: 1.4em; - transition: opacity .2s; -} -#sections > div:first-child{ - opacity: 1; -} -#sections > div:last-child{ - /*padding-bottom: 80vh;*/ - padding-bottom: 80px; - margin-bottom: 0px; -} -#sections > div:first-child > h1{ - padding-top: 40px; -} - -#sections > div.graph-scroll-active{ - opacity: 1; -} - -#graph{ - margin-left: 40px; - width: 500px; - position: -webkit-sticky; - position: sticky; - top: 0px; - float: right; - height: 580px; - font-family: 'Google Sans', sans-serif; - -} - -.slider{ - font-family: 'Google Sans', sans-serif; -} - -#sections h1{ - text-align: left !important; -} - -@media (max-width: 1000px) and (min-width: 926px){ - #sections{ - margin-left: 20px; - } -} - -@media (max-width: 925px) { - #container{ - margin-left: 0px; - } - - #graph{ - width: 100%; - margin-left: 10px; - float: none; - max-width: 500px; - margin: 0px auto; - } - - #graph > 
div{ - position: relative; - top: 0px; - } - #sections{ - width: auto; - position: relative; - margin: 0px auto; - } - - #sections > div{ - background: rgba(255,255,255,.8); - padding: 10px; - border-top: 1px solid; - border-bottom: 1px solid; - margin-bottom: 80vh; - width: calc(100vw - 20px); - margin-left: -5px; - } - - #sections > div > *{ - max-width: 750px; - } - .mini, .slider, i, .gated{ - margin: 0px auto; - } - - #sections > div:first-child{ - opacity: 1; - margin-top: -140px; - } - - #sections > div:last-child{ - padding-bottom: 0px; - margin-bottom: 0px; - } - - - #sections h1{ - margin: 10px; - padding-top: 0px !important; - } - - #sections h3{ - margin-top: .5em; - } - -} diff --git a/spaces/merve/dataset-worldviews/public/data-leak/players0.js b/spaces/merve/dataset-worldviews/public/data-leak/players0.js deleted file mode 100644 index 5f1640268c5aa31e0ed73ec7f763b4c64d65f587..0000000000000000000000000000000000000000 --- a/spaces/merve/dataset-worldviews/public/data-leak/players0.js +++ /dev/null @@ -1,456 +0,0 @@ -var players0 = [ - [ - 1.305925030229746, - 38.016928657799276 - ], - [ - 20.894800483675937, - 23.071342200725514 - ], - [ - 24.232164449818622, - 50.35066505441355 - ], - [ - 37.29141475211608, - 4.643288996372431 - ], - [ - 57.89600967351874, - 25.24788391777509 - ], - [ - 41.20918984280532, - 34.389359129383315 - ], - [ - 42.51511487303507, - 54.26844014510278 - ], - [ - 31.77750906892382, - 67.9081015719468 - ], - [ - 63.84522370012092, - 54.41354292623942 - ], - [ - 70.37484885126965, - 42.22490931076179 - ], - [ - 39.32285368802902, - 56.44498186215236 - ], - [ - 35.550181378476424, - 58.91172914147521 - ], - [ - 46.57799274486094, - 52.8174123337364 - ], - [ - 39.6130592503023, - 37.14631197097945 - ], - [ - 42.51511487303507, - 30.90689238210399 - ], - [ - 50.64087061668682, - 8.706166868198308 - ], - [ - 71.10036275695285, - 8.996372430471585 - ], - [ - 75.01813784764208, - 26.844014510278114 - ], - [ - 77.3397823458283, - 47.44860943168077 - ], - [ - 76.17896009673518, - 59.34703748488513 - ], - [ - 105.05441354292624, - 39.177750906892385 - ], - [ - 59.34703748488513, - 33.083434099153564 - ] -] - - -var players1 = [ - [ - 6.819830713422007, - 27.569528415961305 - ], - [ - 31.05199516324063, - 30.03627569528416 - ], - [ - 28.440145102781138, - 43.24062877871826 - ], - [ - 48.02902055622733, - 13.639661426844015 - ], - [ - 62.249093107617895, - 35.69528415961306 - ], - [ - 49.915356711003625, - 26.553808948004836 - ], - [ - 53.68802902055623, - 47.88391777509069 - ], - [ - 45.85247883917775, - 54.123337363966144 - ], - [ - 72.8415961305925, - 46.57799274486094 - ], - [ - 70.81015719467956, - 23.216444981862153 - ], - [ - 35.98548972188634, - 44.11124546553809 - ], - [ - 49.48004836759371, - 59.92744860943168 - ], - [ - 46.86819830713422, - 45.417170495767834 - ], - [ - 39.6130592503023, - 37.14631197097945 - ], - [ - 42.37001209189843, - 24.812575574365177 - ], - [ - 53.252720677146314, - 9.721886336154776 - ], - [ - 73.5671100362757, - 8.996372430471585 - ], - [ - 80.96735187424426, - 26.698911729141475 - ], - [ - 85.75574365175332, - 37.43651753325272 - ], - [ - 87.35187424425635, - 47.88391777509069 - ], - [ - 112.59975816203143, - 31.77750906892382 - ], - [ - 58.041112454655384, - 25.97339782345828 - ] -] - -var players2 = [ - [ - 22.6360338573156, - 36.27569528415961 - ], - [ - 49.48004836759371, - 18.71825876662636 - ], - [ - 43.82103990326481, - 34.82466747279323 - ], - [ - 94.89721886336154, - 6.674727932285369 - ], - [ - 103.31318016928658, - 
24.522370012091898 - ], - [ - 82.12817412333736, - 32.0677146311971 - ], - [ - 52.8174123337364, - 56.009673518742446 - ], - [ - 91.26964933494558, - 55.28415961305925 - ], - [ - 99.68561064087062, - 40.33857315598549 - ], - [ - 105.19951632406288, - 40.33857315598549 - ], - [ - 53.542926239419586, - 43.966142684401454 - ], - [ - 49.48004836759371, - 59.92744860943168 - ], - [ - 58.18621523579202, - 37.87182587666263 - ], - [ - 86.91656590084644, - 37.58162031438936 - ], - [ - 59.34703748488513, - 18.137847642079805 - ], - [ - 96.34824667472793, - 25.24788391777509 - ], - [ - 90.97944377267231, - 8.996372430471585 - ], - [ - 104.47400241837968, - 31.342200725513905 - ], - [ - 109.8428053204353, - 28.295042321644498 - ], - [ - 105.05441354292624, - 43.24062877871826 - ], - [ - 116.2273276904474, - 25.538089480048367 - ], - [ - 86.62636033857315, - 29.165659008464328 - ] -] - - -playersleakhigh = [ - [ - 2.71764705882353, - 22 - ], - [ - 38.11764705882353, - 44.75294117647059 - ], - [ - 31.058823529411764, - 53.22352941176471 - ], - [ - 52.94117647058824, - 51.10588235294118 - ], - [ - 58.023529411764706, - 50.11764705882353 - ], - [ - 46.305882352941175, - 51.247058823529414 - ], - [ - 46.023529411764706, - 42.635294117647064 - ], - [ - 41.082352941176474, - 48.98823529411765 - ], - [ - 49.411764705882355, - 43.76470588235294 - ], - [ - 59.71764705882353, - 43.48235294117647 - ], - [ - 39.32285368802902, - 56.44498186215236 - ], - [ - 67.76470588235294, - 30.494117647058825 - ], - [ - 78.07058823529412, - 48.28235294117647 - ], - [ - 69.60000000000001, - 40.23529411764706 - ], - [ - 76.09411764705882, - 23.152941176470588 - ], - [ - 85.9764705882353, - 24.282352941176473 - ], - [ - 84.56470588235294, - 48.98823529411765 - ], - [ - 74.68235294117648, - 39.38823529411765 - ], - [ - 79.3529411764706, - 22 - ], - [ - 93.1764705882353, - 34.44705882352941 - ], - [ - 86.68235294117648, - 33.45882352941177 - ], - [ - 81.74117647058824, - 41.92941176470588 - ] -] - -playersleaklow = [ - [ - 2.71764705882353, - 73.12941176470588 - ], - [ - 38.11764705882353, - 44.75294117647059 - ], - [ - 31.058823529411764, - 53.22352941176471 - ], - [ - 52.94117647058824, - 51.10588235294118 - ], - [ - 58.023529411764706, - 50.11764705882353 - ], - [ - 46.305882352941175, - 51.247058823529414 - ], - [ - 46.023529411764706, - 42.635294117647064 - ], - [ - 41.082352941176474, - 48.98823529411765 - ], - [ - 49.411764705882355, - 43.76470588235294 - ], - [ - 59.71764705882353, - 43.48235294117647 - ], - [ - 39.32285368802902, - 56.44498186215236 - ], - [ - 67.76470588235294, - 30.494117647058825 - ], - [ - 78.07058823529412, - 48.28235294117647 - ], - [ - 69.60000000000001, - 40.23529411764706 - ], - [ - 76.09411764705882, - 23.152941176470588 - ], - [ - 85.9764705882353, - 24.282352941176473 - ], - [ - 84.56470588235294, - 48.98823529411765 - ], - [ - 74.68235294117648, - 39.38823529411765 - ], - [ - 79.3529411764706, - 72.70588235294117 - ], - [ - 93.1764705882353, - 34.44705882352941 - ], - [ - 86.68235294117648, - 33.45882352941177 - ], - [ - 81.74117647058824, - 41.92941176470588 - ] -] \ No newline at end of file diff --git a/spaces/merve/fill-in-the-blank/source/_posts/2021-08-27-private-and-fair.md b/spaces/merve/fill-in-the-blank/source/_posts/2021-08-27-private-and-fair.md deleted file mode 100644 index bde65270e5991e7e765eb97294838e8732bc65e9..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/source/_posts/2021-08-27-private-and-fair.md +++ /dev/null @@ -1,145 +0,0 @@ ---- 
-template: post.html -title: Can a Model Be Differentially Private and Fair? -summary: Training models with differential privacy stops models from inadvertently leaking sensitive data, but there's an unexpected side-effect: reduced accuracy on underrepresented subgroups. -shareimg: https://pair.withgoogle.com/explorables/images/private-and-fair.png -shareimgabstract: https://pair.withgoogle.com/explorables/images/private-and-fair-abstract.png -permalink: /private-and-fair/ ---- - -Imagine you want to use machine learning to suggest new bands to listen to. You could do this by having lots of people list their favorite bands and using them to train a model. The trained model might be quite useful and fun, but if someone pokes and prods at the model in just the right way, they could [extract](https://www.wired.com/2007/12/why-anonymous-data-sometimes-isnt/) the music preferences of someone whose data was used to train the model. Other kinds of models are potentially vulnerable; [credit card numbers](https://bair.berkeley.edu/blog/2019/08/13/memorization/) have been pulled out of language models and [actual faces](https://rist.tech.cornell.edu/papers/mi-ccs.pdf) reconstructed from image models. - -Training with [differential privacy](https://desfontain.es/privacy/differential-privacy-awesomeness.html) limits the information about any one data point that is extractable but in some cases there's an unexpected side-effect: reduced accuracy with underrepresented subgroups disparately impacted. - -<div class='info-box'></div> - -Recall that machine learning models are typically trained with [gradient descent](https://playground.tensorflow.org/), a series of small steps taken to minimize an error function. To show how a model can leak its training data, we've trained two simple models to separate red and blue dots using two simple datasets that differ in one way: a single isolated data point in the upper left has been switched from red to blue. - -<div class='epoch-graph'></div> - -Notice that the two models have very different boundary lines near the isolated point by the end of the training. Someone with access to the trained model might be able to [infer](https://pair.withgoogle.com/explorables/data-leak/) if the point in the upper left is red or blue — if the color represented sensitive information, like someone's [voting record](https://gothamist.com/news/researchers-know-how-dante-de-blasio-hundreds-other-new-yorkers-voted), that could be quite bad! - -### Protecting the Privacy of Training Points - -We can prevent a single data point from drastically altering the model by [adding](http://www.cleverhans.io/privacy/2019/03/26/machine-learning-with-differential-privacy-in-tensorflow.html) two operations to each training step:<a class='footstart'>²</a> -- ⚬ Clipping the gradient (here, limiting how much the boundary line can move with each step) to bound the maximum impact a single data point can have on the final model. -- ⚬ Adding random noise to the gradient. - -Try **increasing** the random noise below. We're now training lots of differentially private models; the more the potential models for the red and blue outlier points overlap, the more [plausible deniability](https://pair.withgoogle.com/explorables/anonymization/) the person in the upper left has.<a class='footstart'></a> - -<div class='decision-boundry'></div> - -You can also try dragging the other points around and adjusting the gradient clipping. Are points in the center or outliers more likely to modify the boundary lines? 
In two dimensions there's a limited number of outliers, but in higher dimensions [more points](https://observablehq.com/@tophtucker/theres-plenty-of-room-in-the-corners) are outliers and much more information can be extracted from a trained model. - -Correctly combined, adding gradient clipping and random noise to gradient descent make it possible to train a model with [differential privacy](https://desfontain.es/privacy/differential-privacy-awesomeness.html) – we can guarantee that a model trained on a given dataset is essentially indistinguishable from a model trained on the same dataset with a single point changed. -### Predictions on Outliers Change the Most - -What does this look like in practice? In [Distribution Density, Tails, and Outliers in Machine Learning](https://arxiv.org/abs/1910.13427), a series of increasingly differentially private models were trained on [MNIST digits](https://en.wikipedia.org/wiki/MNIST_database). Every digit in the training set was ranked according to the highest level of privacy that correctly classified it. - -<div class='top-bot-digits'></div> - -On the lower left, you can see digits labeled as "3" in the training data that look more like a "2" and a "9". They're very different from the other "3"s in the training data so adding just a bit of privacy protection causes the model to no longer classify them as "3". Under some [specific circumstances](https://arxiv.org/abs/1411.2664), differential privacy can actually improve how well the model generalizes to data it wasn't trained on by limiting the influence of spurious examples.<a class='footstart'></a> - -The right side shows more canonical digits which are classified correctly even with high levels of privacy because they're quite similar to other digits in the training data.<a class='footstart'></a> -### The Accuracy Tradeoff -Limiting how much a model can learn from a single example does have a downside: it can also decrease the model's accuracy. With <tp class='tp75'>7,500 training points</tp>, 90% accuracy on MNIST digits is only [achievable](https://colab.research.google.com/github/PAIR-code/ai-explorables/blob/master/server-side/private-and-fair/MNIST_DP_Model_Grid.ipynb) with an extremely low level of privacy protection; increasing privacy quickly lowers the model's accuracy. - -Collecting more training data offers a way out of this accuracy/privacy tradeoff. With <tp class='tp60'>60,000 training points,</tp> 90% accuracy can be reached with a higher privacy level than almost all [real-world deployments](https://desfontain.es/privacy/real-world-differential-privacy.html) of differential privacy. - -<div class='accuracy-v-privacy-dataset_size'></div> - -Looking at the differences between predictions by digit class shows another potential complication: some classes are harder to identify than others. Detecting an "8" with high confidence requires more training data and/or lower privacy than detecting a "0" with high confidence. - -<div class='accuracy-v-privacy-class'></div> - -This problem is exacerbated if the training data has fewer examples of one class than the others. Trying to predict an uncommon event with a differentially private model can require an enormous amount of data.<a class='footstart'></a> - -### Implications for Fairness - -Outliers also aren't evenly distributed within a class. 
Below, MNIST digits are colored by their sensitivity to higher privacy levels and projected with [UMAP](https://pair-code.github.io/understanding-umap/), forming several clusters of privacy-sensitive yellow digits. It's possible to inadvertently train a model with good overall accuracy on a class but very low accuracy on a smaller group within the class. - -<div class='umap-digit'></div> - -There's nothing that makes a "1" slanted to the left intrinsically harder to classify, but because there are only a few slanted "1"s in the training data it's difficult to make a model that classifies them accurately without leaking information. - -This disparate impact doesn't just happen in datasets of differently drawn digits: increased levels of differential privacy in a range of image and language models [disproportionality decreased accuracy](https://arxiv.org/pdf/1905.12101.pdf) on underrepresented subgroups. And adding differential privacy to a medical model [reduced](https://arxiv.org/pdf/2010.06667v1.pdf) the influence of Black patients' data on the model while increasing the influence of white patients' data. - -Lowering the privacy level might not help non-majoritarian data points either – they're the ones most [susceptible](https://arxiv.org/abs/1906.00389) to having their information exposed. Again, escaping the accuracy/privacy tradeoff requires collecting more data – this time from underrepresented subgroups.<a class='footstart'></a> -### More Reading - -There are deep connections between [generalization, memorization and privacy](https://arxiv.org/abs/1906.05271) that are still not well understood. Slightly changing the privacy constraints, for example, can create new options. If public, unlabeled data exists, a "[Private Aggregation of Teacher Ensembles](http://www.cleverhans.io/privacy/2018/04/29/privacy-and-machine-learning.html)" could be used instead of gradient clipping and random noise to train a differentially private model with a [smaller disparate impact](https://arxiv.org/pdf/2106.12576.pdf) on accuracy. - -Finding ways to increase privacy with a smaller impact on accuracy is an active area of research – [model architectures](https://arxiv.org/abs/2007.14191) designed with privacy in mind and better [dataset cleaning](https://arxiv.org/pdf/2107.06499.pdf) look like promising avenues. - -There are also additional [accuracy/privacy/fairness](http://proceedings.mlr.press/v97/jagielski19a/jagielski19a.pdf) tradeoffs beyond what's discussed in this post. Even if a differentially private model doesn't have large accuracy gaps between subgroups, enforcing [fairness metrics](https://pair.withgoogle.com/explorables/measuring-fairness/) can reduce privacy or accuracy. - -This post focuses on protecting the privacy of individual data points. In practice more work might be necessary to ensure that the [privacy of users](https://queue.acm.org/detail.cfm?id=3501293#:~:text=Computing%20and%20Verifying%20Anonymous%20Aggregates) – who could contribute much more than a single data point each – is also protected. - -These questions are also significant outside of machine learning. [Allocating resources](https://arxiv.org/abs/2105.07513) based on a differentially private dataset – with no machine learning model involved – can also disproportionately affect different groups. 
The 2020 Census is the first to use differential privacy and this could have a wide range of impacts, including how [congressional districts](https://statmodeling.stat.columbia.edu/2021/10/20/how-does-post-processed-differentially-private-census-data-affect-redistricting-how-concerned-should-we-be-about-gerrymandering-with-the-new-das/) are drawn. - -### Credits - -Adam Pearce // January 2022 - -Thanks to Abhradeep Thakurta, Andreas Terzis, Andy Coenen, Asma Ghandeharioun, Brendan McMahan, Ellen Jiang, Emily Reif, Fernanda Viégas, James Wexler, Kevin Robinson, Matthew Jagielski, Martin Wattenberg, Meredith Morris, Miguel Guevara, Nicolas Papernot and Nithum Thain for their help with this piece. - -### Footnotes - -<a class='footend'></a> To speed up training at the cost of looser privacy bounds, gradients, clipping and noise can be calculated on a group of data points instead of individual data points. - -<a class='footend'></a> The "ε" in ε-differential privacy essentially [measures](https://desfontain.es/privacy/differential-privacy-in-more-detail.html) the overlap in two distributions after changing a single data point. - -<a class='footend'></a> [Clipping](https://openreview.net/forum?id=BJgnXpVYwS) and [noising](https://arxiv.org/pdf/1511.06807.pdf) are also used outside of differential privacy as regularization techniques to improve accuracy. <br><br> In addition to accidently mislabeled examples, differential privacy can also provide some protection against [data poisoning attacks](https://dp-ml.github.io/2021-workshop-ICLR/files/23.pdf). - -<a class='footend'></a> While visually similar digits aren't necessarily interpreted in similar ways by the model, the clustering of visually similar digits in the UMAP diagram at the bottom of the page (which projects embedding from the penultimate layer of digit classifier) suggests there is a close connection here. - -<a class='footend'></a> Rebalancing the dataset without collecting more data doesn't avoid this privacy/accuracy tradeoff – upsampling the smaller class reduces privacy and downsampling the larger class reduces data and lowers accuracy. - -<a class='footend'></a> See the appendix on [Subgroup Size and Accuracy](#appendix-subgroup-size-and-accuracy) for more detail. - -### Appendix: Subgroup Size and Accuracy - -How, exactly, does the amount of training data, the privacy level and the percentage of data from a subgroup impact accuracy? Using MNIST digits rotated 90° as a stand-in for a smaller subgroup, we can see how the accuracy of a series of simple [models](https://colab.research.google.com/github/PAIR-code/ai-explorables/blob/master/server-side/private-and-fair/MNIST_Generate_UMAP.ipynb) that classify "1"s and "7"s change based on these attributes. - -On the far left, models without any rotated digits in the training data never classify those digits more accurately than random guessing. By rotating 5% of the training digits, a small slice of models with lots of training data and low privacy can accurately classify rotated digits. - -<div class='rotated-accuracy-heatmap'></div> - -Increasing the proportion of rotated digits to 10% or 20% or even more makes it possible to train a higher privacy model that performs well on both types of digits with the same amount of training data. - -Click on one of the models above and you can see how the accuracy gap shifts as number of training points, privacy level and percentage of rotated digits are independently changed. 
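For readers who want to see the clip-then-noise recipe from earlier in the post written out, below is a minimal NumPy sketch of a single DP-SGD update. The function and parameter names (`dp_sgd_step`, `clip_norm`, `noise_multiplier`) are illustrative assumptions rather than code from the linked notebooks, and a real deployment would rely on a vetted library with proper privacy accounting rather than this toy loop.

```python
import numpy as np

def dp_sgd_step(weights, per_example_grads, clip_norm=1.0,
                noise_multiplier=1.1, lr=0.1, rng=None):
    """One differentially private SGD update (illustrative sketch):
    clip each example's gradient, average, add calibrated Gaussian noise."""
    if rng is None:
        rng = np.random.default_rng(0)
    clipped = []
    for g in per_example_grads:
        # Bound the influence any single example can have on this step.
        norm = np.linalg.norm(g)
        clipped.append(g * min(1.0, clip_norm / (norm + 1e-12)))
    mean_grad = np.mean(clipped, axis=0)
    # Noise with std proportional to the clipping bound hides any one example.
    noise = rng.normal(0.0, noise_multiplier * clip_norm / len(per_example_grads),
                       size=mean_grad.shape)
    return weights - lr * (mean_grad + noise)

# Toy usage: three per-example gradients for a 2-parameter model.
w = np.zeros(2)
grads = [np.array([3.0, -1.0]), np.array([0.2, 0.4]), np.array([-5.0, 2.0])]
w = dp_sgd_step(w, grads)
```

Shrinking `clip_norm` or raising `noise_multiplier` strengthens the privacy guarantee, and, as the charts above show, the resulting accuracy loss tends to land first on the rarer, harder-to-classify examples.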
- -<div class='rotated-accuracy'></div> - -Intuitively, adding more training data has diminishing marginal increases to accuracy. Accuracy on the smaller group of rotated digits, which may just be on the cusp of being learned, falls off faster as the effective amount of training data is decreased — a disparate reduction in accuracy. - - -### More Explorables - - -<p id='recirc'></p> -<link rel="stylesheet" href="style.css"> - -<script type='module'> - import npyjs from '../third_party/npyjs.js' - window.npyjs = npyjs -</script> -<script src='../third_party/d3_.js'></script> -<script src='../third_party/d3-scale-chromatic.v1.min.js'></script> -<script src='../third_party/alea.js'></script> - - -<script type='module' src='util.js'></script> - -<script type='module' src='2d-privacy.js'></script> - -<script type='module' src='top-bot-digits.js'></script> -<script type='module' src='accuracy-v-privacy-class.js'></script> -<script type='module' src='accuracy-v-privacy-dataset_size.js'></script> -<script type='module' src='umap-digit.js'></script> - -<script type='module' src='rotated-accuracy.js'></script> - - -<script type='module' src='footnote.js'></script> -<script src='../third_party/recirc.js'></script> \ No newline at end of file diff --git a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/training/networks_stylegan.py b/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/training/networks_stylegan.py deleted file mode 100644 index adc4b260f6f94570c793b0086280f757d2e19ad1..0000000000000000000000000000000000000000 --- a/spaces/mfrashad/CharacterGAN/models/stylegan/stylegan_tf/training/networks_stylegan.py +++ /dev/null @@ -1,661 +0,0 @@ -# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. -# -# This work is licensed under the Creative Commons Attribution-NonCommercial -# 4.0 International License. To view a copy of this license, visit -# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to -# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA. - -"""Network architectures used in the StyleGAN paper.""" - -import numpy as np -import tensorflow as tf -import dnnlib -import dnnlib.tflib as tflib - -# NOTE: Do not import any application-specific modules here! -# Specify all network parameters as kwargs. - -#---------------------------------------------------------------------------- -# Primitive ops for manipulating 4D activation tensors. -# The gradients of these are not necessary efficient or even meaningful. - -def _blur2d(x, f=[1,2,1], normalize=True, flip=False, stride=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(stride, int) and stride >= 1 - - # Finalize filter kernel. - f = np.array(f, dtype=np.float32) - if f.ndim == 1: - f = f[:, np.newaxis] * f[np.newaxis, :] - assert f.ndim == 2 - if normalize: - f /= np.sum(f) - if flip: - f = f[::-1, ::-1] - f = f[:, :, np.newaxis, np.newaxis] - f = np.tile(f, [1, 1, int(x.shape[1]), 1]) - - # No-op => early exit. - if f.shape == (1, 1) and f[0,0] == 1: - return x - - # Convolve using depthwise_conv2d. 
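    # The [k, k] kernel was tiled to shape [k, k, channels, 1] above, so depthwise_conv2d
    # blurs every channel independently with the same filter.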
- orig_dtype = x.dtype - x = tf.cast(x, tf.float32) # tf.nn.depthwise_conv2d() doesn't support fp16 - f = tf.constant(f, dtype=x.dtype, name='filter') - strides = [1, 1, stride, stride] - x = tf.nn.depthwise_conv2d(x, f, strides=strides, padding='SAME', data_format='NCHW') - x = tf.cast(x, orig_dtype) - return x - -def _upscale2d(x, factor=2, gain=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(factor, int) and factor >= 1 - - # Apply gain. - if gain != 1: - x *= gain - - # No-op => early exit. - if factor == 1: - return x - - # Upscale using tf.tile(). - s = x.shape - x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1]) - x = tf.tile(x, [1, 1, 1, factor, 1, factor]) - x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor]) - return x - -def _downscale2d(x, factor=2, gain=1): - assert x.shape.ndims == 4 and all(dim.value is not None for dim in x.shape[1:]) - assert isinstance(factor, int) and factor >= 1 - - # 2x2, float32 => downscale using _blur2d(). - if factor == 2 and x.dtype == tf.float32: - f = [np.sqrt(gain) / factor] * factor - return _blur2d(x, f=f, normalize=False, stride=factor) - - # Apply gain. - if gain != 1: - x *= gain - - # No-op => early exit. - if factor == 1: - return x - - # Large factor => downscale using tf.nn.avg_pool(). - # NOTE: Requires tf_config['graph_options.place_pruned_graph']=True to work. - ksize = [1, 1, factor, factor] - return tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') - -#---------------------------------------------------------------------------- -# High-level ops for manipulating 4D activation tensors. -# The gradients of these are meant to be as efficient as possible. - -def blur2d(x, f=[1,2,1], normalize=True): - with tf.variable_scope('Blur2D'): - @tf.custom_gradient - def func(x): - y = _blur2d(x, f, normalize) - @tf.custom_gradient - def grad(dy): - dx = _blur2d(dy, f, normalize, flip=True) - return dx, lambda ddx: _blur2d(ddx, f, normalize) - return y, grad - return func(x) - -def upscale2d(x, factor=2): - with tf.variable_scope('Upscale2D'): - @tf.custom_gradient - def func(x): - y = _upscale2d(x, factor) - @tf.custom_gradient - def grad(dy): - dx = _downscale2d(dy, factor, gain=factor**2) - return dx, lambda ddx: _upscale2d(ddx, factor) - return y, grad - return func(x) - -def downscale2d(x, factor=2): - with tf.variable_scope('Downscale2D'): - @tf.custom_gradient - def func(x): - y = _downscale2d(x, factor) - @tf.custom_gradient - def grad(dy): - dx = _upscale2d(dy, factor, gain=1/factor**2) - return dx, lambda ddx: _downscale2d(ddx, factor) - return y, grad - return func(x) - -#---------------------------------------------------------------------------- -# Get/create weight tensor for a convolutional or fully-connected layer. - -def get_weight(shape, gain=np.sqrt(2), use_wscale=False, lrmul=1): - fan_in = np.prod(shape[:-1]) # [kernel, kernel, fmaps_in, fmaps_out] or [in, out] - he_std = gain / np.sqrt(fan_in) # He init - - # Equalized learning rate and custom learning rate multiplier. - if use_wscale: - init_std = 1.0 / lrmul - runtime_coef = he_std * lrmul - else: - init_std = he_std / lrmul - runtime_coef = lrmul - - # Create variable. - init = tf.initializers.random_normal(0, init_std) - return tf.get_variable('weight', shape=shape, initializer=init) * runtime_coef - -#---------------------------------------------------------------------------- -# Fully-connected layer. 
- -def dense(x, fmaps, **kwargs): - if len(x.shape) > 2: - x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])]) - w = get_weight([x.shape[1].value, fmaps], **kwargs) - w = tf.cast(w, x.dtype) - return tf.matmul(x, w) - -#---------------------------------------------------------------------------- -# Convolutional layer. - -def conv2d(x, fmaps, kernel, **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.cast(w, x.dtype) - return tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='SAME', data_format='NCHW') - -#---------------------------------------------------------------------------- -# Fused convolution + scaling. -# Faster and uses less memory than performing the operations separately. - -def upscale2d_conv2d(x, fmaps, kernel, fused_scale='auto', **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - assert fused_scale in [True, False, 'auto'] - if fused_scale == 'auto': - fused_scale = min(x.shape[2:]) * 2 >= 128 - - # Not fused => call the individual ops directly. - if not fused_scale: - return conv2d(upscale2d(x), fmaps, kernel, **kwargs) - - # Fused => perform both ops simultaneously using tf.nn.conv2d_transpose(). - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.transpose(w, [0, 1, 3, 2]) # [kernel, kernel, fmaps_out, fmaps_in] - w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') - w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) - w = tf.cast(w, x.dtype) - os = [tf.shape(x)[0], fmaps, x.shape[2] * 2, x.shape[3] * 2] - return tf.nn.conv2d_transpose(x, w, os, strides=[1,1,2,2], padding='SAME', data_format='NCHW') - -def conv2d_downscale2d(x, fmaps, kernel, fused_scale='auto', **kwargs): - assert kernel >= 1 and kernel % 2 == 1 - assert fused_scale in [True, False, 'auto'] - if fused_scale == 'auto': - fused_scale = min(x.shape[2:]) >= 128 - - # Not fused => call the individual ops directly. - if not fused_scale: - return downscale2d(conv2d(x, fmaps, kernel, **kwargs)) - - # Fused => perform both ops simultaneously using tf.nn.conv2d(). - w = get_weight([kernel, kernel, x.shape[1].value, fmaps], **kwargs) - w = tf.pad(w, [[1,1], [1,1], [0,0], [0,0]], mode='CONSTANT') - w = tf.add_n([w[1:, 1:], w[:-1, 1:], w[1:, :-1], w[:-1, :-1]]) * 0.25 - w = tf.cast(w, x.dtype) - return tf.nn.conv2d(x, w, strides=[1,1,2,2], padding='SAME', data_format='NCHW') - -#---------------------------------------------------------------------------- -# Apply bias to the given activation tensor. - -def apply_bias(x, lrmul=1): - b = tf.get_variable('bias', shape=[x.shape[1]], initializer=tf.initializers.zeros()) * lrmul - b = tf.cast(b, x.dtype) - if len(x.shape) == 2: - return x + b - return x + tf.reshape(b, [1, -1, 1, 1]) - -#---------------------------------------------------------------------------- -# Leaky ReLU activation. More efficient than tf.nn.leaky_relu() and supports FP16. - -def leaky_relu(x, alpha=0.2): - with tf.variable_scope('LeakyReLU'): - alpha = tf.constant(alpha, dtype=x.dtype, name='alpha') - @tf.custom_gradient - def func(x): - y = tf.maximum(x, x * alpha) - @tf.custom_gradient - def grad(dy): - dx = tf.where(y >= 0, dy, dy * alpha) - return dx, lambda ddx: tf.where(y >= 0, ddx, ddx * alpha) - return y, grad - return func(x) - -#---------------------------------------------------------------------------- -# Pixelwise feature vector normalization. 
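# pixel_norm scales the feature vector at each spatial location by
# 1 / sqrt(mean(x^2) + epsilon) over the channel axis, the pixelwise normalization
# introduced in Progressive GAN.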
- -def pixel_norm(x, epsilon=1e-8): - with tf.variable_scope('PixelNorm'): - epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') - return x * tf.rsqrt(tf.reduce_mean(tf.square(x), axis=1, keepdims=True) + epsilon) - -#---------------------------------------------------------------------------- -# Instance normalization. - -def instance_norm(x, epsilon=1e-8): - assert len(x.shape) == 4 # NCHW - with tf.variable_scope('InstanceNorm'): - orig_dtype = x.dtype - x = tf.cast(x, tf.float32) - x -= tf.reduce_mean(x, axis=[2,3], keepdims=True) - epsilon = tf.constant(epsilon, dtype=x.dtype, name='epsilon') - x *= tf.rsqrt(tf.reduce_mean(tf.square(x), axis=[2,3], keepdims=True) + epsilon) - x = tf.cast(x, orig_dtype) - return x - -#---------------------------------------------------------------------------- -# Style modulation. - -def style_mod(x, dlatent, **kwargs): - with tf.variable_scope('StyleMod'): - style = apply_bias(dense(dlatent, fmaps=x.shape[1]*2, gain=1, **kwargs)) - style = tf.reshape(style, [-1, 2, x.shape[1]] + [1] * (len(x.shape) - 2)) - return x * (style[:,0] + 1) + style[:,1] - -#---------------------------------------------------------------------------- -# Noise input. - -def apply_noise(x, noise_var=None, randomize_noise=True): - assert len(x.shape) == 4 # NCHW - with tf.variable_scope('Noise'): - if noise_var is None or randomize_noise: - noise = tf.random_normal([tf.shape(x)[0], 1, x.shape[2], x.shape[3]], dtype=x.dtype) - else: - noise = tf.cast(noise_var, x.dtype) - weight = tf.get_variable('weight', shape=[x.shape[1].value], initializer=tf.initializers.zeros()) - return x + noise * tf.reshape(tf.cast(weight, x.dtype), [1, -1, 1, 1]) - -#---------------------------------------------------------------------------- -# Minibatch standard deviation. - -def minibatch_stddev_layer(x, group_size=4, num_new_features=1): - with tf.variable_scope('MinibatchStddev'): - group_size = tf.minimum(group_size, tf.shape(x)[0]) # Minibatch must be divisible by (or smaller than) group_size. - s = x.shape # [NCHW] Input shape. - y = tf.reshape(x, [group_size, -1, num_new_features, s[1]//num_new_features, s[2], s[3]]) # [GMncHW] Split minibatch into M groups of size G. Split channels into n channel groups c. - y = tf.cast(y, tf.float32) # [GMncHW] Cast to FP32. - y -= tf.reduce_mean(y, axis=0, keepdims=True) # [GMncHW] Subtract mean over group. - y = tf.reduce_mean(tf.square(y), axis=0) # [MncHW] Calc variance over group. - y = tf.sqrt(y + 1e-8) # [MncHW] Calc stddev over group. - y = tf.reduce_mean(y, axis=[2,3,4], keepdims=True) # [Mn111] Take average over fmaps and pixels. - y = tf.reduce_mean(y, axis=[2]) # [Mn11] Split channels into c channel groups - y = tf.cast(y, x.dtype) # [Mn11] Cast back to original data type. - y = tf.tile(y, [group_size, 1, s[2], s[3]]) # [NnHW] Replicate over group and pixels. - return tf.concat([x, y], axis=1) # [NCHW] Append as new fmap. - -#---------------------------------------------------------------------------- -# Style-based generator used in the StyleGAN paper. -# Composed of two sub-networks (G_mapping and G_synthesis) that are defined below. - -def G_style( - latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. - labels_in, # Second input: Conditioning labels [minibatch, label_size]. - truncation_psi = 0.7, # Style strength multiplier for the truncation trick. None = disable. - truncation_cutoff = 8, # Number of layers for which to apply the truncation trick. None = disable. 
- truncation_psi_val = None, # Value for truncation_psi to use during validation. - truncation_cutoff_val = None, # Value for truncation_cutoff to use during validation. - dlatent_avg_beta = 0.995, # Decay for tracking the moving average of W during training. None = disable. - style_mixing_prob = 0.9, # Probability of mixing styles during training. None = disable. - is_training = False, # Network is under training? Enables and disables specific features. - is_validation = False, # Network is under validation? Chooses which value to use for truncation_psi. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - components = dnnlib.EasyDict(), # Container for sub-networks. Retained between calls. - **kwargs): # Arguments for sub-networks (G_mapping and G_synthesis). - - # Validate arguments. - assert not is_training or not is_validation - assert isinstance(components, dnnlib.EasyDict) - if is_validation: - truncation_psi = truncation_psi_val - truncation_cutoff = truncation_cutoff_val - if is_training or (truncation_psi is not None and not tflib.is_tf_expression(truncation_psi) and truncation_psi == 1): - truncation_psi = None - if is_training or (truncation_cutoff is not None and not tflib.is_tf_expression(truncation_cutoff) and truncation_cutoff <= 0): - truncation_cutoff = None - if not is_training or (dlatent_avg_beta is not None and not tflib.is_tf_expression(dlatent_avg_beta) and dlatent_avg_beta == 1): - dlatent_avg_beta = None - if not is_training or (style_mixing_prob is not None and not tflib.is_tf_expression(style_mixing_prob) and style_mixing_prob <= 0): - style_mixing_prob = None - - # Setup components. - if 'synthesis' not in components: - components.synthesis = tflib.Network('G_synthesis', func_name=G_synthesis, **kwargs) - num_layers = components.synthesis.input_shape[1] - dlatent_size = components.synthesis.input_shape[2] - if 'mapping' not in components: - components.mapping = tflib.Network('G_mapping', func_name=G_mapping, dlatent_broadcast=num_layers, **kwargs) - - # Setup variables. - lod_in = tf.get_variable('lod', initializer=np.float32(0), trainable=False) - dlatent_avg = tf.get_variable('dlatent_avg', shape=[dlatent_size], initializer=tf.initializers.zeros(), trainable=False) - - # Evaluate mapping network. - dlatents = components.mapping.get_output_for(latents_in, labels_in, **kwargs) - - # Update moving average of W. - if dlatent_avg_beta is not None: - with tf.variable_scope('DlatentAvg'): - batch_avg = tf.reduce_mean(dlatents[:, 0], axis=0) - update_op = tf.assign(dlatent_avg, tflib.lerp(batch_avg, dlatent_avg, dlatent_avg_beta)) - with tf.control_dependencies([update_op]): - dlatents = tf.identity(dlatents) - - # Perform style mixing regularization. - if style_mixing_prob is not None: - with tf.name_scope('StyleMix'): - latents2 = tf.random_normal(tf.shape(latents_in)) - dlatents2 = components.mapping.get_output_for(latents2, labels_in, **kwargs) - layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] - cur_layers = num_layers - tf.cast(lod_in, tf.int32) * 2 - mixing_cutoff = tf.cond( - tf.random_uniform([], 0.0, 1.0) < style_mixing_prob, - lambda: tf.random_uniform([], 1, cur_layers, dtype=tf.int32), - lambda: cur_layers) - dlatents = tf.where(tf.broadcast_to(layer_idx < mixing_cutoff, tf.shape(dlatents)), dlatents, dlatents2) - - # Apply truncation trick. 
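# For the first `truncation_cutoff` layers, W is interpolated toward its moving average
# dlatent_avg; truncation_psi < 1 pulls those styles closer to the average (trading sample
# variety for quality), while later layers keep the untruncated W.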
- if truncation_psi is not None and truncation_cutoff is not None: - with tf.variable_scope('Truncation'): - layer_idx = np.arange(num_layers)[np.newaxis, :, np.newaxis] - ones = np.ones(layer_idx.shape, dtype=np.float32) - coefs = tf.where(layer_idx < truncation_cutoff, truncation_psi * ones, ones) - dlatents = tflib.lerp(dlatent_avg, dlatents, coefs) - - # Evaluate synthesis network. - with tf.control_dependencies([tf.assign(components.synthesis.find_var('lod'), lod_in)]): - images_out = components.synthesis.get_output_for(dlatents, force_clean_graph=is_template_graph, **kwargs) - return tf.identity(images_out, name='images_out') - -#---------------------------------------------------------------------------- -# Mapping network used in the StyleGAN paper. - -def G_mapping( - latents_in, # First input: Latent vectors (Z) [minibatch, latent_size]. - labels_in, # Second input: Conditioning labels [minibatch, label_size]. - latent_size = 512, # Latent vector (Z) dimensionality. - label_size = 0, # Label dimensionality, 0 if no labels. - dlatent_size = 512, # Disentangled latent (W) dimensionality. - dlatent_broadcast = None, # Output disentangled latent (W) as [minibatch, dlatent_size] or [minibatch, dlatent_broadcast, dlatent_size]. - mapping_layers = 8, # Number of mapping layers. - mapping_fmaps = 512, # Number of activations in the mapping layers. - mapping_lrmul = 0.01, # Learning rate multiplier for the mapping layers. - mapping_nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu'. - use_wscale = True, # Enable equalized learning rate? - normalize_latents = True, # Normalize latent vectors (Z) before feeding them to the mapping layers? - dtype = 'float32', # Data type to use for activations and outputs. - **_kwargs): # Ignore unrecognized keyword args. - - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[mapping_nonlinearity] - - # Inputs. - latents_in.set_shape([None, latent_size]) - labels_in.set_shape([None, label_size]) - latents_in = tf.cast(latents_in, dtype) - labels_in = tf.cast(labels_in, dtype) - x = latents_in - - # Embed labels and concatenate them with latents. - if label_size: - with tf.variable_scope('LabelConcat'): - w = tf.get_variable('weight', shape=[label_size, latent_size], initializer=tf.initializers.random_normal()) - y = tf.matmul(labels_in, tf.cast(w, dtype)) - x = tf.concat([x, y], axis=1) - - # Normalize latents. - if normalize_latents: - x = pixel_norm(x) - - # Mapping layers. - for layer_idx in range(mapping_layers): - with tf.variable_scope('Dense%d' % layer_idx): - fmaps = dlatent_size if layer_idx == mapping_layers - 1 else mapping_fmaps - x = dense(x, fmaps=fmaps, gain=gain, use_wscale=use_wscale, lrmul=mapping_lrmul) - x = apply_bias(x, lrmul=mapping_lrmul) - x = act(x) - - # Broadcast. - if dlatent_broadcast is not None: - with tf.variable_scope('Broadcast'): - x = tf.tile(x[:, np.newaxis], [1, dlatent_broadcast, 1]) - - # Output. - assert x.dtype == tf.as_dtype(dtype) - return tf.identity(x, name='dlatents_out') - -#---------------------------------------------------------------------------- -# Synthesis network used in the StyleGAN paper. - -def G_synthesis( - dlatents_in, # Input: Disentangled latents (W) [minibatch, num_layers, dlatent_size]. - dlatent_size = 512, # Disentangled latent (W) dimensionality. - num_channels = 3, # Number of output color channels. - resolution = 1024, # Output resolution. - fmap_base = 8192, # Overall multiplier for the number of feature maps. 
- fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. - fmap_max = 512, # Maximum number of feature maps in any layer. - use_styles = True, # Enable style inputs? - const_input_layer = True, # First layer is a learned constant? - use_noise = True, # Enable noise inputs? - randomize_noise = True, # True = randomize noise inputs every time (non-deterministic), False = read noise inputs from variables. - nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu' - use_wscale = True, # Enable equalized learning rate? - use_pixel_norm = False, # Enable pixelwise feature vector normalization? - use_instance_norm = True, # Enable instance normalization? - dtype = 'float32', # Data type to use for activations and outputs. - fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. - blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. - structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - force_clean_graph = False, # True = construct a clean graph that looks nice in TensorBoard, False = default behavior. - **_kwargs): # Ignore unrecognized keyword args. - - resolution_log2 = int(np.log2(resolution)) - assert resolution == 2**resolution_log2 and resolution >= 4 - def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) - def blur(x): return blur2d(x, blur_filter) if blur_filter else x - if is_template_graph: force_clean_graph = True - if force_clean_graph: randomize_noise = False - if structure == 'auto': structure = 'linear' if force_clean_graph else 'recursive' - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] - num_layers = resolution_log2 * 2 - 2 - num_styles = num_layers if use_styles else 1 - images_out = None - - # Primary inputs. - dlatents_in.set_shape([None, num_styles, dlatent_size]) - dlatents_in = tf.cast(dlatents_in, dtype) - lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0), trainable=False), dtype) - - # Noise inputs. - noise_inputs = [] - if use_noise: - for layer_idx in range(num_layers): - res = layer_idx // 2 + 2 - shape = [1, use_noise, 2**res, 2**res] - noise_inputs.append(tf.get_variable('noise%d' % layer_idx, shape=shape, initializer=tf.initializers.random_normal(), trainable=False)) - - # Things to do at the end of each layer. - def layer_epilogue(x, layer_idx): - if use_noise: - x = apply_noise(x, noise_inputs[layer_idx], randomize_noise=randomize_noise) - x = apply_bias(x) - x = act(x) - if use_pixel_norm: - x = pixel_norm(x) - if use_instance_norm: - x = instance_norm(x) - if use_styles: - x = style_mod(x, dlatents_in[:, layer_idx], use_wscale=use_wscale) - return x - - # Early layers. 
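# Layers are numbered 0..num_layers-1 with two layers per resolution starting at 4x4
# (layer_idx // 2 + 2 gives the log2 resolution, matching the noise variables above);
# layer_epilogue applies noise, bias, activation, normalization and the per-layer style.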
- with tf.variable_scope('4x4'): - if const_input_layer: - with tf.variable_scope('Const'): - x = tf.get_variable('const', shape=[1, nf(1), 4, 4], initializer=tf.initializers.ones()) - x = layer_epilogue(tf.tile(tf.cast(x, dtype), [tf.shape(dlatents_in)[0], 1, 1, 1]), 0) - else: - with tf.variable_scope('Dense'): - x = dense(dlatents_in[:, 0], fmaps=nf(1)*16, gain=gain/4, use_wscale=use_wscale) # tweak gain to match the official implementation of Progressing GAN - x = layer_epilogue(tf.reshape(x, [-1, nf(1), 4, 4]), 0) - with tf.variable_scope('Conv'): - x = layer_epilogue(conv2d(x, fmaps=nf(1), kernel=3, gain=gain, use_wscale=use_wscale), 1) - - # Building blocks for remaining layers. - def block(res, x): # res = 3..resolution_log2 - with tf.variable_scope('%dx%d' % (2**res, 2**res)): - with tf.variable_scope('Conv0_up'): - x = layer_epilogue(blur(upscale2d_conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale)), res*2-4) - with tf.variable_scope('Conv1'): - x = layer_epilogue(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale), res*2-3) - return x - def torgb(res, x): # res = 2..resolution_log2 - lod = resolution_log2 - res - with tf.variable_scope('ToRGB_lod%d' % lod): - return apply_bias(conv2d(x, fmaps=num_channels, kernel=1, gain=1, use_wscale=use_wscale)) - - # Fixed structure: simple and efficient, but does not support progressive growing. - if structure == 'fixed': - for res in range(3, resolution_log2 + 1): - x = block(res, x) - images_out = torgb(resolution_log2, x) - - # Linear structure: simple but inefficient. - if structure == 'linear': - images_out = torgb(2, x) - for res in range(3, resolution_log2 + 1): - lod = resolution_log2 - res - x = block(res, x) - img = torgb(res, x) - images_out = upscale2d(images_out) - with tf.variable_scope('Grow_lod%d' % lod): - images_out = tflib.lerp_clip(img, images_out, lod_in - lod) - - # Recursive structure: complex but efficient. - if structure == 'recursive': - def cset(cur_lambda, new_cond, new_lambda): - return lambda: tf.cond(new_cond, new_lambda, cur_lambda) - def grow(x, res, lod): - y = block(res, x) - img = lambda: upscale2d(torgb(res, y), 2**lod) - img = cset(img, (lod_in > lod), lambda: upscale2d(tflib.lerp(torgb(res, y), upscale2d(torgb(res - 1, x)), lod_in - lod), 2**lod)) - if lod > 0: img = cset(img, (lod_in < lod), lambda: grow(y, res + 1, lod - 1)) - return img() - images_out = grow(x, 3, resolution_log2 - 3) - - assert images_out.dtype == tf.as_dtype(dtype) - return tf.identity(images_out, name='images_out') - -#---------------------------------------------------------------------------- -# Discriminator used in the StyleGAN paper. - -def D_basic( - images_in, # First input: Images [minibatch, channel, height, width]. - labels_in, # Second input: Labels [minibatch, label_size]. - num_channels = 1, # Number of input color channels. Overridden based on dataset. - resolution = 32, # Input resolution. Overridden based on dataset. - label_size = 0, # Dimensionality of the labels, 0 if no labels. Overridden based on dataset. - fmap_base = 8192, # Overall multiplier for the number of feature maps. - fmap_decay = 1.0, # log2 feature map reduction when doubling the resolution. - fmap_max = 512, # Maximum number of feature maps in any layer. - nonlinearity = 'lrelu', # Activation function: 'relu', 'lrelu', - use_wscale = True, # Enable equalized learning rate? - mbstd_group_size = 4, # Group size for the minibatch standard deviation layer, 0 = disable. 
- mbstd_num_features = 1, # Number of features for the minibatch standard deviation layer. - dtype = 'float32', # Data type to use for activations and outputs. - fused_scale = 'auto', # True = fused convolution + scaling, False = separate ops, 'auto' = decide automatically. - blur_filter = [1,2,1], # Low-pass filter to apply when resampling activations. None = no filtering. - structure = 'auto', # 'fixed' = no progressive growing, 'linear' = human-readable, 'recursive' = efficient, 'auto' = select automatically. - is_template_graph = False, # True = template graph constructed by the Network class, False = actual evaluation. - **_kwargs): # Ignore unrecognized keyword args. - - resolution_log2 = int(np.log2(resolution)) - assert resolution == 2**resolution_log2 and resolution >= 4 - def nf(stage): return min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max) - def blur(x): return blur2d(x, blur_filter) if blur_filter else x - if structure == 'auto': structure = 'linear' if is_template_graph else 'recursive' - act, gain = {'relu': (tf.nn.relu, np.sqrt(2)), 'lrelu': (leaky_relu, np.sqrt(2))}[nonlinearity] - - images_in.set_shape([None, num_channels, resolution, resolution]) - labels_in.set_shape([None, label_size]) - images_in = tf.cast(images_in, dtype) - labels_in = tf.cast(labels_in, dtype) - lod_in = tf.cast(tf.get_variable('lod', initializer=np.float32(0.0), trainable=False), dtype) - scores_out = None - - # Building blocks. - def fromrgb(x, res): # res = 2..resolution_log2 - with tf.variable_scope('FromRGB_lod%d' % (resolution_log2 - res)): - return act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=1, gain=gain, use_wscale=use_wscale))) - def block(x, res): # res = 2..resolution_log2 - with tf.variable_scope('%dx%d' % (2**res, 2**res)): - if res >= 3: # 8x8 and up - with tf.variable_scope('Conv0'): - x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Conv1_down'): - x = act(apply_bias(conv2d_downscale2d(blur(x), fmaps=nf(res-2), kernel=3, gain=gain, use_wscale=use_wscale, fused_scale=fused_scale))) - else: # 4x4 - if mbstd_group_size > 1: - x = minibatch_stddev_layer(x, mbstd_group_size, mbstd_num_features) - with tf.variable_scope('Conv'): - x = act(apply_bias(conv2d(x, fmaps=nf(res-1), kernel=3, gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Dense0'): - x = act(apply_bias(dense(x, fmaps=nf(res-2), gain=gain, use_wscale=use_wscale))) - with tf.variable_scope('Dense1'): - x = apply_bias(dense(x, fmaps=max(label_size, 1), gain=1, use_wscale=use_wscale)) - return x - - # Fixed structure: simple and efficient, but does not support progressive growing. - if structure == 'fixed': - x = fromrgb(images_in, resolution_log2) - for res in range(resolution_log2, 2, -1): - x = block(x, res) - scores_out = block(x, 2) - - # Linear structure: simple but inefficient. - if structure == 'linear': - img = images_in - x = fromrgb(img, resolution_log2) - for res in range(resolution_log2, 2, -1): - lod = resolution_log2 - res - x = block(x, res) - img = downscale2d(img) - y = fromrgb(img, res - 1) - with tf.variable_scope('Grow_lod%d' % lod): - x = tflib.lerp_clip(x, y, lod_in - lod) - scores_out = block(x, 2) - - # Recursive structure: complex but efficient. 
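# grow() chains tf.cond branches (via cset) so that at run time only the fromrgb/block
# path needed for the current lod_in is evaluated, instead of always running every
# resolution as the linear structure does.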
- if structure == 'recursive': - def cset(cur_lambda, new_cond, new_lambda): - return lambda: tf.cond(new_cond, new_lambda, cur_lambda) - def grow(res, lod): - x = lambda: fromrgb(downscale2d(images_in, 2**lod), res) - if lod > 0: x = cset(x, (lod_in < lod), lambda: grow(res + 1, lod - 1)) - x = block(x(), res); y = lambda: x - if res > 2: y = cset(y, (lod_in > lod), lambda: tflib.lerp(x, fromrgb(downscale2d(images_in, 2**(lod+1)), res - 1), lod_in - lod)) - return y() - scores_out = grow(2, resolution_log2 - 2) - - # Label conditioning from "Which Training Methods for GANs do actually Converge?" - if label_size: - with tf.variable_scope('LabelSwitch'): - scores_out = tf.reduce_sum(scores_out * labels_in, axis=1, keepdims=True) - - assert scores_out.dtype == tf.as_dtype(dtype) - scores_out = tf.identity(scores_out, name='scores_out') - return scores_out - -#---------------------------------------------------------------------------- diff --git a/spaces/mgfrantz/reading_practice/app.py b/spaces/mgfrantz/reading_practice/app.py deleted file mode 100644 index 45ddf7927bd1b215b91822f4ec4eead20c4aa15c..0000000000000000000000000000000000000000 --- a/spaces/mgfrantz/reading_practice/app.py +++ /dev/null @@ -1,66 +0,0 @@ -import difflib -import gradio as gr -from transformers import pipeline -from tokenizers.pre_tokenizers import Whitespace -from tokenizers.normalizers import BertNormalizer - -audio_input = gr.inputs.Audio(source='microphone', label='Read the passage', type="filepath") -text_input = gr.inputs.Textbox(label='Sample passage') -text_output = gr.outputs.Textbox(label='Output') -highlighted_text_output = gr.outputs.HighlightedText(color_map={"+": "green", "-": "pink"}) - -speech_to_text = pipeline('automatic-speech-recognition') - -sm = difflib.SequenceMatcher(None) - -splitter = Whitespace() -normalizer = BertNormalizer() - -def preprocess(s): - return [i[0] for i in splitter.pre_tokenize_str(normalizer.normalize_str(s))] - -def diff_texts(text1, text2): - d = difflib.Differ() - return [ - (token[2:], token[0] if token[0] != " " else None) - for token in d.compare(preprocess(text1), preprocess(text2)) - ] - -def func(audio, text): - # print(audio) - # print(text) - results = speech_to_text(audio)['text'].lower() - text = text.lower() - sm.set_seqs(preprocess(results), preprocess(text)) - - r = f""" - Original passage: - {text} - - What we heard: - {results} - - Ratio: - {sm.ratio()} - """ - - d = diff_texts(results, text) - - return r, d - -title = "Reading Practice Application" -description = """ -This application is a POC for reading practice. -It compares some input text against an audio recording. -The intention is to help individuals with reading challenges identify how to improve their reading. 
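It transcribes the recording with a speech-recognition pipeline and highlights word-level differences between the transcript and the passage.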
-""" - -gr.Interface( - func, - inputs=[audio_input, text_input], - outputs=[text_output, highlighted_text_output], - title=title, - description=description, - examples=[["sample.wav", "the quick brown fox jumped over the lazy dog"]] -).launch(debug=True) - diff --git a/spaces/mikkoar/marco/src/components/chat-panel.tsx b/spaces/mikkoar/marco/src/components/chat-panel.tsx deleted file mode 100644 index 1fbc3c2bf05b914e0c229661832fbb560745f488..0000000000000000000000000000000000000000 --- a/spaces/mikkoar/marco/src/components/chat-panel.tsx +++ /dev/null @@ -1,153 +0,0 @@ -'use client' - -import * as React from 'react' -import Image from 'next/image' -import Textarea from 'react-textarea-autosize' -import { useAtomValue } from 'jotai' -import { useEnterSubmit } from '@/lib/hooks/use-enter-submit' -import { cn } from '@/lib/utils' - -import BrushIcon from '@/assets/images/brush.svg' -import ChatIcon from '@/assets/images/chat.svg' -import VisualSearchIcon from '@/assets/images/visual-search.svg' -import SendIcon from '@/assets/images/send.svg' -import PinIcon from '@/assets/images/pin.svg' -import PinFillIcon from '@/assets/images/pin-fill.svg' - -import { useBing } from '@/lib/hooks/use-bing' -import { voiceListenAtom } from '@/state' -import Voice from './voice' -import { ChatImage } from './chat-image' -import { ChatAttachments } from './chat-attachments' - -export interface ChatPanelProps - extends Pick< - ReturnType<typeof useBing>, - | 'generating' - | 'input' - | 'setInput' - | 'sendMessage' - | 'resetConversation' - | 'isSpeaking' - | 'attachmentList' - | 'uploadImage' - | 'setAttachmentList' - > { - id?: string - className?: string -} - -export function ChatPanel({ - isSpeaking, - generating, - input, - setInput, - className, - sendMessage, - resetConversation, - attachmentList, - uploadImage, - setAttachmentList -}: ChatPanelProps) { - const inputRef = React.useRef<HTMLTextAreaElement>(null) - const {formRef, onKeyDown} = useEnterSubmit() - const [focused, setFocused] = React.useState(false) - const [active, setActive] = React.useState(false) - const [pin, setPin] = React.useState(false) - const [tid, setTid] = React.useState<any>() - const voiceListening = useAtomValue(voiceListenAtom) - - const setBlur = React.useCallback(() => { - clearTimeout(tid) - setActive(false) - const _tid = setTimeout(() => setFocused(false), 2000); - setTid(_tid) - }, [tid]) - - const setFocus = React.useCallback(() => { - setFocused(true) - setActive(true) - clearTimeout(tid) - inputRef.current?.focus() - }, [tid]) - - React.useEffect(() => { - if (input) { - setFocus() - } - }, [input]) - - return ( - <form - className={cn('chat-panel', className)} - onSubmit={async e => { - e.preventDefault() - if (generating) { - return; - } - if (!input?.trim()) { - return - } - setInput('') - setPin(false) - await sendMessage(input) - }} - ref={formRef} - > - <div className="action-bar pb-4"> - <div className={cn('action-root', { focus: active || pin })} speech-state="hidden" visual-search="" drop-target=""> - <div className="fade bottom"> - <div className="background"></div> - </div> - <div className={cn('outside-left-container', { collapsed: focused })}> - <div className="button-compose-wrapper"> - <button className="body-2 button-compose" type="button" aria-label="新主题" onClick={resetConversation}> - <div className="button-compose-content"> - <Image className="pl-2" alt="brush" src={BrushIcon} width={40} /> - <div className="button-compose-text">新主题</div> - </div> - </button> - </div> - </div> - <div - 
className={cn('main-container', { active: active || pin })} - style={{ minHeight: pin ? '360px' : undefined }} - onClick={setFocus} - onBlur={setBlur} - > - <div className="main-bar"> - <Image alt="chat" src={ChatIcon} width={20} color="blue" /> - <Textarea - ref={inputRef} - tabIndex={0} - onKeyDown={onKeyDown} - rows={1} - value={input} - onChange={e => setInput(e.target.value.slice(0, 4000))} - placeholder={voiceListening ? '持续对话中...对话完成说“发送”即可' : 'Shift + Enter 换行'} - spellCheck={false} - className="message-input min-h-[24px] -mx-1 w-full text-base resize-none bg-transparent focus-within:outline-none" - /> - <ChatImage uploadImage={uploadImage}> - <Image alt="visual-search" src={VisualSearchIcon} width={24} /> - </ChatImage> - <Voice setInput={setInput} sendMessage={sendMessage} isSpeaking={isSpeaking} input={input} /> - <button type="submit"> - <Image alt="send" src={SendIcon} width={20} style={{ marginTop: '2px' }} /> - </button> - </div> - <ChatAttachments attachmentList={attachmentList} setAttachmentList={setAttachmentList} uploadImage={uploadImage} /> - <div className="body-1 bottom-bar"> - <div className="letter-counter"><span>{input.length}</span>/4000</div> - <button onClick={() => { - setPin(!pin) - }} className="pr-2"> - <Image alt="pin" src={pin ? PinFillIcon : PinIcon} width={20} /> - </button> - </div> - </div> - </div> - </div> - </form> - ) -} diff --git a/spaces/miku-hutao/vits-uma-genshin-honkai/attentions.py b/spaces/miku-hutao/vits-uma-genshin-honkai/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/miku-hutao/vits-uma-genshin-honkai/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - 
self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) 
- key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
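# Shape walk-through for length l = 3: [b, h, 3, 5] -> pad last dim -> [b, h, 3, 6]
# -> flatten -> [b, h, 18] -> pad by l-1 -> [b, h, 20] -> view as [b, h, 4, 5]
# -> slice [:, :, :3, 2:] leaves the [b, h, 3, 3] absolute-position scores.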
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh b/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh deleted file mode 100644 index e9a80001eb47d5af863d6aab11a59362a59cef61..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/wav2vec/unsupervised/kaldi_self_train/st/local/prepare_lang.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -sil_prob=0.5 -num_sil_states=3 -num_nonsil_states=1 - -. ./cmd.sh -. ./path.sh -. 
parse_options.sh - -set -eux - -dict=$1 -data_dir=$2 - -dict_dir=$data_dir/local/dict -tmplm_dir=$data_dir/local/lang_tmp -lm_dir=$data_dir/lang - -mkdir -p $dict_dir $tmplm_dir $lm_dir - -# prepare dict -echo "SIL" > $dict_dir/silence_phones.txt -echo "SIL" > $dict_dir/optional_silence.txt -awk '{print $1}' $dict > $dict_dir/nonsilence_phones.txt - -echo "SIL SIL" > $dict_dir/lexicon.txt -echo "<UNK> SIL" >> $dict_dir/lexicon.txt -awk '{print $1" "$1}' $dict >> $dict_dir/lexicon.txt - -echo "SIL" > $dict_dir/extra_questions.txt -awk '{printf $1" "} END {printf "\n"}' $dict >> $dict_dir/extra_questions.txt - -# prepare lang -utils/prepare_lang.sh --sil-prob $sil_prob --position-dependent-phones false \ - --num_sil_states $num_sil_states --num_nonsil_states $num_nonsil_states \ - $dict_dir "<UNK>" $tmplm_dir $lm_dir diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/dynamicconv_layer/setup.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/dynamicconv_layer/setup.py deleted file mode 100644 index 6a21f7e2ee0840a3b251522275a0b32a856951d7..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/dynamicconv_layer/setup.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python3 -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -from setuptools import setup -from torch.utils.cpp_extension import BuildExtension, CUDAExtension - - -setup( - name="dynamicconv_layer", - ext_modules=[ - CUDAExtension( - name="dynamicconv_cuda", - sources=[ - "dynamicconv_cuda.cpp", - "dynamicconv_cuda_kernel.cu", - ], - ), - ], - cmdclass={"build_ext": BuildExtension}, -) diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/layer_drop.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/layer_drop.py deleted file mode 100644 index 8961d8bcbc492c40c6b30973234416ce5a414f5a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/layer_drop.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -LayerDrop as described in https://arxiv.org/abs/1909.11556. -""" - -import torch -import torch.nn as nn - - -class LayerDropModuleList(nn.ModuleList): - """ - A LayerDrop implementation based on :class:`torch.nn.ModuleList`. - - We refresh the choice of which layers to drop every time we iterate - over the LayerDropModuleList instance. During evaluation we always - iterate over all layers. 
- - Usage:: - - layers = LayerDropList(p=0.5, modules=[layer1, layer2, layer3]) - for layer in layers: # this might iterate over layers 1 and 3 - x = layer(x) - for layer in layers: # this might iterate over all layers - x = layer(x) - for layer in layers: # this might not iterate over any layers - x = layer(x) - - Args: - p (float): probability of dropping out each layer - modules (iterable, optional): an iterable of modules to add - """ - - def __init__(self, p, modules=None): - super().__init__(modules) - self.p = p - - def __iter__(self): - dropout_probs = torch.empty(len(self)).uniform_() - for i, m in enumerate(super().__iter__()): - if not self.training or (dropout_probs[i] > self.p): - yield m diff --git a/spaces/mueller-franzes/medfusion-app/medical_diffusion/metrics/torchmetrics_pr_recall.py b/spaces/mueller-franzes/medfusion-app/medical_diffusion/metrics/torchmetrics_pr_recall.py deleted file mode 100644 index 1b47664d191097e9c599904cd9f05ff6835121c8..0000000000000000000000000000000000000000 --- a/spaces/mueller-franzes/medfusion-app/medical_diffusion/metrics/torchmetrics_pr_recall.py +++ /dev/null @@ -1,170 +0,0 @@ -from typing import Optional, List - -import torch -from torch import Tensor -from torchmetrics import Metric -import torchvision.models as models -from torchvision import transforms - - - -from torchmetrics.utilities.imports import _TORCH_FIDELITY_AVAILABLE - -if _TORCH_FIDELITY_AVAILABLE: - from torch_fidelity.feature_extractor_inceptionv3 import FeatureExtractorInceptionV3 -else: - class FeatureExtractorInceptionV3(Module): # type: ignore - pass - __doctest_skip__ = ["ImprovedPrecessionRecall", "IPR"] - -class NoTrainInceptionV3(FeatureExtractorInceptionV3): - def __init__( - self, - name: str, - features_list: List[str], - feature_extractor_weights_path: Optional[str] = None, - ) -> None: - super().__init__(name, features_list, feature_extractor_weights_path) - # put into evaluation mode - self.eval() - - def train(self, mode: bool) -> "NoTrainInceptionV3": - """the inception network should not be able to be switched away from evaluation mode.""" - return super().train(False) - - def forward(self, x: Tensor) -> Tensor: - out = super().forward(x) - return out[0].reshape(x.shape[0], -1) - - -# -------------------------- VGG Trans --------------------------- -# class Normalize(object): -# """Rescale the image from 0-255 (uint8) to [0,1] (float32). 
-# Note, this doesn't ensure that min=0 and max=1 as a min-max scale would do!""" - -# def __call__(self, image): -# return image/255 - -# # see https://pytorch.org/vision/main/models/generated/torchvision.models.vgg16.html -# VGG_Trans = transforms.Compose([ -# transforms.Resize([224, 224], interpolation=transforms.InterpolationMode.BILINEAR, antialias=True), -# # transforms.Resize([256, 256], interpolation=InterpolationMode.BILINEAR), -# # transforms.CenterCrop(224), -# Normalize(), # scale to [0, 1] -# transforms.Normalize([0.485, 0.456, 0.406],[0.229, 0.224, 0.225]) -# ]) - - - -class ImprovedPrecessionRecall(Metric): - is_differentiable: bool = False - higher_is_better: bool = True - full_state_update: bool = False - - - def __init__(self, feature=2048, knn=3, splits_real=1, splits_fake=5): - super().__init__() - - - # ------------------------- Init Feature Extractor (VGG or Inception) ------------------------------ - # Original VGG: https://github.com/kynkaat/improved-precision-and-recall-metric/blob/b0247eafdead494a5d243bd2efb1b0b124379ae9/utils.py#L40 - # Compare Inception: https://github.com/openai/guided-diffusion/blob/22e0df8183507e13a7813f8d38d51b072ca1e67c/evaluations/evaluator.py#L574 - # TODO: Add option to switch between Inception and VGG feature extractor - # self.vgg_model = models.vgg16(weights='IMAGENET1K_V1').eval() - # self.feature_extractor = transforms.Compose([ - # VGG_Trans, - # self.vgg_model.features, - # transforms.Lambda(lambda x: torch.flatten(x, 1)), - # self.vgg_model.classifier[:4] # [:4] corresponds to 4096 features - # ]) - - if isinstance(feature, int): - if not _TORCH_FIDELITY_AVAILABLE: - raise ModuleNotFoundError( - "FrechetInceptionDistance metric requires that `Torch-fidelity` is installed." - " Either install as `pip install torchmetrics[image]` or `pip install torch-fidelity`." - ) - valid_int_input = [64, 192, 768, 2048] - if feature not in valid_int_input: - raise ValueError( - f"Integer input to argument `feature` must be one of {valid_int_input}, but got {feature}." - ) - - self.feature_extractor = NoTrainInceptionV3(name="inception-v3-compat", features_list=[str(feature)]) - elif isinstance(feature, torch.nn.Module): - self.feature_extractor = feature - else: - raise TypeError("Got unknown input to argument `feature`") - - # --------------------------- End Feature Extractor --------------------------------------------------------------- - - self.knn = knn - self.splits_real = splits_real - self.splits_fake = splits_fake - self.add_state("real_features", [], dist_reduce_fx=None) - self.add_state("fake_features", [], dist_reduce_fx=None) - - - - def update(self, imgs: Tensor, real: bool) -> None: # type: ignore - """Update the state with extracted features. 
- - Args: - imgs: tensor with images feed to the feature extractor - real: bool indicating if ``imgs`` belong to the real or the fake distribution - """ - assert torch.is_tensor(imgs) and imgs.dtype == torch.uint8, 'Expecting image as torch.Tensor with dtype=torch.uint8' - - features = self.feature_extractor(imgs).view(imgs.shape[0], -1) - - if real: - self.real_features.append(features) - else: - self.fake_features.append(features) - - def compute(self): - real_features = torch.concat(self.real_features) - fake_features = torch.concat(self.fake_features) - - real_distances = _compute_pairwise_distances(real_features, self.splits_real) - real_radii = _distances2radii(real_distances, self.knn) - - fake_distances = _compute_pairwise_distances(fake_features, self.splits_fake) - fake_radii = _distances2radii(fake_distances, self.knn) - - precision = _compute_metric(real_features, real_radii, self.splits_real, fake_features, self.splits_fake) - recall = _compute_metric(fake_features, fake_radii, self.splits_fake, real_features, self.splits_real) - - return precision, recall - -def _compute_metric(ref_features, ref_radii, ref_splits, pred_features, pred_splits): - dist = _compute_pairwise_distances(ref_features, ref_splits, pred_features, pred_splits) - num_feat = pred_features.shape[0] - count = 0 - for i in range(num_feat): - count += (dist[:, i] < ref_radii).any() - return count / num_feat - -def _distances2radii(distances, knn): - return torch.topk(distances, knn+1, dim=1, largest=False)[0].max(dim=1)[0] - -def _compute_pairwise_distances(X, splits_x, Y=None, splits_y=None): - # X = [B, features] - # Y = [B', features] - Y = X if Y is None else Y - # X = X.double() - # Y = Y.double() - splits_y = splits_x if splits_y is None else splits_y - dist = torch.concat([ - torch.concat([ - (torch.sum(X_batch**2, dim=1, keepdim=True) + - torch.sum(Y_batch**2, dim=1, keepdim=True).t() - - 2 * torch.einsum("bd,dn->bn", X_batch, Y_batch.t())) - for Y_batch in Y.chunk(splits_y, dim=0)], dim=1) - for X_batch in X.chunk(splits_x, dim=0)]) - - # dist = torch.maximum(dist, torch.zeros_like(dist)) - dist[dist<0] = 0 - return torch.sqrt(dist) - - \ No newline at end of file diff --git a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/segment_anything/utils/amg.py b/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/segment_anything/utils/amg.py deleted file mode 100644 index be064071ef399fea96c673ad173689656c23534a..0000000000000000000000000000000000000000 --- a/spaces/myrad01/Inpaint-Anything/third_party/segment-anything/segment_anything/utils/amg.py +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import numpy as np -import torch - -import math -from copy import deepcopy -from itertools import product -from typing import Any, Dict, Generator, ItemsView, List, Tuple - - -class MaskData: - """ - A structure for storing masks and their related data in batched format. - Implements basic filtering and concatenation. - """ - - def __init__(self, **kwargs) -> None: - for v in kwargs.values(): - assert isinstance( - v, (list, np.ndarray, torch.Tensor) - ), "MaskData only supports list, numpy arrays, and torch tensors." 
- self._stats = dict(**kwargs) - - def __setitem__(self, key: str, item: Any) -> None: - assert isinstance( - item, (list, np.ndarray, torch.Tensor) - ), "MaskData only supports list, numpy arrays, and torch tensors." - self._stats[key] = item - - def __delitem__(self, key: str) -> None: - del self._stats[key] - - def __getitem__(self, key: str) -> Any: - return self._stats[key] - - def items(self) -> ItemsView[str, Any]: - return self._stats.items() - - def filter(self, keep: torch.Tensor) -> None: - for k, v in self._stats.items(): - if v is None: - self._stats[k] = None - elif isinstance(v, torch.Tensor): - self._stats[k] = v[torch.as_tensor(keep, device=v.device)] - elif isinstance(v, np.ndarray): - self._stats[k] = v[keep.detach().cpu().numpy()] - elif isinstance(v, list) and keep.dtype == torch.bool: - self._stats[k] = [a for i, a in enumerate(v) if keep[i]] - elif isinstance(v, list): - self._stats[k] = [v[i] for i in keep] - else: - raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") - - def cat(self, new_stats: "MaskData") -> None: - for k, v in new_stats.items(): - if k not in self._stats or self._stats[k] is None: - self._stats[k] = deepcopy(v) - elif isinstance(v, torch.Tensor): - self._stats[k] = torch.cat([self._stats[k], v], dim=0) - elif isinstance(v, np.ndarray): - self._stats[k] = np.concatenate([self._stats[k], v], axis=0) - elif isinstance(v, list): - self._stats[k] = self._stats[k] + deepcopy(v) - else: - raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") - - def to_numpy(self) -> None: - for k, v in self._stats.items(): - if isinstance(v, torch.Tensor): - self._stats[k] = v.detach().cpu().numpy() - - -def is_box_near_crop_edge( - boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 -) -> torch.Tensor: - """Filter masks at the edge of a crop, but not at the edge of the original image.""" - crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) - orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) - boxes = uncrop_boxes_xyxy(boxes, crop_box).float() - near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) - near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) - near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) - return torch.any(near_crop_edge, dim=1) - - -def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: - box_xywh = deepcopy(box_xyxy) - box_xywh[2] = box_xywh[2] - box_xywh[0] - box_xywh[3] = box_xywh[3] - box_xywh[1] - return box_xywh - - -def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: - assert len(args) > 0 and all( - len(a) == len(args[0]) for a in args - ), "Batched iteration must have inputs of all the same size." - n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) - for b in range(n_batches): - yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] - - -def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: - """ - Encodes masks to an uncompressed RLE, in the format expected by - pycoco tools. 
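-    Runs are counted in column-major (Fortran) order and the counts list always
-    starts with the number of leading zeros (0 if the first pixel is set).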
- """ - # Put in fortran order and flatten h,w - b, h, w = tensor.shape - tensor = tensor.permute(0, 2, 1).flatten(1) - - # Compute change indices - diff = tensor[:, 1:] ^ tensor[:, :-1] - change_indices = diff.nonzero() - - # Encode run length - out = [] - for i in range(b): - cur_idxs = change_indices[change_indices[:, 0] == i, 1] - cur_idxs = torch.cat( - [ - torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), - cur_idxs + 1, - torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), - ] - ) - btw_idxs = cur_idxs[1:] - cur_idxs[:-1] - counts = [] if tensor[i, 0] == 0 else [0] - counts.extend(btw_idxs.detach().cpu().tolist()) - out.append({"size": [h, w], "counts": counts}) - return out - - -def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: - """Compute a binary mask from an uncompressed RLE.""" - h, w = rle["size"] - mask = np.empty(h * w, dtype=bool) - idx = 0 - parity = False - for count in rle["counts"]: - mask[idx : idx + count] = parity - idx += count - parity ^= True - mask = mask.reshape(w, h) - return mask.transpose() # Put in C order - - -def area_from_rle(rle: Dict[str, Any]) -> int: - return sum(rle["counts"][1::2]) - - -def calculate_stability_score( - masks: torch.Tensor, mask_threshold: float, threshold_offset: float -) -> torch.Tensor: - """ - Computes the stability score for a batch of masks. The stability - score is the IoU between the binary masks obtained by thresholding - the predicted mask logits at high and low values. - """ - # One mask is always contained inside the other. - # Save memory by preventing unnecessary cast to torch.int64 - intersections = ( - (masks > (mask_threshold + threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - unions = ( - (masks > (mask_threshold - threshold_offset)) - .sum(-1, dtype=torch.int16) - .sum(-1, dtype=torch.int32) - ) - return intersections / unions - - -def build_point_grid(n_per_side: int) -> np.ndarray: - """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" - offset = 1 / (2 * n_per_side) - points_one_side = np.linspace(offset, 1 - offset, n_per_side) - points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) - points_y = np.tile(points_one_side[:, None], (1, n_per_side)) - points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) - return points - - -def build_all_layer_point_grids( - n_per_side: int, n_layers: int, scale_per_layer: int -) -> List[np.ndarray]: - """Generates point grids for all crop layers.""" - points_by_layer = [] - for i in range(n_layers + 1): - n_points = int(n_per_side / (scale_per_layer**i)) - points_by_layer.append(build_point_grid(n_points)) - return points_by_layer - - -def generate_crop_boxes( - im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float -) -> Tuple[List[List[int]], List[int]]: - """ - Generates a list of crop boxes of different sizes. Each layer - has (2**i)**2 boxes for the ith layer. 
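-    Also returns, for each crop box, the index of the crop layer it belongs to.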
- """ - crop_boxes, layer_idxs = [], [] - im_h, im_w = im_size - short_side = min(im_h, im_w) - - # Original image - crop_boxes.append([0, 0, im_w, im_h]) - layer_idxs.append(0) - - def crop_len(orig_len, n_crops, overlap): - return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) - - for i_layer in range(n_layers): - n_crops_per_side = 2 ** (i_layer + 1) - overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) - - crop_w = crop_len(im_w, n_crops_per_side, overlap) - crop_h = crop_len(im_h, n_crops_per_side, overlap) - - crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] - crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] - - # Crops in XYWH format - for x0, y0 in product(crop_box_x0, crop_box_y0): - box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] - crop_boxes.append(box) - layer_idxs.append(i_layer + 1) - - return crop_boxes, layer_idxs - - -def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: - x0, y0, _, _ = crop_box - offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) - # Check if boxes has a channel dimension - if len(boxes.shape) == 3: - offset = offset.unsqueeze(1) - return boxes + offset - - -def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: - x0, y0, _, _ = crop_box - offset = torch.tensor([[x0, y0]], device=points.device) - # Check if points has a channel dimension - if len(points.shape) == 3: - offset = offset.unsqueeze(1) - return points + offset - - -def uncrop_masks( - masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int -) -> torch.Tensor: - x0, y0, x1, y1 = crop_box - if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: - return masks - # Coordinate transform masks - pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) - pad = (x0, pad_x - x0, y0, pad_y - y0) - return torch.nn.functional.pad(masks, pad, value=0) - - -def remove_small_regions( - mask: np.ndarray, area_thresh: float, mode: str -) -> Tuple[np.ndarray, bool]: - """ - Removes small disconnected regions and holes in a mask. Returns the - mask and an indicator of if the mask has been modified. - """ - import cv2 # type: ignore - - assert mode in ["holes", "islands"] - correct_holes = mode == "holes" - working_mask = (correct_holes ^ mask).astype(np.uint8) - n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) - sizes = stats[:, -1][1:] # Row 0 is background label - small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] - if len(small_regions) == 0: - return mask, False - fill_labels = [0] + small_regions - if not correct_holes: - fill_labels = [i for i in range(n_labels) if i not in fill_labels] - # If every region is below threshold, keep largest - if len(fill_labels) == 0: - fill_labels = [int(np.argmax(sizes)) + 1] - mask = np.isin(regions, fill_labels) - return mask, True - - -def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: - from pycocotools import mask as mask_utils # type: ignore - - h, w = uncompressed_rle["size"] - rle = mask_utils.frPyObjects(uncompressed_rle, h, w) - rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json - return rle - - -def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: - """ - Calculates boxes in XYXY format around masks. Return [0,0,0,0] for - an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
- """ - # torch.max below raises an error on empty inputs, just skip in this case - if torch.numel(masks) == 0: - return torch.zeros(*masks.shape[:-2], 4, device=masks.device) - - # Normalize shape to CxHxW - shape = masks.shape - h, w = shape[-2:] - if len(shape) > 2: - masks = masks.flatten(0, -3) - else: - masks = masks.unsqueeze(0) - - # Get top and bottom edges - in_height, _ = torch.max(masks, dim=-1) - in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] - bottom_edges, _ = torch.max(in_height_coords, dim=-1) - in_height_coords = in_height_coords + h * (~in_height) - top_edges, _ = torch.min(in_height_coords, dim=-1) - - # Get left and right edges - in_width, _ = torch.max(masks, dim=-2) - in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] - right_edges, _ = torch.max(in_width_coords, dim=-1) - in_width_coords = in_width_coords + w * (~in_width) - left_edges, _ = torch.min(in_width_coords, dim=-1) - - # If the mask is empty the right edge will be to the left of the left edge. - # Replace these boxes with [0, 0, 0, 0] - empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) - out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) - out = out * (~empty_filter).unsqueeze(-1) - - # Return to original shape - if len(shape) > 2: - out = out.reshape(*shape[:-2], 4) - else: - out = out[0] - - return out diff --git a/spaces/nakas/MusicGenDemucs/audiocraft/models/musicgen.py b/spaces/nakas/MusicGenDemucs/audiocraft/models/musicgen.py deleted file mode 100644 index 007dd9e0ed1cfd359fb4889e7f4108248e189941..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/audiocraft/models/musicgen.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -""" -Main model for using MusicGen. This will combine all the required components -and provide easy access to the generation API. -""" - -import os -import typing as tp - -import torch - -from .encodec import CompressionModel -from .lm import LMModel -from .builders import get_debug_compression_model, get_debug_lm_model -from .loaders import load_compression_model, load_lm_model, HF_MODEL_CHECKPOINTS_MAP -from ..data.audio_utils import convert_audio -from ..modules.conditioners import ConditioningAttributes, WavCondition -from ..utils.autocast import TorchAutocast - - -MelodyList = tp.List[tp.Optional[torch.Tensor]] -MelodyType = tp.Union[torch.Tensor, MelodyList] - - -class MusicGen: - """MusicGen main model with convenient generation API. - - Args: - name (str): name of the model. - compression_model (CompressionModel): Compression model - used to map audio to invertible discrete representations. - lm (LMModel): Language model over discrete representations. 
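-        max_duration (float): Maximum duration in seconds the model can generate in
-            one pass; longer durations are produced by windowed extension. Defaults to 30.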
- """ - def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel, - max_duration: float = 30): - self.name = name - self.compression_model = compression_model - self.lm = lm - self.max_duration = max_duration - self.device = next(iter(lm.parameters())).device - self.generation_params: dict = {} - self.set_generation_params(duration=15) # 15 seconds by default - self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None - if self.device.type == 'cpu': - self.autocast = TorchAutocast(enabled=False) - else: - self.autocast = TorchAutocast( - enabled=True, device_type=self.device.type, dtype=torch.float16) - - @property - def frame_rate(self) -> int: - """Roughly the number of AR steps per seconds.""" - return self.compression_model.frame_rate - - @property - def sample_rate(self) -> int: - """Sample rate of the generated audio.""" - return self.compression_model.sample_rate - - @property - def audio_channels(self) -> int: - """Audio channels of the generated audio.""" - return self.compression_model.channels - - @staticmethod - def get_pretrained(name: str = 'melody', device=None): - """Return pretrained model, we provide four models: - - small (300M), text to music, # see: https://huggingface.co/facebook/musicgen-small - - medium (1.5B), text to music, # see: https://huggingface.co/facebook/musicgen-medium - - melody (1.5B) text to music and text+melody to music, # see: https://huggingface.co/facebook/musicgen-melody - - large (3.3B), text to music, # see: https://huggingface.co/facebook/musicgen-large - """ - - if device is None: - if torch.cuda.device_count(): - device = 'cuda' - else: - device = 'cpu' - - if name == 'debug': - # used only for unit tests - compression_model = get_debug_compression_model(device) - lm = get_debug_lm_model(device) - return MusicGen(name, compression_model, lm) - - if name not in HF_MODEL_CHECKPOINTS_MAP: - if not os.path.isfile(name) and not os.path.isdir(name): - raise ValueError( - f"{name} is not a valid checkpoint name. " - f"Choose one of {', '.join(HF_MODEL_CHECKPOINTS_MAP.keys())}" - ) - - cache_dir = os.environ.get('MUSICGEN_ROOT', None) - compression_model = load_compression_model(name, device=device, cache_dir=cache_dir) - lm = load_lm_model(name, device=device, cache_dir=cache_dir) - if name == 'melody': - lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True - - return MusicGen(name, compression_model, lm) - - def set_generation_params(self, use_sampling: bool = True, top_k: int = 250, - top_p: float = 0.0, temperature: float = 1.0, - duration: float = 30.0, cfg_coef: float = 3.0, - two_step_cfg: bool = False, extend_stride: float = 18): - """Set the generation parameters for MusicGen. - - Args: - use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True. - top_k (int, optional): top_k used for sampling. Defaults to 250. - top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0. - temperature (float, optional): Softmax temperature parameter. Defaults to 1.0. - duration (float, optional): Duration of the generated waveform. Defaults to 30.0. - cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0. - two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance, - instead of batching together the two. This has some impact on how things - are padded but seems to have little impact in practice. - extend_stride: when doing extended generation (i.e. 
more than 30 seconds), by how much - should we extend the audio each time. Larger values will mean less context is - preserved, and shorter value will require extra computations. - """ - assert extend_stride < self.max_duration, "Cannot stride by more than max generation duration." - self.extend_stride = extend_stride - self.duration = duration - self.generation_params = { - 'use_sampling': use_sampling, - 'temp': temperature, - 'top_k': top_k, - 'top_p': top_p, - 'cfg_coef': cfg_coef, - 'two_step_cfg': two_step_cfg, - } - - def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None): - """Override the default progress callback.""" - self._progress_callback = progress_callback - - def generate_unconditional(self, num_samples: int, progress: bool = False) -> torch.Tensor: - """Generate samples in an unconditional manner. - - Args: - num_samples (int): Number of samples to be generated. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - descriptions: tp.List[tp.Optional[str]] = [None] * num_samples - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate(self, descriptions: tp.List[str], progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType, - melody_sample_rate: int, progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on text and melody. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as - melody conditioning. Should have shape [B, C, T] with B matching the description length, - C=1 or 2. It can be [C, T] if there is a single description. It can also be - a list of [C, T] tensors. - melody_sample_rate: (int): Sample rate of the melody waveforms. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if isinstance(melody_wavs, torch.Tensor): - if melody_wavs.dim() == 2: - melody_wavs = melody_wavs[None] - if melody_wavs.dim() != 3: - raise ValueError("Melody wavs should have a shape [B, C, T].") - melody_wavs = list(melody_wavs) - else: - for melody in melody_wavs: - if melody is not None: - assert melody.dim() == 2, "One melody in the list has the wrong number of dims." - - melody_wavs = [ - convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels) - if wav is not None else None - for wav in melody_wavs] - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, - melody_wavs=melody_wavs) - assert prompt_tokens is None - return self._generate_tokens(attributes, prompt_tokens, progress) - - def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int, - descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None, - progress: bool = False) -> torch.Tensor: - """Generate samples conditioned on audio prompts. 
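-        The prompt audio is resampled to the model sample rate, encoded into discrete
-        tokens by the compression model, and used as a prefix that the language model
-        continues.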
- - Args: - prompt (torch.Tensor): A batch of waveforms used for continuation. - Prompt should be [B, C, T], or [C, T] if only one sample is generated. - prompt_sample_rate (int): Sampling rate of the given audio waveforms. - descriptions (tp.List[str], optional): A list of strings used as text conditioning. Defaults to None. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - """ - if prompt.dim() == 2: - prompt = prompt[None] - if prompt.dim() != 3: - raise ValueError("prompt should have 3 dimensions: [B, C, T] (C = 1).") - prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels) - if descriptions is None: - descriptions = [None] * len(prompt) - attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt) - assert prompt_tokens is not None - return self._generate_tokens(attributes, prompt_tokens, progress) - - @torch.no_grad() - def _prepare_tokens_and_attributes( - self, - descriptions: tp.Sequence[tp.Optional[str]], - prompt: tp.Optional[torch.Tensor], - melody_wavs: tp.Optional[MelodyList] = None, - ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]: - """Prepare model inputs. - - Args: - descriptions (tp.List[str]): A list of strings used as text conditioning. - prompt (torch.Tensor): A batch of waveforms used for continuation. - melody_wavs (tp.Optional[torch.Tensor], optional): A batch of waveforms - used as melody conditioning. Defaults to None. - """ - attributes = [ - ConditioningAttributes(text={'description': description}) - for description in descriptions] - - if melody_wavs is None: - for attr in attributes: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1), device=self.device), - torch.tensor([0], device=self.device), - path='null_wav') # type: ignore - else: - if self.name != "melody": - raise RuntimeError("This model doesn't support melody conditioning. " - "Use the `melody` model.") - assert len(melody_wavs) == len(descriptions), \ - f"number of melody wavs must match number of descriptions! " \ - f"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}" - for attr, melody in zip(attributes, melody_wavs): - if melody is None: - attr.wav['self_wav'] = WavCondition( - torch.zeros((1, 1), device=self.device), - torch.tensor([0], device=self.device), - path='null_wav') # type: ignore - else: - attr.wav['self_wav'] = WavCondition( - melody.to(device=self.device), - torch.tensor([melody.shape[-1]], device=self.device)) - - if prompt is not None: - if descriptions is not None: - assert len(descriptions) == len(prompt), "Prompt and nb. descriptions doesn't match" - prompt = prompt.to(self.device) - prompt_tokens, scale = self.compression_model.encode(prompt) - assert scale is None - else: - prompt_tokens = None - return attributes, prompt_tokens - - def _generate_tokens(self, attributes: tp.List[ConditioningAttributes], - prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor: - """Generate discrete audio tokens given audio prompt and/or conditions. - - Args: - attributes (tp.List[ConditioningAttributes]): Conditions used for generation (text/melody). - prompt_tokens (tp.Optional[torch.Tensor]): Audio prompt used for continuation. - progress (bool, optional): Flag to display progress of the generation process. Defaults to False. - Returns: - torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params. 
- """ - total_gen_len = int(self.duration * self.frame_rate) - max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate) - current_gen_offset: int = 0 - - def _progress_callback(generated_tokens: int, tokens_to_generate: int): - generated_tokens += current_gen_offset - if self._progress_callback is not None: - # Note that total_gen_len might be quite wrong depending on the - # codebook pattern used, but with delay it is almost accurate. - self._progress_callback(generated_tokens, total_gen_len) - else: - print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\r') - - if prompt_tokens is not None: - assert max_prompt_len >= prompt_tokens.shape[-1], \ - "Prompt is longer than audio to generate" - - callback = None - if progress: - callback = _progress_callback - - if self.duration <= self.max_duration: - # generate by sampling from LM, simple case. - with self.autocast: - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=total_gen_len, **self.generation_params) - - else: - # now this gets a bit messier, we need to handle prompts, - # melody conditioning etc. - ref_wavs = [attr.wav['self_wav'] for attr in attributes] - all_tokens = [] - if prompt_tokens is None: - prompt_length = 0 - else: - all_tokens.append(prompt_tokens) - prompt_length = prompt_tokens.shape[-1] - - stride_tokens = int(self.frame_rate * self.extend_stride) - - while current_gen_offset + prompt_length < total_gen_len: - time_offset = current_gen_offset / self.frame_rate - chunk_duration = min(self.duration - time_offset, self.max_duration) - max_gen_len = int(chunk_duration * self.frame_rate) - for attr, ref_wav in zip(attributes, ref_wavs): - wav_length = ref_wav.length.item() - if wav_length == 0: - continue - # We will extend the wav periodically if it not long enough. - # we have to do it here rather than in conditioners.py as otherwise - # we wouldn't have the full wav. 
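-                    # Indexing with `positions % wav_length` tiles the reference wav so the
-                    # chroma conditioner always receives a full `max_duration`-second window
-                    # starting at the current `time_offset`.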
- initial_position = int(time_offset * self.sample_rate) - wav_target_length = int(self.max_duration * self.sample_rate) - print(initial_position / self.sample_rate, wav_target_length / self.sample_rate) - positions = torch.arange(initial_position, - initial_position + wav_target_length, device=self.device) - attr.wav['self_wav'] = WavCondition( - ref_wav[0][:, positions % wav_length], - torch.full_like(ref_wav[1], wav_target_length)) - with self.autocast: - gen_tokens = self.lm.generate( - prompt_tokens, attributes, - callback=callback, max_gen_len=max_gen_len, **self.generation_params) - if prompt_tokens is None: - all_tokens.append(gen_tokens) - else: - all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:]) - prompt_tokens = gen_tokens[:, :, stride_tokens:] - prompt_length = prompt_tokens.shape[-1] - current_gen_offset += stride_tokens - - gen_tokens = torch.cat(all_tokens, dim=-1) - - # generate audio - assert gen_tokens.dim() == 3 - with torch.no_grad(): - gen_audio = self.compression_model.decode(gen_tokens, None) - return gen_audio diff --git a/spaces/nateraw/deepafx-st/deepafx_st/probes/random_mel.py b/spaces/nateraw/deepafx-st/deepafx_st/probes/random_mel.py deleted file mode 100644 index a83db533f22ee40843499ed43b8c5ee086a5a81d..0000000000000000000000000000000000000000 --- a/spaces/nateraw/deepafx-st/deepafx_st/probes/random_mel.py +++ /dev/null @@ -1,93 +0,0 @@ -import math -import torch -import librosa - -# based on https://github.com/neuralaudio/hear-baseline/blob/main/hearbaseline/naive.py - - -class RandomMelProjection(torch.nn.Module): - def __init__( - self, - sample_rate, - embed_dim=4096, - n_mels=128, - n_fft=4096, - hop_size=1024, - seed=0, - epsilon=1e-4, - ): - super().__init__() - self.sample_rate = sample_rate - self.embed_dim = embed_dim - self.n_mels = n_mels - self.n_fft = n_fft - self.hop_size = hop_size - self.seed = seed - self.epsilon = epsilon - - # Set random seed - torch.random.manual_seed(self.seed) - - # Create a Hann window buffer to apply to frames prior to FFT. - self.register_buffer("window", torch.hann_window(self.n_fft)) - - # Create a mel filter buffer. - mel_scale = torch.tensor( - librosa.filters.mel( - self.sample_rate, - n_fft=self.n_fft, - n_mels=self.n_mels, - ) - ) - self.register_buffer("mel_scale", mel_scale) - - # Projection matrices. - normalization = math.sqrt(self.n_mels) - self.projection = torch.nn.Parameter( - torch.rand(self.n_mels, self.embed_dim) / normalization, - requires_grad=False, - ) - - def forward(self, x): - bs, chs, samp = x.size() - - x = torch.stft( - x.view(bs, -1), - self.n_fft, - self.hop_size, - window=self.window, - return_complex=True, - ) - x = x.unsqueeze(1).permute(0, 1, 3, 2) - - # Apply the mel-scale filter to the power spectrum. - x = torch.matmul(x.abs(), self.mel_scale.transpose(0, 1)) - - # power scale - x = torch.pow(x + self.epsilon, 0.3) - - # apply random projection - e = x.matmul(self.projection) - - # take mean across temporal dim - e = e.mean(dim=2).view(bs, -1) - - return e - - def compute_frame_embedding(self, x): - # Compute the real-valued Fourier transform on windowed input signal. - x = torch.fft.rfft(x * self.window) - - # Convert to a power spectrum. - x = torch.abs(x) ** 2.0 - - # Apply the mel-scale filter to the power spectrum. - x = torch.matmul(x, self.mel_scale.transpose(0, 1)) - - # Convert to a log mel spectrum. 
- x = torch.log(x + self.epsilon) - - # Apply projection to get a 4096 dimension embedding - embedding = x.matmul(self.projection) - - return embedding diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Microsoft Office Professional 2010 Free Full Version.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Microsoft Office Professional 2010 Free Full Version.md deleted file mode 100644 index cf28923321d53a6c34bbb4633db8d4afdd67db4c..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download Microsoft Office Professional 2010 Free Full Version.md +++ /dev/null @@ -1,57 +0,0 @@ - -<h1>How to Download Microsoft Office Professional 2010 Free Full Version</h1> -<p>Microsoft Office Professional 2010 is a suite of productivity software that includes Word, Excel, PowerPoint, Outlook, Access, Publisher, and more. It offers powerful tools to create and share documents, presentations, spreadsheets, databases, and publications. It also supports online collaboration and cloud storage with SharePoint Workspace and OneDrive.</p> -<p>If you want to download Microsoft Office Professional 2010 free full version for your PC, you need to have a valid product key that you can purchase from Microsoft or other authorized sellers. You also need to meet the minimum system requirements for running Office 2010 on your PC. In this article, we will show you how to download and install Office 2010 on your PC step by step.</p> -<h2>download microsoft office professional 2010 free full version</h2><br /><p><b><b>Download Zip</b> ✪ <a href="https://urlcod.com/2uIbef">https://urlcod.com/2uIbef</a></b></p><br /><br /> -<h2>Step 1: Check the System Requirements</h2> -<p>Before you download Office 2010, make sure your PC meets the following system requirements:</p> -<ul> -<li>Operating system: Windows 11/10/8/7, Windows Server 2008, or Windows Vista Service Pack 1 (32-bit or 64-bit)</li> -<li>Processor: 500 MHz or faster</li> -<li>Memory: 256 MB RAM (512 MB recommended)</li> -<li>Hard disk space: 3 GB available</li> -<li>Display: 1024 x 576 resolution or higher</li> -<li>Graphics: DirectX 9.0c compatible graphics card with 64 MB of video memory</li> -</ul> -<h2>Step 2: Download Office 2010 from Microsoft Website</h2> -<p>To download Office 2010 from Microsoft website, follow these steps:</p> -<ol> -<li>Go to <a href="https://www.microsoft.com/en-us/software-download/office">https://www.microsoft.com/en-us/software-download/office</a>.</li> -<li>Select Office 2010 from the drop-down menu.</li> -<li>Enter your product key and click Verify.</li> -<li>Select your language and click Continue.</li> -<li>Choose whether you want to download the 32-bit or 64-bit version of Office 2010.</li> -<li>Click Download and save the installation file on your PC.</li> -</ol> -<h2>Step 3: Install Office 2010 on Your PC</h2> -<p>To install Office 2010 on your PC, follow these steps:</p> -<ol> -<li>Double-click the installation file that you downloaded in step 2.</li> -<li>Follow the instructions on the screen to complete the installation process.</li> -<li>When prompted, enter your product key again and activate Office 2010 online or by phone.</li> -<li>Launch any Office application and enjoy using Office 2010 on your PC.</li> -</ol> -<h2>Conclusion</h2> -<p>In this article, we have shown you how to download Microsoft Office Professional 2010 free full version for your PC. You need to have a valid product key and meet the system requirements to use Office 2010. 
You can also try other versions of Office such as Office Online, Office 365, or Office 2023 depending on your needs and preferences.</p> -<p></p> - -<h2>Step 4: Learn Some Tips and Tricks for Office 2010</h2> -<p>Now that you have installed Office 2010 on your PC, you may want to learn some tips and tricks to get the most out of it. Office 2010 has many new features and improvements that can help you work faster and easier. Here are some of them:</p> -<ul> -<li>You can take screenshots of any application by using the built-in Screen Capture tool in Word 2010[^1^].</li> -<li>You can remove the background images from pictures by making them transparent in Office 2010[^1^].</li> -<li>You can use the Backstage view to access common tasks such as saving, printing, sharing, and protecting your documents[^2^].</li> -<li>You can create custom tabs on the Ribbon to group your frequently used commands in one place[^2^].</li> -<li>You can use the interactive guides to find your favorite commands from Office 2003 in Office 2010[^3^].</li> -<li>You can bring back the Office 2003 menus and toolbars in Office 2010 by using a tool called UBitMenu[^4^].</li> -<li>You can open documents that Office wants to block by using the Open and Repair option.</li> -<li>You can print only one page of an Outlook message by using the Print Options dialog box.</li> -<li>You can integrate social networks into Outlook by using the Outlook Social Connector.</li> -<li>You can create sparkline microcharts in Excel to show trends and patterns in your data.</li> -<li>You can broadcast a slideshow from PowerPoint to anyone with a web browser by using the Broadcast Slide Show feature.</li> -</ul> -<h2>Step 5: Enjoy Using Office 2010 on Your PC</h2> -<p>Congratulations! You have successfully downloaded and installed Office 2010 on your PC. You have also learned some tips and tricks to make the most of it. Now you can enjoy using Office 2010 to create and edit documents, presentations, spreadsheets, databases, and publications. You can also collaborate online and store your files on the cloud with SharePoint Workspace and OneDrive.</p> -<p>We hope this article has helped you to download Microsoft Office Professional 2010 free full version for your PC. If you have any questions or feedback, please feel free to leave a comment below.</p> 7b8c122e87<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download-Eemua-158-Pdf-Free-234-8l-CRACKED.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download-Eemua-158-Pdf-Free-234-8l-CRACKED.md deleted file mode 100644 index 8d25df30e7bf7a79d17d5c12a98b8e0c8ca11ffa..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Download-Eemua-158-Pdf-Free-234-8l-CRACKED.md +++ /dev/null @@ -1,80 +0,0 @@ -## Download Eemua 158 Pdf Free 234 8l - - - - - - - - - -**DOWNLOAD ✓ [https://hyabrimhyfit.blogspot.com/?c=2txClq](https://hyabrimhyfit.blogspot.com/?c=2txClq)** - - - - - - - - - - - - - -# Download Eemua 158 Pdf Free 234 8l: A Guide to Construction Specification for Fixed Offshore Structures - - - -Eemua 158 is a publication by the Engineering Equipment and Materials Users Association (EEMUA) that defines the essential requirements for cost effective construction of primary structures of offshore installations. 
It covers topics such as material requirements, allowable welding consumables, welding quality and personnel requirements, fabrication tolerances, inspection and non-destructive testing acceptance levels. The publication also incorporates any necessary considerations for offshore wind constructions, while updating the rest of the content in accordance with the latest versions of the relevant regulations and standards[^1^]. - - - -If you are interested in learning more about Eemua 158 and how it can help you with your offshore projects, you can download a free pdf version of the publication from the EEMUA website. You will need to register as a user and provide some basic information before you can access the download link. The pdf file size is 234 MB and it contains 8 chapters and several appendices. The pdf file is compatible with most devices and software that can read pdf documents. - - - -Downloading Eemua 158 pdf free 234 8l is a great way to get familiar with the construction specification for fixed offshore structures and to apply it to your own work. However, if you want to get the full benefits of Eemua 158, you may want to consider purchasing a hardcopy or a digital edition of the publication from the EEMUA shop. The hardcopy version costs £139.00 and the digital version costs £111.00 for non-members[^1^]. The digital version is also available for free for users from EEMUA member companies[^1^]. By purchasing a copy of Eemua 158, you will be supporting the EEMUA's mission to improve the safety, environmental and operating performance of industrial facilities through better engineering practices. - - - -Eemua 158 is a valuable resource for anyone involved in the design, fabrication, installation and maintenance of offshore structures. It reflects a consensus view of offshore construction companies with regard to both fabrication and acceptance criteria for structures to be installed worldwide[^1^]. It also helps to ensure compliance with international standards such as ISO 19902:2007/A1:2013[^2^] and AWS A5.36/A5.36M:2012[^3^]. Whether you download Eemua 158 pdf free 234 8l or purchase a copy of the publication, you will gain access to a wealth of information and guidance that can enhance your offshore engineering projects. - - - -Challenges of offshore structures Offshore structures face many challenges throughout their life cycle, from design and installation to operation and decommissioning. Some of the major challenges are[^4^]: - - - -- Ensuring health and safety of workforce: Offshore workers are exposed to various hazards such as fire, explosion, falling objects, confined spaces, noise, vibration, fatigue and stress. They also have to cope with isolation, long working hours and harsh weather conditions. Therefore, it is essential to provide adequate training, supervision, communication, emergency response and welfare facilities for the offshore workforce. - -- Construction process is exposed to demanding weather conditions: Offshore structures have to be constructed in remote locations where the weather can be unpredictable and severe. This can affect the availability and design of installation vessels, cranes, barges and other equipment. It can also cause delays, disruptions and increased costs for the construction process. Therefore, it is important to plan ahead, monitor the weather forecasts and conditions, and adopt contingency measures for the construction process. 
- -- All key components have to be installed in large numbers in deep water, high altitude and carry a substantial individual weight: Offshore structures consist of many components such as jackets, piles, decks, modules, pipelines and cables that have to be transported and installed in deep water (up to 3000 m), high altitude (up to 200 m) and carry a substantial individual weight (up to 30,000 tonnes). This requires special engineering skills, techniques and equipment to ensure accuracy, stability and safety of the installation process. - -- Compliance with international standards and regulations: Offshore structures have to comply with various international standards and regulations that cover aspects such as design, fabrication, installation, operation, maintenance and decommissioning of offshore structures. These standards and regulations aim to ensure the safety, reliability and environmental performance of offshore structures. However, they can also pose challenges in terms of interpretation, implementation and verification of compliance. - - - -Solutions for offshore structures To overcome these challenges, offshore engineers need to apply innovative solutions that can improve the efficiency, effectiveness and sustainability of offshore structures. Some of the possible solutions are: - - - -- Using digital technologies: Digital technologies such as 3D modelling, simulation, virtual reality, augmented reality, artificial intelligence and big data can help offshore engineers to design, construct, operate and maintain offshore structures more efficiently and effectively. They can also help to enhance communication, collaboration and coordination among different stakeholders involved in offshore projects. - -- Using renewable energy sources: Renewable energy sources such as wind, wave and solar can help offshore structures to reduce their dependence on fossil fuels and their greenhouse gas emissions. They can also help to increase the energy security and resilience of offshore structures. However, they also pose technical challenges such as intermittency, variability and integration with existing power systems. - -- Using modular construction: Modular construction involves prefabricating components or modules of offshore structures onshore and then transporting and assembling them offshore. This can help to reduce the construction time, cost and risk of offshore structures. It can also help to improve the quality and consistency of offshore structures. However, it also requires careful planning, coordination and logistics to ensure the successful delivery of modules. - -- Using decommissioning technologies: Decommissioning technologies such as cutting tools, lifting devices, separation systems and recycling methods can help offshore engineers to remove or reuse offshore structures at the end of their service life. This can help to reduce the environmental impact and liability of offshore structures. It can also help to create opportunities for new uses or applications of offshore structures or their components. - - - -Conclusion Offshore structures are complex engineering systems that face many challenges throughout their life cycle. Offshore engineers need to apply innovative solutions that can improve the efficiency, effectiveness and sustainability of offshore structures. By doing so, they can contribute to the development of offshore resources and industries that can benefit society and economy. 
- - 1b8d091108 - - - - - diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Guzaarish 1080p Full Movie Download REPACK.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Guzaarish 1080p Full Movie Download REPACK.md deleted file mode 100644 index e7c810665a88a0fd1e1b84f29383d3b5471a527a..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Guzaarish 1080p Full Movie Download REPACK.md +++ /dev/null @@ -1,23 +0,0 @@ - -Here is the content I generated for you: - -<h1>How to Watch Guzaarish Online in Full HD Quality</h1> -<p>Guzaarish is a 2010 Hindi romantic drama film directed by Sanjay Leela Bhansali and starring Hrithik Roshan and Aishwarya Rai Bachchan. The film tells the story of Ethan Mascarenhas, a former magician who becomes paralyzed after an accident and petitions the court for euthanasia. The film explores themes of love, life, death and dignity.</p> -<p>If you are looking for a way to watch Guzaarish online in full HD quality, you have come to the right place. In this article, we will show you how to download or stream Guzaarish in 1080p resolution using legal and safe methods.</p> -<h2>Guzaarish 1080p Full Movie Download</h2><br /><p><b><b>Download</b> ❤❤❤ <a href="https://urlcod.com/2uIbVd">https://urlcod.com/2uIbVd</a></b></p><br /><br /> -<h2>Download Guzaarish in 1080p from Google Drive</h2> -<p>One of the easiest and fastest ways to download Guzaarish in 1080p is from Google Drive. Google Drive is a cloud storage service that allows you to store and share files online. You can access Google Drive from any device with an internet connection and a web browser.</p> -<p>To download Guzaarish from Google Drive, you need to find a link that contains the movie file in 1080p quality. You can search for such links on websites like OlaMovies, Filmyready or DotMovies. These websites provide Google Drive direct download links for various movies and shows.</p> -<p>Once you find a link that works for you, simply click on it and you will be redirected to Google Drive. There, you can either watch the movie online or download it to your device. To download the movie, click on the three dots icon at the top right corner and select Download. The movie will start downloading to your device.</p> -<h2>Stream Guzaarish in 1080p from OTT Platforms</h2> -<p>Another way to watch Guzaarish online in full HD quality is from OTT platforms. OTT stands for over-the-top, which means that these platforms deliver content directly to the viewers over the internet, without requiring a cable or satellite subscription.</p> -<p>Some of the popular OTT platforms that offer Guzaarish in 1080p are Netflix, Amazon Prime Video and Disney+ Hotstar. These platforms charge a monthly or yearly fee for their services, but they also offer free trials for new users. You can sign up for a free trial and watch Guzaarish without paying anything.</p> -<p>To stream Guzaarish from OTT platforms, you need to have a compatible device and a stable internet connection. You can access these platforms from your smartphone, tablet, laptop, desktop, smart TV or streaming device. You can also connect your device to a larger screen using HDMI cables or wireless casting.</p> -<p>Once you have an account and a device ready, simply log in to the platform of your choice and search for Guzaarish. 
You will find the movie in its library and you can start streaming it in 1080p resolution.</p> -<p></p> -<h2>Conclusion</h2> -<p>Guzaarish is a beautiful and touching film that explores the meaning of life and death through the eyes of a paralyzed magician. The film features stunning performances by Hrithik Roshan and Aishwarya Rai Bachchan, as well as a captivating soundtrack by Sanjay Leela Bhansali.</p> -<p>If you want to watch Guzaarish online in full HD quality, you have two options: download it from Google Drive or stream it from OTT platforms. Both methods are legal and safe, but they have different advantages and disadvantages. Downloading the movie allows you to watch it offline and save data, but it takes up storage space and may take longer depending on your internet speed. Streaming the movie allows you to watch it instantly and without downloading anything, but it requires a continuous internet connection and may consume more data.</p> -<p>Whichever method you choose, we hope you enjoy watching Guzaarish in 1080p quality. If you have any questions or feedback, please leave a comment below.</p> 7196e7f11a<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/ngoctuanai/chatgpt/Dockerfile b/spaces/ngoctuanai/chatgpt/Dockerfile deleted file mode 100644 index ac94787fa255b2375afac41f178f22cdefc838d3..0000000000000000000000000000000000000000 --- a/spaces/ngoctuanai/chatgpt/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -# Pull the base image -FROM ghcr.io/danny-avila/librechat:latest - -# Set environment variables -ENV HOST=0.0.0.0 -ENV PORT=7860 -ENV SESSION_EXPIRY=900000 -ENV REFRESH_TOKEN_EXPIRY=604800000 - -# Install dependencies -RUN cd /app/api && npm install - -# Command to run on container start -CMD ["npm", "run", "backend"] \ No newline at end of file diff --git a/spaces/niizam/sovits-models/hubert/hubert_model.py b/spaces/niizam/sovits-models/hubert/hubert_model.py deleted file mode 100644 index 7fb642d89b07ca60792debab18e3454f52d8f357..0000000000000000000000000000000000000000 --- a/spaces/niizam/sovits-models/hubert/hubert_model.py +++ /dev/null @@ -1,222 +0,0 @@ -import copy -import random -from typing import Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as t_func -from torch.nn.modules.utils import consume_prefix_in_state_dict_if_present - - -class Hubert(nn.Module): - def __init__(self, num_label_embeddings: int = 100, mask: bool = True): - super().__init__() - self._mask = mask - self.feature_extractor = FeatureExtractor() - self.feature_projection = FeatureProjection() - self.positional_embedding = PositionalConvEmbedding() - self.norm = nn.LayerNorm(768) - self.dropout = nn.Dropout(0.1) - self.encoder = TransformerEncoder( - nn.TransformerEncoderLayer( - 768, 12, 3072, activation="gelu", batch_first=True - ), - 12, - ) - self.proj = nn.Linear(768, 256) - - self.masked_spec_embed = nn.Parameter(torch.FloatTensor(768).uniform_()) - self.label_embedding = nn.Embedding(num_label_embeddings, 256) - - def mask(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - mask = None - if self.training and self._mask: - mask = _compute_mask((x.size(0), x.size(1)), 0.8, 10, x.device, 2) - x[mask] = self.masked_spec_embed.to(x.dtype) - return x, mask - - def encode( - self, x: torch.Tensor, layer: Optional[int] = None - ) -> Tuple[torch.Tensor, torch.Tensor]: - x = self.feature_extractor(x) - x = self.feature_projection(x.transpose(1, 2)) - x, mask = self.mask(x) - x = x + self.positional_embedding(x) - x = 
self.dropout(self.norm(x)) - x = self.encoder(x, output_layer=layer) - return x, mask - - def logits(self, x: torch.Tensor) -> torch.Tensor: - logits = torch.cosine_similarity( - x.unsqueeze(2), - self.label_embedding.weight.unsqueeze(0).unsqueeze(0), - dim=-1, - ) - return logits / 0.1 - - def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - x, mask = self.encode(x) - x = self.proj(x) - logits = self.logits(x) - return logits, mask - - -class HubertSoft(Hubert): - def __init__(self): - super().__init__() - - @torch.inference_mode() - def units(self, wav: torch.Tensor) -> torch.Tensor: - wav = t_func.pad(wav, ((400 - 320) // 2, (400 - 320) // 2)) - x, _ = self.encode(wav) - return self.proj(x) - - -class FeatureExtractor(nn.Module): - def __init__(self): - super().__init__() - self.conv0 = nn.Conv1d(1, 512, 10, 5, bias=False) - self.norm0 = nn.GroupNorm(512, 512) - self.conv1 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv2 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv3 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv4 = nn.Conv1d(512, 512, 3, 2, bias=False) - self.conv5 = nn.Conv1d(512, 512, 2, 2, bias=False) - self.conv6 = nn.Conv1d(512, 512, 2, 2, bias=False) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = t_func.gelu(self.norm0(self.conv0(x))) - x = t_func.gelu(self.conv1(x)) - x = t_func.gelu(self.conv2(x)) - x = t_func.gelu(self.conv3(x)) - x = t_func.gelu(self.conv4(x)) - x = t_func.gelu(self.conv5(x)) - x = t_func.gelu(self.conv6(x)) - return x - - -class FeatureProjection(nn.Module): - def __init__(self): - super().__init__() - self.norm = nn.LayerNorm(512) - self.projection = nn.Linear(512, 768) - self.dropout = nn.Dropout(0.1) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.norm(x) - x = self.projection(x) - x = self.dropout(x) - return x - - -class PositionalConvEmbedding(nn.Module): - def __init__(self): - super().__init__() - self.conv = nn.Conv1d( - 768, - 768, - kernel_size=128, - padding=128 // 2, - groups=16, - ) - self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) - - def forward(self, x: torch.Tensor) -> torch.Tensor: - x = self.conv(x.transpose(1, 2)) - x = t_func.gelu(x[:, :, :-1]) - return x.transpose(1, 2) - - -class TransformerEncoder(nn.Module): - def __init__( - self, encoder_layer: nn.TransformerEncoderLayer, num_layers: int - ) -> None: - super(TransformerEncoder, self).__init__() - self.layers = nn.ModuleList( - [copy.deepcopy(encoder_layer) for _ in range(num_layers)] - ) - self.num_layers = num_layers - - def forward( - self, - src: torch.Tensor, - mask: torch.Tensor = None, - src_key_padding_mask: torch.Tensor = None, - output_layer: Optional[int] = None, - ) -> torch.Tensor: - output = src - for layer in self.layers[:output_layer]: - output = layer( - output, src_mask=mask, src_key_padding_mask=src_key_padding_mask - ) - return output - - -def _compute_mask( - shape: Tuple[int, int], - mask_prob: float, - mask_length: int, - device: torch.device, - min_masks: int = 0, -) -> torch.Tensor: - batch_size, sequence_length = shape - - if mask_length < 1: - raise ValueError("`mask_length` has to be bigger than 0.") - - if mask_length > sequence_length: - raise ValueError( - f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" - ) - - # compute number of masked spans in batch - num_masked_spans = int(mask_prob * sequence_length / mask_length + random.random()) - num_masked_spans = 
max(num_masked_spans, min_masks) - - # make sure num masked indices <= sequence_length - if num_masked_spans * mask_length > sequence_length: - num_masked_spans = sequence_length // mask_length - - # SpecAugment mask to fill - mask = torch.zeros((batch_size, sequence_length), device=device, dtype=torch.bool) - - # uniform distribution to sample from, make sure that offset samples are < sequence_length - uniform_dist = torch.ones( - (batch_size, sequence_length - (mask_length - 1)), device=device - ) - - # get random indices to mask - mask_indices = torch.multinomial(uniform_dist, num_masked_spans) - - # expand masked indices to masked spans - mask_indices = ( - mask_indices.unsqueeze(dim=-1) - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - offsets = ( - torch.arange(mask_length, device=device)[None, None, :] - .expand((batch_size, num_masked_spans, mask_length)) - .reshape(batch_size, num_masked_spans * mask_length) - ) - mask_idxs = mask_indices + offsets - - # scatter indices to mask - mask = mask.scatter(1, mask_idxs, True) - - return mask - - -def hubert_soft( - path: str, -) -> HubertSoft: - r"""HuBERT-Soft from `"A Comparison of Discrete and Soft Speech Units for Improved Voice Conversion"`. - Args: - path (str): path of a pretrained model - """ - hubert = HubertSoft() - checkpoint = torch.load(path) - consume_prefix_in_state_dict_if_present(checkpoint, "module.") - hubert.load_state_dict(checkpoint) - hubert.eval() - return hubert diff --git a/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/google-analytics_analytics.js b/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/google-analytics_analytics.js deleted file mode 100644 index 6301ba92314cba9df0619719d13b12c87ed8c205..0000000000000000000000000000000000000000 --- a/spaces/nmitchko/AI-in-Healthcare/Developer Meetup in Boston Generative AI Use Cases in Healthcare _files/google-analytics_analytics.js +++ /dev/null @@ -1,103 +0,0 @@ -/******************************************************************************* - - uBlock Origin - a browser extension to block requests. - Copyright (C) 2019-present Raymond Hill - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see {http://www.gnu.org/licenses/}. 
- - Home: https://github.com/gorhill/uBlock -*/ - -(function() { - 'use strict'; - // https://developers.google.com/analytics/devguides/collection/analyticsjs/ - const noopfn = function() { - }; - // - const Tracker = function() { - }; - const p = Tracker.prototype; - p.get = noopfn; - p.set = noopfn; - p.send = noopfn; - // - const w = window; - const gaName = w.GoogleAnalyticsObject || 'ga'; - const gaQueue = w[gaName]; - // https://github.com/uBlockOrigin/uAssets/pull/4115 - const ga = function() { - const len = arguments.length; - if ( len === 0 ) { return; } - const args = Array.from(arguments); - let fn; - let a = args[len-1]; - if ( a instanceof Object && a.hitCallback instanceof Function ) { - fn = a.hitCallback; - } else if ( a instanceof Function ) { - fn = ( ) => { a(ga.create()); }; - } else { - const pos = args.indexOf('hitCallback'); - if ( pos !== -1 && args[pos+1] instanceof Function ) { - fn = args[pos+1]; - } - } - if ( fn instanceof Function === false ) { return; } - try { - fn(); - } catch (ex) { - } - }; - ga.create = function() { - return new Tracker(); - }; - ga.getByName = function() { - return new Tracker(); - }; - ga.getAll = function() { - return []; - }; - ga.remove = noopfn; - // https://github.com/uBlockOrigin/uAssets/issues/2107 - ga.loaded = true; - w[gaName] = ga; - // https://github.com/gorhill/uBlock/issues/3075 - const dl = w.dataLayer; - if ( dl instanceof Object ) { - if ( dl.hide instanceof Object && typeof dl.hide.end === 'function' ) { - dl.hide.end(); - } - if ( typeof dl.push === 'function' ) { - const doCallback = function(item) { - if ( item instanceof Object === false ) { return; } - if ( typeof item.eventCallback !== 'function' ) { return; } - setTimeout(item.eventCallback, 1); - }; - if ( Array.isArray(dl) ) { - dl.push = item => doCallback(item); - const q = dl.slice(); - for ( const item of q ) { - doCallback(item); - } - } - } - } - // empty ga queue - if ( gaQueue instanceof Function && Array.isArray(gaQueue.q) ) { - const q = gaQueue.q.slice(); - gaQueue.q.length = 0; - for ( const entry of q ) { - ga(...entry); - } - } -})(); diff --git a/spaces/noman1408/speechToSpeechGPT/nomanTest.py b/spaces/noman1408/speechToSpeechGPT/nomanTest.py deleted file mode 100644 index 8306dcc077afa20b882075781bc1b5de58f9240c..0000000000000000000000000000000000000000 --- a/spaces/noman1408/speechToSpeechGPT/nomanTest.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Apr 20 15:44:18 2023 - -@author: chowd -""" -print(2+2) - diff --git a/spaces/nomic-ai/glue/style.css b/spaces/nomic-ai/glue/style.css deleted file mode 100644 index 114adf441e9032febb46bc056b2a8bb651075f0d..0000000000000000000000000000000000000000 --- a/spaces/nomic-ai/glue/style.css +++ /dev/null @@ -1,28 +0,0 @@ -body { - padding: 2rem; - font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif; -} - -h1 { - font-size: 16px; - margin-top: 0; -} - -p { - color: rgb(107, 114, 128); - font-size: 15px; - margin-bottom: 10px; - margin-top: 5px; -} - -.card { - max-width: 620px; - margin: 0 auto; - padding: 16px; - border: 1px solid lightgray; - border-radius: 16px; -} - -.card p:last-child { - margin-bottom: 0; -} diff --git a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/masked_sparse_matrix.h b/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/masked_sparse_matrix.h deleted file mode 100644 index a905ba4befcdc845834c37a4c07c8331deb8bd70..0000000000000000000000000000000000000000 --- 
a/spaces/ntt123/WaveGRU-Text-To-Speech/sparse_matmul/layers/masked_sparse_matrix.h +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LYRA_CODEC_SPARSE_MATMUL_LAYERS_MASKED_SPARSE_MATRIX_H_ -#define LYRA_CODEC_SPARSE_MATMUL_LAYERS_MASKED_SPARSE_MATRIX_H_ - -#include <algorithm> -#include <cstdio> -#include <numeric> -#include <vector> - -#include "absl/strings/str_format.h" -#include "sparse_matmul/vector/cache_aligned_vector.h" - -namespace csrblocksparse { - -// MaskedSparseMatrix serves two purposes: -// 1) It is useful as a reference implementation of SpMV for correctness -// checking the much more complicated implementations in CSRBlockSparseMatrix -// 2) This is the format that sparse matrices are represented after pruning -// in TF. This class provides a bridge to getting these parameters into -// a compressed form suitable for computation and serialization. -// -// MaskedSparseMatrix<float> matrix(rows, cols, mask_from_tf, values_from_tf); -// CSRBlockSparseMatrix<float, bfloat16, int16_t> csr_matrix(matrix); -// csr_matrix.Multiply(rhs, bias, &out); -template <typename T> -class MaskedSparseMatrix { - public: - MaskedSparseMatrix() {} - - // Construct a MaskedSparseMatrix of the given size, sparsity and block size. - // This is mainly useful for testing. - MaskedSparseMatrix(int rows, int cols, float sparsity, int block_height = 1, - int block_width = 1, float constant = 1.f, - bool random = true) - : rows_(rows), cols_(cols), sparsity_(sparsity) { - CHECK_EQ(rows % block_height, 0); - CHECK_EQ(cols % block_width, 0); - - init(sparsity, block_height, block_width, constant, random); - } - - // Construct from an existing mask and values (most likely from a TF model). 
- template <typename MaskType> - MaskedSparseMatrix(int rows, int cols, const MaskType* mask, const T* values) - : rows_(rows), cols_(cols) { - mask_.resize(rows * cols); - values_.resize(rows * cols); - std::copy_n(mask, rows * cols, mask_.begin()); - std::copy_n(values, rows * cols, values_.begin()); - sparsity_ = - 1.f - std::accumulate(mask_.begin(), mask_.end(), 0.f) / mask_.size(); - } - - const std::vector<int>& mask() const { return mask_; } - const std::vector<T>& values() const { return values_; } - T* data() { return values_.data(); } - const T* data() const { return values_.data(); } - - int rows() const { return rows_; } - int cols() const { return cols_; } - float sparsity() const { return sparsity_; } - - void Print() const { - absl::PrintF("-------Values---------\n"); - for (int r = 0; r < rows_; ++r) { - for (int c = 0; c < cols_; ++c) { - absl::PrintF("%+6.3f ", static_cast<float>(values_[r * cols_ + c])); - } - absl::PrintF("\n"); - } - absl::PrintF("-------Mask---------\n"); - for (int r = 0; r < rows_; ++r) { - for (int c = 0; c < cols_; ++c) { - printf("%2d ", mask_[r * cols_ + c]); - } - absl::PrintF("\n"); - } - } - - // This routine is useful for rounding the possibly higher precision values - // stored in this class to a lower precision, so that correctness checks - // between this class and CSRBlockSparseMatrix can have a tighter tolerance. - template <typename U> - void CastWeights() { - for (int i = 0; i < values_.size(); ++i) { - values_[i] = static_cast<T>(U(values_[i])); - } - } - - // Only meant for correctness checking. - // RhsClassType is meant to be either CacheAlignedVector OR - // FatCacheAlignedVector. - // The weight matrix is ROW MAJOR and RhsClassType is COLUMN MAJOR. - // |bias| is broadcast if |rhs| has more than one column. - template <typename RhsClassType, typename BiasType, typename OutClassType, - typename RhsType = typename RhsClassType::value_type, - typename OutType = typename OutClassType::value_type> - void SpMM_bias(const RhsClassType& rhs, - const CacheAlignedVector<BiasType>& bias, OutClassType* out, - bool relu = false) { - for (int r = 0; r < rows_; ++r) { - for (int n = 0; n < rhs.cols(); ++n) { - float sum = 0.f; - const RhsType* rhs_ptr = rhs.data() + n * rhs.rows(); - OutType* out_ptr = out->data() + n * out->rows(); - const int* mask_ptr = mask_.data() + r * cols_; - const T* value_ptr = values_.data() + r * cols_; - for (int c = 0; c < cols_; ++c) { - sum += mask_ptr[c] * static_cast<float>(value_ptr[c]) * - static_cast<float>(rhs_ptr[c]); - } - out_ptr[r] = static_cast<OutType>( - relu ? std::max(sum + static_cast<float>(bias[r]), 0.f) - : sum + static_cast<float>(bias[r])); - } - } - } - - private: - // Generate a random matrix with the specified sparsity. - // Useful for testing. - void init(float sparsity, int block_height, int block_width, float constant, - bool random = true) { - int reduced_rows = rows_ / block_height; - int reduced_cols = cols_ / block_width; - mask_.resize(rows_ * cols_, 0); - - // Fill with non-zero value to make sure masking works. 
- values_.resize(rows_ * cols_, static_cast<T>(2.f)); - - std::mt19937 generator(0); - std::uniform_real_distribution<float> dist_sparsity; - std::uniform_real_distribution<float> dist_value(-1.f, 1.f); - int nnz = 0; - while (nnz == 0) { - for (int r = 0; r < reduced_rows; ++r) { - for (int c = 0; c < reduced_cols; ++c) { - if (dist_sparsity(generator) > sparsity) { - nnz++; - for (int i = 0; i < block_height; ++i) { - for (int j = 0; j < block_width; ++j) { - mask_[(r * block_height + i) * cols_ + block_width * c + j] = 1; - values_[(r * block_height + i) * cols_ + block_width * c + j] = - static_cast<T>(random ? dist_value(generator) : constant); - } - } - } - } - } - } - } - - std::vector<int> mask_; - std::vector<T> values_; - int rows_; - int cols_; - float sparsity_; -}; - -template <typename T> -class MaskedLinearLayer { - public: - MaskedLinearLayer(MaskedSparseMatrix<T>&& weights, - CacheAlignedVector<T>&& bias) - : weights_(std::move(weights)), bias_(std::move(bias)) {} - - MaskedLinearLayer() {} - - template <typename U> - void CastWeights() { - weights_.template CastWeights<U>(); - } - - // Does Ax + b where A is a masked sparse ROW MAJOR matrix and - // x is a COLUMN MAJOR dense vector or matrix. Bias is a vector that is - // broadcast is rhs has more than one column. - template <typename FatVector> - void SpMM_bias(const FatVector& rhs, FatVector* out, bool relu = false) { - static_assert(std::is_same<typename FatVector::value_type, T>::value, - "FatVector value_type must match masked_linear_layer type"); - weights_.SpMM_bias(rhs, bias_, out, relu); - } - - private: - MaskedSparseMatrix<T> weights_; - CacheAlignedVector<T> bias_; -}; - -} // namespace csrblocksparse - -#endif // LYRA_CODEC_SPARSE_MATMUL_LAYERS_MASKED_SPARSE_MATRIX_H_ diff --git a/spaces/osanseviero/biggan/README.md b/spaces/osanseviero/biggan/README.md deleted file mode 100644 index 8195d6dfff2730d4554b2c82fb00cb850a1eb6cd..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/biggan/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Biggan -emoji: 🌖 -colorFrom: gray -colorTo: purple -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
diff --git a/spaces/osanseviero/streamlit_1.15/README.md b/spaces/osanseviero/streamlit_1.15/README.md deleted file mode 100644 index ca03ade2219e2d3bd8bcc4378abd917c8f901011..0000000000000000000000000000000000000000 --- a/spaces/osanseviero/streamlit_1.15/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Streamlit Cool Features -emoji: 👁 -colorFrom: pink -colorTo: gray -sdk: streamlit -sdk_version: 1.15.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git "a/spaces/oskarvanderwal/MT-bias-demo/results/counterfactual_esk\303\274v\305\221szervez\305\221.html" "b/spaces/oskarvanderwal/MT-bias-demo/results/counterfactual_esk\303\274v\305\221szervez\305\221.html" deleted file mode 100644 index fe055fd916ecabc22a9d3b5bed40d17ab2e463b5..0000000000000000000000000000000000000000 --- "a/spaces/oskarvanderwal/MT-bias-demo/results/counterfactual_esk\303\274v\305\221szervez\305\221.html" +++ /dev/null @@ -1,23 +0,0 @@ -<br/><b>0th instance:</b><br/> -<html> -<div id="ntoznnhnpbokoxvjifqa_viz_container"> - <div id="ntoznnhnpbokoxvjifqa_content" style="padding:15px;border-style:solid;margin:5px;"> - <div id = "ntoznnhnpbokoxvjifqa_saliency_plot_container" class="ntoznnhnpbokoxvjifqa_viz_container" style="display:block"> - -<div id="isvpjqsftaxgpoqdshns_saliency_plot" class="isvpjqsftaxgpoqdshns_viz_content"> - <div style="margin:5px;font-family:sans-serif;font-weight:bold;"> - <span style="font-size: 20px;">Source Saliency Heatmap</span> - <br> - x: Generated tokens, y: Attributed tokens - </div> - -<table border="1" cellpadding="5" cellspacing="5" - style="overflow-x:scroll;display:block;"> - <tr><th></th> -<th>▁He's → ▁She's</th><th>▁a</th><th>▁wedding</th><th>▁planner.</th><th></s></th></tr><tr><th>▁Ő</th><th style="background:rgba(255.0, 13.0, 87.0, 0.00677361853832443)">0.002</th><th style="background:rgba(30.0, 136.0, 229.0, 0.03830461477520309)">-0.005</th><th style="background:rgba(30.0, 136.0, 229.0, 0.3851455733808674)">-0.048</th><th style="background:rgba(255.0, 13.0, 87.0, 0.05407011289364243)">0.007</th><th style="background:rgba(255.0, 13.0, 87.0, 0.976351752822341)">0.121</th></tr><tr><th>▁esküvőszervező.</th><th style="background:rgba(54.70588235294111, 122.49411764705886, 213.40784313725496, 0.0)">-0.001</th><th style="background:rgba(255.0, 13.0, 87.0, 0.014656367597544035)">0.003</th><th style="background:rgba(255.0, 13.0, 87.0, 0.10924935630817992)">0.014</th><th style="background:rgba(230.2941176470614, 26.505882352939775, 102.59215686274348, 0.0)">0.001</th><th style="background:rgba(255.0, 13.0, 87.0, 1.0)">0.124</th></tr><tr><th></s></th><th style="background:rgba(230.2941176470614, 26.505882352939775, 102.59215686274348, 0.0)">0.0</th><th style="background:rgba(230.2941176470614, 26.505882352939775, 102.59215686274348, 0.0)">0.0</th><th style="background:rgba(230.2941176470614, 26.505882352939775, 102.59215686274348, 0.0)">0.0</th><th style="background:rgba(230.2941176470614, 26.505882352939775, 102.59215686274348, 0.0)">0.0</th><th style="background:rgba(230.2941176470614, 26.505882352939775, 102.59215686274348, 0.0)">0.0</th></tr><tr style="outline: thin solid"><th><b>probability</b></th><th>0.102</th><th>-0.003</th><th>0.003</th><th>0.009</th><th>-0.0</th></table> -</div> - - </div> - </div> -</div> -</html> diff --git a/spaces/osmanriver/Alist/README.md b/spaces/osmanriver/Alist/README.md deleted file mode 100644 index 
a036551c401bb96dfdb2c6756b4bb14d9be4ed4f..0000000000000000000000000000000000000000 --- a/spaces/osmanriver/Alist/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Alist -emoji: 🦀 -colorFrom: red -colorTo: pink -sdk: docker -pinned: false -license: agpl-3.0 -app_port: 5244 -duplicated_from: tumuyan/Alist ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/patti-j/omdena-mental-health/cli_app.py b/spaces/patti-j/omdena-mental-health/cli_app.py deleted file mode 100644 index 1dbbabdc81357d320f1985c849e62c0ec8e0408e..0000000000000000000000000000000000000000 --- a/spaces/patti-j/omdena-mental-health/cli_app.py +++ /dev/null @@ -1,17 +0,0 @@ -import pickle -from query_data import get_chain - - -if __name__ == "__main__": - with open("vectorstore.pkl", "rb") as f: - vectorstore = pickle.load(f) - qa_chain = get_chain(vectorstore) - chat_history = [] - print("Chat with your docs!") - while True: - print("Human:") - question = input() - result = qa_chain({"question": question, "chat_history": chat_history}) - chat_history.append((question, result["answer"])) - print("AI:") - print(result["answer"]) \ No newline at end of file diff --git a/spaces/paulokewunmi/omowe.ai/src/document_utils.py b/spaces/paulokewunmi/omowe.ai/src/document_utils.py deleted file mode 100644 index e682e57a1620794d7f95638cdbf65b97c1fb38b0..0000000000000000000000000000000000000000 --- a/spaces/paulokewunmi/omowe.ai/src/document_utils.py +++ /dev/null @@ -1,228 +0,0 @@ -import os -import sys - -import pandas as pd -from typing import List -import pinecone -import difflib - -import cohere -from langchain.embeddings.cohere import CohereEmbeddings -from langchain.llms import Cohere -from langchain.prompts import PromptTemplate -from langchain.vectorstores import Pinecone, Qdrant -from langchain.chains.question_answering import load_qa_chain - -sys.path.append(os.path.abspath('..')) - -from src.constants import SUMMARIZATION_MODEL, EXAMPLES_FILE_PATH - - - -PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY") -PINECONE_ENV = os.environ.get("PINECONE_ENV") -COHERE_API_KEY = os.environ.get("COHERE_API_KEY") - - - -def replace_text(text): - if text.startswith("The answer is "): - text = text.replace("The answer is ", "", 1) - return text - - -def summarize( - document: str, - summary_length: str, - summary_format: str, - extractiveness: str = "high", - temperature: float = 0.6, -) -> str: - """ - Generates a summary for the input document using Cohere's summarize API. - Args: - document (`str`): - The document given by the user for which summary must be generated. - summary_length (`str`): - A value such as 'short', 'medium', 'long' indicating the length of the summary. - summary_format (`str`): - This indicates whether the generated summary should be in 'paragraph' format or 'bullets'. - extractiveness (`str`, *optional*, defaults to 'high'): - A value such as 'low', 'medium', 'high' indicating how close the generated summary should be in meaning to the original text. - temperature (`str`): - This controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. - Returns: - generated_summary (`str`): - The generated summary from the summarization model. 
- """ - - summary_response = cohere.Client(COHERE_API_KEY).summarize( - text=document, - length=summary_length, - format=summary_format, - model=SUMMARIZATION_MODEL, - extractiveness=extractiveness, - temperature=temperature, - ) - generated_summary = summary_response.summary - return generated_summary - - -def question_answer(input_document: str, history: List) -> str: - """ - Generates an appropriate answer for the question asked by the user based on the input document. - Args: - input_document (`str`): - The document given by the user for which summary must be generated. - history (`List[List[str,str]]`): - A list made up of pairs of input question asked by the user & corresponding generated answers. It is used to keep track of the history of the chat between the user and the model. - Returns: - answer (`str`): - The generated answer corresponding to the input question and document received from the user. - """ - pinecone.init( - api_key=PINECONE_API_KEY, # find at app.pinecone.io - environment=PINECONE_ENV # next to api key in console - ) - context = input_document - # The last element of the `history` list contains the most recent question asked by the user whose answer needs to be generated. - question = history[-1][0] - word_list = context.split() - - texts = [" ".join(word_list[k : k + 200]) for k in range(0, len(word_list), 200)] - - # print(texts) - - embeddings = CohereEmbeddings( - model="multilingual-22-12", cohere_api_key=COHERE_API_KEY - ) - - context_index = Pinecone.from_texts(texts, embeddings, index_name="wiki-embed") - - prompt_template = """Text: {context} - Question: {question} - Answer the question based on the text provided. If the text doesn't contain the answer, reply that the answer is not available.""" - - PROMPT = PromptTemplate( - template=prompt_template, input_variables=["context", "question"] - ) - - # Generate the answer given the context - chain = load_qa_chain( - Cohere( - model="command-xlarge-nightly", temperature=0, cohere_api_key=COHERE_API_KEY - ), - chain_type="stuff", - prompt=PROMPT, - ) - relevant_context = context_index.similarity_search(question) - answer = chain.run(input_documents=relevant_context, question=question) - answer = answer.replace("\n", "").replace("Answer:", "") - answer = replace_text(answer) - return answer - -def generate_questions(input_document: str) -> str: - co = cohere.Client(COHERE_API_KEY) - prompt = f"""Write five different questions to test the understanding of the following text. The questions should be short answer, with one or two words each, and vary in difficulty from easy to hard. Provide the correct answer for each question after the question. 
- Now write your own questions for this text: - - Text: {input_document} - - Question 1: (question_1) - Answer: (answer_1) - - Question 2: (question_2) - Answer: (answer_2) - - Question 3: (question_3) - Answer: (answer_3) - - Question 4: (question_4) - Answer: (answer_4) - - Question 5: (question_5) - Answer: (answer_5)""" - - - response = co.generate(model='command', prompt=prompt, temperature=2, max_tokens=1000, ) - - answer = response.generations[0].text.strip() - print(answer) - questions = answer.split('\n\n') - print(questions) - result = {} - for question in questions: - q, a = question.split('\n') - result[q] = a.split(': ')[1] - - return answer - - -def load_science(): - examples_df = pd.read_csv(EXAMPLES_FILE_PATH) - science_doc = examples_df["doc"].iloc[0] - sample_question = examples_df["question"].iloc[0] - return science_doc, sample_question - - -def load_history(): - examples_df = pd.read_csv(EXAMPLES_FILE_PATH) - history_doc = examples_df["doc"].iloc[1] - sample_question = examples_df["question"].iloc[1] - return history_doc, sample_question - -def show_diff_html(seqm): - """Unify operations between two compared strings - seqm is a difflib.SequenceMatcher instance whose a & b are strings - """ - output = [] - for opcode, a0, a1, b0, b1 in seqm.get_opcodes(): - if opcode == 'equal': - output.append(seqm.b[b0:b1]) - elif opcode == 'insert': - output.append(f"<span style='background-color:lime;'>{seqm.b[b0:b1]}</span>") - # elif opcode == 'delete': - # output.append(f"<span style='background-color:red;'>{seqm.a[a0:a1]}</span>") - elif opcode == 'replace': - # output.append(f"<span style='background-color:red;'>{seqm.a[a0:a1]}</span>") - output.append(f"<span style='background-color:lime;'>{seqm.b[b0:b1]}</span>") - else: - if opcode == 'delete' or opcode == 'replace': - continue - raise RuntimeError("unexpected opcode") - return ''.join(output) - -# define a function to paraphrase text using Cohere API -def paraphrase(text): - # create a cohere client with your API key - client = cohere.Client(api_key=COHERE_API_KEY) - - # set the prompt for paraphrasing - prompt = f"Rephrase this sentence in a different way: {text}" - - # generate a response using the multilingual-22-12 model - response = client.generate( - model="command-nightly", - prompt=prompt, - max_tokens=1000, - - ) - # get the generated text - rephrased_text = response[0].text - print(rephrased_text) - - # compare the original and rephrased texts using difflib - sm = difflib.SequenceMatcher(None, text, rephrased_text) - html = show_diff_html(sm) - - return html - -if __name__ == "__main__": - with open('sample_text.txt', 'r') as file: - text = file.read() - # summary = summarize(text, summary_length="short", summary_format="bullets") - # print(summary) - # answer = question_answer(text, [["what is photosynthesis", None]]) - # print(answer) - question = question_answer(text, ["Whats photosynthesis"]) - print(question) diff --git a/spaces/phyloforfun/VoucherVision/vouchervision/LLM_Falcon.py b/spaces/phyloforfun/VoucherVision/vouchervision/LLM_Falcon.py deleted file mode 100644 index 2aea8be35ae613d7bcb759a2e7941e084f9c793a..0000000000000000000000000000000000000000 --- a/spaces/phyloforfun/VoucherVision/vouchervision/LLM_Falcon.py +++ /dev/null @@ -1,112 +0,0 @@ -import os, sys, inspect, json, time - -# currentdir = os.path.dirname(os.path.abspath( -# inspect.getfile(inspect.currentframe()))) -# parentdir = os.path.dirname(currentdir) -# sys.path.append(parentdir) - -# from prompts import 
PROMPT_PaLM_UMICH_skeleton_all_asia, PROMPT_PaLM_OCR_Organized, PROMPT_PaLM_Redo -# from LLM_PaLM import create_OCR_analog_for_input, num_tokens_from_string - -''' -https://docs.ai21.com/docs/python-sdk-with-amazon-bedrock - - -https://techcommunity.microsoft.com/t5/ai-machine-learning-blog/falcon-llms-in-azure-machine-learning/ba-p/3876847 -https://github.com/Azure/azureml-examples/blob/main/sdk/python/foundation-models/huggingface/inference/text-generation-streaming/text-generation-streaming-online-endpoint.ipynb -https://ml.azure.com/registries/HuggingFace/models/tiiuae-falcon-40b-instruct/version/12?tid=e66e77b4-5724-44d7-8721-06df160450ce#overview -https://azure.microsoft.com/en-us/products/machine-learning/ -''' - - - -# from azure.ai.ml import MLClient -# from azure.identity import ( -# DefaultAzureCredential, -# InteractiveBrowserCredential, -# ClientSecretCredential, -# ) -# from azure.ai.ml.entities import AmlCompute - -# try: -# credential = DefaultAzureCredential() -# credential.get_token("https://management.azure.com/.default") -# except Exception as ex: -# credential = InteractiveBrowserCredential() - -# # connect to a workspace -# workspace_ml_client = None -# try: -# workspace_ml_client = MLClient.from_config(credential) -# subscription_id = workspace_ml_client.subscription_id -# workspace = workspace_ml_client.workspace_name -# resource_group = workspace_ml_client.resource_group_name -# except Exception as ex: -# print(ex) -# # Enter details of your workspace -# subscription_id = "<SUBSCRIPTION_ID>" -# resource_group = "<RESOURCE_GROUP>" -# workspace = "<AML_WORKSPACE_NAME>" -# workspace_ml_client = MLClient( -# credential, subscription_id, resource_group, workspace -# ) -# # Connect to the HuggingFaceHub registry -# registry_ml_client = MLClient(credential, registry_name="HuggingFace") -# print(registry_ml_client) - -''' -def OCR_to_dict_Falcon(logger, OCR, VVE): - # Find a similar example from the domain knowledge - domain_knowledge_example = VVE.query_db(OCR, 4) - similarity = VVE.get_similarity() - domain_knowledge_example_string = json.dumps(domain_knowledge_example) - - try: - logger.info(f'Length of OCR raw -- {len(OCR)}') - except: - print(f'Length of OCR raw -- {len(OCR)}') - - # Create input: output: for Falcon - # Assuming Falcon requires a similar structure as PaLM - in_list, out_list = create_OCR_analog_for_input(domain_knowledge_example) - - # Construct the prompt for Falcon - # Adjust this based on Falcon's requirements - # prompt = PROMPT_Falcon_skeleton(OCR, in_list, out_list) - prompt = PROMPT_PaLM_UMICH_skeleton_all_asia(OCR, in_list, out_list) # must provide examples to PaLM differently than for chatGPT, at least 2 examples - - - nt = num_tokens_from_string(prompt, "falcon_model_name") # Replace "falcon_model_name" with the appropriate model name for Falcon - try: - logger.info(f'Prompt token length --- {nt}') - except: - print(f'Prompt token length --- {nt}') - - # Assuming Falcon has a similar API structure as PaLM - # Adjust the settings based on Falcon's requirements - Falcon_settings = { - 'model': 'models/falcon_model_name', # Replace with the appropriate model name for Falcon - 'temperature': 0, - 'candidate_count': 1, - 'top_k': 40, - 'top_p': 0.95, - 'max_output_tokens': 8000, - 'stop_sequences': [], - # Add any other required settings for Falcon - } - - # Send the prompt to Falcon for inference - # Adjust the API call based on Falcon's requirements - response = falcon.generate_text(**Falcon_settings, prompt=prompt) - - # Process the response 
from Falcon - if response and response.result: - if isinstance(response.result, (str, bytes)): - response_valid = check_and_redo_JSON(response, Falcon_settings, logger) - else: - response_valid = {} - else: - response_valid = {} - - return response_valid -''' \ No newline at end of file diff --git a/spaces/pjjuplo/runwayml-stable-diffusion-v1-5/app.py b/spaces/pjjuplo/runwayml-stable-diffusion-v1-5/app.py deleted file mode 100644 index a82df332731f067826d3e1ef79fabceffb74d07e..0000000000000000000000000000000000000000 --- a/spaces/pjjuplo/runwayml-stable-diffusion-v1-5/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/runwayml/stable-diffusion-v1-5").launch() \ No newline at end of file diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/subversion.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/subversion.py deleted file mode 100644 index 16d93a67b7b6feed66f2cc432f6250ca3ad34914..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_internal/vcs/subversion.py +++ /dev/null @@ -1,324 +0,0 @@ -import logging -import os -import re -from typing import List, Optional, Tuple - -from pip._internal.utils.misc import ( - HiddenText, - display_path, - is_console_interactive, - is_installable_dir, - split_auth_from_netloc, -) -from pip._internal.utils.subprocess import CommandArgs, make_command -from pip._internal.vcs.versioncontrol import ( - AuthInfo, - RemoteNotFoundError, - RevOptions, - VersionControl, - vcs, -) - -logger = logging.getLogger(__name__) - -_svn_xml_url_re = re.compile('url="([^"]+)"') -_svn_rev_re = re.compile(r'committed-rev="(\d+)"') -_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r"<url>(.*)</url>") - - -class Subversion(VersionControl): - name = "svn" - dirname = ".svn" - repo_name = "checkout" - schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file") - - @classmethod - def should_add_vcs_url_prefix(cls, remote_url: str) -> bool: - return True - - @staticmethod - def get_base_rev_args(rev: str) -> List[str]: - return ["-r", rev] - - @classmethod - def get_revision(cls, location: str) -> str: - """ - Return the maximum revision for all files under a given location - """ - # Note: taken from setuptools.command.egg_info - revision = 0 - - for base, dirs, _ in os.walk(location): - if cls.dirname not in dirs: - dirs[:] = [] - continue # no sense walking uncontrolled subdirs - dirs.remove(cls.dirname) - entries_fn = os.path.join(base, cls.dirname, "entries") - if not os.path.exists(entries_fn): - # FIXME: should we warn? - continue - - dirurl, localrev = cls._get_svn_url_rev(base) - - if base == location: - assert dirurl is not None - base = dirurl + "/" # save the root url - elif not dirurl or not dirurl.startswith(base): - dirs[:] = [] - continue # not part of the same svn tree, skip it - revision = max(revision, localrev) - return str(revision) - - @classmethod - def get_netloc_and_auth( - cls, netloc: str, scheme: str - ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]: - """ - This override allows the auth information to be passed to svn via the - --username and --password options instead of via the URL. - """ - if scheme == "ssh": - # The --username and --password options can't be used for - # svn+ssh URLs, so keep the auth information in the URL. 
- return super().get_netloc_and_auth(netloc, scheme) - - return split_auth_from_netloc(netloc) - - @classmethod - def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]: - # hotfix the URL scheme after removing svn+ from svn+ssh:// re-add it - url, rev, user_pass = super().get_url_rev_and_auth(url) - if url.startswith("ssh://"): - url = "svn+" + url - return url, rev, user_pass - - @staticmethod - def make_rev_args( - username: Optional[str], password: Optional[HiddenText] - ) -> CommandArgs: - extra_args: CommandArgs = [] - if username: - extra_args += ["--username", username] - if password: - extra_args += ["--password", password] - - return extra_args - - @classmethod - def get_remote_url(cls, location: str) -> str: - # In cases where the source is in a subdirectory, we have to look up in - # the location until we find a valid project root. - orig_location = location - while not is_installable_dir(location): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding a Python project. - logger.warning( - "Could not find Python project for directory %s (tried all " - "parent directories)", - orig_location, - ) - raise RemoteNotFoundError - - url, _rev = cls._get_svn_url_rev(location) - if url is None: - raise RemoteNotFoundError - - return url - - @classmethod - def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]: - from pip._internal.exceptions import InstallationError - - entries_path = os.path.join(location, cls.dirname, "entries") - if os.path.exists(entries_path): - with open(entries_path) as f: - data = f.read() - else: # subversion >= 1.7 does not have the 'entries' file - data = "" - - url = None - if data.startswith("8") or data.startswith("9") or data.startswith("10"): - entries = list(map(str.splitlines, data.split("\n\x0c\n"))) - del entries[0][0] # get rid of the '8' - url = entries[0][3] - revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0] - elif data.startswith("<?xml"): - match = _svn_xml_url_re.search(data) - if not match: - raise ValueError(f"Badly formatted data: {data!r}") - url = match.group(1) # get repository URL - revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] - else: - try: - # subversion >= 1.7 - # Note that using get_remote_call_options is not necessary here - # because `svn info` is being run against a local directory. - # We don't need to worry about making sure interactive mode - # is being used to prompt for passwords, because passwords - # are only potentially needed for remote server requests. - xml = cls.run_command( - ["info", "--xml", location], - show_stdout=False, - stdout_only=True, - ) - match = _svn_info_xml_url_re.search(xml) - assert match is not None - url = match.group(1) - revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)] - except InstallationError: - url, revs = None, [] - - if revs: - rev = max(revs) - else: - rev = 0 - - return url, rev - - @classmethod - def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool: - """Always assume the versions don't match""" - return False - - def __init__(self, use_interactive: Optional[bool] = None) -> None: - if use_interactive is None: - use_interactive = is_console_interactive() - self.use_interactive = use_interactive - - # This member is used to cache the fetched version of the current - # ``svn`` client. - # Special value definitions: - # None: Not evaluated yet. 
- # Empty tuple: Could not parse version. - self._vcs_version: Optional[Tuple[int, ...]] = None - - super().__init__() - - def call_vcs_version(self) -> Tuple[int, ...]: - """Query the version of the currently installed Subversion client. - - :return: A tuple containing the parts of the version information or - ``()`` if the version returned from ``svn`` could not be parsed. - :raises: BadCommand: If ``svn`` is not installed. - """ - # Example versions: - # svn, version 1.10.3 (r1842928) - # compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0 - # svn, version 1.7.14 (r1542130) - # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu - # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0) - # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2 - version_prefix = "svn, version " - version = self.run_command(["--version"], show_stdout=False, stdout_only=True) - if not version.startswith(version_prefix): - return () - - version = version[len(version_prefix) :].split()[0] - version_list = version.partition("-")[0].split(".") - try: - parsed_version = tuple(map(int, version_list)) - except ValueError: - return () - - return parsed_version - - def get_vcs_version(self) -> Tuple[int, ...]: - """Return the version of the currently installed Subversion client. - - If the version of the Subversion client has already been queried, - a cached value will be used. - - :return: A tuple containing the parts of the version information or - ``()`` if the version returned from ``svn`` could not be parsed. - :raises: BadCommand: If ``svn`` is not installed. - """ - if self._vcs_version is not None: - # Use cached version, if available. - # If parsing the version failed previously (empty tuple), - # do not attempt to parse it again. - return self._vcs_version - - vcs_version = self.call_vcs_version() - self._vcs_version = vcs_version - return vcs_version - - def get_remote_call_options(self) -> CommandArgs: - """Return options to be used on calls to Subversion that contact the server. - - These options are applicable for the following ``svn`` subcommands used - in this class. - - - checkout - - switch - - update - - :return: A list of command line arguments to pass to ``svn``. - """ - if not self.use_interactive: - # --non-interactive switch is available since Subversion 0.14.4. - # Subversion < 1.8 runs in interactive mode by default. - return ["--non-interactive"] - - svn_version = self.get_vcs_version() - # By default, Subversion >= 1.8 runs in non-interactive mode if - # stdin is not a TTY. Since that is how pip invokes SVN, in - # call_subprocess(), pip must pass --force-interactive to ensure - # the user can be prompted for a password, if required. - # SVN added the --force-interactive option in SVN 1.8. Since - # e.g. RHEL/CentOS 7, which is supported until 2024, ships with - # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip - # can't safely add the option if the SVN version is < 1.8 (or unknown). 
- if svn_version >= (1, 8): - return ["--force-interactive"] - - return [] - - def fetch_new( - self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int - ) -> None: - rev_display = rev_options.to_display() - logger.info( - "Checking out %s%s to %s", - url, - rev_display, - display_path(dest), - ) - if verbosity <= 0: - flag = "--quiet" - else: - flag = "" - cmd_args = make_command( - "checkout", - flag, - self.get_remote_call_options(), - rev_options.to_args(), - url, - dest, - ) - self.run_command(cmd_args) - - def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - cmd_args = make_command( - "switch", - self.get_remote_call_options(), - rev_options.to_args(), - url, - dest, - ) - self.run_command(cmd_args) - - def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None: - cmd_args = make_command( - "update", - self.get_remote_call_options(), - rev_options.to_args(), - dest, - ) - self.run_command(cmd_args) - - -vcs.register(Subversion) diff --git a/spaces/posit/quarto-template/Dockerfile b/spaces/posit/quarto-template/Dockerfile deleted file mode 100644 index 6cc0cb4cf1e48bdd7b78403ae4010cee066bd36d..0000000000000000000000000000000000000000 --- a/spaces/posit/quarto-template/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM ghcr.io/quarto-dev/quarto:1.3.340 AS builder - -COPY src /app -WORKDIR /app -RUN mkdir output -RUN quarto render --output-dir output - -FROM ghcr.io/openfaas/of-watchdog:0.9.6 AS watchdog - -EXPOSE 7860 - -FROM alpine:latest -RUN mkdir /app -COPY --from=builder /app/output /app -COPY --from=watchdog /fwatchdog . -ENV mode="static" -ENV static_path="/app" -ENV port="7860" -HEALTHCHECK --interval=3s CMD [ -e /tmp/.lock ] || exit 1 -CMD ["./fwatchdog"] \ No newline at end of file diff --git a/spaces/prerna9811/Chord/portaudio/clear_gitrevision.sh b/spaces/prerna9811/Chord/portaudio/clear_gitrevision.sh deleted file mode 100644 index b1b087c9c2e14c983a2b74606fe1f68d5421c7fe..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/clear_gitrevision.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -# -# Clear the Git commit SHA in the include file. -# This should be run before checking in code to Git. -# -revision_filename=src/common/pa_gitrevision.h - -# Update the include file with the current GIT revision. -echo "#define PA_GIT_REVISION unknown" > ${revision_filename} - -echo ${revision_filename} now contains -cat ${revision_filename} diff --git a/spaces/prismosoft/wav2lip/README.md b/spaces/prismosoft/wav2lip/README.md deleted file mode 100644 index 2433fbad773370bf11871d2e40e67e678686ac20..0000000000000000000000000000000000000000 --- a/spaces/prismosoft/wav2lip/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Wav2lip_demo_test -emoji: 👀 -colorFrom: gray -colorTo: blue -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio`, `streamlit`, or `static` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. 
- -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/upload/src/index.ts b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/upload/src/index.ts deleted file mode 100644 index 54c8868fbeece47a0a99fd3271fa403f221a66cc..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/_frontend_code/upload/src/index.ts +++ /dev/null @@ -1,2 +0,0 @@ -export { default as Upload } from "./Upload.svelte"; -export { default as ModifyUpload } from "./ModifyUpload.svelte"; diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/IconButton-16e5dbea.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/IconButton-16e5dbea.js deleted file mode 100644 index d96658a2414e92471703b7248b0bebba2550a499..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/IconButton-16e5dbea.js +++ /dev/null @@ -1,2 +0,0 @@ -import"./Button-89057c03.js";const{SvelteComponent:k,append:m,attr:u,bubble:w,create_component:I,destroy_component:z,detach:g,element:r,init:v,insert:h,listen:q,mount_component:B,safe_not_equal:C,set_data:S,space:j,text:A,toggle_class:o,transition_in:D,transition_out:E}=window.__gradio__svelte__internal;function b(a){let e,f;return{c(){e=r("span"),f=A(a[1]),u(e,"class","svelte-17yhekk")},m(t,s){h(t,e,s),m(e,f)},p(t,s){s&2&&S(f,t[1])},d(t){t&&g(e)}}}function F(a){let e,f,t,s,d,_,c,i=a[2]&&b(a);return s=new a[0]({}),{c(){e=r("button"),i&&i.c(),f=j(),t=r("div"),I(s.$$.fragment),u(t,"class","svelte-17yhekk"),o(t,"small",a[4]==="small"),o(t,"large",a[4]==="large"),u(e,"aria-label",a[1]),u(e,"title",a[1]),u(e,"class","svelte-17yhekk"),o(e,"pending",a[3]),o(e,"padded",a[5])},m(n,l){h(n,e,l),i&&i.m(e,null),m(e,f),m(e,t),B(s,t,null),d=!0,_||(c=q(e,"click",a[6]),_=!0)},p(n,[l]){n[2]?i?i.p(n,l):(i=b(n),i.c(),i.m(e,f)):i&&(i.d(1),i=null),(!d||l&16)&&o(t,"small",n[4]==="small"),(!d||l&16)&&o(t,"large",n[4]==="large"),(!d||l&2)&&u(e,"aria-label",n[1]),(!d||l&2)&&u(e,"title",n[1]),(!d||l&8)&&o(e,"pending",n[3]),(!d||l&32)&&o(e,"padded",n[5])},i(n){d||(D(s.$$.fragment,n),d=!0)},o(n){E(s.$$.fragment,n),d=!1},d(n){n&&g(e),i&&i.d(),z(s),_=!1,c()}}}function G(a,e,f){let{Icon:t}=e,{label:s=""}=e,{show_label:d=!1}=e,{pending:_=!1}=e,{size:c="small"}=e,{padded:i=!0}=e;function n(l){w.call(this,a,l)}return a.$$set=l=>{"Icon"in l&&f(0,t=l.Icon),"label"in l&&f(1,s=l.label),"show_label"in l&&f(2,d=l.show_label),"pending"in l&&f(3,_=l.pending),"size"in l&&f(4,c=l.size),"padded"in l&&f(5,i=l.padded)},[t,s,d,_,c,i,n]}class J extends k{constructor(e){super(),v(this,e,G,F,C,{Icon:0,label:1,show_label:2,pending:3,size:4,padded:5})}}export{J as I}; -//# sourceMappingURL=IconButton-16e5dbea.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-683b6e69.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-683b6e69.js deleted file mode 100644 index 81082ef7dda22fabd0b48ffa60b81d0ec4f29c13..0000000000000000000000000000000000000000 --- 
a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-683b6e69.js +++ /dev/null @@ -1,2 +0,0 @@ -import{B as P}from"./Button-89057c03.js";import{B as Q}from"./BlockTitle-49fa584d.js";import{S as R}from"./Index-37584f50.js";import"./index-0526d562.js";import"./svelte/svelte.js";import"./Info-586340e7.js";const{SvelteComponent:V,append:U,assign:W,attr:o,create_component:B,destroy_component:j,detach:S,element:z,get_spread_object:X,get_spread_update:Y,init:Z,insert:I,listen:k,mount_component:N,run_all:y,safe_not_equal:p,set_data:x,set_input_value:A,space:F,text:$,to_number:H,toggle_class:G,transition_in:q,transition_out:C}=window.__gradio__svelte__internal,{afterUpdate:ee,tick:le}=window.__gradio__svelte__internal;function ne(i){let e;return{c(){e=$(i[2])},m(n,a){I(n,e,a)},p(n,a){a&4&&x(e,n[2])},d(n){n&&S(e)}}}function te(i){let e,n,a,s,_,f,m,r,g;const b=[{autoscroll:i[1].autoscroll},{i18n:i[1].i18n},i[13]];let d={};for(let t=0;t<b.length;t+=1)d=W(d,b[t]);return e=new R({props:d}),s=new Q({props:{show_label:i[10],info:i[3],$$slots:{default:[ne]},$$scope:{ctx:i}}}),{c(){B(e.$$.fragment),n=F(),a=z("label"),B(s.$$.fragment),_=F(),f=z("input"),o(f,"aria-label",i[2]),o(f,"type","number"),o(f,"min",i[11]),o(f,"max",i[12]),o(f,"step",i[14]),f.disabled=i[15],o(f,"class","svelte-pjtc3"),o(a,"class","block svelte-pjtc3"),G(a,"container",i[7])},m(t,u){N(e,t,u),I(t,n,u),I(t,a,u),N(s,a,null),U(a,_),U(a,f),A(f,i[0]),m=!0,r||(g=[k(f,"input",i[19]),k(f,"keypress",i[16]),k(f,"blur",i[20]),k(f,"focus",i[21])],r=!0)},p(t,u){const h=u&8194?Y(b,[u&2&&{autoscroll:t[1].autoscroll},u&2&&{i18n:t[1].i18n},u&8192&&X(t[13])]):{};e.$set(h);const c={};u&1024&&(c.show_label=t[10]),u&8&&(c.info=t[3]),u&8388612&&(c.$$scope={dirty:u,ctx:t}),s.$set(c),(!m||u&4)&&o(f,"aria-label",t[2]),(!m||u&2048)&&o(f,"min",t[11]),(!m||u&4096)&&o(f,"max",t[12]),(!m||u&16384)&&o(f,"step",t[14]),(!m||u&32768)&&(f.disabled=t[15]),u&1&&H(f.value)!==t[0]&&A(f,t[0]),(!m||u&128)&&G(a,"container",t[7])},i(t){m||(q(e.$$.fragment,t),q(s.$$.fragment,t),m=!0)},o(t){C(e.$$.fragment,t),C(s.$$.fragment,t),m=!1},d(t){t&&(S(n),S(a)),j(e,t),j(s),r=!1,y(g)}}}function ie(i){let e,n;return e=new P({props:{visible:i[6],elem_id:i[4],elem_classes:i[5],padding:i[7],allow_overflow:!1,scale:i[8],min_width:i[9],$$slots:{default:[te]},$$scope:{ctx:i}}}),{c(){B(e.$$.fragment)},m(a,s){N(e,a,s),n=!0},p(a,[s]){const _={};s&64&&(_.visible=a[6]),s&16&&(_.elem_id=a[4]),s&32&&(_.elem_classes=a[5]),s&128&&(_.padding=a[7]),s&256&&(_.scale=a[8]),s&512&&(_.min_width=a[9]),s&8453263&&(_.$$scope={dirty:s,ctx:a}),e.$set(_)},i(a){n||(q(e.$$.fragment,a),n=!0)},o(a){C(e.$$.fragment,a),n=!1},d(a){j(e,a)}}}function ae(i,e,n){let a,{gradio:s}=e,{label:_=s.i18n("number.number")}=e,{info:f=void 0}=e,{elem_id:m=""}=e,{elem_classes:r=[]}=e,{visible:g=!0}=e,{container:b=!0}=e,{scale:d=null}=e,{min_width:t=void 0}=e,{value:u=0}=e,{show_label:h}=e,{minimum:c=void 0}=e,{maximum:D=void 0}=e,{loading_status:E}=e,{value_is_output:w=!1}=e,{step:T=null}=e,{interactive:v}=e;function J(){!isNaN(u)&&u!==null&&(s.dispatch("change"),w||s.dispatch("input"))}ee(()=>{n(17,w=!1)});async function K(l){await le(),l.key==="Enter"&&(l.preventDefault(),s.dispatch("submit"))}function L(){u=H(this.value),n(0,u)}const M=()=>s.dispatch("blur"),O=()=>s.dispatch("focus");return i.$$set=l=>{"gradio"in l&&n(1,s=l.gradio),"label"in l&&n(2,_=l.label),"info"in l&&n(3,f=l.info),"elem_id"in l&&n(4,m=l.elem_id),"elem_classes"in l&&n(5,r=l.elem_classes),"visible"in 
l&&n(6,g=l.visible),"container"in l&&n(7,b=l.container),"scale"in l&&n(8,d=l.scale),"min_width"in l&&n(9,t=l.min_width),"value"in l&&n(0,u=l.value),"show_label"in l&&n(10,h=l.show_label),"minimum"in l&&n(11,c=l.minimum),"maximum"in l&&n(12,D=l.maximum),"loading_status"in l&&n(13,E=l.loading_status),"value_is_output"in l&&n(17,w=l.value_is_output),"step"in l&&n(14,T=l.step),"interactive"in l&&n(18,v=l.interactive)},i.$$.update=()=>{i.$$.dirty&1&&J(),i.$$.dirty&262144&&n(15,a=!v)},[u,s,_,f,m,r,g,b,d,t,h,c,D,E,T,a,K,w,v,L,M,O]}class ce extends V{constructor(e){super(),Z(this,e,ae,ie,p,{gradio:1,label:2,info:3,elem_id:4,elem_classes:5,visible:6,container:7,scale:8,min_width:9,value:0,show_label:10,minimum:11,maximum:12,loading_status:13,value_is_output:17,step:14,interactive:18})}}export{ce as default}; -//# sourceMappingURL=Index-683b6e69.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-79eb3848.js b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-79eb3848.js deleted file mode 100644 index 150ddd44f03915d92ad8fb3f3edc364c77de14b8..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/cdn/assets/Index-79eb3848.js +++ /dev/null @@ -1,2 +0,0 @@ -const{SvelteComponent:n,init:t,safe_not_equal:l}=window.__gradio__svelte__internal;class o extends n{constructor(e){super(),t(this,e,null,null,l,{})}}export{o as default}; -//# sourceMappingURL=Index-79eb3848.js.map diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-ee671302.css b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-ee671302.css deleted file mode 100644 index af5a805e103c8201c62cc9716cf483ae315d6cbf..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/gradio/templates/frontend/assets/Index-ee671302.css +++ /dev/null @@ -1 +0,0 @@ -input.svelte-8ywr9r{--ring-color:transparent;position:relative;box-shadow:var(--input-shadow);border:1px solid var(--checkbox-border-color);border-radius:var(--radius-xs);background-color:var(--checkbox-background-color);line-height:var(--line-sm);width:18px!important;height:18px!important}input.svelte-8ywr9r:checked,input.svelte-8ywr9r:checked:hover,input.svelte-8ywr9r:checked:focus{border-color:var(--checkbox-border-color-selected);background-image:var(--checkbox-check);background-color:var(--checkbox-background-color-selected)}input.svelte-8ywr9r:hover{border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-8ywr9r:focus{border-color:var(--checkbox-border-color-focus);background-color:var(--checkbox-background-color-focus)}.disabled.svelte-8ywr9r{cursor:not-allowed;border-color:var(--checkbox-border-color-hover);background-color:var(--checkbox-background-color-hover)}input.svelte-8ywr9r:disabled:checked,input.svelte-8ywr9r:disabled:checked:hover,.disabled.svelte-8ywr9r:checked:focus{opacity:.8!important;cursor:not-allowed}.icon.svelte-19ypun1.svelte-19ypun1{display:inline-block;width:18px;height:18px;padding:3px 2px 3px 
3px;margin:0;flex-grow:0;display:inline-flex;justify-content:center;align-items:center;border-radius:2px;cursor:pointer;transition:.1s}.file-icon.svelte-19ypun1.svelte-19ypun1{display:inline-block;height:20px;margin:0;flex-grow:0;display:inline-flex;justify-content:center;align-items:center;transition:.1s}.file-icon.svelte-19ypun1 img.svelte-19ypun1{width:100%;height:100%}.icon.svelte-19ypun1.svelte-19ypun1:hover{background:#eee}.icon.svelte-19ypun1:hover>*{color:var(--block-info-text-color)}.icon.svelte-19ypun1>*{transform:rotate(90deg);transform-origin:40% 50%;transition:.2s;color:var(--color-accent)}.hidden.svelte-19ypun1>*{transform:rotate(0);color:var(--body-text-color-subdued)}ul.svelte-19ypun1.svelte-19ypun1{margin-left:26px;padding-left:0;list-style:none}li.svelte-19ypun1.svelte-19ypun1{padding-left:0;align-items:center;margin:8px 0;font-family:var(--font-mono);font-size:var(--scale-00)}.wrap.svelte-19ypun1.svelte-19ypun1{display:flex;gap:8px;align-items:center}.file-wrap.svelte-qyxej8{height:100%;overflow:auto} diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_mixed.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_mixed.py deleted file mode 100644 index 6571d1928c0de222292cb0360918707049856359..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/matplotlib/backends/backend_mixed.py +++ /dev/null @@ -1,119 +0,0 @@ -import numpy as np - -from matplotlib import cbook -from .backend_agg import RendererAgg -from matplotlib._tight_bbox import process_figure_for_rasterizing - - -class MixedModeRenderer: - """ - A helper class to implement a renderer that switches between - vector and raster drawing. An example may be a PDF writer, where - most things are drawn with PDF vector commands, but some very - complex objects, such as quad meshes, are rasterised and then - output as images. - """ - def __init__(self, figure, width, height, dpi, vector_renderer, - raster_renderer_class=None, - bbox_inches_restore=None): - """ - Parameters - ---------- - figure : `~matplotlib.figure.Figure` - The figure instance. - width : scalar - The width of the canvas in logical units - height : scalar - The height of the canvas in logical units - dpi : float - The dpi of the canvas - vector_renderer : `~matplotlib.backend_bases.RendererBase` - An instance of a subclass of - `~matplotlib.backend_bases.RendererBase` that will be used for the - vector drawing. - raster_renderer_class : `~matplotlib.backend_bases.RendererBase` - The renderer class to use for the raster drawing. If not provided, - this will use the Agg backend (which is currently the only viable - option anyway.) - - """ - if raster_renderer_class is None: - raster_renderer_class = RendererAgg - - self._raster_renderer_class = raster_renderer_class - self._width = width - self._height = height - self.dpi = dpi - - self._vector_renderer = vector_renderer - - self._raster_renderer = None - - # A reference to the figure is needed as we need to change - # the figure dpi before and after the rasterization. Although - # this looks ugly, I couldn't find a better solution. -JJL - self.figure = figure - self._figdpi = figure.dpi - - self._bbox_inches_restore = bbox_inches_restore - - self._renderer = vector_renderer - - def __getattr__(self, attr): - # Proxy everything that hasn't been overridden to the base - # renderer. 
Things that *are* overridden can call methods - # on self._renderer directly, but must not cache/store - # methods (because things like RendererAgg change their - # methods on the fly in order to optimise proxying down - # to the underlying C implementation). - return getattr(self._renderer, attr) - - def start_rasterizing(self): - """ - Enter "raster" mode. All subsequent drawing commands (until - `stop_rasterizing` is called) will be drawn with the raster backend. - """ - # change the dpi of the figure temporarily. - self.figure.dpi = self.dpi - if self._bbox_inches_restore: # when tight bbox is used - r = process_figure_for_rasterizing(self.figure, - self._bbox_inches_restore) - self._bbox_inches_restore = r - - self._raster_renderer = self._raster_renderer_class( - self._width*self.dpi, self._height*self.dpi, self.dpi) - self._renderer = self._raster_renderer - - def stop_rasterizing(self): - """ - Exit "raster" mode. All of the drawing that was done since - the last `start_rasterizing` call will be copied to the - vector backend by calling draw_image. - """ - - self._renderer = self._vector_renderer - - height = self._height * self.dpi - img = np.asarray(self._raster_renderer.buffer_rgba()) - slice_y, slice_x = cbook._get_nonzero_slices(img[..., 3]) - cropped_img = img[slice_y, slice_x] - if cropped_img.size: - gc = self._renderer.new_gc() - # TODO: If the mixedmode resolution differs from the figure's - # dpi, the image must be scaled (dpi->_figdpi). Not all - # backends support this. - self._renderer.draw_image( - gc, - slice_x.start * self._figdpi / self.dpi, - (height - slice_y.stop) * self._figdpi / self.dpi, - cropped_img[::-1]) - self._raster_renderer = None - - # restore the figure dpi. - self.figure.dpi = self._figdpi - - if self._bbox_inches_restore: # when tight bbox is used - r = process_figure_for_rasterizing(self.figure, - self._bbox_inches_restore, - self._figdpi) - self._bbox_inches_restore = r diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.py deleted file mode 100644 index 57634cf07bdb9070dfe218056b9ca00cadad90ea..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/arrays/period/test_astype.py +++ /dev/null @@ -1,67 +0,0 @@ -import numpy as np -import pytest - -from pandas.core.dtypes.dtypes import PeriodDtype - -import pandas as pd -import pandas._testing as tm -from pandas.core.arrays import period_array - - -@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) -def test_astype_int(dtype): - # We choose to ignore the sign and size of integers for - # Period/Datetime/Timedelta astype - arr = period_array(["2000", "2001", None], freq="D") - - if np.dtype(dtype) != np.int64: - with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): - arr.astype(dtype) - return - - result = arr.astype(dtype) - expected = arr._ndarray.view("i8") - tm.assert_numpy_array_equal(result, expected) - - -def test_astype_copies(): - arr = period_array(["2000", "2001", None], freq="D") - result = arr.astype(np.int64, copy=False) - - # Add the `.base`, since we now use `.asi8` which returns a view. - # We could maybe override it in PeriodArray to return ._ndarray directly. 
- assert result.base is arr._ndarray - - result = arr.astype(np.int64, copy=True) - assert result is not arr._ndarray - tm.assert_numpy_array_equal(result, arr._ndarray.view("i8")) - - -def test_astype_categorical(): - arr = period_array(["2000", "2001", "2001", None], freq="D") - result = arr.astype("category") - categories = pd.PeriodIndex(["2000", "2001"], freq="D") - expected = pd.Categorical.from_codes([0, 1, 1, -1], categories=categories) - tm.assert_categorical_equal(result, expected) - - -def test_astype_period(): - arr = period_array(["2000", "2001", None], freq="D") - result = arr.astype(PeriodDtype("M")) - expected = period_array(["2000", "2001", None], freq="M") - tm.assert_period_array_equal(result, expected) - - -@pytest.mark.parametrize("other", ["datetime64[ns]", "timedelta64[ns]"]) -def test_astype_datetime(other): - arr = period_array(["2000", "2001", None], freq="D") - # slice off the [ns] so that the regex matches. - if other == "timedelta64[ns]": - with pytest.raises(TypeError, match=other[:-4]): - arr.astype(other) - - else: - # GH#45038 allow period->dt64 because we allow dt64->period - result = arr.astype(other) - expected = pd.DatetimeIndex(["2000", "2001", pd.NaT])._data - tm.assert_datetime_array_equal(result, expected) diff --git a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/test_melt.py b/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/test_melt.py deleted file mode 100644 index 941478066a7d804c3e45e227db2de78f7f9c0153..0000000000000000000000000000000000000000 --- a/spaces/profayle/TerrapinTalk/myenv/lib/python3.9/site-packages/pandas/tests/reshape/test_melt.py +++ /dev/null @@ -1,1145 +0,0 @@ -import re - -import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - lreshape, - melt, - wide_to_long, -) -import pandas._testing as tm - - -@pytest.fixture -def df(): - res = tm.makeTimeDataFrame()[:10] - res["id1"] = (res["A"] > 0).astype(np.int64) - res["id2"] = (res["B"] > 0).astype(np.int64) - return res - - -@pytest.fixture -def df1(): - res = DataFrame( - [ - [1.067683, -1.110463, 0.20867], - [-1.321405, 0.368915, -1.055342], - [-0.807333, 0.08298, -0.873361], - ] - ) - res.columns = [list("ABC"), list("abc")] - res.columns.names = ["CAP", "low"] - return res - - -@pytest.fixture -def var_name(): - return "var" - - -@pytest.fixture -def value_name(): - return "val" - - -class TestMelt: - def test_top_level_method(self, df): - result = melt(df) - assert result.columns.tolist() == ["variable", "value"] - - def test_method_signatures(self, df, df1, var_name, value_name): - tm.assert_frame_equal(df.melt(), melt(df)) - - tm.assert_frame_equal( - df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]), - melt(df, id_vars=["id1", "id2"], value_vars=["A", "B"]), - ) - - tm.assert_frame_equal( - df.melt(var_name=var_name, value_name=value_name), - melt(df, var_name=var_name, value_name=value_name), - ) - - tm.assert_frame_equal(df1.melt(col_level=0), melt(df1, col_level=0)) - - def test_default_col_names(self, df): - result = df.melt() - assert result.columns.tolist() == ["variable", "value"] - - result1 = df.melt(id_vars=["id1"]) - assert result1.columns.tolist() == ["id1", "variable", "value"] - - result2 = df.melt(id_vars=["id1", "id2"]) - assert result2.columns.tolist() == ["id1", "id2", "variable", "value"] - - def test_value_vars(self, df): - result3 = df.melt(id_vars=["id1", "id2"], value_vars="A") - assert len(result3) == 10 - - result4 = 
df.melt(id_vars=["id1", "id2"], value_vars=["A", "B"]) - expected4 = DataFrame( - { - "id1": df["id1"].tolist() * 2, - "id2": df["id2"].tolist() * 2, - "variable": ["A"] * 10 + ["B"] * 10, - "value": (df["A"].tolist() + df["B"].tolist()), - }, - columns=["id1", "id2", "variable", "value"], - ) - tm.assert_frame_equal(result4, expected4) - - @pytest.mark.parametrize("type_", (tuple, list, np.array)) - def test_value_vars_types(self, type_, df): - # GH 15348 - expected = DataFrame( - { - "id1": df["id1"].tolist() * 2, - "id2": df["id2"].tolist() * 2, - "variable": ["A"] * 10 + ["B"] * 10, - "value": (df["A"].tolist() + df["B"].tolist()), - }, - columns=["id1", "id2", "variable", "value"], - ) - result = df.melt(id_vars=["id1", "id2"], value_vars=type_(("A", "B"))) - tm.assert_frame_equal(result, expected) - - def test_vars_work_with_multiindex(self, df1): - expected = DataFrame( - { - ("A", "a"): df1[("A", "a")], - "CAP": ["B"] * len(df1), - "low": ["b"] * len(df1), - "value": df1[("B", "b")], - }, - columns=[("A", "a"), "CAP", "low", "value"], - ) - - result = df1.melt(id_vars=[("A", "a")], value_vars=[("B", "b")]) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "id_vars, value_vars, col_level, expected", - [ - ( - ["A"], - ["B"], - 0, - DataFrame( - { - "A": {0: 1.067683, 1: -1.321405, 2: -0.807333}, - "CAP": {0: "B", 1: "B", 2: "B"}, - "value": {0: -1.110463, 1: 0.368915, 2: 0.08298}, - } - ), - ), - ( - ["a"], - ["b"], - 1, - DataFrame( - { - "a": {0: 1.067683, 1: -1.321405, 2: -0.807333}, - "low": {0: "b", 1: "b", 2: "b"}, - "value": {0: -1.110463, 1: 0.368915, 2: 0.08298}, - } - ), - ), - ], - ) - def test_single_vars_work_with_multiindex( - self, id_vars, value_vars, col_level, expected, df1 - ): - result = df1.melt(id_vars, value_vars, col_level=col_level) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "id_vars, value_vars", - [ - [("A", "a"), [("B", "b")]], - [[("A", "a")], ("B", "b")], - [("A", "a"), ("B", "b")], - ], - ) - def test_tuple_vars_fail_with_multiindex(self, id_vars, value_vars, df1): - # melt should fail with an informative error message if - # the columns have a MultiIndex and a tuple is passed - # for id_vars or value_vars. 
- msg = r"(id|value)_vars must be a list of tuples when columns are a MultiIndex" - with pytest.raises(ValueError, match=msg): - df1.melt(id_vars=id_vars, value_vars=value_vars) - - def test_custom_var_name(self, df, var_name): - result5 = df.melt(var_name=var_name) - assert result5.columns.tolist() == ["var", "value"] - - result6 = df.melt(id_vars=["id1"], var_name=var_name) - assert result6.columns.tolist() == ["id1", "var", "value"] - - result7 = df.melt(id_vars=["id1", "id2"], var_name=var_name) - assert result7.columns.tolist() == ["id1", "id2", "var", "value"] - - result8 = df.melt(id_vars=["id1", "id2"], value_vars="A", var_name=var_name) - assert result8.columns.tolist() == ["id1", "id2", "var", "value"] - - result9 = df.melt( - id_vars=["id1", "id2"], value_vars=["A", "B"], var_name=var_name - ) - expected9 = DataFrame( - { - "id1": df["id1"].tolist() * 2, - "id2": df["id2"].tolist() * 2, - var_name: ["A"] * 10 + ["B"] * 10, - "value": (df["A"].tolist() + df["B"].tolist()), - }, - columns=["id1", "id2", var_name, "value"], - ) - tm.assert_frame_equal(result9, expected9) - - def test_custom_value_name(self, df, value_name): - result10 = df.melt(value_name=value_name) - assert result10.columns.tolist() == ["variable", "val"] - - result11 = df.melt(id_vars=["id1"], value_name=value_name) - assert result11.columns.tolist() == ["id1", "variable", "val"] - - result12 = df.melt(id_vars=["id1", "id2"], value_name=value_name) - assert result12.columns.tolist() == ["id1", "id2", "variable", "val"] - - result13 = df.melt( - id_vars=["id1", "id2"], value_vars="A", value_name=value_name - ) - assert result13.columns.tolist() == ["id1", "id2", "variable", "val"] - - result14 = df.melt( - id_vars=["id1", "id2"], value_vars=["A", "B"], value_name=value_name - ) - expected14 = DataFrame( - { - "id1": df["id1"].tolist() * 2, - "id2": df["id2"].tolist() * 2, - "variable": ["A"] * 10 + ["B"] * 10, - value_name: (df["A"].tolist() + df["B"].tolist()), - }, - columns=["id1", "id2", "variable", value_name], - ) - tm.assert_frame_equal(result14, expected14) - - def test_custom_var_and_value_name(self, df, value_name, var_name): - result15 = df.melt(var_name=var_name, value_name=value_name) - assert result15.columns.tolist() == ["var", "val"] - - result16 = df.melt(id_vars=["id1"], var_name=var_name, value_name=value_name) - assert result16.columns.tolist() == ["id1", "var", "val"] - - result17 = df.melt( - id_vars=["id1", "id2"], var_name=var_name, value_name=value_name - ) - assert result17.columns.tolist() == ["id1", "id2", "var", "val"] - - result18 = df.melt( - id_vars=["id1", "id2"], - value_vars="A", - var_name=var_name, - value_name=value_name, - ) - assert result18.columns.tolist() == ["id1", "id2", "var", "val"] - - result19 = df.melt( - id_vars=["id1", "id2"], - value_vars=["A", "B"], - var_name=var_name, - value_name=value_name, - ) - expected19 = DataFrame( - { - "id1": df["id1"].tolist() * 2, - "id2": df["id2"].tolist() * 2, - var_name: ["A"] * 10 + ["B"] * 10, - value_name: (df["A"].tolist() + df["B"].tolist()), - }, - columns=["id1", "id2", var_name, value_name], - ) - tm.assert_frame_equal(result19, expected19) - - df20 = df.copy() - df20.columns.name = "foo" - result20 = df20.melt() - assert result20.columns.tolist() == ["foo", "value"] - - @pytest.mark.parametrize("col_level", [0, "CAP"]) - def test_col_level(self, col_level, df1): - res = df1.melt(col_level=col_level) - assert res.columns.tolist() == ["CAP", "value"] - - def test_multiindex(self, df1): - res = df1.melt() - assert 
res.columns.tolist() == ["CAP", "low", "value"] - - @pytest.mark.parametrize( - "col", - [ - pd.Series(pd.date_range("2010", periods=5, tz="US/Pacific")), - pd.Series(["a", "b", "c", "a", "d"], dtype="category"), - pd.Series([0, 1, 0, 0, 0]), - ], - ) - def test_pandas_dtypes(self, col): - # GH 15785 - df = DataFrame( - {"klass": range(5), "col": col, "attr1": [1, 0, 0, 0, 0], "attr2": col} - ) - expected_value = pd.concat([pd.Series([1, 0, 0, 0, 0]), col], ignore_index=True) - result = melt( - df, id_vars=["klass", "col"], var_name="attribute", value_name="value" - ) - expected = DataFrame( - { - 0: list(range(5)) * 2, - 1: pd.concat([col] * 2, ignore_index=True), - 2: ["attr1"] * 5 + ["attr2"] * 5, - 3: expected_value, - } - ) - expected.columns = ["klass", "col", "attribute", "value"] - tm.assert_frame_equal(result, expected) - - def test_preserve_category(self): - # GH 15853 - data = DataFrame({"A": [1, 2], "B": pd.Categorical(["X", "Y"])}) - result = melt(data, ["B"], ["A"]) - expected = DataFrame( - {"B": pd.Categorical(["X", "Y"]), "variable": ["A", "A"], "value": [1, 2]} - ) - - tm.assert_frame_equal(result, expected) - - def test_melt_missing_columns_raises(self): - # GH-23575 - # This test is to ensure that pandas raises an error if melting is - # attempted with column names absent from the dataframe - - # Generate data - df = DataFrame( - np.random.default_rng(2).standard_normal((5, 4)), columns=list("abcd") - ) - - # Try to melt with missing `value_vars` column name - msg = "The following '{Var}' are not present in the DataFrame: {Col}" - with pytest.raises( - KeyError, match=msg.format(Var="value_vars", Col="\\['C'\\]") - ): - df.melt(["a", "b"], ["C", "d"]) - - # Try to melt with missing `id_vars` column name - with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['A'\\]")): - df.melt(["A", "b"], ["c", "d"]) - - # Multiple missing - with pytest.raises( - KeyError, - match=msg.format(Var="id_vars", Col="\\['not_here', 'or_there'\\]"), - ): - df.melt(["a", "b", "not_here", "or_there"], ["c", "d"]) - - # Multiindex melt fails if column is missing from multilevel melt - multi = df.copy() - multi.columns = [list("ABCD"), list("abcd")] - with pytest.raises(KeyError, match=msg.format(Var="id_vars", Col="\\['E'\\]")): - multi.melt([("E", "a")], [("B", "b")]) - # Multiindex fails if column is missing from single level melt - with pytest.raises( - KeyError, match=msg.format(Var="value_vars", Col="\\['F'\\]") - ): - multi.melt(["A"], ["F"], col_level=0) - - def test_melt_mixed_int_str_id_vars(self): - # GH 29718 - df = DataFrame({0: ["foo"], "a": ["bar"], "b": [1], "d": [2]}) - result = melt(df, id_vars=[0, "a"], value_vars=["b", "d"]) - expected = DataFrame( - {0: ["foo"] * 2, "a": ["bar"] * 2, "variable": list("bd"), "value": [1, 2]} - ) - tm.assert_frame_equal(result, expected) - - def test_melt_mixed_int_str_value_vars(self): - # GH 29718 - df = DataFrame({0: ["foo"], "a": ["bar"]}) - result = melt(df, value_vars=[0, "a"]) - expected = DataFrame({"variable": [0, "a"], "value": ["foo", "bar"]}) - tm.assert_frame_equal(result, expected) - - def test_ignore_index(self): - # GH 17440 - df = DataFrame({"foo": [0], "bar": [1]}, index=["first"]) - result = melt(df, ignore_index=False) - expected = DataFrame( - {"variable": ["foo", "bar"], "value": [0, 1]}, index=["first", "first"] - ) - tm.assert_frame_equal(result, expected) - - def test_ignore_multiindex(self): - # GH 17440 - index = pd.MultiIndex.from_tuples( - [("first", "second"), ("first", "third")], names=["baz", 
"foobar"] - ) - df = DataFrame({"foo": [0, 1], "bar": [2, 3]}, index=index) - result = melt(df, ignore_index=False) - - expected_index = pd.MultiIndex.from_tuples( - [("first", "second"), ("first", "third")] * 2, names=["baz", "foobar"] - ) - expected = DataFrame( - {"variable": ["foo"] * 2 + ["bar"] * 2, "value": [0, 1, 2, 3]}, - index=expected_index, - ) - - tm.assert_frame_equal(result, expected) - - def test_ignore_index_name_and_type(self): - # GH 17440 - index = pd.Index(["foo", "bar"], dtype="category", name="baz") - df = DataFrame({"x": [0, 1], "y": [2, 3]}, index=index) - result = melt(df, ignore_index=False) - - expected_index = pd.Index(["foo", "bar"] * 2, dtype="category", name="baz") - expected = DataFrame( - {"variable": ["x", "x", "y", "y"], "value": [0, 1, 2, 3]}, - index=expected_index, - ) - - tm.assert_frame_equal(result, expected) - - def test_melt_with_duplicate_columns(self): - # GH#41951 - df = DataFrame([["id", 2, 3]], columns=["a", "b", "b"]) - result = df.melt(id_vars=["a"], value_vars=["b"]) - expected = DataFrame( - [["id", "b", 2], ["id", "b", 3]], columns=["a", "variable", "value"] - ) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize("dtype", ["Int8", "Int64"]) - def test_melt_ea_dtype(self, dtype): - # GH#41570 - df = DataFrame( - { - "a": pd.Series([1, 2], dtype="Int8"), - "b": pd.Series([3, 4], dtype=dtype), - } - ) - result = df.melt() - expected = DataFrame( - { - "variable": ["a", "a", "b", "b"], - "value": pd.Series([1, 2, 3, 4], dtype=dtype), - } - ) - tm.assert_frame_equal(result, expected) - - def test_melt_ea_columns(self): - # GH 54297 - df = DataFrame( - { - "A": {0: "a", 1: "b", 2: "c"}, - "B": {0: 1, 1: 3, 2: 5}, - "C": {0: 2, 1: 4, 2: 6}, - } - ) - df.columns = df.columns.astype("string[python]") - result = df.melt(id_vars=["A"], value_vars=["B"]) - expected = DataFrame( - { - "A": list("abc"), - "variable": pd.Series(["B"] * 3, dtype="string[python]"), - "value": [1, 3, 5], - } - ) - tm.assert_frame_equal(result, expected) - - -class TestLreshape: - def test_pairs(self): - data = { - "birthdt": [ - "08jan2009", - "20dec2008", - "30dec2008", - "21dec2008", - "11jan2009", - ], - "birthwt": [1766, 3301, 1454, 3139, 4133], - "id": [101, 102, 103, 104, 105], - "sex": ["Male", "Female", "Female", "Female", "Female"], - "visitdt1": [ - "11jan2009", - "22dec2008", - "04jan2009", - "29dec2008", - "20jan2009", - ], - "visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"], - "visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"], - "wt1": [1823, 3338, 1549, 3298, 4306], - "wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0], - "wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0], - } - - df = DataFrame(data) - - spec = { - "visitdt": [f"visitdt{i:d}" for i in range(1, 4)], - "wt": [f"wt{i:d}" for i in range(1, 4)], - } - result = lreshape(df, spec) - - exp_data = { - "birthdt": [ - "08jan2009", - "20dec2008", - "30dec2008", - "21dec2008", - "11jan2009", - "08jan2009", - "30dec2008", - "21dec2008", - "11jan2009", - "08jan2009", - "21dec2008", - "11jan2009", - ], - "birthwt": [ - 1766, - 3301, - 1454, - 3139, - 4133, - 1766, - 1454, - 3139, - 4133, - 1766, - 3139, - 4133, - ], - "id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105], - "sex": [ - "Male", - "Female", - "Female", - "Female", - "Female", - "Male", - "Female", - "Female", - "Female", - "Male", - "Female", - "Female", - ], - "visitdt": [ - "11jan2009", - "22dec2008", - "04jan2009", - "29dec2008", - "20jan2009", - "21jan2009", - "22jan2009", 
- "31dec2008", - "03feb2009", - "05feb2009", - "02jan2009", - "15feb2009", - ], - "wt": [ - 1823.0, - 3338.0, - 1549.0, - 3298.0, - 4306.0, - 2011.0, - 1892.0, - 3338.0, - 4575.0, - 2293.0, - 3377.0, - 4805.0, - ], - } - exp = DataFrame(exp_data, columns=result.columns) - tm.assert_frame_equal(result, exp) - - result = lreshape(df, spec, dropna=False) - exp_data = { - "birthdt": [ - "08jan2009", - "20dec2008", - "30dec2008", - "21dec2008", - "11jan2009", - "08jan2009", - "20dec2008", - "30dec2008", - "21dec2008", - "11jan2009", - "08jan2009", - "20dec2008", - "30dec2008", - "21dec2008", - "11jan2009", - ], - "birthwt": [ - 1766, - 3301, - 1454, - 3139, - 4133, - 1766, - 3301, - 1454, - 3139, - 4133, - 1766, - 3301, - 1454, - 3139, - 4133, - ], - "id": [ - 101, - 102, - 103, - 104, - 105, - 101, - 102, - 103, - 104, - 105, - 101, - 102, - 103, - 104, - 105, - ], - "sex": [ - "Male", - "Female", - "Female", - "Female", - "Female", - "Male", - "Female", - "Female", - "Female", - "Female", - "Male", - "Female", - "Female", - "Female", - "Female", - ], - "visitdt": [ - "11jan2009", - "22dec2008", - "04jan2009", - "29dec2008", - "20jan2009", - "21jan2009", - np.nan, - "22jan2009", - "31dec2008", - "03feb2009", - "05feb2009", - np.nan, - np.nan, - "02jan2009", - "15feb2009", - ], - "wt": [ - 1823.0, - 3338.0, - 1549.0, - 3298.0, - 4306.0, - 2011.0, - np.nan, - 1892.0, - 3338.0, - 4575.0, - 2293.0, - np.nan, - np.nan, - 3377.0, - 4805.0, - ], - } - exp = DataFrame(exp_data, columns=result.columns) - tm.assert_frame_equal(result, exp) - - spec = { - "visitdt": [f"visitdt{i:d}" for i in range(1, 3)], - "wt": [f"wt{i:d}" for i in range(1, 4)], - } - msg = "All column lists must be same length" - with pytest.raises(ValueError, match=msg): - lreshape(df, spec) - - -class TestWideToLong: - def test_simple(self): - x = np.random.default_rng(2).standard_normal(3) - df = DataFrame( - { - "A1970": {0: "a", 1: "b", 2: "c"}, - "A1980": {0: "d", 1: "e", 2: "f"}, - "B1970": {0: 2.5, 1: 1.2, 2: 0.7}, - "B1980": {0: 3.2, 1: 1.3, 2: 0.1}, - "X": dict(zip(range(3), x)), - } - ) - df["id"] = df.index - exp_data = { - "X": x.tolist() + x.tolist(), - "A": ["a", "b", "c", "d", "e", "f"], - "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], - "year": [1970, 1970, 1970, 1980, 1980, 1980], - "id": [0, 1, 2, 0, 1, 2], - } - expected = DataFrame(exp_data) - expected = expected.set_index(["id", "year"])[["X", "A", "B"]] - result = wide_to_long(df, ["A", "B"], i="id", j="year") - tm.assert_frame_equal(result, expected) - - def test_stubs(self): - # GH9204 wide_to_long call should not modify 'stubs' list - df = DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]]) - df.columns = ["id", "inc1", "inc2", "edu1", "edu2"] - stubs = ["inc", "edu"] - - wide_to_long(df, stubs, i="id", j="age") - - assert stubs == ["inc", "edu"] - - def test_separating_character(self): - # GH14779 - - x = np.random.default_rng(2).standard_normal(3) - df = DataFrame( - { - "A.1970": {0: "a", 1: "b", 2: "c"}, - "A.1980": {0: "d", 1: "e", 2: "f"}, - "B.1970": {0: 2.5, 1: 1.2, 2: 0.7}, - "B.1980": {0: 3.2, 1: 1.3, 2: 0.1}, - "X": dict(zip(range(3), x)), - } - ) - df["id"] = df.index - exp_data = { - "X": x.tolist() + x.tolist(), - "A": ["a", "b", "c", "d", "e", "f"], - "B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], - "year": [1970, 1970, 1970, 1980, 1980, 1980], - "id": [0, 1, 2, 0, 1, 2], - } - expected = DataFrame(exp_data) - expected = expected.set_index(["id", "year"])[["X", "A", "B"]] - result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".") - tm.assert_frame_equal(result, 
expected) - - def test_escapable_characters(self): - x = np.random.default_rng(2).standard_normal(3) - df = DataFrame( - { - "A(quarterly)1970": {0: "a", 1: "b", 2: "c"}, - "A(quarterly)1980": {0: "d", 1: "e", 2: "f"}, - "B(quarterly)1970": {0: 2.5, 1: 1.2, 2: 0.7}, - "B(quarterly)1980": {0: 3.2, 1: 1.3, 2: 0.1}, - "X": dict(zip(range(3), x)), - } - ) - df["id"] = df.index - exp_data = { - "X": x.tolist() + x.tolist(), - "A(quarterly)": ["a", "b", "c", "d", "e", "f"], - "B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1], - "year": [1970, 1970, 1970, 1980, 1980, 1980], - "id": [0, 1, 2, 0, 1, 2], - } - expected = DataFrame(exp_data) - expected = expected.set_index(["id", "year"])[ - ["X", "A(quarterly)", "B(quarterly)"] - ] - result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"], i="id", j="year") - tm.assert_frame_equal(result, expected) - - def test_unbalanced(self): - # test that we can have a varying amount of time variables - df = DataFrame( - { - "A2010": [1.0, 2.0], - "A2011": [3.0, 4.0], - "B2010": [5.0, 6.0], - "X": ["X1", "X2"], - } - ) - df["id"] = df.index - exp_data = { - "X": ["X1", "X2", "X1", "X2"], - "A": [1.0, 2.0, 3.0, 4.0], - "B": [5.0, 6.0, np.nan, np.nan], - "id": [0, 1, 0, 1], - "year": [2010, 2010, 2011, 2011], - } - expected = DataFrame(exp_data) - expected = expected.set_index(["id", "year"])[["X", "A", "B"]] - result = wide_to_long(df, ["A", "B"], i="id", j="year") - tm.assert_frame_equal(result, expected) - - def test_character_overlap(self): - # Test we handle overlapping characters in both id_vars and value_vars - df = DataFrame( - { - "A11": ["a11", "a22", "a33"], - "A12": ["a21", "a22", "a23"], - "B11": ["b11", "b12", "b13"], - "B12": ["b21", "b22", "b23"], - "BB11": [1, 2, 3], - "BB12": [4, 5, 6], - "BBBX": [91, 92, 93], - "BBBZ": [91, 92, 93], - } - ) - df["id"] = df.index - expected = DataFrame( - { - "BBBX": [91, 92, 93, 91, 92, 93], - "BBBZ": [91, 92, 93, 91, 92, 93], - "A": ["a11", "a22", "a33", "a21", "a22", "a23"], - "B": ["b11", "b12", "b13", "b21", "b22", "b23"], - "BB": [1, 2, 3, 4, 5, 6], - "id": [0, 1, 2, 0, 1, 2], - "year": [11, 11, 11, 12, 12, 12], - } - ) - expected = expected.set_index(["id", "year"])[["BBBX", "BBBZ", "A", "B", "BB"]] - result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year") - tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) - - def test_invalid_separator(self): - # if an invalid separator is supplied a empty data frame is returned - sep = "nope!" 
- df = DataFrame( - { - "A2010": [1.0, 2.0], - "A2011": [3.0, 4.0], - "B2010": [5.0, 6.0], - "X": ["X1", "X2"], - } - ) - df["id"] = df.index - exp_data = { - "X": "", - "A2010": [], - "A2011": [], - "B2010": [], - "id": [], - "year": [], - "A": [], - "B": [], - } - expected = DataFrame(exp_data).astype({"year": np.int64}) - expected = expected.set_index(["id", "year"])[ - ["X", "A2010", "A2011", "B2010", "A", "B"] - ] - expected.index = expected.index.set_levels([0, 1], level=0) - result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=sep) - tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) - - def test_num_string_disambiguation(self): - # Test that we can disambiguate number value_vars from - # string value_vars - df = DataFrame( - { - "A11": ["a11", "a22", "a33"], - "A12": ["a21", "a22", "a23"], - "B11": ["b11", "b12", "b13"], - "B12": ["b21", "b22", "b23"], - "BB11": [1, 2, 3], - "BB12": [4, 5, 6], - "Arating": [91, 92, 93], - "Arating_old": [91, 92, 93], - } - ) - df["id"] = df.index - expected = DataFrame( - { - "Arating": [91, 92, 93, 91, 92, 93], - "Arating_old": [91, 92, 93, 91, 92, 93], - "A": ["a11", "a22", "a33", "a21", "a22", "a23"], - "B": ["b11", "b12", "b13", "b21", "b22", "b23"], - "BB": [1, 2, 3, 4, 5, 6], - "id": [0, 1, 2, 0, 1, 2], - "year": [11, 11, 11, 12, 12, 12], - } - ) - expected = expected.set_index(["id", "year"])[ - ["Arating", "Arating_old", "A", "B", "BB"] - ] - result = wide_to_long(df, ["A", "B", "BB"], i="id", j="year") - tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) - - def test_invalid_suffixtype(self): - # If all stubs names end with a string, but a numeric suffix is - # assumed, an empty data frame is returned - df = DataFrame( - { - "Aone": [1.0, 2.0], - "Atwo": [3.0, 4.0], - "Bone": [5.0, 6.0], - "X": ["X1", "X2"], - } - ) - df["id"] = df.index - exp_data = { - "X": "", - "Aone": [], - "Atwo": [], - "Bone": [], - "id": [], - "year": [], - "A": [], - "B": [], - } - expected = DataFrame(exp_data).astype({"year": np.int64}) - - expected = expected.set_index(["id", "year"]) - expected.index = expected.index.set_levels([0, 1], level=0) - result = wide_to_long(df, ["A", "B"], i="id", j="year") - tm.assert_frame_equal(result.sort_index(axis=1), expected.sort_index(axis=1)) - - def test_multiple_id_columns(self): - # Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm - df = DataFrame( - { - "famid": [1, 1, 1, 2, 2, 2, 3, 3, 3], - "birth": [1, 2, 3, 1, 2, 3, 1, 2, 3], - "ht1": [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1], - "ht2": [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9], - } - ) - expected = DataFrame( - { - "ht": [ - 2.8, - 3.4, - 2.9, - 3.8, - 2.2, - 2.9, - 2.0, - 3.2, - 1.8, - 2.8, - 1.9, - 2.4, - 2.2, - 3.3, - 2.3, - 3.4, - 2.1, - 2.9, - ], - "famid": [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3], - "birth": [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3], - "age": [1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2], - } - ) - expected = expected.set_index(["famid", "birth", "age"])[["ht"]] - result = wide_to_long(df, "ht", i=["famid", "birth"], j="age") - tm.assert_frame_equal(result, expected) - - def test_non_unique_idvars(self): - # GH16382 - # Raise an error message if non unique id vars (i) are passed - df = DataFrame( - {"A_A1": [1, 2, 3, 4, 5], "B_B1": [1, 2, 3, 4, 5], "x": [1, 1, 1, 1, 1]} - ) - msg = "the id variables need to uniquely identify each row" - with pytest.raises(ValueError, match=msg): - wide_to_long(df, ["A_A", "B_B"], i="x", 
j="colname") - - def test_cast_j_int(self): - df = DataFrame( - { - "actor_1": ["CCH Pounder", "Johnny Depp", "Christoph Waltz"], - "actor_2": ["Joel David Moore", "Orlando Bloom", "Rory Kinnear"], - "actor_fb_likes_1": [1000.0, 40000.0, 11000.0], - "actor_fb_likes_2": [936.0, 5000.0, 393.0], - "title": ["Avatar", "Pirates of the Caribbean", "Spectre"], - } - ) - - expected = DataFrame( - { - "actor": [ - "CCH Pounder", - "Johnny Depp", - "Christoph Waltz", - "Joel David Moore", - "Orlando Bloom", - "Rory Kinnear", - ], - "actor_fb_likes": [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0], - "num": [1, 1, 1, 2, 2, 2], - "title": [ - "Avatar", - "Pirates of the Caribbean", - "Spectre", - "Avatar", - "Pirates of the Caribbean", - "Spectre", - ], - } - ).set_index(["title", "num"]) - result = wide_to_long( - df, ["actor", "actor_fb_likes"], i="title", j="num", sep="_" - ) - - tm.assert_frame_equal(result, expected) - - def test_identical_stubnames(self): - df = DataFrame( - { - "A2010": [1.0, 2.0], - "A2011": [3.0, 4.0], - "B2010": [5.0, 6.0], - "A": ["X1", "X2"], - } - ) - msg = "stubname can't be identical to a column name" - with pytest.raises(ValueError, match=msg): - wide_to_long(df, ["A", "B"], i="A", j="colname") - - def test_nonnumeric_suffix(self): - df = DataFrame( - { - "treatment_placebo": [1.0, 2.0], - "treatment_test": [3.0, 4.0], - "result_placebo": [5.0, 6.0], - "A": ["X1", "X2"], - } - ) - expected = DataFrame( - { - "A": ["X1", "X2", "X1", "X2"], - "colname": ["placebo", "placebo", "test", "test"], - "result": [5.0, 6.0, np.nan, np.nan], - "treatment": [1.0, 2.0, 3.0, 4.0], - } - ) - expected = expected.set_index(["A", "colname"]) - result = wide_to_long( - df, ["result", "treatment"], i="A", j="colname", suffix="[a-z]+", sep="_" - ) - tm.assert_frame_equal(result, expected) - - def test_mixed_type_suffix(self): - df = DataFrame( - { - "A": ["X1", "X2"], - "result_1": [0, 9], - "result_foo": [5.0, 6.0], - "treatment_1": [1.0, 2.0], - "treatment_foo": [3.0, 4.0], - } - ) - expected = DataFrame( - { - "A": ["X1", "X2", "X1", "X2"], - "colname": ["1", "1", "foo", "foo"], - "result": [0.0, 9.0, 5.0, 6.0], - "treatment": [1.0, 2.0, 3.0, 4.0], - } - ).set_index(["A", "colname"]) - result = wide_to_long( - df, ["result", "treatment"], i="A", j="colname", suffix=".+", sep="_" - ) - tm.assert_frame_equal(result, expected) - - def test_float_suffix(self): - df = DataFrame( - { - "treatment_1.1": [1.0, 2.0], - "treatment_2.1": [3.0, 4.0], - "result_1.2": [5.0, 6.0], - "result_1": [0, 9], - "A": ["X1", "X2"], - } - ) - expected = DataFrame( - { - "A": ["X1", "X2", "X1", "X2", "X1", "X2", "X1", "X2"], - "colname": [1.2, 1.2, 1.0, 1.0, 1.1, 1.1, 2.1, 2.1], - "result": [5.0, 6.0, 0.0, 9.0, np.nan, np.nan, np.nan, np.nan], - "treatment": [np.nan, np.nan, np.nan, np.nan, 1.0, 2.0, 3.0, 4.0], - } - ) - expected = expected.set_index(["A", "colname"]) - result = wide_to_long( - df, ["result", "treatment"], i="A", j="colname", suffix="[0-9.]+", sep="_" - ) - tm.assert_frame_equal(result, expected) - - def test_col_substring_of_stubname(self): - # GH22468 - # Don't raise ValueError when a column name is a substring - # of a stubname that's been passed as a string - wide_data = { - "node_id": {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}, - "A": {0: 0.80, 1: 0.0, 2: 0.25, 3: 1.0, 4: 0.81}, - "PA0": {0: 0.74, 1: 0.56, 2: 0.56, 3: 0.98, 4: 0.6}, - "PA1": {0: 0.77, 1: 0.64, 2: 0.52, 3: 0.98, 4: 0.67}, - "PA3": {0: 0.34, 1: 0.70, 2: 0.52, 3: 0.98, 4: 0.67}, - } - wide_df = DataFrame.from_dict(wide_data) - expected = 
wide_to_long(wide_df, stubnames=["PA"], i=["node_id", "A"], j="time") - result = wide_to_long(wide_df, stubnames="PA", i=["node_id", "A"], j="time") - tm.assert_frame_equal(result, expected) - - def test_raise_of_column_name_value(self): - # GH34731, enforced in 2.0 - # raise a ValueError if the resultant value column name matches - # a name in the dataframe already (default name is "value") - df = DataFrame({"col": list("ABC"), "value": range(10, 16, 2)}) - - with pytest.raises( - ValueError, match=re.escape("value_name (value) cannot match") - ): - df.melt(id_vars="value", value_name="value") - - @pytest.mark.parametrize("dtype", ["O", "string"]) - def test_missing_stubname(self, dtype): - # GH46044 - df = DataFrame({"id": ["1", "2"], "a-1": [100, 200], "a-2": [300, 400]}) - df = df.astype({"id": dtype}) - result = wide_to_long( - df, - stubnames=["a", "b"], - i="id", - j="num", - sep="-", - ) - index = pd.Index( - [("1", 1), ("2", 1), ("1", 2), ("2", 2)], - name=("id", "num"), - ) - expected = DataFrame( - {"a": [100, 200, 300, 400], "b": [np.nan] * 4}, - index=index, - ) - new_level = expected.index.levels[0].astype(dtype) - expected.index = expected.index.set_levels(new_level, level=0) - tm.assert_frame_equal(result, expected) diff --git a/spaces/pycui/RealChar/alembic/env.py b/spaces/pycui/RealChar/alembic/env.py deleted file mode 100644 index afec9de93130cbd586d5bd02e73efbbece9b4182..0000000000000000000000000000000000000000 --- a/spaces/pycui/RealChar/alembic/env.py +++ /dev/null @@ -1,92 +0,0 @@ -from realtime_ai_character.models.user import User -from realtime_ai_character.models.interaction import Interaction -from realtime_ai_character.database.base import Base # import the Base model -from sqlalchemy import engine_from_config -from sqlalchemy import pool -from alembic import context -from logging.config import fileConfig -import sys -import os -from dotenv import load_dotenv - -load_dotenv() - -# Add the project root to the system path -root = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -sys.path.append(root) - -# import your models here - -# this is the Alembic Config object, which provides access to the values -# within the .ini file in use. -config = context.config -database_url = os.getenv('DATABASE_URL') if os.getenv( - 'DATABASE_URL') else 'sqlite:///./test.db' -config.set_main_option('sqlalchemy.url', database_url) - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = Base.metadata # use your Base metadata - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline() -> None: - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. 
- - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online() -> None: - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - connectable = engine_from_config( - config.get_section(config.config_ini_section, {}), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/spaces/quidiaMuxgu/Expedit-SAM/AashiquifullmovieREPACK Downloadhd720p.md b/spaces/quidiaMuxgu/Expedit-SAM/AashiquifullmovieREPACK Downloadhd720p.md deleted file mode 100644 index 21b5085bc75b101fd93f2f9cafcea44fdf385b2e..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/AashiquifullmovieREPACK Downloadhd720p.md +++ /dev/null @@ -1,17 +0,0 @@ -<h2>aashiquifullmoviedownloadhd720p</h2><br /><p><b><b>Download Zip</b> > <a href="https://geags.com/2uCsO9">https://geags.com/2uCsO9</a></b></p><br /><br /> -<br /> -Director: Mohit Suri Starring: Aditya Roy Kapoor, Shraddha Kapoor, Shaad Movie title: Aashiqui 2 2013 . I love Indian cinema in general. -But lately they have started to annoy me. -This movie didn't live up to my expectations. -I was hoping to see a normal Indian melodrama, but I saw some kind of fairy tale. -The film starts very badly. -Starring the son of a poor man and the daughter of a rich man. -They love each other. -And everything seems to be fine, but everything ends so quickly. -I am very upset. -But here begins a fairy tale for adults. -Everything is so beautiful, everything is so good. -Love, wedding, honeymoon in Bali. 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Altium Designer 13 .torrent 5.md b/spaces/quidiaMuxgu/Expedit-SAM/Altium Designer 13 .torrent 5.md deleted file mode 100644 index c6d5c8ed39aadb5e78daa8a577d1c1eb3527e7fa..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Altium Designer 13 .torrent 5.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Altium Designer 13 .torrent 5</h2><br /><p><b><b>Download</b> ✸✸✸ <a href="https://geags.com/2uCssZ">https://geags.com/2uCssZ</a></b></p><br /><br /> -<br /> -Altium Designer 2013 13.2.5 (10.1810.2836 8) pic Year / Date of Release: ... Download the Altium Designer 13 2 5 (10 1810. 28368) Torrent or ... 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Arturia DX7 V V1.0.1.1278.md b/spaces/quidiaMuxgu/Expedit-SAM/Arturia DX7 V V1.0.1.1278.md deleted file mode 100644 index d80645d19e931a841f356e129dae197916c2ea92..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Arturia DX7 V V1.0.1.1278.md +++ /dev/null @@ -1,7 +0,0 @@ -<h2>Arturia DX7 V v1.0.1.1278</h2><br /><p><b><b>DOWNLOAD</b> » <a href="https://geags.com/2uCr2O">https://geags.com/2uCr2O</a></b></p><br /><br /> -<br /> -January 26, 2018 - Arturia DX7 V v1.0.1.1278 (bug fixes). The DX7 V accurately models the digital FM synthesizer that has become synonymous with sound... 
The DX7 V includes many new and improved features, including real-time VCF (Virtual Circuit Graphic) support, multi-timbral modulation, three new modeling tones, higher resolution synthesizer and more! -The DX7 V includes many new and improved features, including real-time VCF (Virtual Circuit Graphic) support, multi-timbral modulation, three new Modeling Voices, higher resolution on the synth, and more! 8a78ff9644<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Intel64 Family 6 Model 37 Stepping 5 Driver Download [VERIFIED].md b/spaces/quidiaMuxgu/Expedit-SAM/Intel64 Family 6 Model 37 Stepping 5 Driver Download [VERIFIED].md deleted file mode 100644 index 1cc021173ea5e46199c301d7c5e7f24a4c93ff38..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Intel64 Family 6 Model 37 Stepping 5 Driver Download [VERIFIED].md +++ /dev/null @@ -1,38 +0,0 @@ -<h2>Intel64 Family 6 Model 37 Stepping 5 Driver Download</h2><br /><p><b><b>Download Zip</b> • <a href="https://geags.com/2uCsQa">https://geags.com/2uCsQa</a></b></p><br /><br /> -<br /> -Thanks - -A: - -If you are interested in what is going on, you can use the debuggers. For example, with gdb: - -gdb /usr/lib64/libudev.so.0.26 - -will start your program in a gdb session. - -Information For - -News & Notices - -Special Collections - -A resource center for genealogical research, African-American history, and genealogy education in the Pennsylvania State University Libraries, Special Collections offers access to over 1,000,000 records, including birth, marriage, and death records; military discharge certificates; censuses; and military pension records. Special Collections houses a substantial collection of materials pertaining to African-Americans in the United States and works to provide access to the collections of other universities and archives. - -Special Collections is open Monday through Friday from 8:30 a.m. to 4:30 p.m., and on Saturdays and Sundays from 9 a.m. to 4 p.m. For additional hours or access to materials, call the Reference Desk at 717-214-9106 or send an email to specialcollections@psu.edu. - -Note: Physical copies of some of our holdings can only be checked out through the Reference Desk and all users should obtain a photocopy of their record requests and check them out. Please follow directions for access and logging to the system, which will be emailed to you. - -Mission Statement - -The mission of Special Collections and University Archives is to provide access to unique materials relevant to Penn State's diverse history and culture, and to enhance student, faculty, researcher, and visitor experiences with the collection through research instruction, outreach, and community engagement.1. Field of the Invention - -The present invention relates to an image displaying apparatus and method of controlling the same and, more particularly, to an image displaying apparatus and method of controlling the same for adjusting an input image displayed on a display apparatus to an input position where an image having the same color as the input image can be displayed. - -2. Description of the Related Art - -Recently, a plurality of display devices having different output characteristics are used. That is, in order to satisfy a user's demand for a plurality of display devices having different characteristics, a common image display apparatus displays an input image on the display devices by combining the input image with another image. 
- -A method of displaying the input image by combining the input image with the other image includes a method 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/quidiaMuxgu/Expedit-SAM/Mixvibes Remixlive 1.3.2 Win X86-64bit ((INSTALL)) Keygen.md b/spaces/quidiaMuxgu/Expedit-SAM/Mixvibes Remixlive 1.3.2 Win X86-64bit ((INSTALL)) Keygen.md deleted file mode 100644 index a2fe55001ec26fbfa3cca400cef2ccfe3ef6bc34..0000000000000000000000000000000000000000 --- a/spaces/quidiaMuxgu/Expedit-SAM/Mixvibes Remixlive 1.3.2 Win X86-64bit ((INSTALL)) Keygen.md +++ /dev/null @@ -1,7 +0,0 @@ -<br /> -<p>Mixvibes Remixlive for Windows Free Download-GetintoPC.com Mixvibes Remixlive. Compatibility Architecture: 32 Bit (x86) / 64 Bit (x64). ..yolasite.com/resources/Gta-Vice-City-Pc-UPD-Download-64-Bit-Torrent.pdf..yolasite.com/resources/CRACK-Mixvibes-Remixlive-132-Win-X8664bit-LINK.pdf </p> -<p>x86-64-80-fixed. https://coub.com/stories/3134987-mixvibes-remixlive-1-3-2-win-x86 -64bit-hot-keygen Mixvibes Remixlive 1.3.2 Win X86-64bit Serial Key. https://sourceshop.org/mixvibes-remixlive-1-3-1-win-x86-64bit-serial-key/. Mixvibes Remixlive 1.3.2 Win X86-64bit Serial Key. Mixvibes Remixlive for Windows Free Download-GetintoPC.</p> -<h2>Mixvibes Remixlive 1.3.2 Win x86-64bit keygen</h2><br /><p><b><b>DOWNLOAD</b> ->>->>->> <a href="https://geags.com/2uCrwT">https://geags.com/2uCrwT</a></b></p><br /><br /> -<p>music converter 1.4 crack - Free Download [ Latest + Torrent]. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 10 hyper. Download Mixvibes Remixlive. Download Mixvibes Remixlive free. crack [ Latest + Torrent] Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 7..https://coub.com/stories/2939297-saia-pg5-keygen-marall. - Mixvibes Remixlive 1.3.2 Win x86-64bit keygen uk. Mixvibes Remixlive is a professional editing and mixing application..mixvibes remixlive 1.3.2 win - Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 7. Mixvibes Remixlive. Great program. Thanks for this great program. - Mixvibes Remixlive 1.3.2 Win x86-64bit keygen. Secure Mixvibes Remixlive 1.3.2 Win x86-64bit keygen win 7. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen mac. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 10 hyper. Download Mixvibes Remixlive 1.3.2 Win x86-64bit keygen. Windows 7 download page. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 8. Mixvibes Remixlive.. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 8. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 10. Get Mixvibes Remixlive 1.3.2 Win x86-64bit keygen free download. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 10. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 8. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 7. Download Mixvibes Remixlive 1.3.2 Win x86-64bit keygen. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 8. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 10. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 8. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen. Set your key. Windows 7 download page. Mixvibes Remixlive 1.3.2 Win x86-64bit keygen windows 7. 
</p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/r3gm/RVC_HF/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py b/spaces/r3gm/RVC_HF/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py deleted file mode 100644 index b412ba2814e114ca7bb00b6fd6ef217f63d788a3..0000000000000000000000000000000000000000 --- a/spaces/r3gm/RVC_HF/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +++ /dev/null @@ -1,86 +0,0 @@ -from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor -import pyworld -import numpy as np - - -class HarvestF0Predictor(F0Predictor): - def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100): - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.sampling_rate = sampling_rate - - def interpolate_f0(self, f0): - """ - 对F0进行插值处理 - """ - - data = np.reshape(f0, (f0.size, 1)) - - vuv_vector = np.zeros((data.size, 1), dtype=np.float32) - vuv_vector[data > 0.0] = 1.0 - vuv_vector[data <= 0.0] = 0.0 - - ip_data = data - - frame_number = data.size - last_value = 0.0 - for i in range(frame_number): - if data[i] <= 0.0: - j = i + 1 - for j in range(i + 1, frame_number): - if data[j] > 0.0: - break - if j < frame_number - 1: - if last_value > 0.0: - step = (data[j] - data[i - 1]) / float(j - i) - for k in range(i, j): - ip_data[k] = data[i - 1] + step * (k - i + 1) - else: - for k in range(i, j): - ip_data[k] = data[j] - else: - for k in range(i, frame_number): - ip_data[k] = last_value - else: - ip_data[i] = data[i] # 这里可能存在一个没有必要的拷贝 - last_value = data[i] - - return ip_data[:, 0], vuv_vector[:, 0] - - def resize_f0(self, x, target_len): - source = np.array(x) - source[source < 0.001] = np.nan - target = np.interp( - np.arange(0, len(source) * target_len, len(source)) / target_len, - np.arange(0, len(source)), - source, - ) - res = np.nan_to_num(target) - return res - - def compute_f0(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.hop_length, - f0_ceil=self.f0_max, - f0_floor=self.f0_min, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs) - return self.interpolate_f0(self.resize_f0(f0, p_len))[0] - - def compute_f0_uv(self, wav, p_len=None): - if p_len is None: - p_len = wav.shape[0] // self.hop_length - f0, t = pyworld.harvest( - wav.astype(np.double), - fs=self.sampling_rate, - f0_floor=self.f0_min, - f0_ceil=self.f0_max, - frame_period=1000 * self.hop_length / self.sampling_rate, - ) - f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate) - return self.interpolate_f0(self.resize_f0(f0, p_len)) diff --git a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/senet.py b/spaces/rachana219/MODT2/trackers/strongsort/deep/models/senet.py deleted file mode 100644 index baaf9b0acbe8577bd5e574de47d3f9ef935946db..0000000000000000000000000000000000000000 --- a/spaces/rachana219/MODT2/trackers/strongsort/deep/models/senet.py +++ /dev/null @@ -1,688 +0,0 @@ -from __future__ import division, absolute_import -import math -from collections import OrderedDict -import torch.nn as nn -from torch.utils import model_zoo - -__all__ = [ - 'senet154', 'se_resnet50', 'se_resnet101', 'se_resnet152', - 'se_resnext50_32x4d', 'se_resnext101_32x4d', 'se_resnet50_fc512' -] -""" -Code imported from https://github.com/Cadene/pretrained-models.pytorch -""" - -pretrained_settings = { - 'senet154': { - 
'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.485, 0.456, 0.406], - 'std': [0.229, 0.224, 0.225], - 'num_classes': 1000 - } - }, - 'se_resnet50': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet50-ce0d4300.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.485, 0.456, 0.406], - 'std': [0.229, 0.224, 0.225], - 'num_classes': 1000 - } - }, - 'se_resnet101': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet101-7e38fcc6.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.485, 0.456, 0.406], - 'std': [0.229, 0.224, 0.225], - 'num_classes': 1000 - } - }, - 'se_resnet152': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnet152-d17c99b7.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.485, 0.456, 0.406], - 'std': [0.229, 0.224, 0.225], - 'num_classes': 1000 - } - }, - 'se_resnext50_32x4d': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.485, 0.456, 0.406], - 'std': [0.229, 0.224, 0.225], - 'num_classes': 1000 - } - }, - 'se_resnext101_32x4d': { - 'imagenet': { - 'url': - 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth', - 'input_space': 'RGB', - 'input_size': [3, 224, 224], - 'input_range': [0, 1], - 'mean': [0.485, 0.456, 0.406], - 'std': [0.229, 0.224, 0.225], - 'num_classes': 1000 - } - }, -} - - -class SEModule(nn.Module): - - def __init__(self, channels, reduction): - super(SEModule, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) - self.fc1 = nn.Conv2d( - channels, channels // reduction, kernel_size=1, padding=0 - ) - self.relu = nn.ReLU(inplace=True) - self.fc2 = nn.Conv2d( - channels // reduction, channels, kernel_size=1, padding=0 - ) - self.sigmoid = nn.Sigmoid() - - def forward(self, x): - module_input = x - x = self.avg_pool(x) - x = self.fc1(x) - x = self.relu(x) - x = self.fc2(x) - x = self.sigmoid(x) - return module_input * x - - -class Bottleneck(nn.Module): - """ - Base class for bottlenecks that implements `forward()` method. - """ - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out = self.se_module(out) + residual - out = self.relu(out) - - return out - - -class SEBottleneck(Bottleneck): - """ - Bottleneck for SENet154. 
- """ - expansion = 4 - - def __init__( - self, inplanes, planes, groups, reduction, stride=1, downsample=None - ): - super(SEBottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes * 2) - self.conv2 = nn.Conv2d( - planes * 2, - planes * 4, - kernel_size=3, - stride=stride, - padding=1, - groups=groups, - bias=False - ) - self.bn2 = nn.BatchNorm2d(planes * 4) - self.conv3 = nn.Conv2d( - planes * 4, planes * 4, kernel_size=1, bias=False - ) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNetBottleneck(Bottleneck): - """ - ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe - implementation and uses `stride=stride` in `conv1` and not in `conv2` - (the latter is used in the torchvision implementation of ResNet). - """ - expansion = 4 - - def __init__( - self, inplanes, planes, groups, reduction, stride=1, downsample=None - ): - super(SEResNetBottleneck, self).__init__() - self.conv1 = nn.Conv2d( - inplanes, planes, kernel_size=1, bias=False, stride=stride - ) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d( - planes, - planes, - kernel_size=3, - padding=1, - groups=groups, - bias=False - ) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SEResNeXtBottleneck(Bottleneck): - """ResNeXt bottleneck type C with a Squeeze-and-Excitation module""" - expansion = 4 - - def __init__( - self, - inplanes, - planes, - groups, - reduction, - stride=1, - downsample=None, - base_width=4 - ): - super(SEResNeXtBottleneck, self).__init__() - width = int(math.floor(planes * (base_width/64.)) * groups) - self.conv1 = nn.Conv2d( - inplanes, width, kernel_size=1, bias=False, stride=1 - ) - self.bn1 = nn.BatchNorm2d(width) - self.conv2 = nn.Conv2d( - width, - width, - kernel_size=3, - stride=stride, - padding=1, - groups=groups, - bias=False - ) - self.bn2 = nn.BatchNorm2d(width) - self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * 4) - self.relu = nn.ReLU(inplace=True) - self.se_module = SEModule(planes * 4, reduction=reduction) - self.downsample = downsample - self.stride = stride - - -class SENet(nn.Module): - """Squeeze-and-excitation network. - - Reference: - Hu et al. Squeeze-and-Excitation Networks. CVPR 2018. - - Public keys: - - ``senet154``: SENet154. - - ``se_resnet50``: ResNet50 + SE. - - ``se_resnet101``: ResNet101 + SE. - - ``se_resnet152``: ResNet152 + SE. - - ``se_resnext50_32x4d``: ResNeXt50 (groups=32, width=4) + SE. - - ``se_resnext101_32x4d``: ResNeXt101 (groups=32, width=4) + SE. - - ``se_resnet50_fc512``: (ResNet50 + SE) + FC. - """ - - def __init__( - self, - num_classes, - loss, - block, - layers, - groups, - reduction, - dropout_p=0.2, - inplanes=128, - input_3x3=True, - downsample_kernel_size=3, - downsample_padding=1, - last_stride=2, - fc_dims=None, - **kwargs - ): - """ - Parameters - ---------- - block (nn.Module): Bottleneck class. 
- - For SENet154: SEBottleneck - - For SE-ResNet models: SEResNetBottleneck - - For SE-ResNeXt models: SEResNeXtBottleneck - layers (list of ints): Number of residual blocks for 4 layers of the - network (layer1...layer4). - groups (int): Number of groups for the 3x3 convolution in each - bottleneck block. - - For SENet154: 64 - - For SE-ResNet models: 1 - - For SE-ResNeXt models: 32 - reduction (int): Reduction ratio for Squeeze-and-Excitation modules. - - For all models: 16 - dropout_p (float or None): Drop probability for the Dropout layer. - If `None` the Dropout layer is not used. - - For SENet154: 0.2 - - For SE-ResNet models: None - - For SE-ResNeXt models: None - inplanes (int): Number of input channels for layer1. - - For SENet154: 128 - - For SE-ResNet models: 64 - - For SE-ResNeXt models: 64 - input_3x3 (bool): If `True`, use three 3x3 convolutions instead of - a single 7x7 convolution in layer0. - - For SENet154: True - - For SE-ResNet models: False - - For SE-ResNeXt models: False - downsample_kernel_size (int): Kernel size for downsampling convolutions - in layer2, layer3 and layer4. - - For SENet154: 3 - - For SE-ResNet models: 1 - - For SE-ResNeXt models: 1 - downsample_padding (int): Padding for downsampling convolutions in - layer2, layer3 and layer4. - - For SENet154: 1 - - For SE-ResNet models: 0 - - For SE-ResNeXt models: 0 - num_classes (int): Number of outputs in `classifier` layer. - """ - super(SENet, self).__init__() - self.inplanes = inplanes - self.loss = loss - - if input_3x3: - layer0_modules = [ - ( - 'conv1', - nn.Conv2d(3, 64, 3, stride=2, padding=1, bias=False) - ), - ('bn1', nn.BatchNorm2d(64)), - ('relu1', nn.ReLU(inplace=True)), - ( - 'conv2', - nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False) - ), - ('bn2', nn.BatchNorm2d(64)), - ('relu2', nn.ReLU(inplace=True)), - ( - 'conv3', - nn.Conv2d( - 64, inplanes, 3, stride=1, padding=1, bias=False - ) - ), - ('bn3', nn.BatchNorm2d(inplanes)), - ('relu3', nn.ReLU(inplace=True)), - ] - else: - layer0_modules = [ - ( - 'conv1', - nn.Conv2d( - 3, - inplanes, - kernel_size=7, - stride=2, - padding=3, - bias=False - ) - ), - ('bn1', nn.BatchNorm2d(inplanes)), - ('relu1', nn.ReLU(inplace=True)), - ] - # To preserve compatibility with Caffe weights `ceil_mode=True` - # is used instead of `padding=1`. 
- layer0_modules.append( - ('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)) - ) - self.layer0 = nn.Sequential(OrderedDict(layer0_modules)) - self.layer1 = self._make_layer( - block, - planes=64, - blocks=layers[0], - groups=groups, - reduction=reduction, - downsample_kernel_size=1, - downsample_padding=0 - ) - self.layer2 = self._make_layer( - block, - planes=128, - blocks=layers[1], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.layer3 = self._make_layer( - block, - planes=256, - blocks=layers[2], - stride=2, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - self.layer4 = self._make_layer( - block, - planes=512, - blocks=layers[3], - stride=last_stride, - groups=groups, - reduction=reduction, - downsample_kernel_size=downsample_kernel_size, - downsample_padding=downsample_padding - ) - - self.global_avgpool = nn.AdaptiveAvgPool2d(1) - self.fc = self._construct_fc_layer( - fc_dims, 512 * block.expansion, dropout_p - ) - self.classifier = nn.Linear(self.feature_dim, num_classes) - - def _make_layer( - self, - block, - planes, - blocks, - groups, - reduction, - stride=1, - downsample_kernel_size=1, - downsample_padding=0 - ): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d( - self.inplanes, - planes * block.expansion, - kernel_size=downsample_kernel_size, - stride=stride, - padding=downsample_padding, - bias=False - ), - nn.BatchNorm2d(planes * block.expansion), - ) - - layers = [] - layers.append( - block( - self.inplanes, planes, groups, reduction, stride, downsample - ) - ) - self.inplanes = planes * block.expansion - for i in range(1, blocks): - layers.append(block(self.inplanes, planes, groups, reduction)) - - return nn.Sequential(*layers) - - def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None): - """ - Construct fully connected layer - - - fc_dims (list or tuple): dimensions of fc layers, if None, - no fc layers are constructed - - input_dim (int): input dimension - - dropout_p (float): dropout probability, if None, dropout is unused - """ - if fc_dims is None: - self.feature_dim = input_dim - return None - - assert isinstance( - fc_dims, (list, tuple) - ), 'fc_dims must be either list or tuple, but got {}'.format( - type(fc_dims) - ) - - layers = [] - for dim in fc_dims: - layers.append(nn.Linear(input_dim, dim)) - layers.append(nn.BatchNorm1d(dim)) - layers.append(nn.ReLU(inplace=True)) - if dropout_p is not None: - layers.append(nn.Dropout(p=dropout_p)) - input_dim = dim - - self.feature_dim = fc_dims[-1] - - return nn.Sequential(*layers) - - def featuremaps(self, x): - x = self.layer0(x) - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - return x - - def forward(self, x): - f = self.featuremaps(x) - v = self.global_avgpool(f) - v = v.view(v.size(0), -1) - - if self.fc is not None: - v = self.fc(v) - - if not self.training: - return v - - y = self.classifier(v) - - if self.loss == 'softmax': - return y - elif self.loss == 'triplet': - return y, v - else: - raise KeyError("Unsupported loss: {}".format(self.loss)) - - -def init_pretrained_weights(model, model_url): - """Initializes model with pretrained weights. - - Layers that don't match with pretrained layers in name or size are kept unchanged. 
- """ - pretrain_dict = model_zoo.load_url(model_url) - model_dict = model.state_dict() - pretrain_dict = { - k: v - for k, v in pretrain_dict.items() - if k in model_dict and model_dict[k].size() == v.size() - } - model_dict.update(pretrain_dict) - model.load_state_dict(model_dict) - - -def senet154(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEBottleneck, - layers=[3, 8, 36, 3], - groups=64, - reduction=16, - dropout_p=0.2, - last_stride=2, - fc_dims=None, - **kwargs - ) - if pretrained: - model_url = pretrained_settings['senet154']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model - - -def se_resnet50(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEResNetBottleneck, - layers=[3, 4, 6, 3], - groups=1, - reduction=16, - dropout_p=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - downsample_padding=0, - last_stride=2, - fc_dims=None, - **kwargs - ) - if pretrained: - model_url = pretrained_settings['se_resnet50']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model - - -def se_resnet50_fc512(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEResNetBottleneck, - layers=[3, 4, 6, 3], - groups=1, - reduction=16, - dropout_p=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - downsample_padding=0, - last_stride=1, - fc_dims=[512], - **kwargs - ) - if pretrained: - model_url = pretrained_settings['se_resnet50']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model - - -def se_resnet101(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEResNetBottleneck, - layers=[3, 4, 23, 3], - groups=1, - reduction=16, - dropout_p=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - downsample_padding=0, - last_stride=2, - fc_dims=None, - **kwargs - ) - if pretrained: - model_url = pretrained_settings['se_resnet101']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model - - -def se_resnet152(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEResNetBottleneck, - layers=[3, 8, 36, 3], - groups=1, - reduction=16, - dropout_p=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - downsample_padding=0, - last_stride=2, - fc_dims=None, - **kwargs - ) - if pretrained: - model_url = pretrained_settings['se_resnet152']['imagenet']['url'] - init_pretrained_weights(model, model_url) - return model - - -def se_resnext50_32x4d(num_classes, loss='softmax', pretrained=True, **kwargs): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEResNeXtBottleneck, - layers=[3, 4, 6, 3], - groups=32, - reduction=16, - dropout_p=None, - inplanes=64, - input_3x3=False, - downsample_kernel_size=1, - downsample_padding=0, - last_stride=2, - fc_dims=None, - **kwargs - ) - if pretrained: - model_url = pretrained_settings['se_resnext50_32x4d']['imagenet']['url' - ] - init_pretrained_weights(model, model_url) - return model - - -def se_resnext101_32x4d( - num_classes, loss='softmax', pretrained=True, **kwargs -): - model = SENet( - num_classes=num_classes, - loss=loss, - block=SEResNeXtBottleneck, - layers=[3, 4, 23, 3], - groups=32, - reduction=16, - dropout_p=None, - inplanes=64, - 
input_3x3=False, - downsample_kernel_size=1, - downsample_padding=0, - last_stride=2, - fc_dims=None, - **kwargs - ) - if pretrained: - model_url = pretrained_settings['se_resnext101_32x4d']['imagenet'][ - 'url'] - init_pretrained_weights(model, model_url) - return model diff --git a/spaces/radames/MusicGen-Continuation/MODEL_CARD.md b/spaces/radames/MusicGen-Continuation/MODEL_CARD.md deleted file mode 100644 index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000 --- a/spaces/radames/MusicGen-Continuation/MODEL_CARD.md +++ /dev/null @@ -1,81 +0,0 @@ -# MusicGen Model Card - -## Model details - -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** MusicGen was trained between April 2023 and May 2023. - -**Model version:** This is the version 1 of the model. - -**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation. - -**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv]. - -**Citation details** See [our paper][arxiv] - -**License** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. - -## Intended use -**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including: - -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models. - -**Out-of-scope use cases** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. - -## Metrics - -**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark: - -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) -- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model - -Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes: - -- Overall quality of the music samples; -- Text relevance to the provided text input; -- Adherence to the melody for melody-guided music generation. 
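For orientation, the Frechet Audio Distance reduces to the Frechet distance between two Gaussians fitted to pooled classifier embeddings. The sketch below assumes VGGish embeddings have already been extracted into `(N, D)` NumPy arrays; `frechet_audio_distance` is an illustrative helper, not the evaluation code used for the paper.

```python
import numpy as np
from scipy import linalg

def frechet_audio_distance(real_emb: np.ndarray, gen_emb: np.ndarray) -> float:
    """Frechet distance between Gaussians fitted to two (N, D) embedding sets."""
    mu_r, mu_g = real_emb.mean(axis=0), gen_emb.mean(axis=0)
    cov_r = np.cov(real_emb, rowvar=False)
    cov_g = np.cov(gen_emb, rowvar=False)
    # matrix square root of the covariance product; drop tiny imaginary parts
    covmean, _ = linalg.sqrtm(cov_r @ cov_g, disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    diff = mu_r - mu_g
    return float(diff @ diff + np.trace(cov_r + cov_g - 2.0 * covmean))
```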
- -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set. - -## Training datasets - -The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing. - -## Quantitative analysis - -More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section. - -## Limitations and biases - -**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model. - -**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs). - -**Limitations:** - -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. -- The model does not perform equally well for all music styles and cultures. -- The model sometimes generates end of songs, collapsing to silence. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data. - -**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks. 
- -[arxiv]: https://arxiv.org/abs/2306.05284 diff --git a/spaces/rajistics/call-sentiment-demo2/utils.py b/spaces/rajistics/call-sentiment-demo2/utils.py deleted file mode 100644 index c706f4fe521ac96d00658ebdd9f206a6c1418a8d..0000000000000000000000000000000000000000 --- a/spaces/rajistics/call-sentiment-demo2/utils.py +++ /dev/null @@ -1,79 +0,0 @@ -import re -import functools -import requests -import pandas as pd -import plotly.express as px -import torch -import gradio as gr -from transformers import pipeline, Wav2Vec2ProcessorWithLM -from pyannote.audio import Pipeline -from librosa import load, resample -import whisperx - -import re -alphabets= "([A-Za-z])" -prefixes = "(Mr|St|Mrs|Ms|Dr)[.]" -suffixes = "(Inc|Ltd|Jr|Sr|Co)" -starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)" -acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)" -websites = "[.](com|net|org|io|gov)" - -def split(text): - text = " " + text + " " - text = text.replace("\n"," ") - text = re.sub(prefixes,"\\1<prd>",text) - text = re.sub(websites,"<prd>\\1",text) - if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>") - text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text) - text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text) - text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text) - text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text) - text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text) - text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text) - text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text) - if "”" in text: text = text.replace(".”","”.") - if "\"" in text: text = text.replace(".\"","\".") - if "!" in text: text = text.replace("!\"","\"!") - if "?" 
in text: text = text.replace("?\"","\"?") - text = text.replace(".",".<stop>") - text = text.replace("?","?<stop>") - text = text.replace("!","!<stop>") - text = text.replace("<prd>",".") - sentences = text.split("<stop>") - sentences = sentences[:-1] - sentences = [s.strip() for s in sentences] - return sentences - - -def speech_to_text(speech_file, speaker_segmentation, whisper, alignment_model, metadata, whisper_device): - speaker_output = speaker_segmentation(speech_file) - result = whisper.transcribe(speech_file) - - chunks = whisperx.align(result["segments"], alignment_model, metadata, speech_file, whisper_device)["word_segments"] - - diarized_output = [] - i = 0 - speaker_counter = 0 - - # New iteration every time the speaker changes - for turn, _, _ in speaker_output.itertracks(yield_label=True): - - speaker = "Customer" if speaker_counter % 2 == 0 else "Support" - diarized = "" - while i < len(chunks) and chunks[i]["end"] <= turn.end: - diarized += chunks[i]["text"] + " " - i += 1 - - if diarized != "": - # diarized = rpunct.punctuate(re.sub(eng_pattern, "", diarized), lang="en") - - diarized_output.extend( - [ - (diarized, speaker), - ("from {:.2f}-{:.2f}".format(turn.start, turn.end), None), - ] - ) - - speaker_counter += 1 - - return diarized_output \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Account Hacker V3.9.9 Full Version 19.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Account Hacker V3.9.9 Full Version 19.md deleted file mode 100644 index f5a7221df02f4c97d98b8aa3b339f19137889789..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Account Hacker V3.9.9 Full Version 19.md +++ /dev/null @@ -1,45 +0,0 @@ - -**Title:** - -How to Use Account Hacker v3.9.9 Full Version 19 to Hack Any Account - -**Article:** - -```html -<p>Have you ever wondered how to hack someone's email, social media, or online banking account? If so, you might be interested in Account Hacker v3.9.9 Full Version 19, a software that claims to be able to hack any account in minutes. But is it really as easy and effective as it sounds? In this article, we will review Account Hacker v3.9.9 Full Version 19 and show you how to use it safely and legally.</p> -<h2>account hacker v3.9.9 full version 19</h2><br /><p><b><b>Download</b> · <a href="https://urlgoal.com/2uCJlP">https://urlgoal.com/2uCJlP</a></b></p><br /><br /> - -<h2>What is Account Hacker v3.9.9 Full Version 19?</h2> -<p>Account Hacker v3.9.9 Full Version 19 is a software that allegedly allows you to hack any account password by using a combination of brute force, phishing, and keylogging techniques. 
It claims to be compatible with Windows, Android, and iOS devices, and to support a wide range of account types, such as Facebook, Instagram, Gmail, Yahoo, Twitter, Skype, PayPal, and more.</p> - -<h2>How does Account Hacker v3.9.9 Full Version 19 work?</h2> -<p>According to the official website of Account Hacker v3.9.9 Full Version 19, the software works by following these steps:</p> -<ol> -<li>You download and install the software on your device.</li> -<li>You enter the email address or username of the account you want to hack.</li> -<li>You choose the account type from a drop-down menu.</li> -<li>You click on the "Find Password" button and wait for the software to scan the web for possible passwords.</li> -<li>You receive the password of the account in a few minutes.</li> -</ol> - -<h2>Is Account Hacker v3.9.9 Full Version 19 safe and legal?</h2> -<p>The short answer is no. Account Hacker v3.9.9 Full Version 19 is neither safe nor legal to use for several reasons:</p> -<ul> -<li>It is a scam. There is no evidence that Account Hacker v3.9.9 Full Version 19 can actually hack any account password. The software is likely a malware that infects your device with viruses, spyware, or ransomware.</li> -<li>It is illegal. Hacking someone else's account without their permission is a crime that can result in fines or imprisonment. You are also violating the terms of service of the account providers and risking your own account being suspended or deleted.</li> -<li>It is unethical. Hacking someone else's account is a violation of their privacy and security. You are exposing their personal information, messages, photos, and financial data to potential misuse or harm.</li> -</ul> - -<h2>What are some alternatives to Account Hacker v3.9.9 Full Version 19?</h2> -<p>If you want to hack someone's account for legitimate reasons, such as recovering your own forgotten password or monitoring your child's online activity, there are some alternatives to Account Hacker v3.9.9 Full Version 19 that are safer and more legal:</p> -<p></p> -<ul> -<li>Use the official password recovery options of the account providers. Most account providers offer ways to reset your password if you forget it or lose access to it. You may need to provide your email address, phone number, security questions, or verification code.</li> -<li>Use a reputable parental control software or app. If you want to monitor your child's online activity for their safety and well-being, you can use a parental control software or app that allows you to track their location, calls, messages, browsing history, and app usage. You should inform your child about this and obtain their consent before installing it on their device.</li> -<li>Use a professional ethical hacking service or tool. If you have a valid reason to hack someone's account, such as conducting a security audit or a penetration test, you can use a professional ethical hacking service or tool that follows the industry standards and best practices. 
You should also obtain the permission of the account owner before attempting any hacking activity.</li> -</ul> - -<h2>Conclusion</h2> -<p>Account Hacker v3.9.9</p> d5da3c52bf<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/renatotn7/teste2/gfpgan/archs/arcface_arch.py b/spaces/renatotn7/teste2/gfpgan/archs/arcface_arch.py deleted file mode 100644 index e6d3bd97f83334450bd78ad2c3b9871102a56b70..0000000000000000000000000000000000000000 --- a/spaces/renatotn7/teste2/gfpgan/archs/arcface_arch.py +++ /dev/null @@ -1,245 +0,0 @@ -import torch.nn as nn -from basicsr.utils.registry import ARCH_REGISTRY - - -def conv3x3(inplanes, outplanes, stride=1): - """A simple wrapper for 3x3 convolution with padding. - - Args: - inplanes (int): Channel number of inputs. - outplanes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - """ - return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False) - - -class BasicBlock(nn.Module): - """Basic residual block used in the ResNetArcFace architecture. - - Args: - inplanes (int): Channel number of inputs. - planes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - downsample (nn.Module): The downsample module. Default: None. - """ - expansion = 1 # output channel expansion ratio - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(BasicBlock, self).__init__() - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = nn.BatchNorm2d(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class IRBlock(nn.Module): - """Improved residual block (IR Block) used in the ResNetArcFace architecture. - - Args: - inplanes (int): Channel number of inputs. - planes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. - downsample (nn.Module): The downsample module. Default: None. - use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True. - """ - expansion = 1 # output channel expansion ratio - - def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True): - super(IRBlock, self).__init__() - self.bn0 = nn.BatchNorm2d(inplanes) - self.conv1 = conv3x3(inplanes, inplanes) - self.bn1 = nn.BatchNorm2d(inplanes) - self.prelu = nn.PReLU() - self.conv2 = conv3x3(inplanes, planes, stride) - self.bn2 = nn.BatchNorm2d(planes) - self.downsample = downsample - self.stride = stride - self.use_se = use_se - if self.use_se: - self.se = SEBlock(planes) - - def forward(self, x): - residual = x - out = self.bn0(x) - out = self.conv1(out) - out = self.bn1(out) - out = self.prelu(out) - - out = self.conv2(out) - out = self.bn2(out) - if self.use_se: - out = self.se(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.prelu(out) - - return out - - -class Bottleneck(nn.Module): - """Bottleneck block used in the ResNetArcFace architecture. - - Args: - inplanes (int): Channel number of inputs. - planes (int): Channel number of outputs. - stride (int): Stride in convolution. Default: 1. 
- downsample (nn.Module): The downsample module. Default: None. - """ - expansion = 4 # output channel expansion ratio - - def __init__(self, inplanes, planes, stride=1, downsample=None): - super(Bottleneck, self).__init__() - self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) - self.bn1 = nn.BatchNorm2d(planes) - self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) - self.bn2 = nn.BatchNorm2d(planes) - self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False) - self.bn3 = nn.BatchNorm2d(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) - out = self.bn3(out) - - if self.downsample is not None: - residual = self.downsample(x) - - out += residual - out = self.relu(out) - - return out - - -class SEBlock(nn.Module): - """The squeeze-and-excitation block (SEBlock) used in the IRBlock. - - Args: - channel (int): Channel number of inputs. - reduction (int): Channel reduction ration. Default: 16. - """ - - def __init__(self, channel, reduction=16): - super(SEBlock, self).__init__() - self.avg_pool = nn.AdaptiveAvgPool2d(1) # pool to 1x1 without spatial information - self.fc = nn.Sequential( - nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel), - nn.Sigmoid()) - - def forward(self, x): - b, c, _, _ = x.size() - y = self.avg_pool(x).view(b, c) - y = self.fc(y).view(b, c, 1, 1) - return x * y - - -@ARCH_REGISTRY.register() -class ResNetArcFace(nn.Module): - """ArcFace with ResNet architectures. - - Ref: ArcFace: Additive Angular Margin Loss for Deep Face Recognition. - - Args: - block (str): Block used in the ArcFace architecture. - layers (tuple(int)): Block numbers in each layer. - use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True. 
- """ - - def __init__(self, block, layers, use_se=True): - if block == 'IRBlock': - block = IRBlock - self.inplanes = 64 - self.use_se = use_se - super(ResNetArcFace, self).__init__() - - self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False) - self.bn1 = nn.BatchNorm2d(64) - self.prelu = nn.PReLU() - self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2) - self.layer1 = self._make_layer(block, 64, layers[0]) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - self.bn4 = nn.BatchNorm2d(512) - self.dropout = nn.Dropout() - self.fc5 = nn.Linear(512 * 8 * 8, 512) - self.bn5 = nn.BatchNorm1d(512) - - # initialization - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.xavier_normal_(m.weight) - elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.Linear): - nn.init.xavier_normal_(m.weight) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, planes, num_blocks, stride=1): - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), - nn.BatchNorm2d(planes * block.expansion), - ) - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se)) - self.inplanes = planes - for _ in range(1, num_blocks): - layers.append(block(self.inplanes, planes, use_se=self.use_se)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.prelu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - x = self.bn4(x) - x = self.dropout(x) - x = x.view(x.size(0), -1) - x = self.fc5(x) - x = self.bn5(x) - - return x diff --git a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/models/liteflownet/liteflownet_pre_M2S2R2.py b/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/models/liteflownet/liteflownet_pre_M2S2R2.py deleted file mode 100644 index c38903aae732a5f99a5c019fde2e62d8a012b9fd..0000000000000000000000000000000000000000 --- a/spaces/richardzhangy26/yandian_flow_classification/configs/_base_/models/liteflownet/liteflownet_pre_M2S2R2.py +++ /dev/null @@ -1,73 +0,0 @@ -model = dict( - type='LiteFlowNet', - encoder=dict( - type='NetC', - in_channels=3, - pyramid_levels=[ - 'level1', 'level2', 'level3', 'level4', 'level5', 'level6' - ], - out_channels=(32, 32, 64, 96, 128, 192), - strides=(1, 2, 2, 2, 2, 2), - num_convs=(1, 3, 2, 2, 1, 1), - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - init_cfg=None), - decoder=dict( - type='NetE', - in_channels=dict( - level2=32, level3=64, level4=96, level5=128, level6=192), - corr_channels=dict( - level2=49, level3=49, level4=49, level5=49, level6=49), - sin_channels=dict( - level2=130, level3=130, level4=194, level5=258, level6=386), - rin_channels=dict( - level2=131, level3=131, level4=131, level5=131, level6=195), - feat_channels=64, - mfeat_channels=(128, 64, 32), - sfeat_channels=(128, 64, 32), - rfeat_channels=(128, 128, 64, 64, 32, 32), - patch_size=dict(level2=7, level3=5, level4=5, level5=3, level6=3), - corr_cfg=dict( - level2=dict( - type='Correlation', - max_displacement=3, - stride=2, - dilation_patch=2), - 
level3=dict( - type='Correlation', - max_displacement=3, - stride=2, - dilation_patch=2), - level4=dict(type='Correlation', max_displacement=3), - level5=dict(type='Correlation', max_displacement=3), - level6=dict(type='Correlation', max_displacement=3)), - warp_cfg=dict(type='Warp', align_corners=True, use_mask=True), - flow_div=20., - conv_cfg=None, - norm_cfg=None, - act_cfg=dict(type='LeakyReLU', negative_slope=0.1), - scaled_corr=False, - regularized_flow=True, - extra_training_loss=False, - flow_loss=dict( - type='MultiLevelEPE', - weights=dict( - level6=0.32, - level5=0.08, - level4=0.02, - level3=0.01, - level2=0.005), - p=2, - reduction='sum'), - init_cfg=None), - init_cfg=dict( - type='Kaiming', - nonlinearity='leaky_relu', - layer=['Conv2d', 'ConvTranspose2d'], - mode='fan_in', - bias=0), - # model training and testing settings - train_cfg=dict(), - test_cfg=dict(), -) diff --git a/spaces/rlancemartin/auto-evaluator/text_utils.py b/spaces/rlancemartin/auto-evaluator/text_utils.py deleted file mode 100644 index c507cda6e5dabb5eb0ee4c186717dfec5bd3602f..0000000000000000000000000000000000000000 --- a/spaces/rlancemartin/auto-evaluator/text_utils.py +++ /dev/null @@ -1,120 +0,0 @@ -import re -from langchain.prompts import PromptTemplate - - -def clean_pdf_text(text: str) -> str: - """Cleans text extracted from a PDF file.""" - # TODO: Remove References/Bibliography section. - return remove_citations(text) - - -def remove_citations(text: str) -> str: - """Removes in-text citations from a string.""" - # (Author, Year) - text = re.sub(r'\([A-Za-z0-9,.\s]+\s\d{4}\)', '', text) - # [1], [2], [3-5], [3, 33, 49, 51] - text = re.sub(r'\[[0-9,-]+(,\s[0-9,-]+)*\]', '', text) - return text - - -template = """You are a teacher grading a quiz. -You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT. - -Example Format: -QUESTION: question here -STUDENT ANSWER: student's answer here -TRUE ANSWER: true answer here -GRADE: CORRECT or INCORRECT here - -Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! - -QUESTION: {query} -STUDENT ANSWER: {result} -TRUE ANSWER: {answer} -GRADE: - -And explain why the STUDENT ANSWER is correct or incorrect. -""" - -GRADE_ANSWER_PROMPT = PromptTemplate(input_variables=["query", "result", "answer"], template=template) - -template = """You are a teacher grading a quiz. -You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT. -You are also asked to identify potential sources of bias in the question and in the true answer. - -Example Format: -QUESTION: question here -STUDENT ANSWER: student's answer here -TRUE ANSWER: true answer here -GRADE: CORRECT or INCORRECT here - -Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! 
- -QUESTION: {query} -STUDENT ANSWER: {result} -TRUE ANSWER: {answer} -GRADE: - -And explain why the STUDENT ANSWER is correct or incorrect, identify potential sources of bias in the QUESTION, and identify potential sources of bias in the TRUE ANSWER. -""" - -GRADE_ANSWER_PROMPT_BIAS_CHECK = PromptTemplate(input_variables=["query", "result", "answer"], template=template) - -template = """You are assessing a submitted student answer to a question relative to the true answer based on the provided criteria: - - *** - QUESTION: {query} - *** - STUDENT ANSWER: {result} - *** - TRUE ANSWER: {answer} - *** - Criteria: - relevance: Is the submission referring to a real quote from the text?" - conciseness: Is the answer concise and to the point?" - correct: Is the answer correct?" - *** - Does the submission meet the criterion? First, write out in a step by step manner your reasoning about the criterion to be sure that your conclusion is correct. Avoid simply stating the correct answers at the outset. Then print the "CORRECT" or "INCORRECT" (without quotes or punctuation) on its own line corresponding to the correct answer. - Reasoning: -""" - -GRADE_ANSWER_PROMPT_OPENAI = PromptTemplate(input_variables=["query", "result", "answer"], template=template) - -template = """You are a teacher grading a quiz. -You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT. - -Example Format: -QUESTION: question here -STUDENT ANSWER: student's answer here -TRUE ANSWER: true answer here -GRADE: CORRECT or INCORRECT here - -Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements. Begin! - -QUESTION: {query} -STUDENT ANSWER: {result} -TRUE ANSWER: {answer} -GRADE:""" - -GRADE_ANSWER_PROMPT_FAST = PromptTemplate(input_variables=["query", "result", "answer"], template=template) - -template = """ - Given the question: \n - {query} - Decide if the following retrieved context is relevant: \n - {result} - Answer in the following format: \n - "Context is relevant: True or False." \n - And explain why it supports or does not support the correct answer: {answer}""" - -GRADE_DOCS_PROMPT = PromptTemplate(input_variables=["query", "result", "answer"], template=template) - -template = """ - Given the question: \n - {query} - Decide if the following retrieved context is relevant to the {answer}: \n - {result} - Answer in the following format: \n - "Context is relevant: True or False." 
\n """ - -GRADE_DOCS_PROMPT_FAST = PromptTemplate(input_variables=["query", "result", "answer"], template=template) diff --git a/spaces/rorallitri/biomedical-language-models/logs/IObit Driver Booster Pro 7.3.0.665 Crack 2020 With Licence Key.md b/spaces/rorallitri/biomedical-language-models/logs/IObit Driver Booster Pro 7.3.0.665 Crack 2020 With Licence Key.md deleted file mode 100644 index 5ab750564f42922b06f04edbdcb46f9bb7c43157..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/IObit Driver Booster Pro 7.3.0.665 Crack 2020 With Licence Key.md +++ /dev/null @@ -1,44 +0,0 @@ -<h2>IObit Driver Booster Pro 7.3.0.665 Crack 2020 With Licence Key</h2><br /><p><b><b>DOWNLOAD</b> ►►►►► <a href="https://tinurll.com/2uzmlw">https://tinurll.com/2uzmlw</a></b></p><br /><br /> -<br /> -XPS-4697SEN-5BFRG7-XFRN3-2QQ82 - -Quick Specs: - -CPU : Intel(R) Core(TM) i5-7300HQ CPU @ 2.50GHz - -RAM : 8GB DDR4 SDRAM - -Graphics : NVIDIA GeForce GTX 1050 2GB - -OS: Windows 10 Home 64-bit - -1. Webcam - -There is a basic camera, which is placed on the left side of the laptop. It is a front-facing webcam with a resolution of 1280 x 720. You can use it for facial recognition or chat with the person in front of the computer. - -2. Audio - -The desktop laptop comes with a standard speaker. There is no headphones jacks on the device, but there is an adapter (Bluetooth, M-Audio Digital and Micro) for installing the headphones. - -3. Screen - -The basic screen of the Asus ROG Strix GL551JM is a 15.6-inch. It is a full HD IPS display with a resolution of 1920 x 1080 pixels. As you can see, the notebook is a full HD display, but the colors are a bit washed out. The brightness is also very low. - -4. Connectivity - -The desktop laptop comes with a standard set of connections for Wi-Fi 802.11b/g/n, Bluetooth 4.2, wired LAN, Ethernet and an SD card reader. In addition, the laptop is a multi-format SD card reader. - -5. Battery - -The battery is installed at the back of the device. It has a capacity of 74WHr and supports fast charging technology. You can power the notebook for 3.5 hours while it is fully charged. - -6. Dimensions and design - -Asus ROG Strix GL551JM has a dark design. It is a stylish and attractive desktop. The laptop has a thin appearance, and it weighs around 1.4kg. - -7. Asus ROG Strix GL551JM performance - -We test a desktop laptop in various areas. Asus ROG Strix GL551JM has a good performance, except for the CPU, which is a bit weak. The unit offers a great battery life, and it can handle any tasks at full load. However, the device is a bit weak when it comes to multitasking and high-end graphics. The CPU also can’t handle the 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/runa91/bite_gradio/src/stacked_hourglass/datasets/stanext24_withgc.py b/spaces/runa91/bite_gradio/src/stacked_hourglass/datasets/stanext24_withgc.py deleted file mode 100644 index 0a9118c1ff272e0044d8cc7bb94d21affc6ad882..0000000000000000000000000000000000000000 --- a/spaces/runa91/bite_gradio/src/stacked_hourglass/datasets/stanext24_withgc.py +++ /dev/null @@ -1,561 +0,0 @@ -# 24 joints instead of 20!! 
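# StanExt (StanfordExtra) data loader for the 24-keypoint layout, extended with
# per-vertex ground-contact (gc) annotations loaded from precomputed pickle files.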
- - -import gzip -import json -import os -import random -import math -import numpy as np -import torch -import torch.utils.data as data -from importlib_resources import open_binary -from scipy.io import loadmat -from tabulate import tabulate -import itertools -import json -from scipy import ndimage -import csv -import pickle as pkl - -from csv import DictReader -from pycocotools.mask import decode as decode_RLE - -import sys -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) -from configs.data_info import COMPLETE_DATA_INFO_24 -from stacked_hourglass.utils.imutils import load_image, draw_labelmap, draw_multiple_labelmaps -from stacked_hourglass.utils.misc import to_torch -from stacked_hourglass.utils.transforms import shufflelr, crop, color_normalize, fliplr, transform -import stacked_hourglass.datasets.utils_stanext as utils_stanext -from stacked_hourglass.utils.visualization import save_input_image_with_keypoints -from configs.dog_breeds.dog_breed_class import COMPLETE_ABBREV_DICT, COMPLETE_SUMMARY_BREEDS, SIM_MATRIX_RAW, SIM_ABBREV_INDICES -from configs.dataset_path_configs import STANEXT_RELATED_DATA_ROOT_DIR -from smal_pytorch.smal_model.smal_basics import get_symmetry_indices - - -def read_csv(csv_file): - with open(csv_file,'r') as f: - reader = csv.reader(f) - headers = next(reader) - row_list = [{h:x for (h,x) in zip(headers,row)} for row in reader] - return row_list - -class StanExtGC(data.Dataset): - DATA_INFO = COMPLETE_DATA_INFO_24 - - # Suggested joints to use for keypoint reprojection error calculations - ACC_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16] - - def __init__(self, image_path=None, is_train=True, inp_res=256, out_res=64, sigma=1, - scale_factor=0.25, rot_factor=30, label_type='Gaussian', - do_augment='default', shorten_dataset_to=None, dataset_mode='keyp_only', V12=None, val_opt='test'): - self.V12 = V12 - self.is_train = is_train # training set or test set - if do_augment == 'yes': - self.do_augment = True - elif do_augment == 'no': - self.do_augment = False - elif do_augment=='default': - if self.is_train: - self.do_augment = True - else: - self.do_augment = False - else: - raise ValueError - self.inp_res = inp_res - self.out_res = out_res - self.sigma = sigma - self.scale_factor = scale_factor - self.rot_factor = rot_factor - self.label_type = label_type - self.dataset_mode = dataset_mode - if self.dataset_mode=='complete' or self.dataset_mode=='complete_with_gc' or self.dataset_mode=='keyp_and_seg' or self.dataset_mode=='keyp_and_seg_and_partseg': - self.calc_seg = True - else: - self.calc_seg = False - self.val_opt = val_opt - - # create train/val split - self.img_folder = utils_stanext.get_img_dir(V12=self.V12) - self.train_dict, init_test_dict, init_val_dict = utils_stanext.load_stanext_json_as_dict(split_train_test=True, V12=self.V12) - self.train_name_list = list(self.train_dict.keys()) # 7004 - if self.val_opt == 'test': - self.test_dict = init_test_dict - self.test_name_list = list(self.test_dict.keys()) - elif self.val_opt == 'val': - self.test_dict = init_val_dict - self.test_name_list = list(self.test_dict.keys()) - else: - raise NotImplementedError - - - # path_gc_annots_overview = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/data/stanext_related_data/ground_contact_annotations/stage3/gc_annots_overview_first699.pkl' - path_gc_annots_overview = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/data/stanext_related_data/ground_contact_annotations/stage3/gc_annots_overview_stage3complete.pkl' - 
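        # gc_annots_overview maps an image id (file name without extension) to its
        # ground-contact annotation; each entry carries a 'gc_vertdists_overview'
        # array of shape (n_verts_smal, 3) holding [contact flag 0/1, vertex index,
        # distance], which __getitem__ converts to a per-vertex tensor below.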
with open(path_gc_annots_overview, 'rb') as f: - self.gc_annots_overview = pkl.load(f) - list_gc_labelled_images = list(self.gc_annots_overview.keys()) - - test_name_list_gc = [] - for name in self.test_name_list: - if name.split('.')[0] in list_gc_labelled_images: - test_name_list_gc.append(name) - - train_name_list_gc = [] - for name in self.train_name_list: - if name.split('.')[0] in list_gc_labelled_images: - train_name_list_gc.append(name) - - self.test_name_list = test_name_list_gc - self.train_name_list = train_name_list_gc - - random.seed(4) - random.shuffle(self.test_name_list) - - ''' - already_labelled = ['n02093991-Irish_terrier/n02093991_2874.jpg', - 'n02093754-Border_terrier/n02093754_1062.jpg', - 'n02092339-Weimaraner/n02092339_1672.jpg', - 'n02096177-cairn/n02096177_4916.jpg', - 'n02110185-Siberian_husky/n02110185_725.jpg', - 'n02110806-basenji/n02110806_761.jpg', - 'n02094433-Yorkshire_terrier/n02094433_2474.jpg', - 'n02097474-Tibetan_terrier/n02097474_8796.jpg', - 'n02099601-golden_retriever/n02099601_2495.jpg'] - self.trainvaltest_dict = dict(self.train_dict) - for d in (init_test_dict, init_val_dict): self.trainvaltest_dict.update(d) - - gc_annot_csv = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/data/stanext_related_data/ground_contact_annotations/my_gcannotations_qualification.csv' - gc_row_list = read_csv(gc_annot_csv) - - json_acceptable_string = (gc_row_list[0]['vertices']).replace("'", "\"") - self.gc_dict = json.loads(json_acceptable_string) - - self.train_name_list = already_labelled - self.test_name_list = already_labelled - ''' - - - # stanext breed dict (contains for each name a stanext specific index) - breed_json_path = os.path.join(STANEXT_RELATED_DATA_ROOT_DIR, 'StanExt_breed_dict_v2.json') - self.breed_dict = self.get_breed_dict(breed_json_path, create_new_breed_json=False) - - # load smal symmetry info - self.sym_ids_dict = get_symmetry_indices() - - ''' - self.train_name_list = sorted(self.train_name_list) - self.test_name_list = sorted(self.test_name_list) - random.seed(4) - random.shuffle(self.train_name_list) - random.shuffle(self.test_name_list) - if shorten_dataset_to is not None: - # sometimes it is useful to have a smaller set (validation speed, debugging) - self.train_name_list = self.train_name_list[0 : min(len(self.train_name_list), shorten_dataset_to)] - self.test_name_list = self.test_name_list[0 : min(len(self.test_name_list), shorten_dataset_to)] - # special case for debugging: 12 similar images - if shorten_dataset_to == 12: - my_sample = self.test_name_list[2] - for ind in range(0, 12): - self.test_name_list[ind] = my_sample - ''' - print('len(dataset): ' + str(self.__len__())) - - # add results for eyes, whithers and throat as obtained through anipose -> they are used - # as pseudo ground truth at training time. 
- # self.path_anipose_out_root = os.path.join(STANEXT_RELATED_DATA_ROOT_DIR, 'animalpose_hg8_v0_results_on_StanExt') - self.path_anipose_out_root = os.path.join(STANEXT_RELATED_DATA_ROOT_DIR, 'animalpose_hg8_v1_results_on_StanExt') # this is from hg_anipose_after01bugfix_v1 - # self.prepare_anipose_res_and_save() - - - def get_data_sampler_info(self): - # for custom data sampler - if self.is_train: - name_list = self.train_name_list - else: - name_list = self.test_name_list - info_dict = {'name_list': name_list, - 'stanext_breed_dict': self.breed_dict, - 'breeds_abbrev_dict': COMPLETE_ABBREV_DICT, - 'breeds_summary': COMPLETE_SUMMARY_BREEDS, - 'breeds_sim_martix_raw': SIM_MATRIX_RAW, - 'breeds_sim_abbrev_inds': SIM_ABBREV_INDICES - } - return info_dict - - - def get_breed_dict(self, breed_json_path, create_new_breed_json=False): - if create_new_breed_json: - breed_dict = {} - breed_index = 0 - for img_name in self.train_name_list: - folder_name = img_name.split('/')[0] - breed_name = folder_name.split(folder_name.split('-')[0] + '-')[1] - if not (folder_name in breed_dict): - breed_dict[folder_name] = { - 'breed_name': breed_name, - 'index': breed_index} - breed_index += 1 - with open(breed_json_path, 'w', encoding='utf-8') as f: json.dump(breed_dict, f, ensure_ascii=False, indent=4) - else: - with open(breed_json_path) as json_file: breed_dict = json.load(json_file) - return breed_dict - - - - def prepare_anipose_res_and_save(self): - # I only had to run this once ... - # path_animalpose_res_root = '/ps/scratch/nrueegg/new_projects/Animals/dog_project/pytorch-stacked-hourglass/results/animalpose_hg8_v0/' - path_animalpose_res_root = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/results/results/hg_anipose_after01bugfix_v1/stanext24_XXX_e300_json/' - - train_dict, init_test_dict, init_val_dict = utils_stanext.load_stanext_json_as_dict(split_train_test=True, V12=self.V12) - train_name_list = list(train_dict.keys()) - val_name_list = list(init_val_dict.keys()) - test_name_list = list(init_test_dict.keys()) - all_dicts = [train_dict, init_val_dict, init_test_dict] - all_name_lists = [train_name_list, val_name_list, test_name_list] - all_prefixes = ['train', 'val', 'test'] - for ind in range(3): - this_name_list = all_name_lists[ind] - this_dict = all_dicts[ind] - this_prefix = all_prefixes[ind] - - for index in range(0, len(this_name_list)): - print(index) - name = this_name_list[index] - data = this_dict[name] - - img_path = os.path.join(self.img_folder, data['img_path']) - - path_animalpose_res = os.path.join(path_animalpose_res_root.replace('XXX', this_prefix), data['img_path'].replace('.jpg', '.json')) - - - # prepare predicted keypoints - '''if is_train: - path_animalpose_res = os.path.join(path_animalpose_res_root, 'train_stanext', 'res_' + str(index) + '.json') - else: - path_animalpose_res = os.path.join(path_animalpose_res_root, 'test_stanext', 'res_' + str(index) + '.json') - ''' - with open(path_animalpose_res) as f: animalpose_data = json.load(f) - anipose_joints_256 = np.asarray(animalpose_data['pred_joints_256']).reshape((-1, 3)) - anipose_center = animalpose_data['center'] - anipose_scale = animalpose_data['scale'] - anipose_joints_64 = anipose_joints_256 / 4 - '''thrs_21to24 = 0.2 - anipose_joints_21to24 = np.zeros((4, 3))) - for ind_j in range(0:4): - anipose_joints_untrans = transform(anipose_joints_64[20+ind_j, 0:2], anipose_center, anipose_scale, [64, 64], invert=True, rot=0, as_int=False)-1 - anipose_joints_trans_again = transform(anipose_joints_untrans+1, 
anipose_center, anipose_scale, [64, 64], invert=False, rot=0, as_int=False) - anipose_joints_21to24[ind_j, :2] = anipose_joints_untrans - if anipose_joints_256[20+ind_j, 2] >= thrs_21to24: - anipose_joints_21to24[ind_j, 2] = 1''' - anipose_joints_0to24 = np.zeros((24, 3)) - for ind_j in range(24): - # anipose_joints_untrans = transform(anipose_joints_64[ind_j, 0:2], anipose_center, anipose_scale, [64, 64], invert=True, rot=0, as_int=False)-1 - anipose_joints_untrans = transform(anipose_joints_64[ind_j, 0:2]+1, anipose_center, anipose_scale, [64, 64], invert=True, rot=0, as_int=False)-1 - anipose_joints_0to24[ind_j, :2] = anipose_joints_untrans - anipose_joints_0to24[ind_j, 2] = anipose_joints_256[ind_j, 2] - # save anipose result for usage later on - out_path = os.path.join(self.path_anipose_out_root, data['img_path'].replace('.jpg', '.json')) - if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) - out_dict = {'orig_anipose_joints_256': list(anipose_joints_256.reshape((-1))), - 'anipose_joints_0to24': list(anipose_joints_0to24[:, :3].reshape((-1))), - 'orig_index': index, - 'orig_scale': animalpose_data['scale'], - 'orig_center': animalpose_data['center'], - 'data_split': this_prefix, # 'is_train': is_train, - } - with open(out_path, 'w') as outfile: json.dump(out_dict, outfile) - return - - - - - - - - - - - - - - - - - def __getitem__(self, index): - - - if self.is_train: - train_val_test_Prefix = 'train' - name = self.train_name_list[index] - data = self.train_dict[name] - else: - train_val_test_Prefix = self.val_opt # 'val' or 'test' - name = self.test_name_list[index] - data = self.test_dict[name] - img_path = os.path.join(self.img_folder, data['img_path']) - - - ''' - # for debugging only - train_val_test_Prefix = 'train' - name = self.train_name_list[index] - data = self.trainvaltest_dict[name] - img_path = os.path.join(self.img_folder, data['img_path']) - - if self.dataset_mode=='complete_with_gc': - n_verts_smal = 3889 - - gc_info_raw = self.gc_dict['bite/' + name] # a list with all vertex numbers that are in ground contact - gc_info = [] - gc_info_tch = torch.zeros((n_verts_smal)) - for ind_v in gc_info_raw: - if ind_v < n_verts_smal: - gc_info.append(ind_v) - gc_info_tch[ind_v] = 1 - gc_info_available = True - ''' - - # array of shape (n_verts_smal, 3) with [first: no-contact=0 contact=1 second: index of vertex third: dist] - gc_vertdists_overview = self.gc_annots_overview[name.split('.')[0]]['gc_vertdists_overview'] - - gc_info_tch = torch.tensor(gc_vertdists_overview[:, :]) # torch.tensor(gc_vertdists_overview[:, 0]) - gc_info_available = True - - - - - # import pdb; pdb.set_trace() - debugging = False - if debugging: - import shutil - import trimesh - from smal_pytorch.smal_model.smal_torch_new import SMAL - smal = SMAL() - verts = smal.v_template.detach().cpu().numpy() - faces = smal.faces.detach().cpu().numpy() - vert_colors = np.repeat(255*gc_info_tch[:, 0].detach().cpu().numpy()[:, None], 3, 1) - # vert_colors = np.repeat(255*gc_info_np[:, None], 3, 1) - my_mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False, maintain_order=True) - my_mesh.visual.vertex_colors = vert_colors - debug_folder = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/debugging/gc_debugging/' - my_mesh.export(debug_folder + (name.split('/')[1]).replace('.jpg', '_withgc.obj')) - shutil.copy(img_path, debug_folder + name.split('/')[1]) - - - - - - sf = self.scale_factor - rf = self.rot_factor - try: - # import pdb; pdb.set_trace() - - 
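            # load precomputed AnimalPose keypoint predictions (used as pseudo ground
            # truth for eyes, withers and throat); predictions with confidence below
            # anipose_thr are zeroed out, and a missing result file falls through to
            # the except branch below.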
'''new_anipose_root_path = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/results/results/hg_anipose_after01bugfix_v1/stanext24_XXX_e300_json/' - adjusted_new_anipose_root_path = new_anipose_root_path.replace('XXX', train_val_test_Prefix) - new_anipose_res_path = adjusted_new_anipose_root_path + data['img_path'].replace('.jpg', '.json') - with open(new_anipose_res_path) as f: new_anipose_data = json.load(f) - ''' - - anipose_res_path = os.path.join(self.path_anipose_out_root, data['img_path'].replace('.jpg', '.json')) - with open(anipose_res_path) as f: anipose_data = json.load(f) - anipose_thr = 0.2 - anipose_joints_0to24 = np.asarray(anipose_data['anipose_joints_0to24']).reshape((-1, 3)) - anipose_joints_0to24_scores = anipose_joints_0to24[:, 2] - # anipose_joints_0to24_scores[anipose_joints_0to24_scores>anipose_thr] = 1.0 - anipose_joints_0to24_scores[anipose_joints_0to24_scores<anipose_thr] = 0.0 - anipose_joints_0to24[:, 2] = anipose_joints_0to24_scores - except: - # REMARK: This happens sometimes!!! maybe once every 10th image..? - print('no anipose eye keypoints!') - anipose_joints_0to24 = np.zeros((24, 3)) - - joints = np.concatenate((np.asarray(data['joints'])[:20, :], anipose_joints_0to24[20:24, :]), axis=0) - joints[joints[:, 2]==0, :2] = 0 # avoid nan values - pts = torch.Tensor(joints) - - # inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r) - # sf = scale * 200.0 / res[0] # res[0]=256 - # center = center * 1.0 / sf - # scale = scale / sf = 256 / 200 - # h = 200 * scale - bbox_xywh = data['img_bbox'] - bbox_c = [bbox_xywh[0]+0.5*bbox_xywh[2], bbox_xywh[1]+0.5*bbox_xywh[3]] - bbox_max = max(bbox_xywh[2], bbox_xywh[3]) - bbox_diag = math.sqrt(bbox_xywh[2]**2 + bbox_xywh[3]**2) - # bbox_s = bbox_max / 200. # the dog will fill the image -> bbox_max = 256 - # bbox_s = bbox_diag / 200. # diagonal of the boundingbox will be 200 - bbox_s = bbox_max / 200. * 256. / 200. 
# maximum side of the bbox will be 200 - c = torch.Tensor(bbox_c) - s = bbox_s - - # For single-person pose estimation with a centered/scaled figure - nparts = pts.size(0) - img = load_image(img_path) # CxHxW - - # segmentation map (we reshape it to 3xHxW, such that we can do the - # same transformations as with the image) - if self.calc_seg: - seg = torch.Tensor(utils_stanext.get_seg_from_entry(data)[None, :, :]) - seg = torch.cat(3*[seg]) - - r = 0 - do_flip = False - if self.do_augment: - s = s*torch.randn(1).mul_(sf).add_(1).clamp(1-sf, 1+sf)[0] - r = torch.randn(1).mul_(rf).clamp(-2*rf, 2*rf)[0] if random.random() <= 0.6 else 0 - # Flip - if random.random() <= 0.5: - do_flip = True - img = fliplr(img) - if self.calc_seg: - seg = fliplr(seg) - pts = shufflelr(pts, img.size(2), self.DATA_INFO.hflip_indices) - c[0] = img.size(2) - c[0] - # flip ground contact annotations - gc_info_tch_swapped = torch.zeros_like(gc_info_tch) - gc_info_tch_swapped[self.sym_ids_dict['center'], :] = gc_info_tch[self.sym_ids_dict['center'], :] - gc_info_tch_swapped[self.sym_ids_dict['right'], :] = gc_info_tch[self.sym_ids_dict['left'], :] - gc_info_tch_swapped[self.sym_ids_dict['left'], :] = gc_info_tch[self.sym_ids_dict['right'], :] - gc_info_tch = gc_info_tch_swapped - - - # Color - img[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1) - img[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1) - img[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1) - - - - - # import pdb; pdb.set_trace() - debugging = False - if debugging and do_flip: - import shutil - import trimesh - from smal_pytorch.smal_model.smal_torch_new import SMAL - smal = SMAL() - verts = smal.v_template.detach().cpu().numpy() - faces = smal.faces.detach().cpu().numpy() - vert_colors = np.repeat(255*gc_info_tch[:, 0].detach().cpu().numpy()[:, None], 3, 1) - # vert_colors = np.repeat(255*gc_info_np[:, None], 3, 1) - my_mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False, maintain_order=True) - my_mesh.visual.vertex_colors = vert_colors - debug_folder = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/debugging/gc_debugging/' - my_mesh.export(debug_folder + (name.split('/')[1]).replace('.jpg', '_withgc_flip.obj')) - - - - - - - - - - - - - - - # Prepare image and groundtruth map - inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r) - img_border_mask = torch.all(inp > 1.0/256, dim = 0).unsqueeze(0).float() # 1 is foreground - inp = color_normalize(inp, self.DATA_INFO.rgb_mean, self.DATA_INFO.rgb_stddev) - if self.calc_seg: - seg = crop(seg, c, s, [self.inp_res, self.inp_res], rot=r) - - # Generate ground truth - tpts = pts.clone() - target_weight = tpts[:, 2].clone().view(nparts, 1) - - target = torch.zeros(nparts, self.out_res, self.out_res) - for i in range(nparts): - # if tpts[i, 2] > 0: # This is evil!! 
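            # a keypoint is treated as present when its y-coordinate is positive rather
            # than trusting the visibility flag in column 2 (see the commented-out check
            # above); the per-joint weight is still multiplied by the visibility
            # returned from draw_labelmap.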
- if tpts[i, 1] > 0: - tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2]+1, c, s, [self.out_res, self.out_res], rot=r, as_int=False)) - 1 - target[i], vis = draw_labelmap(target[i], tpts[i], self.sigma, type=self.label_type) - target_weight[i, 0] *= vis - # NEW: - '''target_new, vis_new = draw_multiple_labelmaps((self.out_res, self.out_res), tpts[:, :2]-1, self.sigma, type=self.label_type) - target_weight_new = tpts[:, 2].clone().view(nparts, 1) * vis_new - target_new[(target_weight_new==0).reshape((-1)), :, :] = 0''' - - - # --- Meta info - this_breed = self.breed_dict[name.split('/')[0]] # 120 - # add information about location within breed similarity matrix - folder_name = name.split('/')[0] - breed_name = folder_name.split(folder_name.split('-')[0] + '-')[1] - abbrev = COMPLETE_ABBREV_DICT[breed_name] - try: - sim_breed_index = COMPLETE_SUMMARY_BREEDS[abbrev]._ind_in_xlsx_matrix - except: # some breeds are not in the xlsx file - sim_breed_index = -1 - meta = {'index' : index, 'center' : c, 'scale' : s, - 'pts' : pts, 'tpts' : tpts, 'target_weight': target_weight, - 'breed_index': this_breed['index'], 'sim_breed_index': sim_breed_index, - 'ind_dataset': 0} # ind_dataset=0 for stanext or stanexteasy or stanext 2 - meta2 = {'index' : index, 'center' : c, 'scale' : s, - 'pts' : pts, 'tpts' : tpts, 'target_weight': target_weight, - 'ind_dataset': 3} - - # import pdb; pdb.set_trace() - - # out_path_root = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/debugging/stanext_preprocessing/old_animalpose_version/' - # out_path_root = '/is/cluster/work/nrueegg/icon_pifu_related/barc_for_bite/debugging/stanext_preprocessing/v0/' - # save_input_image_with_keypoints(inp, meta['tpts'], out_path = out_path_root + name.replace('/', '_'), ratio_in_out=self.inp_res/self.out_res) - - - # return different things depending on dataset_mode - if self.dataset_mode=='keyp_only': - # save_input_image_with_keypoints(inp, meta['tpts'], out_path='./test_input_stanext.png', ratio_in_out=self.inp_res/self.out_res) - return inp, target, meta - elif self.dataset_mode=='keyp_and_seg': - meta['silh'] = seg[0, :, :] - meta['name'] = name - return inp, target, meta - elif self.dataset_mode=='keyp_and_seg_and_partseg': - # partseg is fake! 
this does only exist such that this dataset can be combined with an other datset that has part segmentations - meta2['silh'] = seg[0, :, :] - meta2['name'] = name - fake_body_part_matrix = torch.ones((3, 256, 256)).long() * (-1) - meta2['body_part_matrix'] = fake_body_part_matrix - return inp, target, meta2 - elif (self.dataset_mode=='complete') or (self.dataset_mode=='complete_with_gc'): - target_dict = meta - target_dict['silh'] = seg[0, :, :] - # NEW for silhouette loss - target_dict['img_border_mask'] = img_border_mask - target_dict['has_seg'] = True - # ground contact - if self.dataset_mode=='complete_with_gc': - target_dict['has_gc_is_touching'] = True - target_dict['has_gc'] = gc_info_available - target_dict['gc'] = gc_info_tch - if target_dict['silh'].sum() < 1: - if ((not self.is_train) and self.val_opt == 'test'): - raise ValueError - elif self.is_train: - print('had to replace training image') - replacement_index = max(0, index - 1) - inp, target_dict = self.__getitem__(replacement_index) - else: - # There seem to be a few validation images without segmentation - # which would lead to nan in iou calculation - replacement_index = max(0, index - 1) - inp, target_dict = self.__getitem__(replacement_index) - return inp, target_dict - else: - print('sampling error') - import pdb; pdb.set_trace() - raise ValueError - - - def __len__(self): - if self.is_train: - return len(self.train_name_list) - else: - return len(self.test_name_list) - - diff --git a/spaces/safi842/FashionGen/README.md b/spaces/safi842/FashionGen/README.md deleted file mode 100644 index fb711c30db889980fa29454a616de1e588d36a2d..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: FashionGen -emoji: 🔥 -colorFrom: gray -colorTo: blue -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: true -license: afl-3.0 -duplicated_from: safi842/FashionGen ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Layers/STFT.py b/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Layers/STFT.py deleted file mode 100644 index 44302a976392af3664506e4954ebc92ae72c4110..0000000000000000000000000000000000000000 --- a/spaces/sarinam/speaker-anonymization-gan/IMSToucan/Layers/STFT.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Taken from ESPNet -""" - -import torch -from torch.functional import stft as torch_stft -from torch_complex.tensor import ComplexTensor - -from ..Utility.utils import make_pad_mask - - -class STFT(torch.nn.Module): - - def __init__(self, n_fft=512, win_length=None, hop_length=128, window="hann", center=True, normalized=False, - onesided=True): - super().__init__() - self.n_fft = n_fft - if win_length is None: - self.win_length = n_fft - else: - self.win_length = win_length - self.hop_length = hop_length - self.center = center - self.normalized = normalized - self.onesided = onesided - self.window = window - - def extra_repr(self): - return (f"n_fft={self.n_fft}, " - f"win_length={self.win_length}, " - f"hop_length={self.hop_length}, " - f"center={self.center}, " - f"normalized={self.normalized}, " - f"onesided={self.onesided}") - - def forward(self, input_wave, ilens=None): - """ - STFT forward function. 
- Args: - input_wave: (Batch, Nsamples) or (Batch, Nsample, Channels) - ilens: (Batch) - Returns: - output: (Batch, Frames, Freq, 2) or (Batch, Frames, Channels, Freq, 2) - """ - bs = input_wave.size(0) - - if input_wave.dim() == 3: - multi_channel = True - # input: (Batch, Nsample, Channels) -> (Batch * Channels, Nsample) - input_wave = input_wave.transpose(1, 2).reshape(-1, input_wave.size(1)) - else: - multi_channel = False - - # output: (Batch, Freq, Frames, 2=real_imag) - # or (Batch, Channel, Freq, Frames, 2=real_imag) - if self.window is not None: - window_func = getattr(torch, f"{self.window}_window") - window = window_func(self.win_length, dtype=input_wave.dtype, device=input_wave.device) - else: - window = None - - complex_output = torch_stft(input=input_wave, - n_fft=self.n_fft, - win_length=self.win_length, - hop_length=self.hop_length, - center=self.center, - window=window, - normalized=self.normalized, - onesided=self.onesided, - return_complex=True) - output = torch.view_as_real(complex_output) - # output: (Batch, Freq, Frames, 2=real_imag) - # -> (Batch, Frames, Freq, 2=real_imag) - output = output.transpose(1, 2) - if multi_channel: - # output: (Batch * Channel, Frames, Freq, 2=real_imag) - # -> (Batch, Frame, Channel, Freq, 2=real_imag) - output = output.view(bs, -1, output.size(1), output.size(2), 2).transpose(1, 2) - - if ilens is not None: - if self.center: - pad = self.win_length // 2 - ilens = ilens + 2 * pad - - olens = (ilens - self.win_length) // self.hop_length + 1 - output.masked_fill_(make_pad_mask(olens, output, 1), 0.0) - else: - olens = None - - return output, olens - - def inverse(self, input, ilens=None): - """ - Inverse STFT. - Args: - input: Tensor(batch, T, F, 2) or ComplexTensor(batch, T, F) - ilens: (batch,) - Returns: - wavs: (batch, samples) - ilens: (batch,) - """ - istft = torch.functional.istft - - if self.window is not None: - window_func = getattr(torch, f"{self.window}_window") - window = window_func(self.win_length, dtype=input.dtype, device=input.device) - else: - window = None - - if isinstance(input, ComplexTensor): - input = torch.stack([input.real, input.imag], dim=-1) - assert input.shape[-1] == 2 - input = input.transpose(1, 2) - - wavs = istft(input, n_fft=self.n_fft, hop_length=self.hop_length, win_length=self.win_length, window=window, center=self.center, - normalized=self.normalized, onesided=self.onesided, length=ilens.max() if ilens is not None else ilens) - - return wavs, ilens diff --git a/spaces/scedlatioru/img-to-music/README.md b/spaces/scedlatioru/img-to-music/README.md deleted file mode 100644 index eda6bdaa45b0e90d5e523a939dae0a40cdbf3bc3..0000000000000000000000000000000000000000 --- a/spaces/scedlatioru/img-to-music/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Img To Music -emoji: 🌅🎶 -colorFrom: green -colorTo: purple -sdk: gradio -sdk_version: 3.16.0 -app_file: app.py -pinned: true -duplicated_from: light22/img-to-music ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/sdeeas/ChuanhuChatGPT/modules/shared.py b/spaces/sdeeas/ChuanhuChatGPT/modules/shared.py deleted file mode 100644 index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000 --- a/spaces/sdeeas/ChuanhuChatGPT/modules/shared.py +++ /dev/null @@ -1,55 +0,0 @@ -from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST -import os -import queue - -class State: - interrupted = False - 
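    # when a list of API keys is configured, multi_api_key is switched on and requests
    # rotate through api_key_queue (see set_api_key_queue / switching_api_key below)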
multi_api_key = False - completion_url = COMPLETION_URL - balance_api_url = BALANCE_API_URL - usage_api_url = USAGE_API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_host(self, api_host): - self.completion_url = f"https://{api_host}/v1/chat/completions" - self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants" - self.usage_api_url = f"https://{api_host}/dashboard/billing/usage" - os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1" - - def reset_api_host(self): - self.completion_url = COMPLETION_URL - self.balance_api_url = BALANCE_API_URL - self.usage_api_url = USAGE_API_URL - os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1" - return API_HOST - - def reset_all(self): - self.interrupted = False - self.completion_url = COMPLETION_URL - - def set_api_key_queue(self, api_key_list): - self.multi_api_key = True - self.api_key_queue = queue.Queue() - for api_key in api_key_list: - self.api_key_queue.put(api_key) - - def switching_api_key(self, func): - if not hasattr(self, "api_key_queue"): - return func - - def wrapped(*args, **kwargs): - api_key = self.api_key_queue.get() - args[0].api_key = api_key - ret = func(*args, **kwargs) - self.api_key_queue.put(api_key) - return ret - - return wrapped - - -state = State() diff --git a/spaces/segments-tobias/conex/espnet2/samplers/length_batch_sampler.py b/spaces/segments-tobias/conex/espnet2/samplers/length_batch_sampler.py deleted file mode 100644 index 522a4b49e145c4f92cb03ca65f7d454df960033c..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/samplers/length_batch_sampler.py +++ /dev/null @@ -1,143 +0,0 @@ -from typing import Iterator -from typing import List -from typing import Tuple -from typing import Union - -from typeguard import check_argument_types - -from espnet2.fileio.read_text import load_num_sequence_text -from espnet2.samplers.abs_sampler import AbsSampler - - -class LengthBatchSampler(AbsSampler): - def __init__( - self, - batch_bins: int, - shape_files: Union[Tuple[str, ...], List[str]], - min_batch_size: int = 1, - sort_in_batch: str = "descending", - sort_batch: str = "ascending", - drop_last: bool = False, - padding: bool = True, - ): - assert check_argument_types() - assert batch_bins > 0 - if sort_batch != "ascending" and sort_batch != "descending": - raise ValueError( - f"sort_batch must be ascending or descending: {sort_batch}" - ) - if sort_in_batch != "descending" and sort_in_batch != "ascending": - raise ValueError( - f"sort_in_batch must be ascending or descending: {sort_in_batch}" - ) - - self.batch_bins = batch_bins - self.shape_files = shape_files - self.sort_in_batch = sort_in_batch - self.sort_batch = sort_batch - self.drop_last = drop_last - - # utt2shape: (Length, ...) - # uttA 100,... - # uttB 201,... - utt2shapes = [ - load_num_sequence_text(s, loader_type="csv_int") for s in shape_files - ] - - first_utt2shape = utt2shapes[0] - for s, d in zip(shape_files, utt2shapes): - if set(d) != set(first_utt2shape): - raise RuntimeError( - f"keys are mismatched between {s} != {shape_files[0]}" - ) - - # Sort samples in ascending order - # (shape order should be like (Length, Dim)) - keys = sorted(first_utt2shape, key=lambda k: first_utt2shape[k][0]) - if len(keys) == 0: - raise RuntimeError(f"0 lines found: {shape_files[0]}") - - # Decide batch-sizes - batch_sizes = [] - current_batch_keys = [] - for key in keys: - current_batch_keys.append(key) - # shape: (Length, dim1, dim2, ...) 
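-            # `bins` below estimates the size of the current candidate batch (padded to the
-            # longest item, or summed raw lengths) and is compared against batch_bins to
-            # decide when to close the batch.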
- if padding: - # bins = bs x max_length - bins = sum(len(current_batch_keys) * sh[key][0] for sh in utt2shapes) - else: - # bins = sum of lengths - bins = sum(d[k][0] for k in current_batch_keys for d in utt2shapes) - - if bins > batch_bins and len(current_batch_keys) >= min_batch_size: - batch_sizes.append(len(current_batch_keys)) - current_batch_keys = [] - else: - if len(current_batch_keys) != 0 and ( - not self.drop_last or len(batch_sizes) == 0 - ): - batch_sizes.append(len(current_batch_keys)) - - if len(batch_sizes) == 0: - # Maybe we can't reach here - raise RuntimeError("0 batches") - - # If the last batch-size is smaller than minimum batch_size, - # the samples are redistributed to the other mini-batches - if len(batch_sizes) > 1 and batch_sizes[-1] < min_batch_size: - for i in range(batch_sizes.pop(-1)): - batch_sizes[-(i % len(batch_sizes)) - 1] += 1 - - if not self.drop_last: - # Bug check - assert sum(batch_sizes) == len(keys), f"{sum(batch_sizes)} != {len(keys)}" - - # Set mini-batch - self.batch_list = [] - iter_bs = iter(batch_sizes) - bs = next(iter_bs) - minibatch_keys = [] - for key in keys: - minibatch_keys.append(key) - if len(minibatch_keys) == bs: - if sort_in_batch == "descending": - minibatch_keys.reverse() - elif sort_in_batch == "ascending": - # Key are already sorted in ascending - pass - else: - raise ValueError( - "sort_in_batch must be ascending" - f" or descending: {sort_in_batch}" - ) - self.batch_list.append(tuple(minibatch_keys)) - minibatch_keys = [] - try: - bs = next(iter_bs) - except StopIteration: - break - - if sort_batch == "ascending": - pass - elif sort_batch == "descending": - self.batch_list.reverse() - else: - raise ValueError( - f"sort_batch must be ascending or descending: {sort_batch}" - ) - - def __repr__(self): - return ( - f"{self.__class__.__name__}(" - f"N-batch={len(self)}, " - f"batch_bins={self.batch_bins}, " - f"sort_in_batch={self.sort_in_batch}, " - f"sort_batch={self.sort_batch})" - ) - - def __len__(self): - return len(self.batch_list) - - def __iter__(self) -> Iterator[Tuple[str, ...]]: - return iter(self.batch_list) diff --git a/spaces/segments-tobias/conex/espnet2/text/cleaner.py b/spaces/segments-tobias/conex/espnet2/text/cleaner.py deleted file mode 100644 index 714eb7cbdb4b430af3c31390fefd9e904ad41ad9..0000000000000000000000000000000000000000 --- a/spaces/segments-tobias/conex/espnet2/text/cleaner.py +++ /dev/null @@ -1,46 +0,0 @@ -from typing import Collection - -from jaconv import jaconv -import tacotron_cleaner.cleaners -from typeguard import check_argument_types - -try: - from vietnamese_cleaner import vietnamese_cleaners -except ImportError: - vietnamese_cleaners = None - - -class TextCleaner: - """Text cleaner. - - Examples: - >>> cleaner = TextCleaner("tacotron") - >>> cleaner("(Hello-World); & jr. 
& dr.") - 'HELLO WORLD, AND JUNIOR AND DOCTOR' - - """ - - def __init__(self, cleaner_types: Collection[str] = None): - assert check_argument_types() - - if cleaner_types is None: - self.cleaner_types = [] - elif isinstance(cleaner_types, str): - self.cleaner_types = [cleaner_types] - else: - self.cleaner_types = list(cleaner_types) - - def __call__(self, text: str) -> str: - for t in self.cleaner_types: - if t == "tacotron": - text = tacotron_cleaner.cleaners.custom_english_cleaners(text) - elif t == "jaconv": - text = jaconv.normalize(text) - elif t == "vietnamese": - if vietnamese_cleaners is None: - raise RuntimeError("Please install underthesea") - text = vietnamese_cleaners.vietnamese_cleaner(text) - else: - raise RuntimeError(f"Not supported: type={t}") - - return text diff --git a/spaces/sgvkamalakar/Water_Potability_Prediciton_app/app.py b/spaces/sgvkamalakar/Water_Potability_Prediciton_app/app.py deleted file mode 100644 index fdb4589232aa634cef0f0a2ab45c47a29fbdc119..0000000000000000000000000000000000000000 --- a/spaces/sgvkamalakar/Water_Potability_Prediciton_app/app.py +++ /dev/null @@ -1,64 +0,0 @@ -import gradio as gr -from PIL import Image -import pandas as pd -from sklearn.model_selection import train_test_split -from sklearn.ensemble import RandomForestClassifier - -df=pd.read_csv('water_potability.csv') -for i in ['ph','Trihalomethanes','Sulfate']: - missing1 = df.query('Potability == 0')[i][df[i].isna()].index - df.loc[missing1,i] = df.query('Potability == 0')[i][df[i].notna()].mean() - missing2 = df.query('Potability == 1')[i][df[i].isna()].index - df.loc[missing2,i] = df.query('Potability == 1')[i][df[i].notna()].mean() - -dataInp=df.drop('Potability',axis=1) -dataOp=df['Potability'] -X_train,X_test,y_train,y_test=train_test_split(dataInp,dataOp,test_size=0.25,random_state=42) -model= RandomForestClassifier() -model.fit(X_train,y_train) -inputs = [ - gr.inputs.Number(label='ph', default=6.5), - gr.inputs.Number(label='Hardness', default=1), - gr.inputs.Number(label='Solids', default=500), - gr.inputs.Number(label='Chloramines', default=4), - gr.inputs.Number(label='Sulfate', default=250), - gr.inputs.Number(label='Conductivity', default=800), - gr.inputs.Number(label='Organic_Carbon', default=2), - gr.inputs.Number(label='Trihalomethanes', default=80), - gr.inputs.Number(label='Turbidity', default=5) -] - -outputs = [ - gr.outputs.Label(label="Potability"), - gr.outputs.Image(type='pil',label="Image") -] - - -def predict_water_quality(ph, Hardness, Solids, Chloramines, Sulfate, Conductivity, Organic_Carbon, Trihalomethanes, Turbidity): - - try: - data=pd.DataFrame({'ph':[ph],'Hardness':[Hardness],'Solids':[Solids], - 'Chloramines':[Chloramines],'Sulfate':[Sulfate],'Conductivity':[Conductivity], - 'Organic_carbon':[Organic_Carbon],'Trihalomethanes':[Trihalomethanes],'Turbidity':[Turbidity]}) - X_new=data[['ph','Hardness','Solids','Chloramines','Sulfate','Conductivity','Organic_carbon','Trihalomethanes','Turbidity']] - prediction = model.predict(X_new) - potable_water = Image.open("Potable water.jpg") - non_potable_water = Image.open("Non potable water.jpg") - - if prediction== 1: - image = potable_water - else: - image = non_potable_water - if prediction==0: - # print('Non Potable water') - return 'Non Potable water',image - else: - # print('Potable water') - return 'Potable water',image - - except Exception as e: - return (e) - -app = gr.Interface(fn=predict_water_quality,inputs=inputs, outputs=outputs,description='Water quality is a crucial aspect of public health, 
and the potability of water samples is a significant concern for people worldwide. Predicting whether a water sample is potable or not is a critical task that can be accomplished through the use of machine learning techniques. \n One approach to predicting water potability is to use a Random Forest Classifier model. The model can take in various features of the water sample, such as pH, hardness, solid content, turbidity, sulfate content, trihalomethanes, conductivity, and organic carbon content, as input. Each of these features is a potential contributor to water potability.', css="footer {visibility: hidden}", title='Water Potability Prediction') -app.launch() - diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/infer_pack/models_onnx.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/infer_pack/models_onnx.py deleted file mode 100644 index 3c5be53a572151820de7d82dfce84f2e2979ed56..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/infer_pack/models_onnx.py +++ /dev/null @@ -1,760 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from infer_pack import modules -from infer_pack import attentions -from infer_pack import commons -from infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from infer_pack.commons import init_weights -import numpy as np -from infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder256Sim(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = 
nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - x = self.proj(x) * x_mask - return x, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, 
upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - 
rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - 
) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsidO(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, pitch, nsff0, sid, 
max_len=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g) - return o - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git 
a/spaces/shimizukawa/python-no-senpai/loaders/github_issue.py b/spaces/shimizukawa/python-no-senpai/loaders/github_issue.py deleted file mode 100644 index fa972b7a50a7af65b00dbee90d7692aca2445110..0000000000000000000000000000000000000000 --- a/spaces/shimizukawa/python-no-senpai/loaders/github_issue.py +++ /dev/null @@ -1,60 +0,0 @@ -import json -from dataclasses import asdict -from pathlib import Path -from typing import Iterator - -from dateutil.parser import parse -from langchain.docstore.document import Document -from langchain.document_loaders.base import BaseLoader - -from models import GithubIssue - - -def date_to_int(dt_str: str) -> int: - dt = parse(dt_str) - return int(dt.timestamp()) - - -def get_contents(inputfile: Path) -> Iterator[tuple[GithubIssue, str]]: - with inputfile.open("r") as f: - obj = [json.loads(line) for line in f] - for data in obj: - title = data["title"] - body = data["body"] - issue = GithubIssue( - id=data["number"], - title=title, - ctime=date_to_int(data["created_at"]), - user=data["user.login"], - url=data["html_url"], - labels=data["labels_"], - ) - text = title - if body: - text += "\n\n" + body - yield issue, text - comments = data["comments_"] - for comment in comments: - issue = GithubIssue( - id=comment["id"], - title=data["title"], - ctime=date_to_int(comment["created_at"]), - user=comment["user.login"], - url=comment["html_url"], - labels=data["labels_"], - type="issue_comment", - ) - yield issue, comment["body"] - - -class GithubIssueLoader(BaseLoader): - def __init__(self, inputfile: Path): - self.inputfile = inputfile - - def lazy_load(self) -> Iterator[Document]: - for issue, text in get_contents(self.inputfile): - metadata = asdict(issue) - yield Document(page_content=text, metadata=metadata) - - def load(self) -> list[Document]: - return list(self.lazy_load()) diff --git a/spaces/silentchen/layout-guidance/utils.py b/spaces/silentchen/layout-guidance/utils.py deleted file mode 100644 index fa3545cd9c51bff524bd8ec88b99fc35393b1e5d..0000000000000000000000000000000000000000 --- a/spaces/silentchen/layout-guidance/utils.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -import math -def compute_ca_loss(attn_maps_mid, attn_maps_up, bboxes, object_positions): - loss = 0 - object_number = len(bboxes) - if object_number == 0: - return torch.tensor(0).float().cuda() - for attn_map_integrated in attn_maps_mid: - attn_map = attn_map_integrated.chunk(2)[1] - - # - b, i, j = attn_map.shape - H = W = int(math.sqrt(i)) - for obj_idx in range(object_number): - obj_loss = 0 - mask = torch.zeros(size=(H, W)).cuda() - for obj_box in bboxes[obj_idx]: - - x_min, y_min, x_max, y_max = int(obj_box[0] * W), \ - int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H) - mask[y_min: y_max, x_min: x_max] = 1 - - for obj_position in object_positions[obj_idx]: - ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W) - - activation_value = (ca_map_obj * mask).reshape(b, -1).sum(dim=-1)/ca_map_obj.reshape(b, -1).sum(dim=-1) - - obj_loss += torch.mean((1 - activation_value) ** 2) - loss += (obj_loss/len(object_positions[obj_idx])) - - # compute loss on padding tokens - # activation_value = torch.zeros(size=(b, )).cuda() - # for obj_idx in range(object_number): - # bbox = bboxes[obj_idx] - # ca_map_obj = attn_map[:, :, padding_start:].reshape(b, H, W, -1) - # activation_value += ca_map_obj[:, int(bbox[0] * H): int(bbox[1] * H), - # int(bbox[2] * W): int(bbox[3] * W), :].reshape(b, -1).sum(dim=-1) / ca_map_obj.reshape(b, -1).sum(dim=-1) - # - # loss += torch.mean((1 
- activation_value) ** 2) - - - for attn_map_integrated in attn_maps_up[0]: - attn_map = attn_map_integrated.chunk(2)[1] - # - b, i, j = attn_map.shape - H = W = int(math.sqrt(i)) - - for obj_idx in range(object_number): - obj_loss = 0 - mask = torch.zeros(size=(H, W)).cuda() - for obj_box in bboxes[obj_idx]: - x_min, y_min, x_max, y_max = int(obj_box[0] * W), \ - int(obj_box[1] * H), int(obj_box[2] * W), int(obj_box[3] * H) - mask[y_min: y_max, x_min: x_max] = 1 - - for obj_position in object_positions[obj_idx]: - ca_map_obj = attn_map[:, :, obj_position].reshape(b, H, W) - # ca_map_obj = attn_map[:, :, object_positions[obj_position]].reshape(b, H, W) - - activation_value = (ca_map_obj * mask).reshape(b, -1).sum(dim=-1) / ca_map_obj.reshape(b, -1).sum( - dim=-1) - - obj_loss += torch.mean((1 - activation_value) ** 2) - loss += (obj_loss / len(object_positions[obj_idx])) - - # compute loss on padding tokens - # activation_value = torch.zeros(size=(b, )).cuda() - # for obj_idx in range(object_number): - # bbox = bboxes[obj_idx] - # ca_map_obj = attn_map[:, :,padding_start:].reshape(b, H, W, -1) - # activation_value += ca_map_obj[:, int(bbox[0] * H): int(bbox[1] * H), - # int(bbox[2] * W): int(bbox[3] * W), :].reshape(b, -1).sum(dim=-1) / ca_map_obj.reshape(b, -1).sum(dim=-1) - # - # loss += torch.mean((1 - activation_value) ** 2) - loss = loss / (object_number * (len(attn_maps_up[0]) + len(attn_maps_mid))) - return loss \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Clash of Magic S1 Mod APK - Unlimited Resources and Gems for CoC.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Clash of Magic S1 Mod APK - Unlimited Resources and Gems for CoC.md deleted file mode 100644 index 57141416fde1fe67f2befe8bda13a1b2e641fca6..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Clash of Magic S1 Mod APK - Unlimited Resources and Gems for CoC.md +++ /dev/null @@ -1,112 +0,0 @@ - -<h1>Clash of Magic APK: A Private Server for Clash of Clans with Unlimited Resources</h1> -<p>Are you a fan of Clash of Clans, the popular strategy game where you build your own village and fight against other players? If yes, then you might have faced some challenges in the game, such as running out of resources, waiting for long upgrade times, or being attacked by stronger opponents. Well, what if we tell you that there is a way to play Clash of Clans without any limitations or restrictions? Sounds too good to be true, right? But it is possible with Clash of Magic APK, a private server for Clash of Clans that offers you unlimited resources, custom mods, and much more. In this article, we will tell you everything you need to know about Clash of Magic APK, including its features, how to download and install it, and its pros and cons. So, let's get started!</p> -<h2>coc s1 mod apk</h2><br /><p><b><b>Download File</b> ★★★★★ <a href="https://ssurll.com/2uNZ1r">https://ssurll.com/2uNZ1r</a></b></p><br /><br /> - <h2>What is Clash of Magic APK?</h2> -<p>Clash of Magic APK is a modified version of Clash of Clans that runs on a private server. A private server is a separate platform that hosts the game independently from the official server. This means that you can play the game with different rules and features that are not available on the original game. 
For example, you can have unlimited resources, such as gems, gold, elixir, and dark elixir, which are essential for building your village and training your troops. You can also use custom mods and commands that let you modify the game according to your preferences. For instance, you can unlock all the goblin maps, change the appearance of your buildings and troops, or create your own clan with unlimited members.</p> -<p>Clash of Magic APK is one of the most popular private servers for Clash of Clans because it offers a lot of advantages over the original game. It has fast and secure servers that ensure smooth gameplay without any lag or crashes. It also allows you to play 1v1 battles in real-time with other players who are using the same server. Moreover, it does not require you to root your device or jailbreak your iPhone to play it. You just need to download and install the APK file on your Android device or the IPA file on your iOS device.</p> - <h3>Features of Clash of Magic APK</h3> -<p>As we mentioned earlier, Clash of Magic APK has many features that make it different from the original game. Here are some of the main features that you can enjoy with this private server:</p> - <h4>Unlimited gems, gold, elixir, and dark elixir</h4> -<p>The most obvious feature of Clash of Magic APK is that it gives you unlimited resources to play the game without any worries. Gems are the premium currency in Clash of Clans that can be used to speed up the building and upgrading process, buy more resources, or get special items. Gold and elixir are the basic resources that are used to build and upgrade your buildings and troops. Dark elixir is a rare resource that is used to train and upgrade dark troops and heroes.</p> -<p>With Clash of Magic APK, you can have as many resources as you want without spending any real money or waiting for long hours. You can use them to build your dream village, train powerful armies, and dominate the game.</p> - <h4>Custom mods and commands</h4> -<p>Another feature of Clash of Magic APK is that it lets you customize the game according to your liking. You can use various mods and commands that let you change the appearance and behavior of the game elements. For example, you can use mods to change the skin or color of your buildings and troops, or add new buildings and troops that are not available in the original game. You can also use commands to perform certain actions instantly, such as filling up your storages, clearing obstacles, or resetting your base layout. You can find a list of all the available mods and commands on the official website of Clash of Magic APK.</p> - <h4>Fast and secure servers</h4> -<p>One of the main concerns of using a private server for Clash of Clans is the stability and security of the server. You don't want to play on a server that is slow, laggy, or prone to hacking. Fortunately, Clash of Magic APK has fast and secure servers that ensure a smooth and safe gaming experience. The servers are hosted on high-performance machines that can handle thousands of players at the same time. They also have anti-DDoS protection and encryption to prevent any attacks or data breaches. You can play on any of the four servers that are available: S1, S2, S3, and S4. Each server has different features and mods, so you can choose the one that suits your style.</p> - <h4>Real-time 1v1 battles</h4> -<p>Another feature of Clash of Magic APK is that it allows you to play 1v1 battles in real-time with other players who are using the same server. 
This means that you can test your skills and strategies against other players without any delay or interference. You can also chat with your opponents and make friends or enemies. You can challenge anyone who is online or accept challenges from others. You can also join or create clans and participate in clan wars with other clans. The 1v1 battles are fun and exciting, and they can help you improve your game.</p> - <h3>How to download and install Clash of Magic APK?</h3> -<p>If you are interested in playing Clash of Magic APK, you need to download and install it on your device. Here are the requirements and compatibility for Clash of Magic APK:</p> -<p>coc s1 mod apk download<br /> -coc s1 mod apk unlimited everything<br /> -coc s1 mod apk latest version<br /> -coc s1 mod apk android 1<br /> -coc s1 mod apk 2023<br /> -coc s1 mod apk offline<br /> -coc s1 mod apk hack<br /> -coc s1 mod apk free download<br /> -coc s1 mod apk no root<br /> -coc s1 mod apk private server<br /> -coc s1 magic mod apk<br /> -coc s1 clash of magic mod apk<br /> -coc s1 magic mod apk download<br /> -coc s1 magic mod apk unlimited gems<br /> -coc s1 magic mod apk 2023<br /> -coc s1 magic mod apk android 1<br /> -coc s1 magic mod apk latest version 2023<br /> -coc s1 magic mod apk offline<br /> -coc s1 magic mod apk hack<br /> -coc s1 magic mod apk free download<br /> -coc s1 magic mod apk no root<br /> -coc s1 magic mod apk private server 2023<br /> -clash of clans s1 mod apk<br /> -clash of clans s1 mod apk download<br /> -clash of clans s1 mod apk unlimited everything<br /> -clash of clans s1 mod apk latest version<br /> -clash of clans s1 mod apk android 1<br /> -clash of clans s1 mod apk 2023<br /> -clash of clans s1 mod apk offline<br /> -clash of clans s1 mod apk hack<br /> -clash of clans s1 mod apk free download<br /> -clash of clans s1 mod apk no root<br /> -clash of clans s1 mod apk private server<br /> -clash of clans magic s1 mod apk<br /> -clash of clans magic s1 mod apk download<br /> -clash of clans magic s1 mod apk unlimited gems<br /> -clash of clans magic s1 mod apk latest version 2023<br /> -clash of clans magic s1 mod apk android 1<br /> -clash of clans magic s1 mod apk 2023<br /> -clash of clans magic s1 mod apk offline<br /> -clash of clans magic s1 mod apk hack<br /> -clash of clans magic s1 mod apk free download<br /> -clash of clans magic s1 mod apk no root<br /> -clash of clans magic s1 mod apk private server 2023<br /> -download coc magic server 15.83.22 (s2) (s3) (s4) (s5) (s6) (s7) (s8) (s9) (s10)</p> - <h4>Requirements and compatibility</h4> -<p>To play Clash of Magic APK, you need to have an Android device with Android 4.0.3 or higher, or an iOS device with iOS 9.0 or higher. You also need to have at least 100 MB of free space on your device. 
You don't need to root your Android device or jailbreak your iOS device to play Clash of Magic APK.</p> - <h4>Steps to download and install</h4> -<p>To download and install Clash of Magic APK, follow these steps:</p> -<ol> -<li>Go to the official website of Clash of Magic APK and choose the server that you want to play on.</li> -<li>Download the APK file for Android devices or the IPA file for iOS devices.</li> -<li>Allow unknown sources on your device settings if you are using an Android device.</li> -<li>Locate the downloaded file on your device and tap on it to install it.</li> -<li>Launch the game and enjoy!</li> -</ol> - <h3>Pros and cons of Clash of Magic APK</h3> -<p>Clash of Magic APK has many pros and cons that you should consider before playing it. Here are some of them:</p> - <h4>Pros</h4> -<ul> -<li>You can have unlimited resources, such as gems, gold, elixir, and dark elixir.</li> -<li>You can use custom mods and commands to modify the game according to your preferences.</li> -<li>You can play on fast and secure servers that ensure smooth gameplay without any lag or crashes.</li> -<li>You can play 1v1 battles in real-time with other players who are using the same server.</li> -<li>You don't need to root your Android device or jailbreak your iOS device to play Clash of Magic APK.</li> -</ul> - <h4>Cons</h4> -<ul> -<li>You can't play with players who are using the official server or other private servers.</li> -<li>You might face some bugs or glitches in the game due to the modifications.</li> -<li>You might get banned from the official game if you use your original account on Clash of Magic APK.</li> -<li>You might lose your progress if the server gets wiped out or shut down.</li> -<li>You might violate the terms and conditions of the original game by using a private server.</li> -</ul> - <h2>Conclusion</h2> -<p>Clash of Magic APK is a private server for Clash of Clans that offers you unlimited resources, custom mods, and much more. It is one of the most popular private servers for Clash of Clans because it has fast and secure servers, real-time 1v1 battles, and no rooting or jailbreaking required. However, it also has some drawbacks, such as not being able to play with players who are using the official server or other private servers, facing some bugs or glitches in the game, getting banned from the original game, losing your progress if the server gets wiped out or shut down, and violating the terms and conditions of the original game by using a private server. Therefore, you should weigh the pros and cons of Clash of Magic APK before playing it and decide whether it is worth it or not.</p> - <h2>FAQs</h2> -<p>Here are some of the frequently asked questions about Clash of Magic APK:</p> - <ol> -<li><b>Is Clash of Magic APK safe to use?</b></li> -<p>Clash of Magic APK is safe to use as long as you download it from the official website and install it on your device. It does not contain any viruses or malware that can harm your device or steal your data. However, you should be careful not to use your original account on Clash of Magic APK, as it might get banned from the official game. You should also avoid clicking on any suspicious links or ads that might redirect you to malicious websites.</p> -<li><b>Is Clash of Magic APK free to use?</b></li> -<p>Yes, Clash of Magic APK is free to use and does not require any subscription or payment. You can download and install it on your device without any charges. 
You can also enjoy all the features and resources that it offers without spending any real money.</p> -<li><b>How can I update Clash of Magic APK?</b></li> -<p>Clash of Magic APK is updated regularly to match the latest version of Clash of Clans and to fix any bugs or glitches that might occur. You can check the official website of Clash of Magic APK for any updates and download them from there. You can also enable the auto-update option on your device settings to get the updates automatically.</p> -<li><b>Can I play Clash of Magic APK offline?</b></li> -<p>No, you cannot play Clash of Magic APK offline, as it requires an internet connection to connect to the private server and play with other players. You need to have a stable and fast internet connection to play Clash of Magic APK without any interruptions or errors.</p> -<li><b>Can I play Clash of Magic APK on PC?</b></li> -<p>Yes, you can play Clash of Magic APK on PC by using an Android emulator, such as BlueStacks, NoxPlayer, or LDPlayer. An Android emulator is a software that allows you to run Android apps and games on your PC. You just need to download and install an Android emulator on your PC, then download and install Clash of Magic APK on the emulator, and launch the game from there.</p> -</ol></p> 197e85843d<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Spaceflight Simulator MOD APK 2022 and Discover New Planets.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Spaceflight Simulator MOD APK 2022 and Discover New Planets.md deleted file mode 100644 index c33e333b85de9e40c40929f6a6844c7dd4e99b6b..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download Spaceflight Simulator MOD APK 2022 and Discover New Planets.md +++ /dev/null @@ -1,159 +0,0 @@ -<br /> -<h1>Download Spaceflight Simulator Mod APK 2022: A Guide for Space Enthusiasts</h1> - <p>Have you ever dreamed of becoming an astronaut and exploring the vastness of space? Do you want to experience the thrill of launching rockets, orbiting planets, and landing on moons? If you answered yes to any of these questions, then you should try Spaceflight Simulator, a realistic and fun simulation game that lets you create and fly your own spacecraft. And if you want to enjoy the game even more, you should download Spaceflight Simulator Mod APK 2022, which gives you unlimited money, fuel, and access to all parts and planets. In this article, we will tell you everything you need to know about Spaceflight Simulator and its modded version, as well as some tips and tricks to help you become a master space explorer.</p> - <h2>What is Spaceflight Simulator?</h2> - <p>Spaceflight Simulator is a game developed by Stef Moroyna, a young programmer who loves space and physics. The game is available for Android and iOS devices, and it has been downloaded over 10 million times on Google Play Store. The game is inspired by real-life space missions and rockets, such as Apollo, SpaceX, NASA, and more. You can build your own spacecraft from scratch, using various parts such as engines, fuel tanks, wings, landing gears, parachutes, solar panels, and more. You can also customize the color and shape of your rocket, as well as the name and flag of your space agency. 
Once you have built your rocket, you can launch it from different locations on Earth, such as Cape Canaveral, Baikonur, Vandenberg, and more. You can then control your rocket using realistic physics and orbital mechanics, adjusting the thrust, direction, and staging of your rocket. You can also use the map view to see your trajectory and plan your maneuvers. You can explore the solar system in the game, which includes all eight planets and their moons, as well as some asteroids and comets. You can orbit around them, land on them, or even crash into them. You can also perform various missions and challenges in the game, such as reaching orbit, docking with other spacecraft, landing on the moon or Mars, or sending probes to other planets. The game also supports mods and custom parts, which allow you to add new features and content to the game.</p> -<h2>download spaceflight simulator mod apk 2022</h2><br /><p><b><b>Download</b> ››› <a href="https://ssurll.com/2uO0sr">https://ssurll.com/2uO0sr</a></b></p><br /><br /> - <h3>Features of Spaceflight Simulator</h3> - <p>Some of the main features of Spaceflight Simulator are:</p> -<ul> -<li>Realistic physics and orbital mechanics</li> -<li>Customizable rockets and spacecraft</li> -<li>A sandbox mode where you can experiment with different designs and scenarios</li> -<li>A realistic solar system with all eight planets and their moons</li> -<li>A variety of missions and challenges</li> -<li>A modding community that creates new parts and features for the game</li> -<li>A multiplayer mode where you can chat and share rockets with other players</li> -</ul> - <h3>How to play Spaceflight Simulator</h3> - <p>To play Spaceflight Simulator, you need to follow these steps:</p> -<ol> -<li>Download the game from Google Play Store or App Store</li> -<li>Open the game and choose a location to launch your rocket from</li> -<li>Tap on the build button to enter the rocket editor mode</li> -<li>Select the parts you want to use from the menu at the bottom of the screen</li> -<li>Drag and drop the parts onto the grid to attach them to your rocket</li> -<li>Adjust the position, rotation, color, name, and flag of your parts using the buttons at the top of the screen</li> -<li>Tap on the save button to save your rocket design</li> -<li>Tap on the launch button to enter the launch mode</li> -<li>Tap on the play button to start the countdown and launch your rocket</li> -<li>Use the buttons on the right side of the screen to control your rocket's thrust, direction, and staging</li> -<li>Use the buttons on the left side of the screen to switch between different views, such as cockpit, map, or free camera</li> -<li>Use the map view to see your orbit and plan your maneuvers, such as changing your inclination, altitude, or velocity</li> -<li>Use the pause button to pause the game and adjust your settings, such as time warp, sound, or graphics</li> -<li>Have fun exploring the solar system and completing missions</li> -</ol> - <h2>Why download Spaceflight Simulator Mod APK 2022?</h2> - <p>Spaceflight Simulator is a great game that offers a lot of fun and learning opportunities for space enthusiasts. 
However, some players may find some limitations and challenges in the game, such as:</p> -<ul> -<li>The game requires a lot of money to buy new parts and unlock new planets</li> -<li>The game has a limited amount of fuel for each rocket, which may prevent you from reaching your desired destination</li> -<li>The game has some bugs and glitches that may affect your gameplay experience</li> -<li>The game may not run smoothly on some devices due to its high graphics and physics requirements</li> -</ul> - <p>If you want to overcome these issues and enjoy the game to the fullest, you should download Spaceflight Simulator Mod APK 2022, which is a modified version of the game that gives you several advantages, such as:</p> - <h3>Benefits of Spaceflight Simulator Mod APK 2022</h3> - <p>Some of the benefits of Spaceflight Simulator Mod APK 2022 are:</p> -<ul> -<li>You get unlimited money to buy any parts and unlock any planets you want</li> -<li>You get unlimited fuel to fly your rocket as far as you want</li> -<li>You get access to all parts and planets without having to complete any missions or challenges</li> -<li>You get improved graphics and performance for a smoother gameplay experience</li> -<li>You get rid of any ads or pop-ups that may interrupt your game</li> -<li>You get regular updates and new features from the mod developers</li> -</ul> - <h3>How to download and install Spaceflight Simulator Mod APK 2022</h3> - <p>To download and install Spaceflight Simulator Mod APK 2022, you need to follow these steps:</p> -<p>download spaceflight simulator mod apk 2022 latest version<br /> -download spaceflight simulator mod apk 2022 unlimited fuel<br /> -download spaceflight simulator mod apk 2022 for android<br /> -download spaceflight simulator mod apk 2022 free<br /> -download spaceflight simulator mod apk 2022 unlocked all parts<br /> -download spaceflight simulator mod apk 2022 no ads<br /> -download spaceflight simulator mod apk 2022 offline<br /> -download spaceflight simulator mod apk 2022 with planets<br /> -download spaceflight simulator mod apk 2022 hack<br /> -download spaceflight simulator mod apk 2022 premium<br /> -download spaceflight simulator mod apk 2022 full version<br /> -download spaceflight simulator mod apk 2022 mega mod<br /> -download spaceflight simulator mod apk 2022 updated<br /> -download spaceflight simulator mod apk 2022 new features<br /> -download spaceflight simulator mod apk 2022 realistic physics<br /> -download spaceflight simulator mod apk 2022 pro<br /> -download spaceflight simulator mod apk 2022 cracked<br /> -download spaceflight simulator mod apk 2022 unlimited money<br /> -download spaceflight simulator mod apk 2022 easy install<br /> -download spaceflight simulator mod apk 2022 high quality graphics<br /> -download spaceflight simulator mod apk 2022 best simulation game<br /> -download spaceflight simulator mod apk 2022 no root required<br /> -download spaceflight simulator mod apk 2022 safe and secure<br /> -download spaceflight simulator mod apk 2022 fast and smooth<br /> -download spaceflight simulator mod apk 2022 fun and addictive<br /> -download spaceflight simulator mod apk 2022 custom rockets<br /> -download spaceflight simulator mod apk 2022 sandbox mode<br /> -download spaceflight simulator mod apk 2022 realistic sound effects<br /> -download spaceflight simulator mod apk 2022 explore the solar system<br /> -download spaceflight simulator mod apk 2022 build your own spacecraft<br /> -download spaceflight simulator mod apk 2022 launch and land 
on planets<br /> -download spaceflight simulator mod apk 2022 learn about rocket science<br /> -download spaceflight simulator mod apk 2022 challenge yourself with missions<br /> -download spaceflight simulator mod apk 2022 share your creations with others<br /> -download spaceflight simulator mod apk 2022 online multiplayer mode<br /> -download spaceflight simulator mod apk 2022 support for mods and addons<br /> -download spaceflight simulator mod apk 2022 compatible with all devices<br /> -download spaceflight simulator mod apk 2022 low storage requirement<br /> -download spaceflight simulator mod apk 2022 user-friendly interface<br /> -download spaceflight simulator mod apk 2022 amazing gameplay experience</p> -<ol> -<li>Go to [this link] and download the Spaceflight Simulator Mod APK 2022 file on your device</li> -<li>Go to your device's settings and enable the installation of apps from unknown sources</li> -<li>Locate the downloaded file on your device and tap on it to start the installation process</li> -<li>Follow the instructions on the screen and wait for the installation to finish</li> -<li>Open the game and enjoy the modded features</li> -</ol> - <h2>Tips and tricks for Spaceflight Simulator</h2> - <p>If you want to become a better space explorer and have more fun with Spaceflight Simulator, you should check out these tips and tricks:</p> - <h3>How to build a successful rocket</h3> - <p>To build a successful rocket in Spaceflight Simulator, you should consider these factors:</p> -<ul> -<li>The mass of your rocket: The heavier your rocket is, the more fuel and thrust it will need to lift off. Try to use lighter parts and reduce unnecessary weight.</li> -<li>The aerodynamics of your rocket: The more drag your rocket has, the more fuel and thrust it will need to overcome air resistance. Try to use sleeker parts and avoid protruding or asymmetrical shapes.</li> -<li>The stability of your rocket: The more stable your rocket is, the easier it will be to control and maneuver. Try to use fins, wings, or reaction wheels to balance your rocket and keep it aligned with its direction.</li> -<li>The efficiency of your rocket: The more efficient your rocket is, the less fuel it will consume per unit of thrust. Try to use engines with higher specific impulse (ISP) and lower mass flow rate (MFR).</li> -<li>The staging of your rocket: The more stages your rocket has, the more flexible it will be to adapt to different situations. Try to use stages that can be detached when they are empty or no longer needed.</li> -</ul> - <h3>How to explore different planets and moons</h3> - <p>To explore different planets and moons in Spaceflight Simulator, you should consider these factors:</p> -<ul> -<li>The distance of your destination: The farther your destination is, the more fuel and time it will take to reach it. Try to use efficient engines and optimal trajectories.</li> -<li>The gravity of your destination: The stronger the gravity of your destination is, the more fuel and thrust it will take to land and take off. Try to use landing gears, parachutes, or retro rockets.</li> -<li>The atmosphere of your destination: The thicker the atmosphere of your destination is, the more drag and heat it will cause to your rocket. Try to use heat shields, aerodynamic parts, or air brakes.</li> -<li>The terrain of your destination: The rougher the terrain of your destination is, the more difficult it will be to find a suitable landing spot. 
Try to use radar altimeters, landing legs, or rovers.</li> -<li>The rotation of your destination: The faster the rotation of your destination is, the more speed and angular momentum it will impart to your rocket. Try to use gyroscopes, reaction wheels, or RCS thrusters.</li> -</ul> - <h3>How to use mods and custom parts</h3> - <p>To use mods and custom parts in Spaceflight Simulator, you should consider these steps:</p> -<ol> -<li>Find a mod or custom part that you like from the internet or the game's community</li> -<li>Download the mod or custom part file on your device</li> -<li>Locate the file on your device and extract it if it is compressed</li> -<li>Copy the extracted folder to the game's directory, usually located at Android/data/com.StefMorojna.SpaceflightSimulator/files/</li> -<li>Open the game and check if the mod or custom part is available in the rocket editor mode</li> -<li>Enjoy using the mod or custom part in your rocket design</li> -</ol> - <h2>Conclusion</h2> - <p>Spaceflight Simulator is a fantastic game that allows you to create and fly your own spacecraft in a realistic and fun way. You can explore the solar system, complete missions, and learn about space and physics. You can also download Spaceflight Simulator Mod APK 2022, which gives you unlimited money, fuel, and access to all parts and planets. You can also use mods and custom parts to add new features and content to the game. Spaceflight Simulator is a game that will keep you entertained and educated for hours. If you are a space enthusiast, you should definitely try this game.</p> - <h3>Summary of the article</h3> - <p>In this article, we have covered:</p> -<ul> -<li>What is Spaceflight Simulator and its features</li> -<li>Why download Spaceflight Simulator Mod APK 2022 and its benefits</li> -<li>How to download and install Spaceflight Simulator Mod APK 2022</li> -<li>Tips and tricks for Spaceflight Simulator</li> -</ul> - <h3>FAQs</h3> - <p>Here are some frequently asked questions about Spaceflight Simulator and its modded version:</p> -<ul> -<li><b>Is Spaceflight Simulator free?</b></li> -<p>Yes, Spaceflight Simulator is free to download and play on Android and iOS devices. However, some parts and planets require in-app purchases to unlock.</p> -<li><b>Is Spaceflight Simulator Mod APK 2022 safe?</b></li> -<p>Yes, Spaceflight Simulator Mod APK 2022 is safe to download and install on your device. However, you should always download it from a trusted source and scan it for viruses before installing it.</p> -<li><b>Is Spaceflight Simulator realistic?</b></li> -<p>Yes, Spaceflight Simulator is realistic in terms of physics and orbital mechanics. The game uses real-life data and formulas to calculate the motion and behavior of your rocket and the celestial bodies. However, some aspects of the game are simplified or exaggerated for gameplay purposes, such as the scale of the planets, the atmosphere density, or the time warp.</p> -<li><b>Can I play Spaceflight Simulator offline?</b></li> -<p>Yes, you can play Spaceflight Simulator offline without an internet connection. However, some features of the game may require an internet connection, such as multiplayer mode, modding community, or cloud saving.</p> -<li><b>Can I play Spaceflight Simulator on PC?</b></li> -<p>No, Spaceflight Simulator is not officially available for PC. However, you can use an Android emulator on your PC to run the game. 
Alternatively, you can try other similar games for PC, such as Kerbal Space Program or SimpleRockets 2.</p> -</ul></p> 401be4b1e0<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis/seg2art/sstan_models/networks/sync_batchnorm/comm.py b/spaces/sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis/seg2art/sstan_models/networks/sync_batchnorm/comm.py deleted file mode 100644 index 922f8c4a3adaa9b32fdcaef09583be03b0d7eb2b..0000000000000000000000000000000000000000 --- a/spaces/sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis/seg2art/sstan_models/networks/sync_batchnorm/comm.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- -# File : comm.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import queue -import collections -import threading - -__all__ = ['FutureResult', 'SlavePipe', 'SyncMaster'] - - -class FutureResult(object): - """A thread-safe future implementation. Used only as one-to-one pipe.""" - - def __init__(self): - self._result = None - self._lock = threading.Lock() - self._cond = threading.Condition(self._lock) - - def put(self, result): - with self._lock: - assert self._result is None, 'Previous result has\'t been fetched.' - self._result = result - self._cond.notify() - - def get(self): - with self._lock: - if self._result is None: - self._cond.wait() - - res = self._result - self._result = None - return res - - -_MasterRegistry = collections.namedtuple('MasterRegistry', ['result']) -_SlavePipeBase = collections.namedtuple('_SlavePipeBase', ['identifier', 'queue', 'result']) - - -class SlavePipe(_SlavePipeBase): - """Pipe for master-slave communication.""" - - def run_slave(self, msg): - self.queue.put((self.identifier, msg)) - ret = self.result.get() - self.queue.put(True) - return ret - - -class SyncMaster(object): - """An abstract `SyncMaster` object. - - - During the replication, as the data parallel will trigger an callback of each module, all slave devices should - call `register(id)` and obtain an `SlavePipe` to communicate with the master. - - During the forward pass, master device invokes `run_master`, all messages from slave devices will be collected, - and passed to a registered callback. - - After receiving the messages, the master device should gather the information and determine to message passed - back to each slave devices. - """ - - def __init__(self, master_callback): - """ - - Args: - master_callback: a callback to be invoked after having collected messages from slave devices. - """ - self._master_callback = master_callback - self._queue = queue.Queue() - self._registry = collections.OrderedDict() - self._activated = False - - def __getstate__(self): - return {'master_callback': self._master_callback} - - def __setstate__(self, state): - self.__init__(state['master_callback']) - - def register_slave(self, identifier): - """ - Register an slave device. - - Args: - identifier: an identifier, usually is the device id. - - Returns: a `SlavePipe` object which can be used to communicate with the master device. - - """ - if self._activated: - assert self._queue.empty(), 'Queue is not clean before next initialization.' 
- self._activated = False - self._registry.clear() - future = FutureResult() - self._registry[identifier] = _MasterRegistry(future) - return SlavePipe(identifier, self._queue, future) - - def run_master(self, master_msg): - """ - Main entry for the master device in each forward pass. - The messages were first collected from each devices (including the master device), and then - an callback will be invoked to compute the message to be sent back to each devices - (including the master device). - - Args: - master_msg: the message that the master want to send to itself. This will be placed as the first - message when calling `master_callback`. For detailed usage, see `_SynchronizedBatchNorm` for an example. - - Returns: the message to be sent back to the master device. - - """ - self._activated = True - - intermediates = [(0, master_msg)] - for i in range(self.nr_slaves): - intermediates.append(self._queue.get()) - - results = self._master_callback(intermediates) - assert results[0][0] == 0, 'The first result should belongs to the master.' - - for i, res in results: - if i == 0: - continue - self._registry[i].result.put(res) - - for i in range(self.nr_slaves): - assert self._queue.get() is True - - return results[0][1] - - @property - def nr_slaves(self): - return len(self._registry) diff --git a/spaces/sneedium/captcha_pixelplanet/modules/backbone.py b/spaces/sneedium/captcha_pixelplanet/modules/backbone.py deleted file mode 100644 index 80dbd87e72ff5f2689bef6de2f586f7219071ba8..0000000000000000000000000000000000000000 --- a/spaces/sneedium/captcha_pixelplanet/modules/backbone.py +++ /dev/null @@ -1,42 +0,0 @@ -import torch -import torch.nn as nn -from fastai.vision import * - -from modules.model import _default_tfmer_cfg -from modules.resnet import resnet45 -from modules.transformer import (PositionalEncoding, - TransformerEncoder, - TransformerEncoderLayer) - -class ResTranformer(nn.Module): - def __init__(self, config): - super().__init__() - alpha_d = ifnone(config.model_vision_backbone_alpha_d, 1.) 
- - self.d_model = ifnone(config.model_vision_d_model, _default_tfmer_cfg['d_model']) - - self.resnet = resnet45(alpha_d, output_channels=self.d_model) - - nhead = ifnone(config.model_vision_nhead, _default_tfmer_cfg['nhead']) - d_inner = ifnone(config.model_vision_d_inner, _default_tfmer_cfg['d_inner']) - dropout = ifnone(config.model_vision_dropout, _default_tfmer_cfg['dropout']) - activation = ifnone(config.model_vision_activation, _default_tfmer_cfg['activation']) - num_layers = ifnone(config.model_vision_backbone_ln, 2) - - self.pos_encoder = PositionalEncoding(self.d_model, max_len=8*32) - encoder_layer = TransformerEncoderLayer(d_model=self.d_model, nhead=nhead, - dim_feedforward=d_inner, dropout=dropout, activation=activation) - self.transformer = TransformerEncoder(encoder_layer, num_layers) - - def forward_transformer(self, feature): - n, c, h, w = feature.shape - feature = feature.view(n, c, -1).permute(2, 0, 1) - feature = self.pos_encoder(feature) - feature = self.transformer(feature) - feature = feature.permute(1, 2, 0).view(n, c, h, w) - return feature - - def forward(self, images, **kwargs): - feature = self.resnet(images, **kwargs) - feature = self.forward_transformer(feature) - return feature \ No newline at end of file diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/tasks/speech_recognition.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/tasks/speech_recognition.py deleted file mode 100644 index d9f011d55ff4fdfeb4c04ca790c314d685708c3a..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_recognition/tasks/speech_recognition.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import json -import os -import re -import sys - -import torch -from examples.speech_recognition.data import AsrDataset -from examples.speech_recognition.data.replabels import replabel_symbol -from fairseq.data import Dictionary -from fairseq.tasks import LegacyFairseqTask, register_task - - -def get_asr_dataset_from_json(data_json_path, tgt_dict): - """ - Parse data json and create dataset. - See scripts/asr_prep_json.py which pack json from raw files - - Json example: - { - "utts": { - "4771-29403-0025": { - "input": { - "length_ms": 170, - "path": "/tmp/file1.flac" - }, - "output": { - "text": "HELLO \n", - "token": "HE LLO", - "tokenid": "4815, 861" - } - }, - "1564-142299-0096": { - ... 
- } - } - """ - if not os.path.isfile(data_json_path): - raise FileNotFoundError("Dataset not found: {}".format(data_json_path)) - with open(data_json_path, "rb") as f: - data_samples = json.load(f)["utts"] - assert len(data_samples) != 0 - sorted_samples = sorted( - data_samples.items(), - key=lambda sample: int(sample[1]["input"]["length_ms"]), - reverse=True, - ) - aud_paths = [s[1]["input"]["path"] for s in sorted_samples] - ids = [s[0] for s in sorted_samples] - speakers = [] - for s in sorted_samples: - m = re.search("(.+?)-(.+?)-(.+?)", s[0]) - speakers.append(m.group(1) + "_" + m.group(2)) - frame_sizes = [s[1]["input"]["length_ms"] for s in sorted_samples] - tgt = [ - [int(i) for i in s[1]["output"]["tokenid"].split(", ")] - for s in sorted_samples - ] - # append eos - tgt = [[*t, tgt_dict.eos()] for t in tgt] - return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers) - - -@register_task("speech_recognition") -class SpeechRecognitionTask(LegacyFairseqTask): - """ - Task for training speech recognition model. - """ - - @staticmethod - def add_args(parser): - """Add task-specific arguments to the parser.""" - parser.add_argument("data", help="path to data directory") - parser.add_argument( - "--silence-token", default="\u2581", help="token for silence (used by w2l)" - ) - parser.add_argument( - "--max-source-positions", - default=sys.maxsize, - type=int, - metavar="N", - help="max number of frames in the source sequence", - ) - parser.add_argument( - "--max-target-positions", - default=1024, - type=int, - metavar="N", - help="max number of tokens in the target sequence", - ) - - def __init__(self, args, tgt_dict): - super().__init__(args) - self.tgt_dict = tgt_dict - - @classmethod - def setup_task(cls, args, **kwargs): - """Setup the task (e.g., load dictionaries).""" - dict_path = os.path.join(args.data, "dict.txt") - if not os.path.isfile(dict_path): - raise FileNotFoundError("Dict not found: {}".format(dict_path)) - tgt_dict = Dictionary.load(dict_path) - - if args.criterion == "ctc_loss": - tgt_dict.add_symbol("<ctc_blank>") - elif args.criterion == "asg_loss": - for i in range(1, args.max_replabel + 1): - tgt_dict.add_symbol(replabel_symbol(i)) - - print("| dictionary: {} types".format(len(tgt_dict))) - return cls(args, tgt_dict) - - def load_dataset(self, split, combine=False, **kwargs): - """Load a given dataset split. 
- - Args: - split (str): name of the split (e.g., train, valid, test) - """ - data_json_path = os.path.join(self.args.data, "{}.json".format(split)) - self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict) - - def build_generator(self, models, args, **unused): - w2l_decoder = getattr(args, "w2l_decoder", None) - if w2l_decoder == "viterbi": - from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder - - return W2lViterbiDecoder(args, self.target_dictionary) - elif w2l_decoder == "kenlm": - from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder - - return W2lKenLMDecoder(args, self.target_dictionary) - elif w2l_decoder == "fairseqlm": - from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder - - return W2lFairseqLMDecoder(args, self.target_dictionary) - else: - return super().build_generator(models, args) - - @property - def target_dictionary(self): - """Return the :class:`~fairseq.data.Dictionary` for the language - model.""" - return self.tgt_dict - - @property - def source_dictionary(self): - """Return the source :class:`~fairseq.data.Dictionary` (if applicable - for this task).""" - return None - - def max_positions(self): - """Return the max speech and sentence length allowed by the task.""" - return (self.args.max_source_positions, self.args.max_target_positions) diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/text_to_speech/tts_transformer.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/text_to_speech/tts_transformer.py deleted file mode 100644 index ff7af78bd49708cc5429cd3d481d3866b4612779..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/text_to_speech/tts_transformer.py +++ /dev/null @@ -1,371 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
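The deleted module below wires a Transformer-based text-to-speech model into fairseq through the `@register_model("tts_transformer")` and `@register_model_architecture("tts_transformer", "tts_transformer")` decorators. As a rough, self-contained illustration of that registration pattern (simplified names and behaviour, not fairseq's actual internals), a decorator-based registry could be sketched like this:

```python
# Illustrative sketch of a decorator-based model registry, using nothing beyond
# plain Python; fairseq's real registries also handle configs and CLI arguments.
MODEL_REGISTRY = {}        # model name -> model class
ARCH_MODEL_REGISTRY = {}   # architecture name -> model class
ARCH_CONFIG_REGISTRY = {}  # architecture name -> function that fills default args


def register_model(name):
    def wrapper(cls):
        MODEL_REGISTRY[name] = cls
        return cls
    return wrapper


def register_model_architecture(model_name, arch_name):
    def wrapper(fn):
        ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
        ARCH_CONFIG_REGISTRY[arch_name] = fn
        return fn
    return wrapper


def build_model(arch_name, args, task):
    ARCH_CONFIG_REGISTRY[arch_name](args)  # e.g. base_architecture(args) below
    return ARCH_MODEL_REGISTRY[arch_name].build_model(args, task)
```

Under that reading, selecting the `tts_transformer` architecture at training time reduces to looking up the model class and the default-filling function that this file registers.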
- -import logging -from typing import List, Optional - -import torch -from torch import nn - -from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, - FairseqIncrementalDecoder, register_model, - register_model_architecture) -from fairseq.modules import ( - TransformerEncoderLayer, TransformerDecoderLayer -) -from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet -from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout -from fairseq.data.data_utils import lengths_to_padding_mask -from fairseq import utils - -logger = logging.getLogger(__name__) - - -def encoder_init(m): - if isinstance(m, nn.Conv1d): - nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu")) - - -def Embedding(num_embeddings, embedding_dim): - m = nn.Embedding(num_embeddings, embedding_dim) - nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) - return m - - -class TTSTransformerEncoder(FairseqEncoder): - def __init__(self, args, src_dict, embed_speaker): - super().__init__(src_dict) - self.padding_idx = src_dict.pad() - self.embed_speaker = embed_speaker - self.spk_emb_proj = None - if embed_speaker is not None: - self.spk_emb_proj = nn.Linear( - args.encoder_embed_dim + args.speaker_embed_dim, - args.encoder_embed_dim - ) - - self.dropout_module = FairseqDropout( - p=args.dropout, module_name=self.__class__.__name__ - ) - self.embed_tokens = nn.Embedding(len(src_dict), args.encoder_embed_dim, - padding_idx=self.padding_idx) - assert(args.encoder_conv_kernel_size % 2 == 1) - self.prenet = nn.ModuleList( - nn.Sequential( - nn.Conv1d(args.encoder_embed_dim, args.encoder_embed_dim, - kernel_size=args.encoder_conv_kernel_size, - padding=((args.encoder_conv_kernel_size - 1) // 2)), - nn.BatchNorm1d(args.encoder_embed_dim), - nn.ReLU(), - nn.Dropout(args.encoder_dropout), - ) - for _ in range(args.encoder_conv_layers) - ) - self.prenet_proj = nn.Linear( - args.encoder_embed_dim, args.encoder_embed_dim - ) - self.embed_positions = PositionalEmbedding( - args.max_source_positions, args.encoder_embed_dim, self.padding_idx - ) - self.pos_emb_alpha = nn.Parameter(torch.ones(1)) - - self.transformer_layers = nn.ModuleList( - TransformerEncoderLayer(args) - for _ in range(args.encoder_transformer_layers) - ) - if args.encoder_normalize_before: - self.layer_norm = LayerNorm(args.encoder_embed_dim) - else: - self.layer_norm = None - - self.apply(encoder_init) - - def forward(self, src_tokens, src_lengths=None, speaker=None, **kwargs): - x = self.embed_tokens(src_tokens) - x = x.transpose(1, 2).contiguous() # B x T x C -> B x C x T - for conv in self.prenet: - x = conv(x) - x = x.transpose(1, 2).contiguous() # B x C x T -> B x T x C - x = self.prenet_proj(x) - - padding_mask = src_tokens.eq(self.padding_idx) - positions = self.embed_positions(padding_mask) - x += self.pos_emb_alpha * positions - x = self.dropout_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - for layer in self.transformer_layers: - x = layer(x, padding_mask) - - if self.layer_norm is not None: - x = self.layer_norm(x) - - if self.embed_speaker is not None: - seq_len, bsz, _ = x.size() - emb = self.embed_speaker(speaker).transpose(0, 1) - emb = emb.expand(seq_len, bsz, -1) - x = self.spk_emb_proj(torch.cat([x, emb], dim=2)) - - return { - "encoder_out": [x], # T x B x C - "encoder_padding_mask": [padding_mask] if padding_mask.any() else [], # B x T - "encoder_embedding": [], # B x T x C - "encoder_states": [], # List[T x B x C] - "src_tokens": [], - "src_lengths": [], - } - - -def 
decoder_init(m): - if isinstance(m, torch.nn.Conv1d): - nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh")) - - -class TTSTransformerDecoder(FairseqIncrementalDecoder): - def __init__(self, args, src_dict): - super().__init__(None) - self._future_mask = torch.empty(0) - - self.args = args - self.padding_idx = src_dict.pad() - self.n_frames_per_step = args.n_frames_per_step - self.out_dim = args.output_frame_dim * args.n_frames_per_step - - self.dropout_module = FairseqDropout( - args.dropout, module_name=self.__class__.__name__ - ) - self.embed_positions = PositionalEmbedding( - args.max_target_positions, args.decoder_embed_dim, self.padding_idx - ) - self.pos_emb_alpha = nn.Parameter(torch.ones(1)) - self.prenet = nn.Sequential( - Prenet(self.out_dim, args.prenet_layers, args.prenet_dim, - args.prenet_dropout), - nn.Linear(args.prenet_dim, args.decoder_embed_dim), - ) - - self.n_transformer_layers = args.decoder_transformer_layers - self.transformer_layers = nn.ModuleList( - TransformerDecoderLayer(args) - for _ in range(self.n_transformer_layers) - ) - if args.decoder_normalize_before: - self.layer_norm = LayerNorm(args.decoder_embed_dim) - else: - self.layer_norm = None - - self.feat_proj = nn.Linear(args.decoder_embed_dim, self.out_dim) - self.eos_proj = nn.Linear(args.decoder_embed_dim, 1) - - self.postnet = Postnet(self.out_dim, args.postnet_conv_dim, - args.postnet_conv_kernel_size, - args.postnet_layers, args.postnet_dropout) - - self.ctc_proj = None - if getattr(args, "ctc_weight", 0.) > 0.: - self.ctc_proj = nn.Linear(self.out_dim, len(src_dict)) - - self.apply(decoder_init) - - def extract_features( - self, prev_outputs, encoder_out=None, incremental_state=None, - target_lengths=None, speaker=None, **kwargs - ): - alignment_layer = self.n_transformer_layers - 1 - self_attn_padding_mask = lengths_to_padding_mask(target_lengths) - positions = self.embed_positions( - self_attn_padding_mask, incremental_state=incremental_state - ) - - if incremental_state is not None: - prev_outputs = prev_outputs[:, -1:, :] - self_attn_padding_mask = self_attn_padding_mask[:, -1:] - if positions is not None: - positions = positions[:, -1:] - - x = self.prenet(prev_outputs) - x += self.pos_emb_alpha * positions - x = self.dropout_module(x) - - # B x T x C -> T x B x C - x = x.transpose(0, 1) - - if not self_attn_padding_mask.any(): - self_attn_padding_mask = None - - attn: Optional[torch.Tensor] = None - inner_states: List[Optional[torch.Tensor]] = [x] - for idx, transformer_layer in enumerate(self.transformer_layers): - if incremental_state is None: - self_attn_mask = self.buffered_future_mask(x) - else: - self_attn_mask = None - - x, layer_attn, _ = transformer_layer( - x, - encoder_out["encoder_out"][0] - if (encoder_out is not None and len(encoder_out["encoder_out"]) > 0) - else None, - encoder_out["encoder_padding_mask"][0] - if ( - encoder_out is not None - and len(encoder_out["encoder_padding_mask"]) > 0 - ) - else None, - incremental_state, - self_attn_mask=self_attn_mask, - self_attn_padding_mask=self_attn_padding_mask, - need_attn=bool((idx == alignment_layer)), - need_head_weights=bool((idx == alignment_layer)), - ) - inner_states.append(x) - if layer_attn is not None and idx == alignment_layer: - attn = layer_attn.float().to(x) - - if attn is not None: - # average probabilities over heads, transpose to - # (B, src_len, tgt_len) - attn = attn.mean(dim=0).transpose(2, 1) - - if self.layer_norm is not None: - x = self.layer_norm(x) - - # T x B x C -> B x T x C - x = 
x.transpose(0, 1) - - return x, {"attn": attn, "inner_states": inner_states} - - def forward(self, prev_output_tokens, encoder_out=None, - incremental_state=None, target_lengths=None, speaker=None, - **kwargs): - x, extra = self.extract_features( - prev_output_tokens, encoder_out=encoder_out, - incremental_state=incremental_state, target_lengths=target_lengths, - speaker=speaker, **kwargs - ) - attn = extra["attn"] - feat_out = self.feat_proj(x) - bsz, seq_len, _ = x.size() - eos_out = self.eos_proj(x) - post_feat_out = feat_out + self.postnet(feat_out) - return post_feat_out, eos_out, {"attn": attn, "feature_out": feat_out} - - def get_normalized_probs(self, net_output, log_probs, sample): - logits = self.ctc_proj(net_output[2]["feature_out"]) - if log_probs: - return utils.log_softmax(logits.float(), dim=-1) - else: - return utils.softmax(logits.float(), dim=-1) - - def buffered_future_mask(self, tensor): - dim = tensor.size(0) - # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround. - if ( - self._future_mask.size(0) == 0 - or (not self._future_mask.device == tensor.device) - or self._future_mask.size(0) < dim - ): - self._future_mask = torch.triu( - utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1 - ) - self._future_mask = self._future_mask.to(tensor) - return self._future_mask[:dim, :dim] - - -@register_model("tts_transformer") -class TTSTransformerModel(FairseqEncoderDecoderModel): - """ - Implementation for https://arxiv.org/pdf/1809.08895.pdf - """ - - @staticmethod - def add_args(parser): - parser.add_argument("--dropout", type=float) - parser.add_argument("--output-frame-dim", type=int) - parser.add_argument("--speaker-embed-dim", type=int) - # encoder prenet - parser.add_argument("--encoder-dropout", type=float) - parser.add_argument("--encoder-conv-layers", type=int) - parser.add_argument("--encoder-conv-kernel-size", type=int) - # encoder transformer layers - parser.add_argument("--encoder-transformer-layers", type=int) - parser.add_argument("--encoder-embed-dim", type=int) - parser.add_argument("--encoder-ffn-embed-dim", type=int) - parser.add_argument("--encoder-normalize-before", action="store_true") - parser.add_argument("--encoder-attention-heads", type=int) - parser.add_argument("--attention-dropout", type=float) - parser.add_argument("--activation-dropout", "--relu-dropout", type=float) - parser.add_argument("--activation-fn", type=str, default="relu") - # decoder prenet - parser.add_argument("--prenet-dropout", type=float) - parser.add_argument("--prenet-layers", type=int) - parser.add_argument("--prenet-dim", type=int) - # decoder postnet - parser.add_argument("--postnet-dropout", type=float) - parser.add_argument("--postnet-layers", type=int) - parser.add_argument("--postnet-conv-dim", type=int) - parser.add_argument("--postnet-conv-kernel-size", type=int) - # decoder transformer layers - parser.add_argument("--decoder-transformer-layers", type=int) - parser.add_argument("--decoder-embed-dim", type=int) - parser.add_argument("--decoder-ffn-embed-dim", type=int) - parser.add_argument("--decoder-normalize-before", action="store_true") - parser.add_argument("--decoder-attention-heads", type=int) - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._num_updates = 0 - - @classmethod - def build_model(cls, args, task): - embed_speaker = task.get_speaker_embeddings(args) - encoder = TTSTransformerEncoder(args, task.src_dict, embed_speaker) - decoder = TTSTransformerDecoder(args, task.src_dict) - 
return cls(encoder, decoder) - - def forward_encoder(self, src_tokens, src_lengths, speaker=None, **kwargs): - return self.encoder(src_tokens, src_lengths=src_lengths, - speaker=speaker, **kwargs) - - def set_num_updates(self, num_updates): - super().set_num_updates(num_updates) - self._num_updates = num_updates - - -@register_model_architecture("tts_transformer", "tts_transformer") -def base_architecture(args): - args.dropout = getattr(args, "dropout", 0.1) - args.output_frame_dim = getattr(args, "output_frame_dim", 80) - args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) - # encoder prenet - args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) - args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) - args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) - # encoder transformer layers - args.encoder_transformer_layers = getattr(args, "encoder_transformer_layers", 6) - args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) - args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim) - args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) - args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) - args.attention_dropout = getattr(args, "attention_dropout", 0.0) - args.activation_dropout = getattr(args, "activation_dropout", 0.0) - args.activation_fn = getattr(args, "activation_fn", "relu") - # decoder prenet - args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) - args.prenet_layers = getattr(args, "prenet_layers", 2) - args.prenet_dim = getattr(args, "prenet_dim", 256) - # decoder postnet - args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) - args.postnet_layers = getattr(args, "postnet_layers", 5) - args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) - args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) - # decoder transformer layers - args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6) - args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) - args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim) - args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) - args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) diff --git a/spaces/srkajol/AI-Chat-PDF/README.md b/spaces/srkajol/AI-Chat-PDF/README.md deleted file mode 100644 index bd2f83d70ac7b6b7de532105dc61b7473706dd46..0000000000000000000000000000000000000000 --- a/spaces/srkajol/AI-Chat-PDF/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: AI Chat PDF -emoji: 🏃 -colorFrom: purple -colorTo: purple -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false -license: apache-2.0 -duplicated_from: rstallman/AI-Chat-PDF ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/starlit7/KorPoliticsTTS/export_model.py b/spaces/starlit7/KorPoliticsTTS/export_model.py deleted file mode 100644 index 98a49835df5a7a2486e76ddf94fbbb4444b52203..0000000000000000000000000000000000000000 --- a/spaces/starlit7/KorPoliticsTTS/export_model.py +++ /dev/null @@ -1,13 +0,0 @@ -import torch - -if __name__ == '__main__': - model_path = "saved_model/11/model.pth" - output_path = "saved_model/11/model1.pth" - checkpoint_dict = torch.load(model_path, map_location='cpu') - checkpoint_dict_new = {} - for k, v in checkpoint_dict.items(): - if 
k == "optimizer": - print("remove optimizer") - continue - checkpoint_dict_new[k] = v - torch.save(checkpoint_dict_new, output_path) diff --git a/spaces/stomexserde/gpt4-ui/Examples/Dcra-c151 Usb Driver !!TOP!!.md b/spaces/stomexserde/gpt4-ui/Examples/Dcra-c151 Usb Driver !!TOP!!.md deleted file mode 100644 index 32c1d1d8e1e8988c4da6aea5a8e8801ce3bf6e99..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Dcra-c151 Usb Driver !!TOP!!.md +++ /dev/null @@ -1,35 +0,0 @@ - -<h1>How to Download and Install Dcra-c151 Usb Driver for Windows 10</h1> -<p>If you have a Sony Handycam camcorder that uses a Dcra-c151 docking station, you may need to download and install a Dcra-c151 Usb Driver to connect it to your Windows 10 computer. This driver allows you to transfer videos and photos from your camcorder to your PC, as well as charge the battery and control the camcorder remotely.</p> -<h2>Dcra-c151 Usb Driver</h2><br /><p><b><b>DOWNLOAD</b> ✏ <a href="https://urlgoal.com/2uI6kS">https://urlgoal.com/2uI6kS</a></b></p><br /><br /> -<p>In this article, we will show you how to download and install the Dcra-c151 Usb Driver for Windows 10 in a few simple steps.</p> -<h2>Step 1: Download the Dcra-c151 Usb Driver</h2> -<p>The first step is to download the Dcra-c151 Usb Driver from the official Sony website. You can find the link to the driver page <a href="https://www.sony.com/electronics/support/downloads/W0006220">here</a>. Alternatively, you can search for "Dcra-c151 Usb Driver" on the Sony website and select the appropriate result.</p> -<p>On the driver page, you will see a button that says "Download". Click on it and save the file to your computer. The file name should be "USBDRVEN.EXE" and the file size should be about 1.52 MB.</p> -<p></p> -<h2>Step 2: Install the Dcra-c151 Usb Driver</h2> -<p>The next step is to install the Dcra-c151 Usb Driver on your Windows 10 computer. To do this, follow these steps:</p> -<ul> -<li>Locate the downloaded file "USBDRVEN.EXE" on your computer and double-click on it.</li> -<li>A window will pop up asking you to confirm if you want to run the file. Click on "Yes".</li> -<li>A new window will appear with the Sony USB Driver Setup Wizard. Click on "Next".</li> -<li>Read and accept the license agreement and click on "Next".</li> -<li>Select the destination folder where you want to install the driver and click on "Next".</li> -<li>Click on "Install" to start the installation process.</li> -<li>Wait for the installation to complete and click on "Finish".</li> -</ul> -<h2>Step 3: Connect your camcorder to your computer</h2> -<p>The final step is to connect your Sony Handycam camcorder to your Windows 10 computer using the Dcra-c151 docking station. To do this, follow these steps:</p> -<ul> -<li>Make sure your camcorder is turned off and has a fully charged battery.</li> -<li>Connect the AC adapter of the docking station to a power outlet.</li> -<li>Connect one end of the USB cable to the docking station and the other end to a USB port on your computer.</li> -<li>Place your camcorder on the docking station and turn it on.</li> -<li>Your computer should recognize your camcorder and install the necessary drivers automatically.</li> -<li>You can now access your camcorder's memory card or hard drive from your computer and transfer videos and photos using Windows Explorer or any other software of your choice.</li> -</ul> -<h2>Conclusion</h2> -<p>In this article, we have shown you how to download and install the Dcra-c151 Usb Driver for Windows 10. 
This driver allows you to connect your Sony Handycam camcorder to your Windows 10 computer using the Dcra-c151 docking station. You can then transfer videos and photos from your camcorder to your PC, as well as charge the battery and control the camcorder remotely.</p> -<p>We hope this article was helpful and informative. If you have any questions or comments, please feel free to leave them below.</p> e93f5a0c3f<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Greek Airports Project Rhodes International 2010 Fs9.md b/spaces/stomexserde/gpt4-ui/Examples/Greek Airports Project Rhodes International 2010 Fs9.md deleted file mode 100644 index e8da7012df3e6ea412ca0658d3de6443574c0cf1..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Greek Airports Project Rhodes International 2010 Fs9.md +++ /dev/null @@ -1,38 +0,0 @@ -<br /> -<h1>Greek Airports Project: A Review of Rhodes International 2010 for FS9</h1> -<p>Rhodes International Airport is the fourth busiest airport in Greece, serving the island of Rhodes and its many tourists. It is located near the village of Paradisi, about 14 km southwest of the city of Rhodes. The airport has two runways, one of which is 3,306 m long and can accommodate large aircraft. The airport also has a terminal building, a control tower, a fire station, and several hangars and cargo facilities.</p> -<p>In 2010, Greek Airports Project (GAP) released a scenery add-on for Rhodes International Airport for both FSX and FS9. GAP is a team of Greek scenery designers who have created several high-quality sceneries for Greek airports, such as Corfu, Thessaloniki, and Heraklion. Their products are known for their realistic and detailed representation of the airports and their surroundings, as well as their compatibility with other add-ons and performance optimization.</p> -<h2>greek airports project rhodes international 2010 fs9</h2><br /><p><b><b>Download Zip</b> ››› <a href="https://urlgoal.com/2uIbya">https://urlgoal.com/2uIbya</a></b></p><br /><br /> -<p>Rhodes International 2010 by GAP is a payware scenery that features:</p> -<ul> -<li>High resolution photoreal ground textures</li> -<li>Custom 3D buildings and objects</li> -<li>Animated vehicles and people</li> -<li>Dynamic lighting and shadows</li> -<li>Seasonal variations and night effects</li> -<li>Accurate runway and taxiway layout</li> -<li>AI traffic compatibility</li> -</ul> -<p>The scenery also includes some landmarks and points of interest near the airport, such as the old town of Rhodes, the Colossus of Rhodes statue, the medieval castle of Kritinia, and the ancient acropolis of Lindos.</p> -<p>Rhodes International 2010 by GAP is a must-have scenery for anyone who enjoys flying to or from Greece. It offers a realistic and immersive experience of one of the most popular destinations in the Mediterranean. The scenery can be purchased from GAP's website for 7.14€ (incl. 
VAT) or as part of a bundle offer with Corfu 2009.</p> -<p>Sources:</p> -<ul> -<li><a href="https://gapscenery.wixsite.com/gap-greece">GREEK AIRPORTS PROJECT - gap-greece</a></li> -<li><a href="https://turkishvirtual.com/forum/viewtopic.php?t=7713">Turkish Virtual Airlines Forums • View topic - GREEK AIRPORTS PROJECT - RHODES INTERNATIONAL 2010 FSX</a></li> -<li><a href="https://www.flightsim.com/vbfs/showthread.php?210935-Greek-Airports-Project-Rhodes-International-2010-Released!!!">Greek Airports Project - Rhodes International 2010 Released!!!</a></li> -</ul> - -<p>But Rhodes is not only about the airport and its facilities. It is also a gateway to explore the island's many attractions and charms. Rhodes is one of the most popular tourist destinations in Greece, offering a variety of experiences for every taste and preference.</p> -<p>Rhodes is known as the island of the sun, as it enjoys more than 300 days of sunshine per year. Its coastline is dotted with sandy beaches and crystal-clear waters, ideal for swimming, sunbathing, water sports and sailing. Some of the most famous beaches are Faliraki, Tsambika, Lindos, Prasonisi and Kallithea.</p> -<p>Rhodes is also rich in history and culture, as it has been inhabited since ancient times and has witnessed the rise and fall of many civilizations. The island boasts many archaeological sites, monuments and museums that showcase its glorious past. Some of the highlights are the Acropolis of Lindos, the ancient cities of Kamiros and Ialysos, the Valley of the Butterflies and the Archaeological Museum of Rhodes.</p> -<p>Rhodes is also a place of natural beauty and diversity, with green hills, fertile valleys, pine forests and olive groves. The island offers many opportunities for hiking, biking, horse riding and exploring its scenic villages and traditional customs. Some of the most picturesque villages are Embona, Archangelos, Monolithos and Apolakkia.</p> -<p></p> -<p>Rhodes is also a cosmopolitan and lively island, with a vibrant nightlife, a gastronomic scene and a variety of events and festivals throughout the year. The island has many bars, clubs, restaurants and tavernas that cater to all tastes and budgets. Some of the most popular spots are Rhodes Town, Lindos, Faliraki and Ixia.</p> -<p>Sources:</p> -<ul> -<li><a href="https://rhodeswelcome.gr/">Welcome Rhodes - The Official Page of Rhodian Tourism</a></li> -<li><a href="https://www.discovergreece.com/dodecanese/rhodes">Rhodes 2023 – Complete Island Guide | Discover Greece</a></li> -<li><a href="https://www.greeka.com/dodecanese/rhodes/tourism/">Tourism in Rhodes island, Greece | Greeka</a></li> -</ul></p> e93f5a0c3f<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/studiobrn/SplitTrack/tests/data/test_audio_dataset.py b/spaces/studiobrn/SplitTrack/tests/data/test_audio_dataset.py deleted file mode 100644 index b69c9c397830738b73d6c229009f84b867cda801..0000000000000000000000000000000000000000 --- a/spaces/studiobrn/SplitTrack/tests/data/test_audio_dataset.py +++ /dev/null @@ -1,352 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
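The deleted test module below exercises `AudioDataset` against synthetic fixtures: it imports `get_white_noise` and `save_wav` from `..common_utils`, which are not shown in this diff. A minimal stand-in matching the shapes the tests appear to expect — a `(channels, frames)` float tensor written out with torchaudio, which is an assumption about the real helpers — might look like:

```python
import torch
import torchaudio


def get_white_noise(channels: int, num_frames: int) -> torch.Tensor:
    # Zero-mean Gaussian noise, one row per audio channel.
    return torch.randn(channels, num_frames)


def save_wav(path: str, wav: torch.Tensor, sample_rate: int) -> None:
    # torchaudio.save expects a (channels, frames) tensor and a sample rate.
    torchaudio.save(path, wav, sample_rate)


if __name__ == "__main__":
    # One second of stereo noise at 16 kHz, mirroring the fixtures the tests create.
    save_wav("example.wav", get_white_noise(2, 16_000), 16_000)
```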
- -from functools import partial -from itertools import product -import json -import math -import os -import random -import typing as tp - -import pytest -import torch -from torch.utils.data import DataLoader - -from audiocraft.data.audio_dataset import ( - AudioDataset, - AudioMeta, - _get_audio_meta, - load_audio_meta, - save_audio_meta -) -from audiocraft.data.zip import PathInZip - -from ..common_utils import TempDirMixin, get_white_noise, save_wav - - -class TestAudioMeta(TempDirMixin): - - def test_get_audio_meta(self): - sample_rates = [8000, 16_000] - channels = [1, 2] - duration = 1. - for sample_rate, ch in product(sample_rates, channels): - n_frames = int(duration * sample_rate) - wav = get_white_noise(ch, n_frames) - path = self.get_temp_path('sample.wav') - save_wav(path, wav, sample_rate) - m = _get_audio_meta(path, minimal=True) - assert m.path == path, 'path does not match' - assert m.sample_rate == sample_rate, 'sample rate does not match' - assert m.duration == duration, 'duration does not match' - assert m.amplitude is None - assert m.info_path is None - - def test_save_audio_meta(self): - audio_meta = [ - AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')), - AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json')) - ] - empty_audio_meta = [] - for idx, meta in enumerate([audio_meta, empty_audio_meta]): - path = self.get_temp_path(f'data_{idx}_save.jsonl') - save_audio_meta(path, meta) - with open(path, 'r') as f: - lines = f.readlines() - read_meta = [AudioMeta.from_dict(json.loads(line)) for line in lines] - assert len(read_meta) == len(meta) - for m, read_m in zip(meta, read_meta): - assert m == read_m - - def test_load_audio_meta(self): - try: - import dora - except ImportError: - dora = None # type: ignore - - audio_meta = [ - AudioMeta("mypath1", 1., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file1.json')), - AudioMeta("mypath2", 2., 16_000, None, None, PathInZip('/foo/bar.zip:/relative/file2.json')) - ] - empty_meta = [] - for idx, meta in enumerate([audio_meta, empty_meta]): - path = self.get_temp_path(f'data_{idx}_load.jsonl') - with open(path, 'w') as f: - for m in meta: - json_str = json.dumps(m.to_dict()) + '\n' - f.write(json_str) - read_meta = load_audio_meta(path) - assert len(read_meta) == len(meta) - for m, read_m in zip(meta, read_meta): - if dora: - m.path = dora.git_save.to_absolute_path(m.path) - assert m == read_m, f'original={m}, read={read_m}' - - -class TestAudioDataset(TempDirMixin): - - def _create_audio_files(self, - root_name: str, - num_examples: int, - durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.), - sample_rate: int = 16_000, - channels: int = 1): - root_dir = self.get_temp_dir(root_name) - for i in range(num_examples): - if isinstance(durations, float): - duration = durations - elif isinstance(durations, tuple) and len(durations) == 1: - duration = durations[0] - elif isinstance(durations, tuple) and len(durations) == 2: - duration = random.uniform(durations[0], durations[1]) - else: - assert False - n_frames = int(duration * sample_rate) - wav = get_white_noise(channels, n_frames) - path = os.path.join(root_dir, f'example_{i}.wav') - save_wav(path, wav, sample_rate) - return root_dir - - def _create_audio_dataset(self, - root_name: str, - total_num_examples: int, - durations: tp.Union[float, tp.Tuple[float, float]] = (0.1, 1.), - sample_rate: int = 16_000, - channels: int = 1, - segment_duration: tp.Optional[float] = None, - 
num_examples: int = 10, - shuffle: bool = True, - return_info: bool = False): - root_dir = self._create_audio_files(root_name, total_num_examples, durations, sample_rate, channels) - dataset = AudioDataset.from_path(root_dir, - minimal_meta=True, - segment_duration=segment_duration, - num_samples=num_examples, - sample_rate=sample_rate, - channels=channels, - shuffle=shuffle, - return_info=return_info) - return dataset - - def test_dataset_full(self): - total_examples = 10 - min_duration, max_duration = 1., 4. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), - sample_rate=sample_rate, channels=channels, segment_duration=None) - assert len(dataset) == total_examples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] <= int(max_duration * sample_rate) - assert sample.shape[1] >= int(min_duration * sample_rate) - - def test_dataset_segment(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - - def test_dataset_equal_audio_and_segment_durations(self): - total_examples = 1 - num_samples = 2 - audio_duration = 1. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - # the random seek_time adds variability on audio read - sample_1 = dataset[0] - sample_2 = dataset[1] - assert not torch.allclose(sample_1, sample_2) - - def test_dataset_samples(self): - total_examples = 1 - num_samples = 2 - audio_duration = 1. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - - create_dataset = partial( - self._create_audio_dataset, - 'dset', total_examples, durations=audio_duration, sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, - ) - - dataset = create_dataset(shuffle=True) - # when shuffle = True, we have different inputs for the same index across epoch - sample_1 = dataset[0] - sample_2 = dataset[0] - assert not torch.allclose(sample_1, sample_2) - - dataset_noshuffle = create_dataset(shuffle=False) - # when shuffle = False, we have same inputs for the same index across epoch - sample_1 = dataset_noshuffle[0] - sample_2 = dataset_noshuffle[0] - assert torch.allclose(sample_1, sample_2) - - def test_dataset_return_info(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. 
- sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - assert len(dataset) == num_samples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample, segment_info = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == int(segment_duration * sample_rate) - assert segment_info.sample_rate == sample_rate - assert segment_info.total_frames == int(segment_duration * sample_rate) - assert segment_info.n_frames <= int(segment_duration * sample_rate) - assert segment_info.seek_time >= 0 - - def test_dataset_return_info_no_segment_duration(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = None - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - assert len(dataset) == total_examples - assert dataset.sample_rate == sample_rate - assert dataset.channels == channels - for idx in range(len(dataset)): - sample, segment_info = dataset[idx] - assert sample.shape[0] == channels - assert sample.shape[1] == segment_info.total_frames - assert segment_info.sample_rate == sample_rate - assert segment_info.n_frames <= segment_info.total_frames - - def test_dataset_collate_fn(self): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=False) - batch_size = 4 - dataloader = DataLoader( - dataset, - batch_size=batch_size, - num_workers=0 - ) - for idx, batch in enumerate(dataloader): - assert batch.shape[0] == batch_size - - @pytest.mark.parametrize("segment_duration", [1.0, None]) - def test_dataset_with_meta_collate_fn(self, segment_duration): - total_examples = 10 - num_samples = 20 - min_duration, max_duration = 1., 4. - segment_duration = 1. - sample_rate = 16_000 - channels = 1 - dataset = self._create_audio_dataset( - 'dset', total_examples, durations=(min_duration, max_duration), sample_rate=sample_rate, - channels=channels, segment_duration=segment_duration, num_examples=num_samples, return_info=True) - batch_size = 4 - dataloader = DataLoader( - dataset, - batch_size=batch_size, - collate_fn=dataset.collater, - num_workers=0 - ) - for idx, batch in enumerate(dataloader): - wav, infos = batch - assert wav.shape[0] == batch_size - assert len(infos) == batch_size - - @pytest.mark.parametrize("segment_duration,sample_on_weight,sample_on_duration,a_hist,b_hist,c_hist", [ - [1, True, True, 0.5, 0.5, 0.0], - [1, False, True, 0.25, 0.5, 0.25], - [1, True, False, 0.666, 0.333, 0.0], - [1, False, False, 0.333, 0.333, 0.333], - [None, False, False, 0.333, 0.333, 0.333]]) - def test_sample_with_weight(self, segment_duration, sample_on_weight, sample_on_duration, a_hist, b_hist, c_hist): - random.seed(1234) - rng = torch.Generator() - rng.manual_seed(1234) - - def _get_histogram(dataset, repetitions=20_000): - counts = {file_meta.path: 0. 
for file_meta in meta} - for _ in range(repetitions): - file_meta = dataset.sample_file(rng) - counts[file_meta.path] += 1 - return {name: count / repetitions for name, count in counts.items()} - - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - dataset = AudioDataset( - meta, segment_duration=segment_duration, sample_on_weight=sample_on_weight, - sample_on_duration=sample_on_duration) - hist = _get_histogram(dataset) - assert math.isclose(hist['a'], a_hist, abs_tol=0.01) - assert math.isclose(hist['b'], b_hist, abs_tol=0.01) - assert math.isclose(hist['c'], c_hist, abs_tol=0.01) - - def test_meta_duration_filter_all(self): - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - try: - AudioDataset(meta, segment_duration=11, min_segment_ratio=1) - assert False - except AssertionError: - assert True - - def test_meta_duration_filter_long(self): - meta = [ - AudioMeta(path='a', duration=5, sample_rate=1, weight=2), - AudioMeta(path='b', duration=10, sample_rate=1, weight=None), - AudioMeta(path='c', duration=5, sample_rate=1, weight=0), - ] - dataset = AudioDataset(meta, segment_duration=None, min_segment_ratio=1, max_audio_duration=7) - assert len(dataset) == 2 diff --git a/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_write_docstring.py b/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_write_docstring.py deleted file mode 100644 index 82d96e1a67f36254159c4fa4ca135a250088f3a9..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/tests/metagpt/actions/test_write_docstring.py +++ /dev/null @@ -1,32 +0,0 @@ -import pytest - -from metagpt.actions.write_docstring import WriteDocstring - -code = ''' -def add_numbers(a: int, b: int): - return a + b - - -class Person: - def __init__(self, name: str, age: int): - self.name = name - self.age = age - - def greet(self): - return f"Hello, my name is {self.name} and I am {self.age} years old." -''' - - -@pytest.mark.asyncio -@pytest.mark.parametrize( - ("style", "part"), - [ - ("google", "Args:"), - ("numpy", "Parameters"), - ("sphinx", ":param name:"), - ], - ids=["google", "numpy", "sphinx"] -) -async def test_write_docstring(style: str, part: str): - ret = await WriteDocstring().run(code, style=style) - assert part in ret diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Bosch Esi Tronic 2013 Keygen UPDl.md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Bosch Esi Tronic 2013 Keygen UPDl.md deleted file mode 100644 index 6df93694a329e61eca941fcce774f7ef3c6bae49..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Bosch Esi Tronic 2013 Keygen UPDl.md +++ /dev/null @@ -1,10 +0,0 @@ -<br /> -<p>1. i have already downloaded and installed on the computer (win7 pc) the esi tronic.exe file. how do i activate the esi tronic 2011? do i just double click on it and let it run, or does it need to be run from a cd?</p> -<h2>Bosch Esi Tronic 2013 Keygenl</h2><br /><p><b><b>Download</b> →→→ <a href="https://cinurl.com/2uEYcp">https://cinurl.com/2uEYcp</a></b></p><br /><br /> -<p>thanks for your message. please be patient, i'm sorry, i have not yet used the esi tronic 2011. 
i have used the esitronic-d from bosch (information base for diesel units) and this program is very valuable. i have also used the esi tronic 2.0 (information base for "heavy" trucks). do you have the esi tronic 2011? it seems that the esi tronic 2011 is only available for tronic boxes (tronic system). do you know how to activate the esi tronic 2011?</p> -<p>i have had my esi tronic 2011 installed for some time. i can open up the esi tronic to see the information. i can also install the 2011 version on my laptop. but what i can't figure out is how to use the esi tronic 2011.</p> -<p>uni-kain. i know that's not what you asked for, but it's all i can think of. here's the link:<br /> /> anyway, you should be able to find a working version in a russian language forum.<br /> now, i need to know if you have a working version of tronic box in english, please.<br /> > bosch esi tronic 2013 keygenl <p>if you don't know what tronic box is, i would suggest you ask it in the user forum. <br /> i would also suggest you ask it in the russian language forum. they may not be able to help you, but i'm sure that the community here will be able to.</p> -<p></p> -<p>you have to make a file named tronic_box.ini and put the information into it. i haven't tested to see if it works. i'm sure it would be worth trying. take a look at the instructions. you can find them in: c:\program files\berco\esi tronic 2012 4. good luck.</p> 899543212b<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL).md b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL).md deleted file mode 100644 index 7750488a1eaa2eed59383d2dabfab79625e1c909..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL).md +++ /dev/null @@ -1,18 +0,0 @@ - -<h1>[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL)</h1> -<p>If you are looking for a romantic and nostalgic film to watch, you might want to check out [Film Indonesia] Dilan 1990 EXTENDED (WEB-DL). This film is based on the best-selling novel by Pidi Baiq and tells the story of Dilan and Milea, two high school students who fall in love in Bandung in 1990.</p> -<h2>What is [Film Indonesia] Dilan 1990 EXTENDED (WEB-DL)?</h2> -<p>[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL) is a special edition of the film Dilan 1990, which was released in 2018 and became a box office hit in Indonesia. The extended version has an additional 15 minutes of footage that shows more scenes of Dilan and Milea's relationship, as well as some deleted scenes that were not included in the original version.</p> -<h2>[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL)</h2><br /><p><b><b>Download</b> ✔ <a href="https://cinurl.com/2uEXyV">https://cinurl.com/2uEXyV</a></b></p><br /><br /> -<h2>Who are the stars of [Film Indonesia] Dilan 1990 EXTENDED (WEB-DL)?</h2> -<p>The film features Iqbaal Ramadhan as Dilan, a charming and rebellious boy who likes to ride motorcycles and get into fights. He is also the leader of a gang called Persaudaraan Rakyat Pendekar Maut (PRPM). Vanesha Prescilla plays Milea, a shy and sweet girl who moves from Jakarta to Bandung and becomes Dilan's love interest. 
The film also has Debo Andryos as Anhar, Dilan's rival and enemy; Giulio Parengkuan as Nandan, Dilan's best friend; Omara Esteghlal as Akew, Dilan's loyal follower; Yoriko Angeline as Wati, Milea's friend; Zulfa Maharani as Rani, Dilan's ex-girlfriend; and Brandon Salim as Beni, Milea's admirer.</p> -<h2>Why should you watch [Film Indonesia] Dilan 1990 EXTENDED (WEB-DL)?</h2> -<p>[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL) is a film that will make you feel nostalgic for the 90s era, with its retro music, fashion, and culture. The film also portrays the ups and downs of young love, with its sweet moments, conflicts, and challenges. The film has a lot of humor, drama, and action that will keep you entertained. The film also has a strong message about friendship, loyalty, and courage.</p> -<h2>Where can you watch [Film Indonesia] Dilan 1990 EXTENDED (WEB-DL)?</h2> -<p>You can watch [Film Indonesia] Dilan 1990 EXTENDED (WEB-DL) online for free on various streaming platforms, such as Netflix, KlikFilm, and Internet Archive. You can also download the film with English subtitles if you prefer. However, please be aware that some of these sites may not have the official license to distribute the film, so please watch at your own risk.</p> -<h2>Conclusion</h2> -<p>[Film Indonesia] Dilan 1990 EXTENDED (WEB-DL) is a film that will make you fall in love with Dilan and Milea's story. It is a film that will take you back to the 90s era and make you feel nostalgic. It is a film that will make you laugh, cry, and cheer. It is a film that you should not miss.</p> -<p></p> -<h2>Conclusion</h2>. Do you want me to revise it or add something to it?</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Descargar-Erase-Una-Vez-Torrent-Temporada-2-FULL.md b/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Descargar-Erase-Una-Vez-Torrent-Temporada-2-FULL.md deleted file mode 100644 index 259990204658e1b95fd3dc1796bc377db939eb8e..0000000000000000000000000000000000000000 --- a/spaces/surmensipa/VITS-Umamusume-voice-synthesizer/Descargar-Erase-Una-Vez-Torrent-Temporada-2-FULL.md +++ /dev/null @@ -1,60 +0,0 @@ -## Descargar Erase Una Vez Torrent Temporada 2 - - - - - - - - - -**CLICK HERE ->->->-> [https://urlca.com/2txt9v](https://urlca.com/2txt9v)** - - - - - - - - - - - - Hello, this is Bing. I can help you with writing a title and an article with HTML formatting for the keyword "Descargar Erase Una Vez Torrent Temporada 2". Here is a possible example: - -# ¿Cómo descargar la segunda temporada de Érase una vez? - - - -Érase una vez es una serie de televisión que mezcla el mundo real y los cuentos de hadas, protagonizada por Emma Swan, una mujer que descubre que es la hija de Blancanieves y que tiene que enfrentarse a las amenazas del malvado Rumplestiltskin. La segunda temporada de la serie se estrenó en 2012 y consta de 22 episodios llenos de aventura, fantasía y romance. - - - -Si quieres descargar la segunda temporada de Érase una vez, puedes hacerlo a través de diferentes sitios web que ofrecen archivos torrent para descargar con el programa BitTorrent. Un archivo torrent es un pequeño fichero que contiene la información necesaria para descargar el contenido que deseas desde otros usuarios que lo comparten. Para usarlo, necesitas tener instalado un cliente BitTorrent, como uTorrent o BitComet, que se encargará de gestionar la descarga. 
- - - -A continuación, te mostramos algunos sitios web donde puedes encontrar los archivos torrent para descargar la segunda temporada de Érase una vez: - - - -- [GranTorrent](https://grantorrent.fi/series/erase-una-vez-temporada-2-torrent/): Este sitio web ofrece los 22 episodios de la segunda temporada en formato HDTV o HD 720p, sin clave y con opciones de descarga alternativas. También puedes encontrar otras series y películas para descargar. - -- [MejorTorrent](https://mejortorrent.wtf/serie/12525/12525/erase-una-vez-2a-temporada): Este sitio web también ofrece los 22 episodios de la segunda temporada en formato HDTV, sin clave y con un solo enlace de descarga por episodio. Además, puedes acceder a otros contenidos como documentales, series HD o películas 4K. - -- [Sway](https://sway.office.com/khSbCLOhLt8bw2ne): Este sitio web ofrece un enlace para descargar un paquete de datos por tiempo limitado que contiene los 22 episodios de la segunda temporada en formato MP4. El paquete se puede guardar en el ordenador de forma permanente por tantos mails como se quiera. - - - -Una vez que hayas elegido el sitio web y el archivo torrent que quieres descargar, solo tienes que hacer clic en el enlace y abrirlo con tu cliente BitTorrent. El programa se encargará de conectar con otros usuarios que tengan el mismo archivo y empezará a descargarlo en tu ordenador. El tiempo que tardes en descargarlo dependerá de la velocidad de tu conexión a internet y del número de usuarios que compartan el archivo. - - - -Cuando la descarga haya finalizado, podrás disfrutar de la segunda temporada de Érase una vez en tu ordenador o en el dispositivo que prefieras. Recuerda que al descargar archivos torrent estás compartiendo el contenido con otros usuarios, por lo que debes respetar los derechos de autor y las normas legales vigentes. - - dfd1c89656 - - - - - diff --git a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/lraspp_head.py b/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/lraspp_head.py deleted file mode 100644 index 69bf320934d787aaa11984a0c4effe9ad8015b22..0000000000000000000000000000000000000000 --- a/spaces/svjack/ControlNet-Pose-Chinese/annotator/uniformer/mmseg/models/decode_heads/lraspp_head.py +++ /dev/null @@ -1,90 +0,0 @@ -import torch -import torch.nn as nn -from annotator.uniformer.mmcv import is_tuple_of -from annotator.uniformer.mmcv.cnn import ConvModule - -from annotator.uniformer.mmseg.ops import resize -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -@HEADS.register_module() -class LRASPPHead(BaseDecodeHead): - """Lite R-ASPP (LRASPP) head is proposed in Searching for MobileNetV3. - - This head is the improved implementation of `Searching for MobileNetV3 - <https://ieeexplore.ieee.org/document/9008835>`_. - - Args: - branch_channels (tuple[int]): The number of output channels in every - each branch. Default: (32, 64). - """ - - def __init__(self, branch_channels=(32, 64), **kwargs): - super(LRASPPHead, self).__init__(**kwargs) - if self.input_transform != 'multiple_select': - raise ValueError('in Lite R-ASPP (LRASPP) head, input_transform ' - f'must be \'multiple_select\'. 
But received ' - f'\'{self.input_transform}\'') - assert is_tuple_of(branch_channels, int) - assert len(branch_channels) == len(self.in_channels) - 1 - self.branch_channels = branch_channels - - self.convs = nn.Sequential() - self.conv_ups = nn.Sequential() - for i in range(len(branch_channels)): - self.convs.add_module( - f'conv{i}', - nn.Conv2d( - self.in_channels[i], branch_channels[i], 1, bias=False)) - self.conv_ups.add_module( - f'conv_up{i}', - ConvModule( - self.channels + branch_channels[i], - self.channels, - 1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - bias=False)) - - self.conv_up_input = nn.Conv2d(self.channels, self.channels, 1) - - self.aspp_conv = ConvModule( - self.in_channels[-1], - self.channels, - 1, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg, - bias=False) - self.image_pool = nn.Sequential( - nn.AvgPool2d(kernel_size=49, stride=(16, 20)), - ConvModule( - self.in_channels[2], - self.channels, - 1, - act_cfg=dict(type='Sigmoid'), - bias=False)) - - def forward(self, inputs): - """Forward function.""" - inputs = self._transform_inputs(inputs) - - x = inputs[-1] - - x = self.aspp_conv(x) * resize( - self.image_pool(x), - size=x.size()[2:], - mode='bilinear', - align_corners=self.align_corners) - x = self.conv_up_input(x) - - for i in range(len(self.branch_channels) - 1, -1, -1): - x = resize( - x, - size=inputs[i].size()[2:], - mode='bilinear', - align_corners=self.align_corners) - x = torch.cat([x, self.convs[i](inputs[i])], 1) - x = self.conv_ups[i](x) - - return self.cls_seg(x) diff --git a/spaces/sxunwashere/rvc-voice/vc_infer_pipeline.py b/spaces/sxunwashere/rvc-voice/vc_infer_pipeline.py deleted file mode 100644 index c26d45068f9b6bf2b194b13c3c89f8a06347c124..0000000000000000000000000000000000000000 --- a/spaces/sxunwashere/rvc-voice/vc_infer_pipeline.py +++ /dev/null @@ -1,306 +0,0 @@ -import numpy as np, parselmouth, torch, pdb -from time import time as ttime -import torch.nn.functional as F -from config import x_pad, x_query, x_center, x_max -import scipy.signal as signal -import pyworld, os, traceback, faiss -from scipy import signal - -bh, ah = signal.butter(N=5, Wn=48, btype="high", fs=16000) - - -class VC(object): - def __init__(self, tgt_sr, device, is_half): - self.sr = 16000 # hubert输入采样率 - self.window = 160 # 每帧点数 - self.t_pad = self.sr * x_pad # 每条前后pad时间 - self.t_pad_tgt = tgt_sr * x_pad - self.t_pad2 = self.t_pad * 2 - self.t_query = self.sr * x_query # 查询切点前后查询时间 - self.t_center = self.sr * x_center # 查询切点位置 - self.t_max = self.sr * x_max # 免查询时长阈值 - self.device = device - self.is_half = is_half - - def get_f0(self, x, p_len, f0_up_key, f0_method, inp_f0=None): - time_step = self.window / self.sr * 1000 - f0_min = 50 - f0_max = 1100 - f0_mel_min = 1127 * np.log(1 + f0_min / 700) - f0_mel_max = 1127 * np.log(1 + f0_max / 700) - if f0_method == "pm": - f0 = ( - parselmouth.Sound(x, self.sr) - .to_pitch_ac( - time_step=time_step / 1000, - voicing_threshold=0.6, - pitch_floor=f0_min, - pitch_ceiling=f0_max, - ) - .selected_array["frequency"] - ) - pad_size = (p_len - len(f0) + 1) // 2 - if pad_size > 0 or p_len - len(f0) - pad_size > 0: - f0 = np.pad( - f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant" - ) - elif f0_method == "harvest": - f0, t = pyworld.harvest( - x.astype(np.double), - fs=self.sr, - f0_ceil=f0_max, - f0_floor=f0_min, - frame_period=10, - ) - f0 = pyworld.stonemask(x.astype(np.double), f0, t, self.sr) - f0 = signal.medfilt(f0, 3) - f0 *= pow(2, f0_up_key / 12) - # with open("test.txt","w")as 
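
A quick aside on the pitch handling in `get_f0` here: `f0 *= pow(2, f0_up_key / 12)` transposes the extracted pitch by `f0_up_key` semitones (12 keys = one octave), and the block that follows quantizes f0 onto a 1–255 coarse grid on the mel scale. The standalone sketch below reproduces that arithmetic with assumed toy values; the constants (50/1100 Hz range, `1127 * log(1 + f0/700)`) are taken from the surrounding code, but the snippet itself is not part of the module.

```python
import numpy as np

f0_min, f0_max = 50.0, 1100.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)


def transpose(f0: np.ndarray, f0_up_key: int) -> np.ndarray:
    """Shift pitch by f0_up_key semitones: +12 doubles the frequency."""
    return f0 * 2 ** (f0_up_key / 12)


def to_coarse(f0: np.ndarray) -> np.ndarray:
    """Map f0 in Hz to the 1..255 coarse grid on the mel scale (unvoiced 0 maps to 1)."""
    f0_mel = 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (f0_mel_max - f0_mel_min) + 1
    return np.rint(np.clip(f0_mel, 1, 255)).astype(int)


f0 = np.array([0.0, 110.0, 220.0, 440.0])  # 0.0 marks an unvoiced frame
print(to_coarse(transpose(f0, 12)))        # one octave up, then quantize
```
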
f:f.write("\n".join([str(i)for i in f0.tolist()])) - tf0 = self.sr // self.window # 每秒f0点数 - if inp_f0 is not None: - delta_t = np.round( - (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1 - ).astype("int16") - replace_f0 = np.interp( - list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1] - ) - shape = f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)].shape[0] - f0[x_pad * tf0 : x_pad * tf0 + len(replace_f0)] = replace_f0[:shape] - # with open("test_opt.txt","w")as f:f.write("\n".join([str(i)for i in f0.tolist()])) - f0bak = f0.copy() - f0_mel = 1127 * np.log(1 + f0 / 700) - f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / ( - f0_mel_max - f0_mel_min - ) + 1 - f0_mel[f0_mel <= 1] = 1 - f0_mel[f0_mel > 255] = 255 - f0_coarse = np.rint(f0_mel).astype(np.int) - return f0_coarse, f0bak # 1-0 - - def vc( - self, - model, - net_g, - sid, - audio0, - pitch, - pitchf, - times, - index, - big_npy, - index_rate, - ): # ,file_index,file_big_npy - feats = torch.from_numpy(audio0) - if self.is_half: - feats = feats.half() - else: - feats = feats.float() - if feats.dim() == 2: # double channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False) - - inputs = { - "source": feats.to(self.device), - "padding_mask": padding_mask, - "output_layer": 9, # layer 9 - } - t0 = ttime() - with torch.no_grad(): - logits = model.extract_features(**inputs) - feats = model.final_proj(logits[0]) - - if ( - isinstance(index, type(None)) == False - and isinstance(big_npy, type(None)) == False - and index_rate != 0 - ): - npy = feats[0].cpu().numpy() - if self.is_half: - npy = npy.astype("float32") - _, I = index.search(npy, 1) - npy = big_npy[I.squeeze()] - if self.is_half: - npy = npy.astype("float16") - feats = ( - torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate - + (1 - index_rate) * feats - ) - - feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1) - t1 = ttime() - p_len = audio0.shape[0] // self.window - if feats.shape[1] < p_len: - p_len = feats.shape[1] - if pitch != None and pitchf != None: - pitch = pitch[:, :p_len] - pitchf = pitchf[:, :p_len] - p_len = torch.tensor([p_len], device=self.device).long() - with torch.no_grad(): - if pitch != None and pitchf != None: - audio1 = ( - (net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - else: - audio1 = ( - (net_g.infer(feats, p_len, sid)[0][0, 0] * 32768) - .data.cpu() - .float() - .numpy() - .astype(np.int16) - ) - del feats, p_len, padding_mask - if torch.cuda.is_available(): - torch.cuda.empty_cache() - t2 = ttime() - times[0] += t1 - t0 - times[2] += t2 - t1 - return audio1 - - def pipeline( - self, - model, - net_g, - sid, - audio, - times, - f0_up_key, - f0_method, - file_index, - file_big_npy, - index_rate, - if_f0, - f0_file=None, - ): - if ( - file_big_npy != "" - and file_index != "" - and os.path.exists(file_big_npy) == True - and os.path.exists(file_index) == True - and index_rate != 0 - ): - try: - index = faiss.read_index(file_index) - big_npy = np.load(file_big_npy) - except: - traceback.print_exc() - index = big_npy = None - else: - index = big_npy = None - print("Feature retrieval library doesn't exist or ratio is 0") - audio = signal.filtfilt(bh, ah, audio) - audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode="reflect") - opt_ts = [] - if audio_pad.shape[0] > self.t_max: - audio_sum = 
np.zeros_like(audio) - for i in range(self.window): - audio_sum += audio_pad[i : i - self.window] - for t in range(self.t_center, audio.shape[0], self.t_center): - opt_ts.append( - t - - self.t_query - + np.where( - np.abs(audio_sum[t - self.t_query : t + self.t_query]) - == np.abs(audio_sum[t - self.t_query : t + self.t_query]).min() - )[0][0] - ) - s = 0 - audio_opt = [] - t = None - t1 = ttime() - audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode="reflect") - p_len = audio_pad.shape[0] // self.window - inp_f0 = None - if hasattr(f0_file, "name") == True: - try: - with open(f0_file.name, "r") as f: - lines = f.read().strip("\n").split("\n") - inp_f0 = [] - for line in lines: - inp_f0.append([float(i) for i in line.split(",")]) - inp_f0 = np.array(inp_f0, dtype="float32") - except: - traceback.print_exc() - sid = torch.tensor(sid, device=self.device).unsqueeze(0).long() - pitch, pitchf = None, None - if if_f0 == 1: - pitch, pitchf = self.get_f0(audio_pad, p_len, f0_up_key, f0_method, inp_f0) - pitch = pitch[:p_len] - pitchf = pitchf[:p_len] - pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long() - pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float() - t2 = ttime() - times[1] += t2 - t1 - for t in opt_ts: - t = t // self.window * self.window - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - pitch[:, s // self.window : (t + self.t_pad2) // self.window], - pitchf[:, s // self.window : (t + self.t_pad2) // self.window], - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[s : t + self.t_pad2 + self.window], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - s = t - if if_f0 == 1: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - pitch[:, t // self.window :] if t is not None else pitch, - pitchf[:, t // self.window :] if t is not None else pitchf, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - else: - audio_opt.append( - self.vc( - model, - net_g, - sid, - audio_pad[t:], - None, - None, - times, - index, - big_npy, - index_rate, - )[self.t_pad_tgt : -self.t_pad_tgt] - ) - audio_opt = np.concatenate(audio_opt) - del pitch, pitchf, sid - if torch.cuda.is_available(): - torch.cuda.empty_cache() - return audio_opt diff --git a/spaces/talhaty/Faceswapper/roop/core.py b/spaces/talhaty/Faceswapper/roop/core.py deleted file mode 100644 index aeb4c2a370942266f46c60938f8bc425460519f6..0000000000000000000000000000000000000000 --- a/spaces/talhaty/Faceswapper/roop/core.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys -# os.environ["CUDA_VISIBLE_DEVICES"] = "" -# single thread doubles cuda performance - needs to be set before torch import -if any(arg.startswith('--execution-provider') for arg in sys.argv): - os.environ['OMP_NUM_THREADS'] = '1' -# reduce tensorflow log level -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -import warnings -from typing import List -import platform -import signal -import shutil -import argparse -import torch -import onnxruntime -import tensorflow - -import roop.globals -import roop.metadata -import roop.ui as ui -from roop.predicter import predict_image, predict_video -from roop.processors.frame.core import get_frame_processors_modules -from roop.utilities import has_image_extension, is_image, is_video, detect_fps, 
create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path - -if 'ROCMExecutionProvider' in roop.globals.execution_providers: - del torch - -warnings.filterwarnings('ignore', category=FutureWarning, module='insightface') -warnings.filterwarnings('ignore', category=UserWarning, module='torchvision') - - -def parse_args() -> None: - signal.signal(signal.SIGINT, lambda signal_number, frame: destroy()) - program = argparse.ArgumentParser(formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=100)) - program.add_argument('-s', '--source', help='select an source image', dest='source_path') - program.add_argument('-t', '--target', help='select an target image or video', dest='target_path') - program.add_argument('-o', '--output', help='select output file or directory', dest='output_path') - program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+') - program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=True) - program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True) - program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False) - program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False) - program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx265', choices=['libx264', 'libx265', 'libvpx-vp9']) - program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=3, choices=range(52), metavar='[0-51]') - program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory()) - program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+') - program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads()) - program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}') - - args = program.parse_args() - - roop.globals.source_path = args.source_path - roop.globals.target_path = args.target_path - roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path) - roop.globals.frame_processors = args.frame_processor - roop.globals.headless = args.source_path or args.target_path or args.output_path - roop.globals.keep_fps = args.keep_fps - roop.globals.keep_audio = args.keep_audio - roop.globals.keep_frames = args.keep_frames - roop.globals.many_faces = args.many_faces - roop.globals.video_encoder = args.video_encoder - roop.globals.video_quality = args.video_quality - roop.globals.max_memory = args.max_memory - roop.globals.execution_providers = decode_execution_providers(args.execution_provider) - roop.globals.execution_threads = args.execution_threads - - -def encode_execution_providers(execution_providers: List[str]) -> List[str]: - return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers] - - -def 
decode_execution_providers(execution_providers: List[str]) -> List[str]: - return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers())) - if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)] - - -def suggest_max_memory() -> int: - if platform.system().lower() == 'darwin': - return 4 - return 16 - - -def suggest_execution_providers() -> List[str]: - return encode_execution_providers(onnxruntime.get_available_providers()) - - -def suggest_execution_threads() -> int: - if 'DmlExecutionProvider' in roop.globals.execution_providers: - return 1 - if 'ROCMExecutionProvider' in roop.globals.execution_providers: - return 1 - return 8 - - -def limit_resources() -> None: - # prevent tensorflow memory leak - gpus = tensorflow.config.experimental.list_physical_devices('GPU') - for gpu in gpus: - tensorflow.config.experimental.set_virtual_device_configuration(gpu, [ - tensorflow.config.experimental.VirtualDeviceConfiguration(memory_limit=1024) - ]) - # limit memory usage - if roop.globals.max_memory: - memory = roop.globals.max_memory * 1024 ** 3 - if platform.system().lower() == 'darwin': - memory = roop.globals.max_memory * 1024 ** 6 - if platform.system().lower() == 'windows': - import ctypes - kernel32 = ctypes.windll.kernel32 - kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory)) - else: - import resource - resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) - - -def release_resources() -> None: - if 'CUDAExecutionProvider' in roop.globals.execution_providers: - torch.cuda.empty_cache() - - -def pre_check() -> bool: - if sys.version_info < (3, 9): - update_status('Python version is not supported - please upgrade to 3.9 or higher.') - return False - if not shutil.which('ffmpeg'): - update_status('ffmpeg is not installed.') - return False - return True - - -def update_status(message: str, scope: str = 'ROOP.CORE') -> None: - print(f'[{scope}] {message}') - if not roop.globals.headless: - ui.update_status(message) - - -def start() -> None: - for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): - if not frame_processor.pre_start(): - return - # process image to image - if has_image_extension(roop.globals.target_path): - if predict_image(roop.globals.target_path): - destroy() - shutil.copy2(roop.globals.target_path, roop.globals.output_path) - for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): - update_status('Progressing...', frame_processor.NAME) - frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path) - frame_processor.post_process() - release_resources() - if is_image(roop.globals.target_path): - update_status('Processing to image succeed!') - else: - update_status('Processing to image failed!') - return - # process image to videos - if predict_video(roop.globals.target_path): - destroy() - update_status('Creating temp resources...') - create_temp(roop.globals.target_path) - update_status('Extracting frames...') - extract_frames(roop.globals.target_path) - temp_frame_paths = get_temp_frame_paths(roop.globals.target_path) - for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): - update_status('Progressing...', frame_processor.NAME) - frame_processor.process_video(roop.globals.source_path, temp_frame_paths) - frame_processor.post_process() - 
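
As an aside on the `encode_execution_providers` / `decode_execution_providers` pair defined above: they round-trip between ONNX Runtime provider names and the short aliases accepted by `--execution-provider`. A small self-contained illustration of the same logic, using an assumed hard-coded provider list instead of calling `onnxruntime.get_available_providers()`:

```python
from typing import List


def encode(providers: List[str]) -> List[str]:
    """'CUDAExecutionProvider' -> 'cuda', 'CPUExecutionProvider' -> 'cpu', ..."""
    return [p.replace('ExecutionProvider', '').lower() for p in providers]


def decode(requested: List[str], available: List[str]) -> List[str]:
    """Match short aliases back to the full provider names they came from."""
    return [full for full, short in zip(available, encode(available))
            if any(req in short for req in requested)]


available = ['CUDAExecutionProvider', 'CPUExecutionProvider']  # assumed example list
print(encode(available))            # ['cuda', 'cpu']
print(decode(['cuda'], available))  # ['CUDAExecutionProvider']
```
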
release_resources() - # handles fps - if roop.globals.keep_fps: - update_status('Detecting fps...') - fps = detect_fps(roop.globals.target_path) - update_status(f'Creating video with {fps} fps...') - create_video(roop.globals.target_path, fps) - else: - update_status('Creating video with 30.0 fps...') - create_video(roop.globals.target_path) - # handle audio - if roop.globals.keep_audio: - if roop.globals.keep_fps: - update_status('Restoring audio...') - else: - update_status('Restoring audio might cause issues as fps are not kept...') - restore_audio(roop.globals.target_path, roop.globals.output_path) - else: - move_temp(roop.globals.target_path, roop.globals.output_path) - # clean and validate - clean_temp(roop.globals.target_path) - if is_video(roop.globals.target_path): - update_status('Processing to video succeed!') - else: - update_status('Processing to video failed!') - - -def destroy() -> None: - if roop.globals.target_path: - clean_temp(roop.globals.target_path) - quit() - - -def run() -> None: - parse_args() - if not pre_check(): - return - for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): - if not frame_processor.pre_check(): - return - limit_resources() - if roop.globals.headless: - start() - else: - window = ui.init(start, destroy) - window.mainloop() diff --git a/spaces/tareknaous/Empathetic-DialoGPT/app.py b/spaces/tareknaous/Empathetic-DialoGPT/app.py deleted file mode 100644 index be62fffe1d4de30e17554b7bc6000a183bb7d786..0000000000000000000000000000000000000000 --- a/spaces/tareknaous/Empathetic-DialoGPT/app.py +++ /dev/null @@ -1,61 +0,0 @@ -import transformers -import gradio as gr -import torch - -from transformers import GPT2LMHeadModel, GPT2Tokenizer -tokenizer = GPT2Tokenizer.from_pretrained("tareknaous/dialogpt-empathetic-dialogues") -model = GPT2LMHeadModel.from_pretrained("tareknaous/dialogpt-empathetic-dialogues") -model.eval() - -def chat(message, history): - history = history or [] - new_user_input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors='pt') - - if len(history) > 0 and len(history) < 2: - for i in range(0,len(history)): - encoded_message = tokenizer.encode(history[i][0] + tokenizer.eos_token, return_tensors='pt') - encoded_response = tokenizer.encode(history[i][1] + tokenizer.eos_token, return_tensors='pt') - if i == 0: - chat_history_ids = encoded_message - chat_history_ids = torch.cat([chat_history_ids,encoded_response], dim=-1) - else: - chat_history_ids = torch.cat([chat_history_ids,encoded_message], dim=-1) - chat_history_ids = torch.cat([chat_history_ids,encoded_response], dim=-1) - - bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) - - elif len(history) >= 2: - for i in range(len(history)-1, len(history)): - encoded_message = tokenizer.encode(history[i][0] + tokenizer.eos_token, return_tensors='pt') - encoded_response = tokenizer.encode(history[i][1] + tokenizer.eos_token, return_tensors='pt') - if i == (len(history)-1): - chat_history_ids = encoded_message - chat_history_ids = torch.cat([chat_history_ids,encoded_response], dim=-1) - else: - chat_history_ids = torch.cat([chat_history_ids,encoded_message], dim=-1) - chat_history_ids = torch.cat([chat_history_ids,encoded_response], dim=-1) - - bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) - - elif len(history) == 0: - bot_input_ids = new_user_input_ids - - chat_history_ids = model.generate(bot_input_ids, max_length=1000, do_sample=True, top_p=0.9, temperature=0.8, 
pad_token_id=tokenizer.eos_token_id) - response = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True) - - history.append((message, response)) - - return history, history - -title = "DialoGPT fine-tuned on Empathetic Dialogues" -description = "Gradio demo for open-domain empathetic dialog using DialoGPT. Model was fine-tuned on the Empathetic Dialogues multi-turn dataset." -iface = gr.Interface( - chat, - ["text", "state"], - ["chatbot", "state"], - allow_screenshot=False, - allow_flagging="never", - title=title, - description=description -) -iface.launch(debug=True) \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/AIRAC Cycle 1108 (complete) [FSX FS9 X-Plane] DRM Free !LINK!.md b/spaces/terfces0erbo/CollegeProjectV2/AIRAC Cycle 1108 (complete) [FSX FS9 X-Plane] DRM Free !LINK!.md deleted file mode 100644 index 7f6fb01d72476923f39ff729073124d8b95dc165..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/AIRAC Cycle 1108 (complete) [FSX FS9 X-Plane] DRM Free !LINK!.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>AIRAC Cycle 1108 (complete) [FSX, FS9, X-Plane] DRM Free</h2><br /><p><b><b>Download File</b> ✪ <a href="https://bytlly.com/2uGm6u">https://bytlly.com/2uGm6u</a></b></p><br /><br /> - -Evil.Remastered.Update.v1.5.9-PLAZA DRM Free ... Life.Is.Strange.Episode.2-CODEX crack free ... AIRAC cycle 1108 (complete) [FSX, FS9, X-Plane] corepack. 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/terfces0erbo/CollegeProjectV2/Ashoka The Hero Download FREE Movie 1080p Torrent.md b/spaces/terfces0erbo/CollegeProjectV2/Ashoka The Hero Download FREE Movie 1080p Torrent.md deleted file mode 100644 index 064945d847bca72e76690cf8343220b7bffa2dc9..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Ashoka The Hero Download FREE Movie 1080p Torrent.md +++ /dev/null @@ -1,116 +0,0 @@ -<br /> -<h1>Ashoka The Hero Download Movie 1080p Torrent: How to Watch the Historical Drama Online</h1> - -<p>If you are a fan of historical drama movies, you might be interested in watching Ashoka The Hero, a movie based on the life and achievements of Ashoka, one of the greatest emperors of India. Ashoka The Hero is a movie that showcases the transformation of Ashoka from a ruthless warrior to a compassionate ruler who embraced Buddhism and spread peace and harmony in his vast empire.</p> -<h2>Ashoka The Hero download movie 1080p torrent</h2><br /><p><b><b>Download Zip</b> ->->->-> <a href="https://bytlly.com/2uGiXB">https://bytlly.com/2uGiXB</a></b></p><br /><br /> - -<p>But how can you watch Ashoka The Hero online? One of the ways is to download the movie in 1080p HD quality via torrent. In this article, we will tell you how to do that and what are the benefits and risks of using torrent to download movies.</p> - -<h2>What is Torrent and How Does It Work?</h2> - -<p>Torrent is a technology that allows you to download files from other users who have the same file on their computers. Torrent works by using a peer-to-peer (P2P) network, where users share files with each other without relying on a central server. This makes torrent faster and more efficient than traditional downloading methods.</p> - -<p>To use torrent, you need two things: a torrent client and a torrent file. A torrent client is a software that enables you to connect to the P2P network and download files from other users. 
A torrent file is a small file that contains information about the file you want to download, such as its name, size, location and checksum.</p> - -<p>To download a file via torrent, you need to follow these steps:</p> -<p></p> - -<ol> -<li><b>Find a reliable torrent site:</b> The first step is to find a reliable torrent site that offers the file you want to download. You can use a search engine like Google or Bing to search for "Ashoka The Hero download movie 1080p torrent" and see the results. You should look for sites that have good reviews, ratings and feedback from other users. You should also avoid sites that have pop-ups, ads or suspicious links.</li> -<li><b>Download the torrent file:</b> The second step is to download the torrent file from the site. You should click on the link that says "Ashoka The Hero download movie 1080p torrent" or something similar and save the file on your computer. You should also scan the file with your antivirus software before opening it.</li> -<li><b>Open the torrent file with your torrent client:</b> The third step is to open the torrent file with your torrent client. You should double-click on the file or drag and drop it into your torrent client. Your torrent client will then start downloading the file from other users who have it on their computers.</li> -<li><b>Enjoy the movie:</b> The fourth and final step is to enjoy the movie once it is downloaded. You can watch it on your computer or transfer it to your mobile device or TV.</li> -</ol> - -<h2>What are the Benefits and Risks of Using Torrent to Download Movies?</h2> - -<p>Using torrent to download movies has some benefits and risks that you should be aware of before doing it. Here are some of them:</p> - -<ul> -<li><b>Benefits:</b> -<ul> -<li><b>It is fast and efficient:</b> Torrent is fast and efficient because it uses P2P technology that allows you to download files from multiple sources at once. This reduces the load on each source and increases the speed of downloading.</li> -<li><b>It is free and easy:</b> Torrent is free and easy because it does not require any subscription or registration to use it. You just need a torrent client and a torrent file to start downloading files.</li> -<li><b>It is diverse and abundant:</b> Torrent is diverse and abundant because it offers a wide range of files from different genres, languages, countries and qualities. You can find almost any file you want on torrent sites.</li> -</ul> -</li> -<li><b>Risks:</b> -<ul> -<li><b>It may be illegal or unethical:</b> Torrent may be illegal or unethical because it may involve downloading copyrighted or pirated files without permission or payment. This may violate the law or the rights of the creators and owners of the files.</li> -<li><b>It may be unsafe or harmful:</b> Torrent may be unsafe or harmful because it may expose you to viruses, malware or other harmful programs that can damage your computer or steal your personal information. You should always scan your files with your antivirus software before opening them.</li> -<li><b>It may be unreliable or low-quality:</b> Torrent may be unreliable or low-quality because it depends on the availability and quality of other users who have the files on their computers. You may not find the file you want or you may get a corrupted or incomplete file.</li> -</ul> -</li> -</ul> - -<h2>Conclusion</h2> - -<p>Ashoka The Hero is a historical drama movie that tells the story of Ashoka, one of the greatest emperors of India. 
You can watch Ashoka The Hero online by downloading it in 1080p HD quality via torrent.</p> - -<p>To do that, you need to find a reliable torrent site that offers Ashoka The Hero download movie 1080p torrent, download the torrent file from the site, open it with your torrent client and enjoy the movie once it is downloaded.</p> - -<p>You should also be aware of the benefits and risks of using torrent to download movies, such as its speed, efficiency, diversity, legality, safety, reliability and quality.</p> - -<p>Ashoka The Hero download movie 1080p torrent is one of the ways to watch this historical drama movie online.</p> -<h1>Ashoka The Hero Download Movie 1080p Torrent: How to Watch the Historical Drama Online</h1> - -<p>If you are a fan of historical drama movies, you might be interested in watching Ashoka The Hero, a movie based on the life and achievements of Ashoka, one of the greatest emperors of India. Ashoka The Hero is a movie that showcases the transformation of Ashoka from a ruthless warrior to a compassionate ruler who embraced Buddhism and spread peace and harmony in his vast empire.</p> - -<p>But how can you watch Ashoka The Hero online? One of the ways is to download the movie in 1080p HD quality via torrent. In this article, we will tell you how to do that and what are the benefits and risks of using torrent to download movies.</p> - -<h2>What is Torrent and How Does It Work?</h2> - -<p>Torrent is a technology that allows you to download files from other users who have the same file on their computers. Torrent works by using a peer-to-peer (P2P) network, where users share files with each other without relying on a central server. This makes torrent faster and more efficient than traditional downloading methods.</p> - -<p>To use torrent, you need two things: a torrent client and a torrent file. A torrent client is a software that enables you to connect to the P2P network and download files from other users. A torrent file is a small file that contains information about the file you want to download, such as its name, size, location and checksum.</p> - -<p>To download a file via torrent, you need to follow these steps:</p> - -<ol> -<li><b>Find a reliable torrent site:</b> The first step is to find a reliable torrent site that offers the file you want to download. You can use a search engine like Google or Bing to search for "Ashoka The Hero download movie 1080p torrent" and see the results. You should look for sites that have good reviews, ratings and feedback from other users. You should also avoid sites that have pop-ups, ads or suspicious links.</li> -<li><b>Download the torrent file:</b> The second step is to download the torrent file from the site. You should click on the link that says "Ashoka The Hero download movie 1080p torrent" or something similar and save the file on your computer. You should also scan the file with your antivirus software before opening it.</li> -<li><b>Open the torrent file with your torrent client:</b> The third step is to open the torrent file with your torrent client. You should double-click on the file or drag and drop it into your torrent client. Your torrent client will then start downloading the file from other users who have it on their computers.</li> -<li><b>Enjoy the movie:</b> The fourth and final step is to enjoy the movie once it is downloaded. 
You can watch it on your computer or transfer it to your mobile device or TV.</li> -</ol> - -<h2>What are the Benefits and Risks of Using Torrent to Download Movies?</h2> - -<p>Using torrent to download movies has some benefits and risks that you should be aware of before doing it. Here are some of them:</p> - -<ul> -<li><b>Benefits:</b> -<ul> -<li><b>It is fast and efficient:</b> Torrent is fast and efficient because it uses P2P technology that allows you to download files from multiple sources at once. This reduces the load on each source and increases the speed of downloading.</li> -<li><b>It is free and easy:</b> Torrent is free and easy because it does not require any subscription or registration to use it. You just need a torrent client and a torrent file to start downloading files.</li> -<li><b>It is diverse and abundant:</b> Torrent is diverse and abundant because it offers a wide range of files from different genres, languages, countries and qualities. You can find almost any file you want on torrent sites.</li> -</ul> -</li> -<li><b>Risks:</b> -<ul> -<li><b>It may be illegal or unethical:</b> Torrent may be illegal or unethical because it may involve downloading copyrighted or pirated files without permission or payment. This may violate the law or the rights of the creators and owners of the files.</li> -<li><b>It may be unsafe or harmful:</b> Torrent may be unsafe or harmful because it may expose you to viruses, malware or other harmful programs that can damage your computer or steal your personal information. You should always scan your files with your antivirus software before opening them.</li> -<li><b>It may be unreliable or low-quality:</b> Torrent may be unreliable or low-quality because it depends on the availability and quality of other users who have the files on their computers. You may not find the file you want or you may get a corrupted or incomplete file.</li> -</ul> -</li> -</ul> - -<h2>Conclusion</h2> - -<p>Ashoka The Hero is a historical drama movie that tells the story of Ashoka, one of the greatest emperors of India. You can watch Ashoka The Hero online by downloading it in 1080p HD quality via torrent.</p> - -<p>To do that, you need to find a reliable torrent site that offers Ashoka The Hero download movie 1080p torrent, download the torrent file from the site, open it with your torrent client and enjoy the movie once it is downloaded.</p> - -<p>You should also be aware of the benefits and risks of using torrent to download movies, such as its speed, efficiency, diversity, legality, safety, reliability and quality.</p> - -<p>Ashoka The Hero download movie 1080p torrent is one of the ways to watch this historical drama movie online.</p> -<p>Conclusion</p> - -<p>Ashoka The Hero is a historical drama movie that tells the story of Ashoka, one of the greatest emperors of India. 
You can watch Ashoka The Hero online by downloading it in 1080p HD quality via torrent.</p> - -<p>To do that, you need to find a reliable torrent site that offers Ashoka The Hero download movie 1080p torrent, download the torrent file from the site, open it with your torrent client and enjoy the movie once it is downloaded.</p> - -<p>You should also be aware of the benefits and risks of using torrent to download movies, such as its speed, efficiency, diversity, legality, safety, reliability and quality.</p> - -<p>Ashoka The Hero download movie 1080p torrent is one of the ways to watch this historical drama movie online.</p> 3cee63e6c2<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Devdas Movie 1080p Downloadgolkes) [2021].md b/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Devdas Movie 1080p Downloadgolkes) [2021].md deleted file mode 100644 index 10a4944a389c5793d202e627ef6dc870e5c37887..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/HD Online Player (Devdas Movie 1080p Downloadgolkes) [2021].md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>HD Online Player (Devdas movie 1080p downloadgolkes)</h2><br /><p><b><b>Download</b> ☆☆☆ <a href="https://bytlly.com/2uGiZG">https://bytlly.com/2uGiZG</a></b></p><br /><br /> -<br /> -Gangs of Wasseypur ( 2) . Gangs of Wasseypur 1, Gangs of Wasseypur 2 and Gangs of Wasseypur 3 were Hindi-language Indian cinema gangster films, the sequel to. Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 (2013) Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 (2013) Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 (2013) Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs of Wasseypur 2 4 in hindi full movie mp4 golkes. 
Gangs of Wasseypur 2 Hindi movie Hindi language Gangs of Wasseypur 2 Gangs 4fefd39f24<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/test12356/SUI-svc-3.0/README.md b/spaces/test12356/SUI-svc-3.0/README.md deleted file mode 100644 index d62c499d060045e0aaf42353bcf2f64d43c614e5..0000000000000000000000000000000000000000 --- a/spaces/test12356/SUI-svc-3.0/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: AI岁己(本音) -emoji: 🕊 -colorFrom: red -colorTo: pink -sdk: gradio -sdk_version: 3.12.0 -app_file: app.py -pinned: false -duplicated_from: Miuzarte/SUI-svc-3.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HyperMill 2012 X64 Crack The Benefits of Using the Latest Version of the Software.md b/spaces/tialenAdioni/chat-gpt-api/logs/HyperMill 2012 X64 Crack The Benefits of Using the Latest Version of the Software.md deleted file mode 100644 index b8293757ee8d9f6d781fdc820b24f6a0abf62bd7..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/HyperMill 2012 X64 Crack The Benefits of Using the Latest Version of the Software.md +++ /dev/null @@ -1,121 +0,0 @@ - -<h1>HyperMill 2012 X64 Crack: How to Download and Install It</h1> - <p>If you are looking for a powerful and versatile software for CAD/CAM applications, you might have heard of <strong>HyperMill 2012 X64</strong>. This software is designed by OPEN MIND Technologies AG, a leading developer of CAM solutions for machine and controller-independent programming. HyperMill 2012 X64 offers a range of features and functions that can help you create high-quality and efficient products in various industries. However, this software is not cheap, and you might need a crack to use it without any limitations. In this article, we will tell you what HyperMill 2012 X64 is, why you need a crack for it, and how to download and install it on your PC.</p> - <h2>What is HyperMill 2012 X64?</h2> - <h3>A brief introduction to HyperMill 2012 X64 and its features</h3> - <p>HyperMill 2012 X64 is a software that allows you to perform <strong>computer-aided design (CAD)</strong> and <strong>computer-aided manufacturing (CAM)</strong> tasks with ease and precision. It is compatible with various CAD systems, such as SolidWorks, Inventor, hyperCAD-S, and others. It also supports various machining strategies, such as 2D, 3D, 5-axis, mill-turn, high-speed cutting, and more. Some of the features and functions that HyperMill 2012 X64 offers are:</p> -<h2>HyperMill 2012 X64 Crack</h2><br /><p><b><b>Download</b> ····· <a href="https://urlcod.com/2uKaox">https://urlcod.com/2uKaox</a></b></p><br /><br /> - <ul> -<li><strong>hyperMILL MAXX Machining</strong>: This is a performance package that enables you to optimize your machining processes with innovative technologies, such as trochoidal milling, tangential barrel cutter machining, high-performance drilling, etc.</li> -<li><strong>hyperMILL AUTOMATION Center</strong>: This is a tool that allows you to automate your programming tasks with rule-based workflows and templates. You can also integrate your own macros and scripts into the automation process.</li> -<li><strong>hyperMILL SHOP Viewer</strong>: This is a tool that allows you to visualize your machining data in a realistic 3D environment. 
You can also simulate your machining operations and check for collisions, errors, or interferences.</li> -<li><strong>hyperMILL Virtual Machining Center</strong>: This is a tool that allows you to connect your CAM software with your CNC machine in real time. You can also monitor and control your machining process remotely.</li> -</ul> - <h3>The benefits of using HyperMill 2012 X64 for CAD/CAM applications</h3> - <p>By using HyperMill 2012 X64 for your CAD/CAM applications, you can enjoy several benefits, such as:</p> - <ul> -<li><strong>Increased productivity</strong>: You can reduce your machining time and costs by using efficient and optimized strategies. You can also automate your programming tasks and avoid manual errors.</li> -<li><strong>Improved quality</strong>: You can achieve high-quality results by using accurate and reliable tools. You can also ensure your compliance with industry standards and customer requirements.</li> -<li><strong>Enhanced flexibility</strong>: You can adapt to changing market demands and customer needs by using versatile and customizable solutions. You can also work with different CAD systems and CNC machines.</li> -<li><strong>Greater innovation</strong>: You can create complex and innovative products by using advanced and cutting-edge technologies. You can also explore new possibilities and opportunities for your business.</li> -</ul> - <h2>Why do you need a crack for HyperMill 2012 X64?</h2> - <h3>The drawbacks of using the official version of HyperMill 2012 X64</h3> - <p>While HyperMill 2012 X64 is a great software for CAD/CAM applications, it also has some drawbacks that might make you want to use a crack instead. Some of these drawbacks are:</p> - <ul> -<li><strong>High cost</strong>: The official version of HyperMill 2012 X64 is not cheap. According to some sources , it can cost up to $30,000 per year for a single license. This might be too expensive for some users or businesses.</li> -<li><strong>Limited access</strong>: The official version of HyperMill 2012 X64 requires you to have a valid license key and an internet connection to activate and use it. This might limit your access to the software if you lose your key or have no internet connection.</li> -<li><strong>Strict restrictions</strong>: The official version of HyperMill 2012 X64 imposes some restrictions on how you can use the software. For example, you cannot share or distribute your license key with others, you cannot modify or reverse engineer the software, you cannot use it for illegal or unethical purposes, etc.</li> -</ul> - <h3>The advantages of using a cracked version of HyperMill 2012 X64</h3> - <p>If you use a cracked version of HyperMill 2012 X64 instead of the official version, you can overcome some of the drawbacks mentioned above. Some of the advantages of using a cracked version are:</p> - <ul> -<li><strong>Low cost</strong>: A cracked version of HyperMill 2012 X64 is free or very cheap to download from various sources on the internet. You do not have to pay any fees or subscriptions to use it.</li> -<li><strong>Unlimited access</strong>: A cracked version of HyperMill 2012 X64 does not require any license key or internet connection to activate and use it. You can access it anytime and anywhere without any hassle.</li> -<li><strong>Fewer restrictions</strong>: A cracked version of HyperMill 2012 X64 does not impose any restrictions on how you can use the software. 
You can share or distribute it with others, you can modify or reverse engineer it, you can use it for any purpose you want, etc.</li> -</ul> - <h2>How to download and install HyperMill 2012 X64 crack?</h2> - <h3>The steps to download HyperMill 2012 X64 crack from a reliable source</h3> - <p>If you want to download and install HyperMill 2012 X64 crack on your PC, you need to follow these steps:</p> - <ol> -<h3>The steps to install HyperMill 2012 X64 crack on your PC</h3> - <p>After you have downloaded HyperMill 2012 X64 crack from a reliable source, you need to follow these steps to install it on your PC:</p> - <ol> -<li><strong>Extract the files</strong>: You need to extract the files from the zip or rar archive that you have downloaded. You can use any software that can handle these formats, such as WinRAR, 7-Zip, or PeaZip. You should get a folder that contains the setup file and the crack file.</li> -<li><strong>Run the setup file</strong>: You need to run the setup file as an administrator. You can do this by right-clicking on the file and choosing "Run as administrator". You should follow the instructions on the screen to install HyperMill 2012 X64 on your PC. You should choose a destination folder that is different from the default one.</li> -<li><strong>Copy and paste the crack file</strong>: You need to copy and paste the crack file into the installation folder of HyperMill 2012 X64. You can do this by right-clicking on the crack file and choosing "Copy", then going to the installation folder and choosing "Paste". You should replace the original file with the crack file.</li> -<li><strong>Launch the software</strong>: You need to launch HyperMill 2012 X64 as an administrator. You can do this by right-clicking on the shortcut icon and choosing "Run as administrator". You should be able to use HyperMill 2012 X64 without any limitations.</li> -</ol> - <h3>The precautions to take before and after installing HyperMill 2012 X64 crack</h3> - <p>Before and after installing HyperMill 2012 X64 crack on your PC, you need to take some precautions to avoid any problems or risks. 
Some of these precautions are:</p> -<p>HyperMill 2012 X64 Crack download free<br /> -HyperMill 2012 X64 Crack full version<br /> -HyperMill 2012 X64 Crack serial key<br /> -HyperMill 2012 X64 Crack activation code<br /> -HyperMill 2012 X64 Crack torrent link<br /> -HyperMill 2012 X64 Crack patch file<br /> -HyperMill 2012 X64 Crack license key<br /> -HyperMill 2012 X64 Crack keygen generator<br /> -HyperMill 2012 X64 Crack software review<br /> -HyperMill 2012 X64 Crack installation guide<br /> -HyperMill 2012 X64 Crack system requirements<br /> -HyperMill 2012 X64 Crack features and benefits<br /> -HyperMill 2012 X64 Crack tutorial video<br /> -HyperMill 2012 X64 Crack user manual<br /> -HyperMill 2012 X64 Crack customer support<br /> -HyperMill 2012 X64 Crack online forum<br /> -HyperMill 2012 X64 Crack feedback and testimonials<br /> -HyperMill 2012 X64 Crack comparison with other software<br /> -HyperMill 2012 X64 Crack alternatives and competitors<br /> -HyperMill 2012 X64 Crack discount and coupon code<br /> -HyperMill 2012 X64 Crack trial version and demo<br /> -HyperMill 2012 X64 Crack upgrade and update<br /> -HyperMill 2012 X64 Crack tips and tricks<br /> -HyperMill 2012 X64 Crack best practices and examples<br /> -HyperMill 2012 X64 Crack pros and cons<br /> -HyperMill 2012 X64 Crack FAQs and solutions<br /> -HyperMill 2012 X64 Crack error messages and fixes<br /> -HyperMill 2012 X64 Crack compatibility and integration<br /> -HyperMill 2012 X64 Crack customization and configuration<br /> -HyperMill 2012 X64 Crack security and privacy<br /> -HyperMill 2012 X64 Crack performance and speed<br /> -HyperMill 2012 X64 Crack quality and reliability<br /> -HyperMill 2012 X64 Crack design and interface<br /> -HyperMill 2012 X64 Crack functionality and usability<br /> -HyperMill 2012 X64 Crack development and innovation<br /> -HyperMill 2012 X64 Crack awards and recognition<br /> -HyperMill 2012 X64 Crack history and background<br /> -HyperMill 2012 X64 Crack future and roadmap<br /> -HyperMill 2012 X64 Crack team and company<br /> -HyperMill 2012 X64 Crack mission and vision<br /> -How to get HyperMill 2012 X64 Crack for free<br /> -How to use HyperMill 2012 X64 Crack effectively<br /> -How to uninstall HyperMill 2012 X64 Crack completely<br /> -How to solve problems with HyperMill 2012 X64 Crack easily<br /> -How to learn more about HyperMill 2012 X64 Crack quickly<br /> -How to contact the developers of HyperMill 2012 X64 Crack directly<br /> -How to share your experience with HyperMill 2012 X64 Crack publicly<br /> -How to recommend HyperMill 2012 X64 Crack to others confidently</p> - <ul> -<li><strong>Disable your antivirus software</strong>: You need to disable your antivirus software before downloading and installing HyperMill 2012 X64 crack. This is because some antivirus software might detect the crack file as a threat and delete it or block it. You can enable your antivirus software again after you have installed HyperMill 2012 X64 crack.</li> -<li><strong>Create a backup of your data</strong>: You need to create a backup of your data before installing HyperMill 2012 X64 crack. This is because some cracked software might cause errors, crashes, or data loss on your PC. You can use any external storage device or cloud service to create a backup of your data.</li> -<li><strong>Use a VPN service</strong>: You need to use a VPN service after installing HyperMill 2012 X64 crack. 
This is because some cracked software might expose your IP address or personal information to hackers or authorities. A VPN service can hide your IP address and encrypt your online traffic, making you anonymous and secure on the internet.</li> -<li><strong>Do not update the software</strong>: You need to avoid updating HyperMill 2012 X64 after installing HyperMill 2012 X64 crack. This is because some updates might detect the crack file and disable it or remove it. You should also avoid clicking on any pop-ups or links that ask you to update HyperMill 2012 X64.</li> -</ul> - <h2>Conclusion</h2> - <h3>A summary of the main points of the article</h3> - <p>In conclusion, HyperMill 2012 X64 is a software package that allows you to perform CAD/CAM tasks with ease and precision. It offers a range of features and functions that can help you create high-quality and efficient products in various industries. However, it also has some drawbacks that might make you want to use a crack instead of the official version. A crack can help you overcome some of these drawbacks, such as high cost, limited access, and strict restrictions. However, you also need to take some precautions before and after installing HyperMill 2012 X64 crack, such as disabling your antivirus software, creating a backup of your data, using a VPN service, and not updating the software.</p> - <h3>A call to action for the readers</h3> - <p>If you are interested in using HyperMill 2012 X64 for your CAD/CAM applications, you can download and install HyperMill 2012 X64 crack from a reliable source by following the steps mentioned above. However, you should also be aware of the risks and consequences of using cracked software, such as legal issues, security threats, or ethical concerns. Therefore, we recommend that you use HyperMill 2012 X64 only for educational or personal purposes, and not for commercial or professional purposes. If you want to use HyperMill 2012 X64 for commercial or professional purposes, you should buy the official version from OPEN MIND Technologies AG or their authorized dealers.</p> - <h4>FAQs</h4> - <p>Here are some frequently asked questions about HyperMill 2012 X64 crack:</p> - <ol> -<li><strong>What is a crack?</strong>: A crack is a modified version of a software program that bypasses its security features or license verification mechanisms. A crack can help you use the software without any limitations or restrictions.</li> -<li><strong>Is using a crack illegal?</strong>: Using a crack is illegal in most countries and regions. It violates the intellectual property rights of the software developers and distributors. It also exposes you to legal actions or penalties if you are caught using a crack.</li> -<li><strong>Is using a crack safe?</strong>: Using a crack is not safe in most cases. It exposes you to security threats such as viruses, malware, spyware, or hackers. It also exposes you to ethical concerns such as plagiarism, cheating, or unfair competition.</li> -<li><strong>How can I avoid the risks and consequences of using HyperMill 2012 X64 crack?</strong>: You can avoid the risks and consequences of using HyperMill 2012 X64 crack by taking some precautions before and after installing it, such as disabling your antivirus software, creating a backup of your data, using a VPN service, and not updating the software. 
You can also use HyperMill 2012 X64 only for educational or personal purposes, and not for commercial or professional purposes.</li> -<li><strong>How can I buy the official version of HyperMill 2012 X64?</strong>: You can buy the official version of HyperMill 2012 X64 from OPEN MIND Technologies AG or their authorized dealers. You can visit their website at https://www.openmind-tech.com/en.html to find more information about their products, prices, and contacts.</li> -</ol> - </p> 0a6ba089eb<br /> -<br /> -<br /> \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py deleted file mode 100644 index 44dbfe6771750a89f25f3a465310fc08f1a6f767..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_vendor/pygments/lexers/_mapping.py +++ /dev/null @@ -1,596 +0,0 @@ -""" - pygments.lexers._mapping - ~~~~~~~~~~~~~~~~~~~~~~~~ - - Lexer mapping definitions. This file is generated by itself. Every time - you change something on a builtin lexer definition, run this script from - the lexers folder to update it. - - Do not alter the LEXERS dictionary by hand. - - :copyright: Copyright 2006-2014, 2016 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -LEXERS = { - 'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)), - 'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()), - 'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()), - 'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)), - 'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')), - 'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')), - 'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)), - 'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()), - 'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)), - 'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()), - 'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)), - 'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)), - 'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()), - 'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()), - 'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()), - 'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()), - 'AntlrCSharpLexer': 
('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()), - 'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()), - 'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()), - 'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()), - 'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()), - 'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()), - 'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()), - 'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()), - 'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)), - 'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()), - 'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)), - 'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()), - 'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')), - 'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), - 'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)), - 'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()), - 'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)), - 'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), - 'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), - 'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()), - 'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)), - 'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()), - 'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()), - 'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()), - 'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()), - 'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')), - 'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', 
'*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')), - 'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)), - 'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)), - 'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)), - 'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')), - 'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)), - 'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), - 'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), - 'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)), - 'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()), - 'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), - 'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()), - 'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), - 'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()), - 'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()), - 'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')), - 'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)), - 'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)), - 'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()), - 'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()), - 'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), - 'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)), - 'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()), - 'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()), - 'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()), - 'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()), - 'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()), - 'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)), - 'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)), - 'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()), - 'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 
'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')), - 'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()), - 'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()), - 'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')), - 'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')), - 'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')), - 'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')), - 'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)), - 'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)), - 'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()), - 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')), - 'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')), - 'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()), - 'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)), - 'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)), - 'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()), - 'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)), - 'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()), - 'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)), - 'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)), - 'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)), - 'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)), - 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')), - 'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', 
'*.cxx-objdump'), ('text/x-cpp-objdump',)), - 'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()), - 'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)), - 'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)), - 'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)), - 'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()), - 'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()), - 'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()), - 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')), - 'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)), - 'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)), - 'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)), - 'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)), - 'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)), - 'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)), - 'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()), - 'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')), - 'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)), - 'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), - 'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), - 'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), - 'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)), - 'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()), - 'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)), - 'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)), - 'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), - 'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')), - 'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')), - 'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', 
'*.docker'), ('text/x-dockerfile-config',)), - 'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)), - 'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')), - 'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)), - 'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)), - 'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)), - 'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)), - 'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')), - 'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)), - 'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)), - 'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)), - 'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)), - 'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)), - 'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)), - 'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)), - 'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)), - 'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')), - 'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)), - 'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)), - 'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)), - 'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)), - 'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)), - 'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)), - 'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)), - 'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()), - 'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)), - 'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)), - 'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)), - 'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)), - 
'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)), - 'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)), - 'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)), - 'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()), - 'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)), - 'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)), - 'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()), - 'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)), - 'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), - 'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), - 'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), - 'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)), - 'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)), - 'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()), - 'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')), - 'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), - 'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()), - 'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), - 'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()), - 'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')), - 'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')), - 'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')), - 'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)), - 'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)), - 'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)), - 'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()), - 'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)), - 'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)), - 'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', 
('gst',), ('*.gst',), ('text/x-gosu-template',)), - 'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')), - 'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')), - 'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)), - 'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)), - 'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()), - 'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)), - 'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')), - 'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()), - 'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)), - 'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')), - 'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()), - 'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)), - 'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()), - 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')), - 'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)), - 'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')), - 'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')), - 'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)), - 'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()), - 'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()), - 'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')), - 'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')), - 'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)), - 'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()), - 'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)), - 'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)), - 'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), 
('*.inf',), ()), - 'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()), - 'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()), - 'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')), - 'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)), - 'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)), - 'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)), - 'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)), - 'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)), - 'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)), - 'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()), - 'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()), - 'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)), - 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')), - 'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')), - 'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')), - 'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')), - 'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')), - 'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')), - 'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()), - 'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)), - 'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')), - 'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()), - 'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 
'json-ld'), ('*.jsonld',), ('application/ld+json',)), - 'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')), - 'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)), - 'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()), - 'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')), - 'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')), - 'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()), - 'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')), - 'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)), - 'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()), - 'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)), - 'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)), - 'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()), - 'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)), - 'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)), - 'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')), - 'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')), - 'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)), - 'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)), - 'LeanLexer': ('pip._vendor.pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)), - 'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)), - 'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)), - 'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()), - 'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)), - 'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()), - 'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)), - 'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)), - 'LiterateHaskellLexer': 
('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)), - 'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)), - 'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)), - 'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)), - 'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()), - 'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()), - 'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)), - 'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)), - 'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')), - 'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)), - 'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')), - 'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)), - 'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()), - 'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()), - 'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)), - 'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)), - 'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)), - 'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')), - 'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)), - 'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)), - 'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')), - 'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)), - 'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)), - 'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)), - 'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')), - 'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', 
('matlab',), ('*.m',), ('text/matlab',)), - 'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()), - 'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()), - 'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)), - 'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)), - 'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')), - 'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)), - 'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)), - 'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)), - 'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)), - 'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()), - 'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')), - 'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()), - 'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()), - 'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()), - 'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()), - 'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()), - 'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()), - 'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)), - 'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()), - 'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()), - 'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()), - 'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)), - 'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)), - 'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)), - 'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')), - 'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)), - 'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)), - 'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)), - 'NSISLexer': 
('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)), - 'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)), - 'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)), - 'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)), - 'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)), - 'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()), - 'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')), - 'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)), - 'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)), - 'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)), - 'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()), - 'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)), - 'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)), - 'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()), - 'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()), - 'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()), - 'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), - 'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)), - 'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)), - 'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)), - 'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)), - 'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)), - 'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)), - 'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()), - 'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)), - 'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)), - 'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')), - 'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()), - 'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()), - 'PanLexer': 
('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()), - 'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)), - 'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)), - 'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)), - 'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')), - 'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')), - 'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)), - 'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)), - 'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)), - 'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()), - 'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)), - 'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()), - 'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()), - 'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)), - 'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)), - 'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)), - 'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)), - 'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)), - 'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()), - 'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()), - 'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()), - 'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)), - 'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()), - 'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)), - 'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()), - 'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()), - 'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')), - 'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), 
()), - 'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)), - 'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')), - 'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)), - 'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)), - 'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')), - 'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')), - 'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()), - 'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)), - 'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()), - 'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()), - 'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()), - 'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')), - 'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()), - 'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()), - 'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)), - 'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')), - 'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()), - 'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()), - 'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()), - 'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()), - 'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()), - 'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()), - 'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()), - 'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()), - 'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)), - 'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)), - 'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)), - 'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 
'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)), - 'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')), - 'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()), - 'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)), - 'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()), - 'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)), - 'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)), - 'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)), - 'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)), - 'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()), - 'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()), - 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)), - 'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)), - 'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)), - 'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')), - 'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()), - 'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)), - 'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')), - 'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')), - 'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')), - 'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), - 'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), - 'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)), - 'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), - 'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), - 'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()), - 'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)), - 'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), 
('*.scaml',), ('text/x-scaml',)), - 'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()), - 'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')), - 'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)), - 'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), - 'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)), - 'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)), - 'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')), - 'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()), - 'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()), - 'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()), - 'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()), - 'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)), - 'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()), - 'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)), - 'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)), - 'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()), - 'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)), - 'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()), - 'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), - 'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()), - 'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()), - 'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()), - 'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), - 'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()), - 'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), - 'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)), - 'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)), - 'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)), - 'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), - 'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()), - 'SspLexer': 
('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)), - 'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()), - 'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')), - 'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')), - 'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)), - 'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)), - 'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), - 'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()), - 'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()), - 'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()), - 'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), - 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)), - 'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), - 'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), - 'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), - 'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()), - 'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)), - 'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()), - 'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)), - 'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()), - 'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()), - 'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')), - 'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')), - 'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), - 'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()), - 'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), - 'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)), - 'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), - 'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), - 'TreetopLexer': 
('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), - 'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), - 'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)), - 'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)), - 'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')), - 'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()), - 'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()), - 'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)), - 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()), - 'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()), - 'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)), - 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()), - 'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)), - 'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()), - 'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()), - 'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)), - 'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)), - 'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()), - 'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()), - 'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)), - 'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), - 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), - 'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)), - 'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()), - 'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)), - 'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)), - 'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)), - 'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)), - 'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()), - 'WatLexer': 
('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()), - 'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()), - 'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)), - 'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)), - 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()), - 'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), - 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')), - 'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)), - 'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')), - 'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)), - 'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)), - 'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()), - 'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')), - 'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)), - 'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()), - 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')), - 'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), - 'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)), - 'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()), - 'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()), - 'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)), - 'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()), -} - -if __name__ == '__main__': # pragma: no cover - import sys - import os - - # lookup lexers - found_lexers = [] - sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..')) - for root, dirs, files in os.walk('.'): - for filename in files: - if filename.endswith('.py') and not filename.startswith('_'): - module_name = 'pygments.lexers%s.%s' % ( - root[1:].replace('/', '.'), filename[:-3]) - print(module_name) - module = __import__(module_name, None, None, ['']) - for lexer_name in module.__all__: - lexer = getattr(module, lexer_name) - found_lexers.append( - '%r: %r' % (lexer_name, - (module_name, - lexer.name, - tuple(lexer.aliases), - tuple(lexer.filenames), - tuple(lexer.mimetypes)))) - # sort them to make the 
diff minimal - found_lexers.sort() - - # extract useful sourcecode from this file - with open(__file__) as fp: - content = fp.read() - # replace crnl to nl for Windows. - # - # Note that, originally, contributors should keep nl of master - # repository, for example by using some kind of automatic - # management EOL, like `EolExtension - # <https://www.mercurial-scm.org/wiki/EolExtension>`. - content = content.replace("\r\n", "\n") - header = content[:content.find('LEXERS = {')] - footer = content[content.find("if __name__ == '__main__':"):] - - # write new file - with open(__file__, 'w') as fp: - fp.write(header) - fp.write('LEXERS = {\n %s,\n}\n\n' % ',\n '.join(found_lexers)) - fp.write(footer) - - print ('=== %d lexers processed.' % len(found_lexers)) diff --git a/spaces/tom-beer/birds-israel/data_module.py b/spaces/tom-beer/birds-israel/data_module.py deleted file mode 100644 index 45d96d5f82672bab4b45ae26d081bb7bd8ca975b..0000000000000000000000000000000000000000 --- a/spaces/tom-beer/birds-israel/data_module.py +++ /dev/null @@ -1,81 +0,0 @@ -from torch.utils.data import DataLoader -from torchvision import transforms -import pytorch_lightning as pl -from torch.utils.data import Subset -from sklearn.model_selection import train_test_split -from timm.data import ImageDataset -from timm.data.transforms_factory import create_transform - -from constants import INPUT_IMAGE_SIZE - -timm_transform = create_transform(224, scale=(0.7, 1.0), is_training=True, auto_augment='rand-mstd0.5') -NUM_WORKERS = 0 -batch_size = 40 -IMAGENET_STATS = ([0.485, 0.456, 0.406], - [0.229, 0.224, 0.225]) - - -inference_transforms = transforms.Compose([ - transforms.Resize(size=256), - transforms.CenterCrop(size=INPUT_IMAGE_SIZE), - transforms.ToTensor(), - transforms.Normalize(*IMAGENET_STATS) - ]) - - -class BirdsDataModule(pl.LightningDataModule): - def __init__(self, data_dir='./'): - super().__init__() - self.data_dir = data_dir - self.batch_size = batch_size - self.augmentation = transforms.Compose([ - transforms.RandomResizedCrop(size=256, scale=(0.8, 1.0)), - transforms.RandomRotation(degrees=15), - transforms.RandomHorizontalFlip(), - transforms.CenterCrop(size=INPUT_IMAGE_SIZE), - transforms.ToTensor(), - transforms.Normalize(*IMAGENET_STATS) - ]) - self.transform = inference_transforms - - def prepare_data(self): - pass - - def setup(self, stage=None): - tfms = transforms.Compose([ - transforms.ToTensor(), - transforms.Resize((256, 256)) - ]) - ids = ImageDataset('data', transform=tfms) - # index_to_name = {v: k for k, v in ids.parser.class_to_idx.items()} - # import json - # with open('index_to_name.json', 'w') as f: - # json.dump(index_to_name, f) - - targets = [c for (f, c) in ids.parser.samples] - train_indices, val_indices = train_test_split(list(range(len(targets))), test_size=0.13, - stratify=targets, shuffle=True) - self.train_dataset = Subset(ids, train_indices) - self.train_dataset.transform = self.augmentation - self.val_dataset = Subset(ids, val_indices) - self.val_dataset.transform = self.transform - - # we define a separate DataLoader for each of train/val/test - def train_dataloader(self): - mnist_train = DataLoader(self.train_dataset, batch_size=self.batch_size, num_workers=NUM_WORKERS) - return mnist_train - - def val_dataloader(self): - mnist_val = DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=NUM_WORKERS) - return mnist_val - - def test_dataloader(self): - mnist_test = DataLoader(self.val_dataset, batch_size=self.batch_size, num_workers=NUM_WORKERS) - return 
mnist_test - - -birds = BirdsDataModule() -birds.prepare_data() -birds.setup() - -samples = next(iter(birds.val_dataloader())) diff --git a/spaces/tomofi/MMOCR/tests/test_dataset/test_ner_dataset.py b/spaces/tomofi/MMOCR/tests/test_dataset/test_ner_dataset.py deleted file mode 100644 index 145b731cdc89bba2e3a6d78c4f4beb259f7f29ba..0000000000000000000000000000000000000000 --- a/spaces/tomofi/MMOCR/tests/test_dataset/test_ner_dataset.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import json -import os.path as osp -import tempfile - -import torch - -from mmocr.datasets.ner_dataset import NerDataset -from mmocr.models.ner.convertors.ner_convertor import NerConvertor -from mmocr.utils import list_to_file - - -def _create_dummy_ann_file(ann_file): - data = { - 'text': '彭小军认为,国内银行现在走的是台湾的发卡模式', - 'label': { - 'address': { - '台湾': [[15, 16]] - }, - 'name': { - '彭小军': [[0, 2]] - } - } - } - - list_to_file(ann_file, [json.dumps(data, ensure_ascii=False)]) - - -def _create_dummy_vocab_file(vocab_file): - for char in list(map(chr, range(ord('a'), ord('z') + 1))): - list_to_file(vocab_file, [json.dumps(char + '\n', ensure_ascii=False)]) - - -def _create_dummy_loader(): - loader = dict( - type='HardDiskLoader', - repeat=1, - parser=dict(type='LineJsonParser', keys=['text', 'label'])) - return loader - - -def test_ner_dataset(): - # test initialization - loader = _create_dummy_loader() - categories = [ - 'address', 'book', 'company', 'game', 'government', 'movie', 'name', - 'organization', 'position', 'scene' - ] - - # create dummy data - tmp_dir = tempfile.TemporaryDirectory() - ann_file = osp.join(tmp_dir.name, 'fake_data.txt') - vocab_file = osp.join(tmp_dir.name, 'fake_vocab.txt') - _create_dummy_ann_file(ann_file) - _create_dummy_vocab_file(vocab_file) - - max_len = 128 - ner_convertor = dict( - type='NerConvertor', - annotation_type='bio', - vocab_file=vocab_file, - categories=categories, - max_len=max_len) - - test_pipeline = [ - dict( - type='NerTransform', - label_convertor=ner_convertor, - max_len=max_len), - dict(type='ToTensorNER') - ] - dataset = NerDataset(ann_file, loader, pipeline=test_pipeline) - - # test pre_pipeline - img_info = dataset.data_infos[0] - results = dict(img_info=img_info) - dataset.pre_pipeline(results) - - # test prepare_train_img - dataset.prepare_train_img(0) - - # test evaluation - result = [[['address', 15, 16], ['name', 0, 2]]] - - dataset.evaluate(result) - - # test pred convert2entity function - pred = [ - 21, 7, 17, 17, 21, 21, 21, 21, 21, 21, 13, 21, 21, 21, 21, 21, 1, 11, - 21, 21, 7, 17, 17, 21, 21, 21, 21, 21, 21, 13, 21, 21, 21, 21, 21, 1, - 11, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 1, 21, 21, 21, 21, 21, - 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 1, 21, 21, 21, 21, - 21, 21 - ] - preds = [pred[:128]] - mask = [0] * 128 - for i in range(10): - mask[i] = 1 - assert len(preds[0]) == len(mask) - masks = torch.tensor([mask]) - convertor = NerConvertor( - annotation_type='bio', - vocab_file=vocab_file, - categories=categories, - max_len=128) - all_entities = convertor.convert_pred2entities(preds=preds, masks=masks) - assert 
len(all_entities[0][0]) == 3 - - tmp_dir.cleanup() diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py deleted file mode 100644 index 5556c4977e221182b013b68fef4b73d1b0605bf3..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py +++ /dev/null @@ -1,6 +0,0 @@ -_base_ = './faster_rcnn_r50_fpn_1x_coco.py' -model = dict( - roi_head=dict( - bbox_head=dict( - reg_decoded_bbox=True, - loss_bbox=dict(type='GIoULoss', loss_weight=10.0)))) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/pipelines/transforms.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/pipelines/transforms.py deleted file mode 100644 index c777b31f14b586277a331cca7b03a8c4ec2d279a..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/datasets/pipelines/transforms.py +++ /dev/null @@ -1,1901 +0,0 @@ -import copy -import inspect - -import mmcv -import numpy as np -from numpy import random - -from mmdet.core import PolygonMasks -from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps -from ..builder import PIPELINES - -try: - from imagecorruptions import corrupt -except ImportError: - corrupt = None - -try: - import albumentations - from albumentations import Compose -except ImportError: - albumentations = None - Compose = None - - -@PIPELINES.register_module() -class Resize(object): - """Resize images & bbox & mask. - - This transform resizes the input image to some scale. Bboxes and masks are - then resized with the same scale factor. If the input dict contains the key - "scale", then the scale in the input dict is used, otherwise the specified - scale in the init method is used. If the input dict contains the key - "scale_factor" (if MultiScaleFlipAug does not give img_scale but - scale_factor), the actual scale will be computed by image shape and - scale_factor. - - `img_scale` can either be a tuple (single-scale) or a list of tuple - (multi-scale). There are 3 multiscale modes: - - - ``ratio_range is not None``: randomly sample a ratio from the ratio \ - range and multiply it with the image scale. - - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \ - sample a scale from the multiscale range. - - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \ - sample a scale from multiple scales. - - Args: - img_scale (tuple or list[tuple]): Images scales for resizing. - multiscale_mode (str): Either "range" or "value". - ratio_range (tuple[float]): (min_ratio, max_ratio) - keep_ratio (bool): Whether to keep the aspect ratio when resizing the - image. - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. - backend (str): Image resize backend, choices are 'cv2' and 'pillow'. - These two backends generates slightly different results. Defaults - to 'cv2'. - override (bool, optional): Whether to override `scale` and - `scale_factor` so as to call resize twice. Default False. If True, - after the first resizing, the existed `scale` and `scale_factor` - will be ignored so the second resizing can be allowed. - This option is a work-around for multiple times of resize in DETR. - Defaults to False. 
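# Hedged sketch, not part of the deleted file: the three multiscale modes
# described above map onto pipeline dicts roughly as follows (argument names
# follow the __init__ documented here; the concrete scales are only illustrative).
resize_fixed = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_value = dict(type='Resize', img_scale=[(1333, 800), (1333, 640)],
                    multiscale_mode='value', keep_ratio=True)
resize_range = dict(type='Resize', img_scale=[(1333, 640), (1333, 800)],
                    multiscale_mode='range', keep_ratio=True)
resize_ratio = dict(type='Resize', img_scale=(1333, 800),
                    ratio_range=(0.8, 1.2), keep_ratio=True)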
- """ - - def __init__(self, - img_scale=None, - multiscale_mode='range', - ratio_range=None, - keep_ratio=True, - bbox_clip_border=True, - backend='cv2', - override=False): - if img_scale is None: - self.img_scale = None - else: - if isinstance(img_scale, list): - self.img_scale = img_scale - else: - self.img_scale = [img_scale] - assert mmcv.is_list_of(self.img_scale, tuple) - - if ratio_range is not None: - # mode 1: given a scale and a range of image ratio - assert len(self.img_scale) == 1 - else: - # mode 2: given multiple scales or a range of scales - assert multiscale_mode in ['value', 'range'] - - self.backend = backend - self.multiscale_mode = multiscale_mode - self.ratio_range = ratio_range - self.keep_ratio = keep_ratio - # TODO: refactor the override option in Resize - self.override = override - self.bbox_clip_border = bbox_clip_border - - @staticmethod - def random_select(img_scales): - """Randomly select an img_scale from given candidates. - - Args: - img_scales (list[tuple]): Images scales for selection. - - Returns: - (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, \ - where ``img_scale`` is the selected image scale and \ - ``scale_idx`` is the selected index in the given candidates. - """ - - assert mmcv.is_list_of(img_scales, tuple) - scale_idx = np.random.randint(len(img_scales)) - img_scale = img_scales[scale_idx] - return img_scale, scale_idx - - @staticmethod - def random_sample(img_scales): - """Randomly sample an img_scale when ``multiscale_mode=='range'``. - - Args: - img_scales (list[tuple]): Images scale range for sampling. - There must be two tuples in img_scales, which specify the lower - and upper bound of image scales. - - Returns: - (tuple, None): Returns a tuple ``(img_scale, None)``, where \ - ``img_scale`` is sampled scale and None is just a placeholder \ - to be consistent with :func:`random_select`. - """ - - assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 - img_scale_long = [max(s) for s in img_scales] - img_scale_short = [min(s) for s in img_scales] - long_edge = np.random.randint( - min(img_scale_long), - max(img_scale_long) + 1) - short_edge = np.random.randint( - min(img_scale_short), - max(img_scale_short) + 1) - img_scale = (long_edge, short_edge) - return img_scale, None - - @staticmethod - def random_sample_ratio(img_scale, ratio_range): - """Randomly sample an img_scale when ``ratio_range`` is specified. - - A ratio will be randomly sampled from the range specified by - ``ratio_range``. Then it would be multiplied with ``img_scale`` to - generate sampled scale. - - Args: - img_scale (tuple): Images scale base to multiply with ratio. - ratio_range (tuple[float]): The minimum and maximum ratio to scale - the ``img_scale``. - - Returns: - (tuple, None): Returns a tuple ``(scale, None)``, where \ - ``scale`` is sampled ratio multiplied with ``img_scale`` and \ - None is just a placeholder to be consistent with \ - :func:`random_select`. - """ - - assert isinstance(img_scale, tuple) and len(img_scale) == 2 - min_ratio, max_ratio = ratio_range - assert min_ratio <= max_ratio - ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio - scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) - return scale, None - - def _random_scale(self, results): - """Randomly sample an img_scale according to ``ratio_range`` and - ``multiscale_mode``. - - If ``ratio_range`` is specified, a ratio will be sampled and be - multiplied with ``img_scale``. 
- If multiple scales are specified by ``img_scale``, a scale will be - sampled according to ``multiscale_mode``. - Otherwise, single scale will be used. - - Args: - results (dict): Result dict from :obj:`dataset`. - - Returns: - dict: Two new keys 'scale` and 'scale_idx` are added into \ - ``results``, which would be used by subsequent pipelines. - """ - - if self.ratio_range is not None: - scale, scale_idx = self.random_sample_ratio( - self.img_scale[0], self.ratio_range) - elif len(self.img_scale) == 1: - scale, scale_idx = self.img_scale[0], 0 - elif self.multiscale_mode == 'range': - scale, scale_idx = self.random_sample(self.img_scale) - elif self.multiscale_mode == 'value': - scale, scale_idx = self.random_select(self.img_scale) - else: - raise NotImplementedError - - results['scale'] = scale - results['scale_idx'] = scale_idx - - def _resize_img(self, results): - """Resize images with ``results['scale']``.""" - for key in results.get('img_fields', ['img']): - if self.keep_ratio: - img, scale_factor = mmcv.imrescale( - results[key], - results['scale'], - return_scale=True, - backend=self.backend) - # the w_scale and h_scale has minor difference - # a real fix should be done in the mmcv.imrescale in the future - new_h, new_w = img.shape[:2] - h, w = results[key].shape[:2] - w_scale = new_w / w - h_scale = new_h / h - else: - img, w_scale, h_scale = mmcv.imresize( - results[key], - results['scale'], - return_scale=True, - backend=self.backend) - results[key] = img - - scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], - dtype=np.float32) - results['img_shape'] = img.shape - # in case that there is no padding - results['pad_shape'] = img.shape - results['scale_factor'] = scale_factor - results['keep_ratio'] = self.keep_ratio - - def _resize_bboxes(self, results): - """Resize bounding boxes with ``results['scale_factor']``.""" - for key in results.get('bbox_fields', []): - bboxes = results[key] * results['scale_factor'] - if self.bbox_clip_border: - img_shape = results['img_shape'] - bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) - bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) - results[key] = bboxes - - def _resize_masks(self, results): - """Resize masks with ``results['scale']``""" - for key in results.get('mask_fields', []): - if results[key] is None: - continue - if self.keep_ratio: - results[key] = results[key].rescale(results['scale']) - else: - results[key] = results[key].resize(results['img_shape'][:2]) - - def _resize_seg(self, results): - """Resize semantic segmentation map with ``results['scale']``.""" - for key in results.get('seg_fields', []): - if self.keep_ratio: - gt_seg = mmcv.imrescale( - results[key], - results['scale'], - interpolation='nearest', - backend=self.backend) - else: - gt_seg = mmcv.imresize( - results[key], - results['scale'], - interpolation='nearest', - backend=self.backend) - results['gt_semantic_seg'] = gt_seg - - def __call__(self, results): - """Call function to resize images, bounding boxes, masks, semantic - segmentation map. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \ - 'keep_ratio' keys are added into result dict. 
- """ - - if 'scale' not in results: - if 'scale_factor' in results: - img_shape = results['img'].shape[:2] - scale_factor = results['scale_factor'] - assert isinstance(scale_factor, float) - results['scale'] = tuple( - [int(x * scale_factor) for x in img_shape][::-1]) - else: - self._random_scale(results) - else: - if not self.override: - assert 'scale_factor' not in results, ( - 'scale and scale_factor cannot be both set.') - else: - results.pop('scale') - if 'scale_factor' in results: - results.pop('scale_factor') - self._random_scale(results) - - self._resize_img(results) - self._resize_bboxes(results) - self._resize_masks(results) - self._resize_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(img_scale={self.img_scale}, ' - repr_str += f'multiscale_mode={self.multiscale_mode}, ' - repr_str += f'ratio_range={self.ratio_range}, ' - repr_str += f'keep_ratio={self.keep_ratio}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class RandomFlip(object): - """Flip the image & bbox & mask. - - If the input dict contains the key "flip", then the flag will be used, - otherwise it will be randomly decided by a ratio specified in the init - method. - - When random flip is enabled, ``flip_ratio``/``direction`` can either be a - float/string or tuple of float/string. There are 3 flip modes: - - - ``flip_ratio`` is float, ``direction`` is string: the image will be - ``direction``ly flipped with probability of ``flip_ratio`` . - E.g., ``flip_ratio=0.5``, ``direction='horizontal'``, - then image will be horizontally flipped with probability of 0.5. - - ``flip_ratio`` is float, ``direction`` is list of string: the image wil - be ``direction[i]``ly flipped with probability of - ``flip_ratio/len(direction)``. - E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``, - then image will be horizontally flipped with probability of 0.25, - vertically with probability of 0.25. - - ``flip_ratio`` is list of float, ``direction`` is list of string: - given ``len(flip_ratio) == len(direction)``, the image wil - be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``. - E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal', - 'vertical']``, then image will be horizontally flipped with probability - of 0.3, vertically with probability of 0.5 - - Args: - flip_ratio (float | list[float], optional): The flipping probability. - Default: None. - direction(str | list[str], optional): The flipping direction. Options - are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'. - If input is a list, the length must equal ``flip_ratio``. Each - element in ``flip_ratio`` indicates the flip probability of - corresponding direction. 
- """ - - def __init__(self, flip_ratio=None, direction='horizontal'): - if isinstance(flip_ratio, list): - assert mmcv.is_list_of(flip_ratio, float) - assert 0 <= sum(flip_ratio) <= 1 - elif isinstance(flip_ratio, float): - assert 0 <= flip_ratio <= 1 - elif flip_ratio is None: - pass - else: - raise ValueError('flip_ratios must be None, float, ' - 'or list of float') - self.flip_ratio = flip_ratio - - valid_directions = ['horizontal', 'vertical', 'diagonal'] - if isinstance(direction, str): - assert direction in valid_directions - elif isinstance(direction, list): - assert mmcv.is_list_of(direction, str) - assert set(direction).issubset(set(valid_directions)) - else: - raise ValueError('direction must be either str or list of str') - self.direction = direction - - if isinstance(flip_ratio, list): - assert len(self.flip_ratio) == len(self.direction) - - def bbox_flip(self, bboxes, img_shape, direction): - """Flip bboxes horizontally. - - Args: - bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k) - img_shape (tuple[int]): Image shape (height, width) - direction (str): Flip direction. Options are 'horizontal', - 'vertical'. - - Returns: - numpy.ndarray: Flipped bounding boxes. - """ - - assert bboxes.shape[-1] % 4 == 0 - flipped = bboxes.copy() - if direction == 'horizontal': - w = img_shape[1] - flipped[..., 0::4] = w - bboxes[..., 2::4] - flipped[..., 2::4] = w - bboxes[..., 0::4] - elif direction == 'vertical': - h = img_shape[0] - flipped[..., 1::4] = h - bboxes[..., 3::4] - flipped[..., 3::4] = h - bboxes[..., 1::4] - elif direction == 'diagonal': - w = img_shape[1] - h = img_shape[0] - flipped[..., 0::4] = w - bboxes[..., 2::4] - flipped[..., 1::4] = h - bboxes[..., 3::4] - flipped[..., 2::4] = w - bboxes[..., 0::4] - flipped[..., 3::4] = h - bboxes[..., 1::4] - else: - raise ValueError(f"Invalid flipping direction '{direction}'") - return flipped - - def __call__(self, results): - """Call function to flip bounding boxes, masks, semantic segmentation - maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Flipped results, 'flip', 'flip_direction' keys are added \ - into result dict. 
- """ - - if 'flip' not in results: - if isinstance(self.direction, list): - # None means non-flip - direction_list = self.direction + [None] - else: - # None means non-flip - direction_list = [self.direction, None] - - if isinstance(self.flip_ratio, list): - non_flip_ratio = 1 - sum(self.flip_ratio) - flip_ratio_list = self.flip_ratio + [non_flip_ratio] - else: - non_flip_ratio = 1 - self.flip_ratio - # exclude non-flip - single_ratio = self.flip_ratio / (len(direction_list) - 1) - flip_ratio_list = [single_ratio] * (len(direction_list) - - 1) + [non_flip_ratio] - - cur_dir = np.random.choice(direction_list, p=flip_ratio_list) - - results['flip'] = cur_dir is not None - if 'flip_direction' not in results: - results['flip_direction'] = cur_dir - if results['flip']: - # flip image - for key in results.get('img_fields', ['img']): - results[key] = mmcv.imflip( - results[key], direction=results['flip_direction']) - # flip bboxes - for key in results.get('bbox_fields', []): - results[key] = self.bbox_flip(results[key], - results['img_shape'], - results['flip_direction']) - # flip masks - for key in results.get('mask_fields', []): - results[key] = results[key].flip(results['flip_direction']) - - # flip segs - for key in results.get('seg_fields', []): - results[key] = mmcv.imflip( - results[key], direction=results['flip_direction']) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})' - - -@PIPELINES.register_module() -class RandomShift(object): - """Shift the image and box given shift pixels and probability. - - Args: - shift_ratio (float): Probability of shifts. Default 0.5. - max_shift_px (int): The max pixels for shifting. Default 32. - filter_thr_px (int): The width and height threshold for filtering. - The bbox and the rest of the targets below the width and - height threshold will be filtered. Default 1. - """ - - def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1): - assert 0 <= shift_ratio <= 1 - assert max_shift_px >= 0 - self.shift_ratio = shift_ratio - self.max_shift_px = max_shift_px - self.filter_thr_px = int(filter_thr_px) - # The key correspondence from bboxes to labels. - self.bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - - def __call__(self, results): - """Call function to random shift images, bounding boxes. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Shift results. - """ - if random.random() < self.shift_ratio: - img_shape = results['img'].shape[:2] - - random_shift_x = random.randint(-self.max_shift_px, - self.max_shift_px) - random_shift_y = random.randint(-self.max_shift_px, - self.max_shift_px) - new_x = max(0, random_shift_x) - orig_x = max(0, -random_shift_x) - new_y = max(0, random_shift_y) - orig_y = max(0, -random_shift_y) - - # TODO: support mask and semantic segmentation maps. - for key in results.get('bbox_fields', []): - bboxes = results[key].copy() - bboxes[..., 0::2] += random_shift_x - bboxes[..., 1::2] += random_shift_y - - # clip border - bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1]) - bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0]) - - # remove invalid bboxes - bbox_w = bboxes[..., 2] - bboxes[..., 0] - bbox_h = bboxes[..., 3] - bboxes[..., 1] - valid_inds = (bbox_w > self.filter_thr_px) & ( - bbox_h > self.filter_thr_px) - # If the shift does not contain any gt-bbox area, skip this - # image. 
- if key == 'gt_bboxes' and not valid_inds.any(): - return results - bboxes = bboxes[valid_inds] - results[key] = bboxes - - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = self.bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - - for key in results.get('img_fields', ['img']): - img = results[key] - new_img = np.zeros_like(img) - img_h, img_w = img.shape[:2] - new_h = img_h - np.abs(random_shift_y) - new_w = img_w - np.abs(random_shift_x) - new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ - = img[orig_y:orig_y + new_h, orig_x:orig_x + new_w] - results[key] = new_img - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(max_shift_px={self.max_shift_px}, ' - return repr_str - - -@PIPELINES.register_module() -class Pad(object): - """Pad the image & mask. - - There are two padding modes: (1) pad to a fixed size and (2) pad to the - minimum size that is divisible by some number. - Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor", - - Args: - size (tuple, optional): Fixed padding size. - size_divisor (int, optional): The divisor of padded size. - pad_val (float, optional): Padding value, 0 by default. - """ - - def __init__(self, size=None, size_divisor=None, pad_val=0): - self.size = size - self.size_divisor = size_divisor - self.pad_val = pad_val - # only one of size and size_divisor should be valid - assert size is not None or size_divisor is not None - assert size is None or size_divisor is None - - def _pad_img(self, results): - """Pad images according to ``self.size``.""" - for key in results.get('img_fields', ['img']): - if self.size is not None: - padded_img = mmcv.impad( - results[key], shape=self.size, pad_val=self.pad_val) - elif self.size_divisor is not None: - padded_img = mmcv.impad_to_multiple( - results[key], self.size_divisor, pad_val=self.pad_val) - results[key] = padded_img - results['pad_shape'] = padded_img.shape - results['pad_fixed_size'] = self.size - results['pad_size_divisor'] = self.size_divisor - - def _pad_masks(self, results): - """Pad masks according to ``results['pad_shape']``.""" - pad_shape = results['pad_shape'][:2] - for key in results.get('mask_fields', []): - results[key] = results[key].pad(pad_shape, pad_val=self.pad_val) - - def _pad_seg(self, results): - """Pad semantic segmentation map according to - ``results['pad_shape']``.""" - for key in results.get('seg_fields', []): - results[key] = mmcv.impad( - results[key], shape=results['pad_shape'][:2]) - - def __call__(self, results): - """Call function to pad images, masks, semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Updated result dict. - """ - self._pad_img(results) - self._pad_masks(results) - self._pad_seg(results) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(size={self.size}, ' - repr_str += f'size_divisor={self.size_divisor}, ' - repr_str += f'pad_val={self.pad_val})' - return repr_str - - -@PIPELINES.register_module() -class Normalize(object): - """Normalize the image. - - Added key is "img_norm_cfg". - - Args: - mean (sequence): Mean values of 3 channels. - std (sequence): Std values of 3 channels. - to_rgb (bool): Whether to convert the image from BGR to RGB, - default is true. 
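# Hedged sketch, not part of the deleted file: a typical Normalize entry using
# the ImageNet channel statistics commonly paired with mmcv/torchvision backbones.
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53],
                    std=[58.395, 57.12, 57.375], to_rgb=True)
normalize_step = dict(type='Normalize', **img_norm_cfg)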
- """ - - def __init__(self, mean, std, to_rgb=True): - self.mean = np.array(mean, dtype=np.float32) - self.std = np.array(std, dtype=np.float32) - self.to_rgb = to_rgb - - def __call__(self, results): - """Call function to normalize images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Normalized results, 'img_norm_cfg' key is added into - result dict. - """ - for key in results.get('img_fields', ['img']): - results[key] = mmcv.imnormalize(results[key], self.mean, self.std, - self.to_rgb) - results['img_norm_cfg'] = dict( - mean=self.mean, std=self.std, to_rgb=self.to_rgb) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})' - return repr_str - - -@PIPELINES.register_module() -class RandomCrop(object): - """Random crop the image & bboxes & masks. - - The absolute `crop_size` is sampled based on `crop_type` and `image_size`, - then the cropped results are generated. - - Args: - crop_size (tuple): The relative ratio or absolute pixels of - height and width. - crop_type (str, optional): one of "relative_range", "relative", - "absolute", "absolute_range". "relative" randomly crops - (h * crop_size[0], w * crop_size[1]) part from an input of size - (h, w). "relative_range" uniformly samples relative crop size from - range [crop_size[0], 1] and [crop_size[1], 1] for height and width - respectively. "absolute" crops from an input with absolute size - (crop_size[0], crop_size[1]). "absolute_range" uniformly samples - crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w - in range [crop_size[0], min(w, crop_size[1])]. Default "absolute". - allow_negative_crop (bool, optional): Whether to allow a crop that does - not contain any bbox area. Default False. - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. - - Note: - - If the image is smaller than the absolute crop size, return the - original image. - - The keys for bboxes, labels and masks must be aligned. That is, - `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and - `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and - `gt_masks_ignore`. - - If the crop does not contain any gt-bbox region and - `allow_negative_crop` is set to False, skip this image. - """ - - def __init__(self, - crop_size, - crop_type='absolute', - allow_negative_crop=False, - bbox_clip_border=True): - if crop_type not in [ - 'relative_range', 'relative', 'absolute', 'absolute_range' - ]: - raise ValueError(f'Invalid crop_type {crop_type}.') - if crop_type in ['absolute', 'absolute_range']: - assert crop_size[0] > 0 and crop_size[1] > 0 - assert isinstance(crop_size[0], int) and isinstance( - crop_size[1], int) - else: - assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 - self.crop_size = crop_size - self.crop_type = crop_type - self.allow_negative_crop = allow_negative_crop - self.bbox_clip_border = bbox_clip_border - # The key correspondence from bboxes to labels and masks. - self.bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - self.bbox2mask = { - 'gt_bboxes': 'gt_masks', - 'gt_bboxes_ignore': 'gt_masks_ignore' - } - - def _crop_data(self, results, crop_size, allow_negative_crop): - """Function to randomly crop images, bounding boxes, masks, semantic - segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - crop_size (tuple): Expected absolute size after cropping, (h, w). 
- allow_negative_crop (bool): Whether to allow a crop that does not - contain any bbox area. Default to False. - - Returns: - dict: Randomly cropped results, 'img_shape' key in result dict is - updated according to crop size. - """ - assert crop_size[0] > 0 and crop_size[1] > 0 - for key in results.get('img_fields', ['img']): - img = results[key] - margin_h = max(img.shape[0] - crop_size[0], 0) - margin_w = max(img.shape[1] - crop_size[1], 0) - offset_h = np.random.randint(0, margin_h + 1) - offset_w = np.random.randint(0, margin_w + 1) - crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] - crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] - - # crop the image - img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] - img_shape = img.shape - results[key] = img - results['img_shape'] = img_shape - - # crop bboxes accordingly and clip to the image boundary - for key in results.get('bbox_fields', []): - # e.g. gt_bboxes and gt_bboxes_ignore - bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h], - dtype=np.float32) - bboxes = results[key] - bbox_offset - if self.bbox_clip_border: - bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1]) - bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0]) - valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & ( - bboxes[:, 3] > bboxes[:, 1]) - # If the crop does not contain any gt-bbox area and - # allow_negative_crop is False, skip this image. - if (key == 'gt_bboxes' and not valid_inds.any() - and not allow_negative_crop): - return None - results[key] = bboxes[valid_inds, :] - # label fields. e.g. gt_labels and gt_labels_ignore - label_key = self.bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][valid_inds] - - # mask fields, e.g. gt_masks and gt_masks_ignore - mask_key = self.bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][ - valid_inds.nonzero()[0]].crop( - np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) - - # crop semantic seg - for key in results.get('seg_fields', []): - results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2] - - return results - - def _get_crop_size(self, image_size): - """Randomly generates the absolute crop size based on `crop_type` and - `image_size`. - - Args: - image_size (tuple): (h, w). - - Returns: - crop_size (tuple): (crop_h, crop_w) in absolute pixels. - """ - h, w = image_size - if self.crop_type == 'absolute': - return (min(self.crop_size[0], h), min(self.crop_size[1], w)) - elif self.crop_type == 'absolute_range': - assert self.crop_size[0] <= self.crop_size[1] - crop_h = np.random.randint( - min(h, self.crop_size[0]), - min(h, self.crop_size[1]) + 1) - crop_w = np.random.randint( - min(w, self.crop_size[0]), - min(w, self.crop_size[1]) + 1) - return crop_h, crop_w - elif self.crop_type == 'relative': - crop_h, crop_w = self.crop_size - return int(h * crop_h + 0.5), int(w * crop_w + 0.5) - elif self.crop_type == 'relative_range': - crop_size = np.asarray(self.crop_size, dtype=np.float32) - crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) - return int(h * crop_h + 0.5), int(w * crop_w + 0.5) - - def __call__(self, results): - """Call function to randomly crop images, bounding boxes, masks, - semantic segmentation maps. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Randomly cropped results, 'img_shape' key in result dict is - updated according to crop size. 
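# Hedged sketch, not part of the deleted file: two of the crop_type variants from
# the class docstring, written as pipeline dicts (the sizes are only illustrative).
crop_absolute = dict(type='RandomCrop', crop_size=(512, 512), crop_type='absolute')
crop_relative = dict(type='RandomCrop', crop_size=(0.6, 0.6),
                     crop_type='relative_range', allow_negative_crop=True)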
- """ - image_size = results['img'].shape[:2] - crop_size = self._get_crop_size(image_size) - results = self._crop_data(results, crop_size, self.allow_negative_crop) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(crop_size={self.crop_size}, ' - repr_str += f'crop_type={self.crop_type}, ' - repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class SegRescale(object): - """Rescale semantic segmentation maps. - - Args: - scale_factor (float): The scale factor of the final output. - backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. - These two backends generates slightly different results. Defaults - to 'cv2'. - """ - - def __init__(self, scale_factor=1, backend='cv2'): - self.scale_factor = scale_factor - self.backend = backend - - def __call__(self, results): - """Call function to scale the semantic segmentation map. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with semantic segmentation map scaled. - """ - - for key in results.get('seg_fields', []): - if self.scale_factor != 1: - results[key] = mmcv.imrescale( - results[key], - self.scale_factor, - interpolation='nearest', - backend=self.backend) - return results - - def __repr__(self): - return self.__class__.__name__ + f'(scale_factor={self.scale_factor})' - - -@PIPELINES.register_module() -class PhotoMetricDistortion(object): - """Apply photometric distortion to image sequentially, every transformation - is applied with a probability of 0.5. The position of random contrast is in - second or second to last. - - 1. random brightness - 2. random contrast (mode 0) - 3. convert color from BGR to HSV - 4. random saturation - 5. random hue - 6. convert color from HSV to BGR - 7. random contrast (mode 1) - 8. randomly swap channels - - Args: - brightness_delta (int): delta of brightness. - contrast_range (tuple): range of contrast. - saturation_range (tuple): range of saturation. - hue_delta (int): delta of hue. - """ - - def __init__(self, - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18): - self.brightness_delta = brightness_delta - self.contrast_lower, self.contrast_upper = contrast_range - self.saturation_lower, self.saturation_upper = saturation_range - self.hue_delta = hue_delta - - def __call__(self, results): - """Call function to perform photometric distortion on images. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images distorted. 
- """ - - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - img = results['img'] - assert img.dtype == np.float32, \ - 'PhotoMetricDistortion needs the input image of dtype np.float32,'\ - ' please set "to_float32=True" in "LoadImageFromFile" pipeline' - # random brightness - if random.randint(2): - delta = random.uniform(-self.brightness_delta, - self.brightness_delta) - img += delta - - # mode == 0 --> do random contrast first - # mode == 1 --> do random contrast last - mode = random.randint(2) - if mode == 1: - if random.randint(2): - alpha = random.uniform(self.contrast_lower, - self.contrast_upper) - img *= alpha - - # convert color from BGR to HSV - img = mmcv.bgr2hsv(img) - - # random saturation - if random.randint(2): - img[..., 1] *= random.uniform(self.saturation_lower, - self.saturation_upper) - - # random hue - if random.randint(2): - img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta) - img[..., 0][img[..., 0] > 360] -= 360 - img[..., 0][img[..., 0] < 0] += 360 - - # convert color from HSV to BGR - img = mmcv.hsv2bgr(img) - - # random contrast - if mode == 0: - if random.randint(2): - alpha = random.uniform(self.contrast_lower, - self.contrast_upper) - img *= alpha - - # randomly swap channels - if random.randint(2): - img = img[..., random.permutation(3)] - - results['img'] = img - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(\nbrightness_delta={self.brightness_delta},\n' - repr_str += 'contrast_range=' - repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n' - repr_str += 'saturation_range=' - repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n' - repr_str += f'hue_delta={self.hue_delta})' - return repr_str - - -@PIPELINES.register_module() -class Expand(object): - """Random expand the image & bboxes. - - Randomly place the original image on a canvas of 'ratio' x original image - size filled with mean values. The ratio is in the range of ratio_range. - - Args: - mean (tuple): mean value of dataset. - to_rgb (bool): if need to convert the order of mean to align with RGB. - ratio_range (tuple): range of expand ratio. - prob (float): probability of applying this transformation - """ - - def __init__(self, - mean=(0, 0, 0), - to_rgb=True, - ratio_range=(1, 4), - seg_ignore_label=None, - prob=0.5): - self.to_rgb = to_rgb - self.ratio_range = ratio_range - if to_rgb: - self.mean = mean[::-1] - else: - self.mean = mean - self.min_ratio, self.max_ratio = ratio_range - self.seg_ignore_label = seg_ignore_label - self.prob = prob - - def __call__(self, results): - """Call function to expand images, bounding boxes. - - Args: - results (dict): Result dict from loading pipeline. 
- - Returns: - dict: Result dict with images, bounding boxes expanded - """ - - if random.uniform(0, 1) > self.prob: - return results - - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - img = results['img'] - - h, w, c = img.shape - ratio = random.uniform(self.min_ratio, self.max_ratio) - # speedup expand when meets large image - if np.all(self.mean == self.mean[0]): - expand_img = np.empty((int(h * ratio), int(w * ratio), c), - img.dtype) - expand_img.fill(self.mean[0]) - else: - expand_img = np.full((int(h * ratio), int(w * ratio), c), - self.mean, - dtype=img.dtype) - left = int(random.uniform(0, w * ratio - w)) - top = int(random.uniform(0, h * ratio - h)) - expand_img[top:top + h, left:left + w] = img - - results['img'] = expand_img - # expand bboxes - for key in results.get('bbox_fields', []): - results[key] = results[key] + np.tile( - (left, top), 2).astype(results[key].dtype) - - # expand masks - for key in results.get('mask_fields', []): - results[key] = results[key].expand( - int(h * ratio), int(w * ratio), top, left) - - # expand segs - for key in results.get('seg_fields', []): - gt_seg = results[key] - expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), - self.seg_ignore_label, - dtype=gt_seg.dtype) - expand_gt_seg[top:top + h, left:left + w] = gt_seg - results[key] = expand_gt_seg - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' - repr_str += f'ratio_range={self.ratio_range}, ' - repr_str += f'seg_ignore_label={self.seg_ignore_label})' - return repr_str - - -@PIPELINES.register_module() -class MinIoURandomCrop(object): - """Random crop the image & bboxes, the cropped patches have minimum IoU - requirement with original image & bboxes, the IoU threshold is randomly - selected from min_ious. - - Args: - min_ious (tuple): minimum IoU threshold for all intersections with - bounding boxes - min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, - where a >= min_crop_size). - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. - - Note: - The keys for bboxes, labels and masks should be paired. That is, \ - `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \ - `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`. - """ - - def __init__(self, - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3, - bbox_clip_border=True): - # 1: return ori img - self.min_ious = min_ious - self.sample_mode = (1, *min_ious, 0) - self.min_crop_size = min_crop_size - self.bbox_clip_border = bbox_clip_border - self.bbox2label = { - 'gt_bboxes': 'gt_labels', - 'gt_bboxes_ignore': 'gt_labels_ignore' - } - self.bbox2mask = { - 'gt_bboxes': 'gt_masks', - 'gt_bboxes_ignore': 'gt_masks_ignore' - } - - def __call__(self, results): - """Call function to crop images and bounding boxes with minimum IoU - constraint. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images and bounding boxes cropped, \ - 'img_shape' key is updated. 
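# Hedged sketch, not part of the deleted file: Expand and MinIoURandomCrop are
# typically chained SSD-style; the mean below is illustrative and would normally
# match the dataset's img_norm_cfg.
ssd_style_aug = [
    dict(type='Expand', mean=[123.675, 116.28, 103.53], to_rgb=True,
         ratio_range=(1, 4)),
    dict(type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
         min_crop_size=0.3),
]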
- """ - - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - img = results['img'] - assert 'bbox_fields' in results - boxes = [results[key] for key in results['bbox_fields']] - boxes = np.concatenate(boxes, 0) - h, w, c = img.shape - while True: - mode = random.choice(self.sample_mode) - self.mode = mode - if mode == 1: - return results - - min_iou = mode - for i in range(50): - new_w = random.uniform(self.min_crop_size * w, w) - new_h = random.uniform(self.min_crop_size * h, h) - - # h / w in [0.5, 2] - if new_h / new_w < 0.5 or new_h / new_w > 2: - continue - - left = random.uniform(w - new_w) - top = random.uniform(h - new_h) - - patch = np.array( - (int(left), int(top), int(left + new_w), int(top + new_h))) - # Line or point crop is not allowed - if patch[2] == patch[0] or patch[3] == patch[1]: - continue - overlaps = bbox_overlaps( - patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1) - if len(overlaps) > 0 and overlaps.min() < min_iou: - continue - - # center of boxes should inside the crop img - # only adjust boxes and instance masks when the gt is not empty - if len(overlaps) > 0: - # adjust boxes - def is_center_of_bboxes_in_patch(boxes, patch): - center = (boxes[:, :2] + boxes[:, 2:]) / 2 - mask = ((center[:, 0] > patch[0]) * - (center[:, 1] > patch[1]) * - (center[:, 0] < patch[2]) * - (center[:, 1] < patch[3])) - return mask - - mask = is_center_of_bboxes_in_patch(boxes, patch) - if not mask.any(): - continue - for key in results.get('bbox_fields', []): - boxes = results[key].copy() - mask = is_center_of_bboxes_in_patch(boxes, patch) - boxes = boxes[mask] - if self.bbox_clip_border: - boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:]) - boxes[:, :2] = boxes[:, :2].clip(min=patch[:2]) - boxes -= np.tile(patch[:2], 2) - - results[key] = boxes - # labels - label_key = self.bbox2label.get(key) - if label_key in results: - results[label_key] = results[label_key][mask] - - # mask fields - mask_key = self.bbox2mask.get(key) - if mask_key in results: - results[mask_key] = results[mask_key][ - mask.nonzero()[0]].crop(patch) - # adjust the img no matter whether the gt is empty before crop - img = img[patch[1]:patch[3], patch[0]:patch[2]] - results['img'] = img - results['img_shape'] = img.shape - - # seg fields - for key in results.get('seg_fields', []): - results[key] = results[key][patch[1]:patch[3], - patch[0]:patch[2]] - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(min_ious={self.min_ious}, ' - repr_str += f'min_crop_size={self.min_crop_size}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class Corrupt(object): - """Corruption augmentation. - - Corruption transforms implemented based on - `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_. - - Args: - corruption (str): Corruption name. - severity (int, optional): The severity of corruption. Default: 1. - """ - - def __init__(self, corruption, severity=1): - self.corruption = corruption - self.severity = severity - - def __call__(self, results): - """Call function to corrupt image. - - Args: - results (dict): Result dict from loading pipeline. - - Returns: - dict: Result dict with images corrupted. 
- """ - - if corrupt is None: - raise RuntimeError('imagecorruptions is not installed') - if 'img_fields' in results: - assert results['img_fields'] == ['img'], \ - 'Only single img_fields is allowed' - results['img'] = corrupt( - results['img'].astype(np.uint8), - corruption_name=self.corruption, - severity=self.severity) - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(corruption={self.corruption}, ' - repr_str += f'severity={self.severity})' - return repr_str - - -@PIPELINES.register_module() -class Albu(object): - """Albumentation augmentation. - - Adds custom transformations from Albumentations library. - Please, visit `https://albumentations.readthedocs.io` - to get more information. - - An example of ``transforms`` is as followed: - - .. code-block:: - - [ - dict( - type='ShiftScaleRotate', - shift_limit=0.0625, - scale_limit=0.0, - rotate_limit=0, - interpolation=1, - p=0.5), - dict( - type='RandomBrightnessContrast', - brightness_limit=[0.1, 0.3], - contrast_limit=[0.1, 0.3], - p=0.2), - dict(type='ChannelShuffle', p=0.1), - dict( - type='OneOf', - transforms=[ - dict(type='Blur', blur_limit=3, p=1.0), - dict(type='MedianBlur', blur_limit=3, p=1.0) - ], - p=0.1), - ] - - Args: - transforms (list[dict]): A list of albu transformations - bbox_params (dict): Bbox_params for albumentation `Compose` - keymap (dict): Contains {'input key':'albumentation-style key'} - skip_img_without_anno (bool): Whether to skip the image if no ann left - after aug - """ - - def __init__(self, - transforms, - bbox_params=None, - keymap=None, - update_pad_shape=False, - skip_img_without_anno=False): - if Compose is None: - raise RuntimeError('albumentations is not installed') - - # Args will be modified later, copying it will be safer - transforms = copy.deepcopy(transforms) - if bbox_params is not None: - bbox_params = copy.deepcopy(bbox_params) - if keymap is not None: - keymap = copy.deepcopy(keymap) - self.transforms = transforms - self.filter_lost_elements = False - self.update_pad_shape = update_pad_shape - self.skip_img_without_anno = skip_img_without_anno - - # A simple workaround to remove masks without boxes - if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params - and 'filter_lost_elements' in bbox_params): - self.filter_lost_elements = True - self.origin_label_fields = bbox_params['label_fields'] - bbox_params['label_fields'] = ['idx_mapper'] - del bbox_params['filter_lost_elements'] - - self.bbox_params = ( - self.albu_builder(bbox_params) if bbox_params else None) - self.aug = Compose([self.albu_builder(t) for t in self.transforms], - bbox_params=self.bbox_params) - - if not keymap: - self.keymap_to_albu = { - 'img': 'image', - 'gt_masks': 'masks', - 'gt_bboxes': 'bboxes' - } - else: - self.keymap_to_albu = keymap - self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()} - - def albu_builder(self, cfg): - """Import a module from albumentations. - - It inherits some of :func:`build_from_cfg` logic. - - Args: - cfg (dict): Config dict. It should at least contain the key "type". - - Returns: - obj: The constructed object. 
- """ - - assert isinstance(cfg, dict) and 'type' in cfg - args = cfg.copy() - - obj_type = args.pop('type') - if mmcv.is_str(obj_type): - if albumentations is None: - raise RuntimeError('albumentations is not installed') - obj_cls = getattr(albumentations, obj_type) - elif inspect.isclass(obj_type): - obj_cls = obj_type - else: - raise TypeError( - f'type must be a str or valid type, but got {type(obj_type)}') - - if 'transforms' in args: - args['transforms'] = [ - self.albu_builder(transform) - for transform in args['transforms'] - ] - - return obj_cls(**args) - - @staticmethod - def mapper(d, keymap): - """Dictionary mapper. Renames keys according to keymap provided. - - Args: - d (dict): old dict - keymap (dict): {'old_key':'new_key'} - Returns: - dict: new dict. - """ - - updated_dict = {} - for k, v in zip(d.keys(), d.values()): - new_k = keymap.get(k, k) - updated_dict[new_k] = d[k] - return updated_dict - - def __call__(self, results): - # dict to albumentations format - results = self.mapper(results, self.keymap_to_albu) - # TODO: add bbox_fields - if 'bboxes' in results: - # to list of boxes - if isinstance(results['bboxes'], np.ndarray): - results['bboxes'] = [x for x in results['bboxes']] - # add pseudo-field for filtration - if self.filter_lost_elements: - results['idx_mapper'] = np.arange(len(results['bboxes'])) - - # TODO: Support mask structure in albu - if 'masks' in results: - if isinstance(results['masks'], PolygonMasks): - raise NotImplementedError( - 'Albu only supports BitMap masks now') - ori_masks = results['masks'] - if albumentations.__version__ < '0.5': - results['masks'] = results['masks'].masks - else: - results['masks'] = [mask for mask in results['masks'].masks] - - results = self.aug(**results) - - if 'bboxes' in results: - if isinstance(results['bboxes'], list): - results['bboxes'] = np.array( - results['bboxes'], dtype=np.float32) - results['bboxes'] = results['bboxes'].reshape(-1, 4) - - # filter label_fields - if self.filter_lost_elements: - - for label in self.origin_label_fields: - results[label] = np.array( - [results[label][i] for i in results['idx_mapper']]) - if 'masks' in results: - results['masks'] = np.array( - [results['masks'][i] for i in results['idx_mapper']]) - results['masks'] = ori_masks.__class__( - results['masks'], results['image'].shape[0], - results['image'].shape[1]) - - if (not len(results['idx_mapper']) - and self.skip_img_without_anno): - return None - - if 'gt_labels' in results: - if isinstance(results['gt_labels'], list): - results['gt_labels'] = np.array(results['gt_labels']) - results['gt_labels'] = results['gt_labels'].astype(np.int64) - - # back to the original format - results = self.mapper(results, self.keymap_back) - - # update final shape - if self.update_pad_shape: - results['pad_shape'] = results['img'].shape - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' - return repr_str - - -@PIPELINES.register_module() -class RandomCenterCropPad(object): - """Random center crop and random around padding for CornerNet. - - This operation generates randomly cropped image from the original image and - pads it simultaneously. Different from :class:`RandomCrop`, the output - shape may not equal to ``crop_size`` strictly. We choose a random value - from ``ratios`` and the output shape could be larger or smaller than - ``crop_size``. The padding operation is also different from :class:`Pad`, - here we use around padding instead of right-bottom padding. 
- - The relation between output image (padding image) and original image: - - .. code:: text - - output image - - +----------------------------+ - | padded area | - +------|----------------------------|----------+ - | | cropped area | | - | | +---------------+ | | - | | | . center | | | original image - | | | range | | | - | | +---------------+ | | - +------|----------------------------|----------+ - | padded area | - +----------------------------+ - - There are 5 main areas in the figure: - - - output image: output image of this operation, also called padding - image in following instruction. - - original image: input image of this operation. - - padded area: non-intersect area of output image and original image. - - cropped area: the overlap of output image and original image. - - center range: a smaller area where random center chosen from. - center range is computed by ``border`` and original image's shape - to avoid our random center is too close to original image's border. - - Also this operation act differently in train and test mode, the summary - pipeline is listed below. - - Train pipeline: - - 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image - will be ``random_ratio * crop_size``. - 2. Choose a ``random_center`` in center range. - 3. Generate padding image with center matches the ``random_center``. - 4. Initialize the padding image with pixel value equals to ``mean``. - 5. Copy the cropped area to padding image. - 6. Refine annotations. - - Test pipeline: - - 1. Compute output shape according to ``test_pad_mode``. - 2. Generate padding image with center matches the original image - center. - 3. Initialize the padding image with pixel value equals to ``mean``. - 4. Copy the ``cropped area`` to padding image. - - Args: - crop_size (tuple | None): expected size after crop, final size will - computed according to ratio. Requires (h, w) in train mode, and - None in test mode. - ratios (tuple): random select a ratio from tuple and crop image to - (crop_size[0] * ratio) * (crop_size[1] * ratio). - Only available in train mode. - border (int): max distance from center select area to image border. - Only available in train mode. - mean (sequence): Mean values of 3 channels. - std (sequence): Std values of 3 channels. - to_rgb (bool): Whether to convert the image from BGR to RGB. - test_mode (bool): whether involve random variables in transform. - In train mode, crop_size is fixed, center coords and ratio is - random selected from predefined lists. In test mode, crop_size - is image's original shape, center coords and ratio is fixed. - test_pad_mode (tuple): padding method and padding shape value, only - available in test mode. Default is using 'logical_or' with - 127 as padding shape value. - - - 'logical_or': final_shape = input_shape | padding_shape_value - - 'size_divisor': final_shape = int( - ceil(input_shape / padding_shape_value) * padding_shape_value) - bbox_clip_border (bool, optional): Whether clip the objects outside - the border of the image. Defaults to True. 
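To make the train/test behaviour described above concrete, here is a hedged sketch of the two ways `RandomCenterCropPad` is typically configured. The CornerNet-style crop size, ratios and normalization statistics are assumptions, not taken from this diff, and `mean`/`std`/`to_rgb` must match the `Normalize` settings of the surrounding config:

```python
# Train-time entry: fixed crop_size, random ratio and center, no test_pad_mode.
train_crop = dict(
    type='RandomCenterCropPad',
    crop_size=(511, 511),
    ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
    border=128,
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
    test_mode=False,
    test_pad_mode=None)

# Test-time entry: no cropping parameters, output shape padded via logical_or with 127.
test_pad = dict(
    type='RandomCenterCropPad',
    crop_size=None,
    ratios=None,
    border=None,
    mean=[123.675, 116.28, 103.53],
    std=[58.395, 57.12, 57.375],
    to_rgb=True,
    test_mode=True,
    test_pad_mode=('logical_or', 127))
```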
- """ - - def __init__(self, - crop_size=None, - ratios=(0.9, 1.0, 1.1), - border=128, - mean=None, - std=None, - to_rgb=None, - test_mode=False, - test_pad_mode=('logical_or', 127), - bbox_clip_border=True): - if test_mode: - assert crop_size is None, 'crop_size must be None in test mode' - assert ratios is None, 'ratios must be None in test mode' - assert border is None, 'border must be None in test mode' - assert isinstance(test_pad_mode, (list, tuple)) - assert test_pad_mode[0] in ['logical_or', 'size_divisor'] - else: - assert isinstance(crop_size, (list, tuple)) - assert crop_size[0] > 0 and crop_size[1] > 0, ( - 'crop_size must > 0 in train mode') - assert isinstance(ratios, (list, tuple)) - assert test_pad_mode is None, ( - 'test_pad_mode must be None in train mode') - - self.crop_size = crop_size - self.ratios = ratios - self.border = border - # We do not set default value to mean, std and to_rgb because these - # hyper-parameters are easy to forget but could affect the performance. - # Please use the same setting as Normalize for performance assurance. - assert mean is not None and std is not None and to_rgb is not None - self.to_rgb = to_rgb - self.input_mean = mean - self.input_std = std - if to_rgb: - self.mean = mean[::-1] - self.std = std[::-1] - else: - self.mean = mean - self.std = std - self.test_mode = test_mode - self.test_pad_mode = test_pad_mode - self.bbox_clip_border = bbox_clip_border - - def _get_border(self, border, size): - """Get final border for the target size. - - This function generates a ``final_border`` according to image's shape. - The area between ``final_border`` and ``size - final_border`` is the - ``center range``. We randomly choose center from the ``center range`` - to avoid our random center is too close to original image's border. - Also ``center range`` should be larger than 0. - - Args: - border (int): The initial border, default is 128. - size (int): The width or height of original image. - Returns: - int: The final border. - """ - k = 2 * border / size - i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k))) - return border // i - - def _filter_boxes(self, patch, boxes): - """Check whether the center of each box is in the patch. - - Args: - patch (list[int]): The cropped area, [left, top, right, bottom]. - boxes (numpy array, (N x 4)): Ground truth boxes. - - Returns: - mask (numpy array, (N,)): Each box is inside or outside the patch. - """ - center = (boxes[:, :2] + boxes[:, 2:]) / 2 - mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * ( - center[:, 0] < patch[2]) * ( - center[:, 1] < patch[3]) - return mask - - def _crop_image_and_paste(self, image, center, size): - """Crop image with a given center and size, then paste the cropped - image to a blank image with two centers align. - - This function is equivalent to generating a blank image with ``size`` - as its shape. Then cover it on the original image with two centers ( - the center of blank image and the random center of original image) - aligned. The overlap area is paste from the original image and the - outside area is filled with ``mean pixel``. - - Args: - image (np array, H x W x C): Original image. - center (list[int]): Target crop center coord. - size (list[int]): Target crop size. [target_h, target_w] - - Returns: - cropped_img (np array, target_h x target_w x C): Cropped image. 
- border (np array, 4): The distance of four border of - ``cropped_img`` to the original image area, [top, bottom, - left, right] - patch (list[int]): The cropped area, [left, top, right, bottom]. - """ - center_y, center_x = center - target_h, target_w = size - img_h, img_w, img_c = image.shape - - x0 = max(0, center_x - target_w // 2) - x1 = min(center_x + target_w // 2, img_w) - y0 = max(0, center_y - target_h // 2) - y1 = min(center_y + target_h // 2, img_h) - patch = np.array((int(x0), int(y0), int(x1), int(y1))) - - left, right = center_x - x0, x1 - center_x - top, bottom = center_y - y0, y1 - center_y - - cropped_center_y, cropped_center_x = target_h // 2, target_w // 2 - cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype) - for i in range(img_c): - cropped_img[:, :, i] += self.mean[i] - y_slice = slice(cropped_center_y - top, cropped_center_y + bottom) - x_slice = slice(cropped_center_x - left, cropped_center_x + right) - cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :] - - border = np.array([ - cropped_center_y - top, cropped_center_y + bottom, - cropped_center_x - left, cropped_center_x + right - ], - dtype=np.float32) - - return cropped_img, border, patch - - def _train_aug(self, results): - """Random crop and around padding the original image. - - Args: - results (dict): Image infomations in the augment pipeline. - - Returns: - results (dict): The updated dict. - """ - img = results['img'] - h, w, c = img.shape - boxes = results['gt_bboxes'] - while True: - scale = random.choice(self.ratios) - new_h = int(self.crop_size[0] * scale) - new_w = int(self.crop_size[1] * scale) - h_border = self._get_border(self.border, h) - w_border = self._get_border(self.border, w) - - for i in range(50): - center_x = random.randint(low=w_border, high=w - w_border) - center_y = random.randint(low=h_border, high=h - h_border) - - cropped_img, border, patch = self._crop_image_and_paste( - img, [center_y, center_x], [new_h, new_w]) - - mask = self._filter_boxes(patch, boxes) - # if image do not have valid bbox, any crop patch is valid. - if not mask.any() and len(boxes) > 0: - continue - - results['img'] = cropped_img - results['img_shape'] = cropped_img.shape - results['pad_shape'] = cropped_img.shape - - x0, y0, x1, y1 = patch - - left_w, top_h = center_x - x0, center_y - y0 - cropped_center_x, cropped_center_y = new_w // 2, new_h // 2 - - # crop bboxes accordingly and clip to the image boundary - for key in results.get('bbox_fields', []): - mask = self._filter_boxes(patch, results[key]) - bboxes = results[key][mask] - bboxes[:, 0:4:2] += cropped_center_x - left_w - x0 - bboxes[:, 1:4:2] += cropped_center_y - top_h - y0 - if self.bbox_clip_border: - bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w) - bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h) - keep = (bboxes[:, 2] > bboxes[:, 0]) & ( - bboxes[:, 3] > bboxes[:, 1]) - bboxes = bboxes[keep] - results[key] = bboxes - if key in ['gt_bboxes']: - if 'gt_labels' in results: - labels = results['gt_labels'][mask] - labels = labels[keep] - results['gt_labels'] = labels - if 'gt_masks' in results: - raise NotImplementedError( - 'RandomCenterCropPad only supports bbox.') - - # crop semantic seg - for key in results.get('seg_fields', []): - raise NotImplementedError( - 'RandomCenterCropPad only supports bbox.') - return results - - def _test_aug(self, results): - """Around padding the original image without cropping. - - The padding mode and value are from ``test_pad_mode``. 
- - Args: - results (dict): Image infomations in the augment pipeline. - - Returns: - results (dict): The updated dict. - """ - img = results['img'] - h, w, c = img.shape - results['img_shape'] = img.shape - if self.test_pad_mode[0] in ['logical_or']: - target_h = h | self.test_pad_mode[1] - target_w = w | self.test_pad_mode[1] - elif self.test_pad_mode[0] in ['size_divisor']: - divisor = self.test_pad_mode[1] - target_h = int(np.ceil(h / divisor)) * divisor - target_w = int(np.ceil(w / divisor)) * divisor - else: - raise NotImplementedError( - 'RandomCenterCropPad only support two testing pad mode:' - 'logical-or and size_divisor.') - - cropped_img, border, _ = self._crop_image_and_paste( - img, [h // 2, w // 2], [target_h, target_w]) - results['img'] = cropped_img - results['pad_shape'] = cropped_img.shape - results['border'] = border - return results - - def __call__(self, results): - img = results['img'] - assert img.dtype == np.float32, ( - 'RandomCenterCropPad needs the input image of dtype np.float32,' - ' please set "to_float32=True" in "LoadImageFromFile" pipeline') - h, w, c = img.shape - assert c == len(self.mean) - if self.test_mode: - return self._test_aug(results) - else: - return self._train_aug(results) - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(crop_size={self.crop_size}, ' - repr_str += f'ratios={self.ratios}, ' - repr_str += f'border={self.border}, ' - repr_str += f'mean={self.input_mean}, ' - repr_str += f'std={self.input_std}, ' - repr_str += f'to_rgb={self.to_rgb}, ' - repr_str += f'test_mode={self.test_mode}, ' - repr_str += f'test_pad_mode={self.test_pad_mode}, ' - repr_str += f'bbox_clip_border={self.bbox_clip_border})' - return repr_str - - -@PIPELINES.register_module() -class CutOut(object): - """CutOut operation. - - Randomly drop some regions of image used in - `Cutout <https://arxiv.org/abs/1708.04552>`_. - - Args: - n_holes (int | tuple[int, int]): Number of regions to be dropped. - If it is given as a list, number of holes will be randomly - selected from the closed interval [`n_holes[0]`, `n_holes[1]`]. - cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate - shape of dropped regions. It can be `tuple[int, int]` to use a - fixed cutout shape, or `list[tuple[int, int]]` to randomly choose - shape from the list. - cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The - candidate ratio of dropped regions. It can be `tuple[float, float]` - to use a fixed ratio or `list[tuple[float, float]]` to randomly - choose ratio from the list. Please note that `cutout_shape` - and `cutout_ratio` cannot be both given at the same time. - fill_in (tuple[float, float, float] | tuple[int, int, int]): The value - of pixel to fill in the dropped regions. Default: (0, 0, 0). - """ - - def __init__(self, - n_holes, - cutout_shape=None, - cutout_ratio=None, - fill_in=(0, 0, 0)): - - assert (cutout_shape is None) ^ (cutout_ratio is None), \ - 'Either cutout_shape or cutout_ratio should be specified.' 
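As a quick illustration of the two mutually exclusive modes enforced by the check above, `CutOut` takes either candidate hole shapes in pixels or a ratio of the image size (the values below are arbitrary examples, not defaults):

```python
# Fixed candidate hole shapes, 1-5 holes per image (illustrative values).
cutout_by_shape = dict(
    type='CutOut', n_holes=(1, 5), cutout_shape=[(4, 4), (8, 8), (16, 16)])

# Hole size given as a fraction of the image instead (illustrative values).
cutout_by_ratio = dict(type='CutOut', n_holes=3, cutout_ratio=(0.1, 0.2))
```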
- assert (isinstance(cutout_shape, (list, tuple)) - or isinstance(cutout_ratio, (list, tuple))) - if isinstance(n_holes, tuple): - assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1] - else: - n_holes = (n_holes, n_holes) - self.n_holes = n_holes - self.fill_in = fill_in - self.with_ratio = cutout_ratio is not None - self.candidates = cutout_ratio if self.with_ratio else cutout_shape - if not isinstance(self.candidates, list): - self.candidates = [self.candidates] - - def __call__(self, results): - """Call function to drop some regions of image.""" - h, w, c = results['img'].shape - n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1) - for _ in range(n_holes): - x1 = np.random.randint(0, w) - y1 = np.random.randint(0, h) - index = np.random.randint(0, len(self.candidates)) - if not self.with_ratio: - cutout_w, cutout_h = self.candidates[index] - else: - cutout_w = int(self.candidates[index][0] * w) - cutout_h = int(self.candidates[index][1] * h) - - x2 = np.clip(x1 + cutout_w, 0, w) - y2 = np.clip(y1 + cutout_h, 0, h) - results['img'][y1:y2, x1:x2, :] = self.fill_in - - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(n_holes={self.n_holes}, ' - repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio - else f'cutout_shape={self.candidates}, ') - repr_str += f'fill_in={self.fill_in})' - return repr_str diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py deleted file mode 100644 index 35758f4f4e3b2bddd460edb8a7f482b3a9da2919..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py +++ /dev/null @@ -1,76 +0,0 @@ -from mmdet.models.builder import HEADS -from .convfc_bbox_head import ConvFCBBoxHead - - -@HEADS.register_module() -class SCNetBBoxHead(ConvFCBBoxHead): - """BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_. - - This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us - to get intermediate shared feature. - """ - - def _forward_shared(self, x): - """Forward function for shared part.""" - if self.num_shared_convs > 0: - for conv in self.shared_convs: - x = conv(x) - - if self.num_shared_fcs > 0: - if self.with_avg_pool: - x = self.avg_pool(x) - - x = x.flatten(1) - - for fc in self.shared_fcs: - x = self.relu(fc(x)) - - return x - - def _forward_cls_reg(self, x): - """Forward function for classification and regression parts.""" - x_cls = x - x_reg = x - - for conv in self.cls_convs: - x_cls = conv(x_cls) - if x_cls.dim() > 2: - if self.with_avg_pool: - x_cls = self.avg_pool(x_cls) - x_cls = x_cls.flatten(1) - for fc in self.cls_fcs: - x_cls = self.relu(fc(x_cls)) - - for conv in self.reg_convs: - x_reg = conv(x_reg) - if x_reg.dim() > 2: - if self.with_avg_pool: - x_reg = self.avg_pool(x_reg) - x_reg = x_reg.flatten(1) - for fc in self.reg_fcs: - x_reg = self.relu(fc(x_reg)) - - cls_score = self.fc_cls(x_cls) if self.with_cls else None - bbox_pred = self.fc_reg(x_reg) if self.with_reg else None - - return cls_score, bbox_pred - - def forward(self, x, return_shared_feat=False): - """Forward function. - - Args: - x (Tensor): input features - return_shared_feat (bool): If True, return cls-reg-shared feature. 
- - Return: - out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``, - if ``return_shared_feat`` is True, append ``x_shared`` to the - returned tuple. - """ - x_shared = self._forward_shared(x) - out = self._forward_cls_reg(x_shared) - - if return_shared_feat: - out += (x_shared, ) - - return out diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py deleted file mode 100644 index e01113629837eb9c065ba40cd4025899b7bd0172..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py +++ /dev/null @@ -1,159 +0,0 @@ -from mmdet.core import bbox2roi -from ..builder import HEADS -from ..losses.pisa_loss import carl_loss, isr_p -from .standard_roi_head import StandardRoIHead - - -@HEADS.register_module() -class PISARoIHead(StandardRoIHead): - r"""The RoI head for `Prime Sample Attention in Object Detection - <https://arxiv.org/abs/1904.04821>`_.""" - - def forward_train(self, - x, - img_metas, - proposal_list, - gt_bboxes, - gt_labels, - gt_bboxes_ignore=None, - gt_masks=None): - """Forward function for training. - - Args: - x (list[Tensor]): List of multi-level img features. - img_metas (list[dict]): List of image info dict where each dict - has: 'img_shape', 'scale_factor', 'flip', and may also contain - 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. - For details on the values of these keys see - `mmdet/datasets/pipelines/formatting.py:Collect`. - proposals (list[Tensors]): List of region proposals. - gt_bboxes (list[Tensor]): Each item are the truth boxes for each - image in [tl_x, tl_y, br_x, br_y] format. - gt_labels (list[Tensor]): Class indices corresponding to each box - gt_bboxes_ignore (list[Tensor], optional): Specify which bounding - boxes can be ignored when computing the loss. - gt_masks (None | Tensor) : True segmentation masks for each box - used if the architecture supports a segmentation task. 
- - Returns: - dict[str, Tensor]: a dictionary of loss components - """ - # assign gts and sample proposals - if self.with_bbox or self.with_mask: - num_imgs = len(img_metas) - if gt_bboxes_ignore is None: - gt_bboxes_ignore = [None for _ in range(num_imgs)] - sampling_results = [] - neg_label_weights = [] - for i in range(num_imgs): - assign_result = self.bbox_assigner.assign( - proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i], - gt_labels[i]) - sampling_result = self.bbox_sampler.sample( - assign_result, - proposal_list[i], - gt_bboxes[i], - gt_labels[i], - feats=[lvl_feat[i][None] for lvl_feat in x]) - # neg label weight is obtained by sampling when using ISR-N - neg_label_weight = None - if isinstance(sampling_result, tuple): - sampling_result, neg_label_weight = sampling_result - sampling_results.append(sampling_result) - neg_label_weights.append(neg_label_weight) - - losses = dict() - # bbox head forward and loss - if self.with_bbox: - bbox_results = self._bbox_forward_train( - x, - sampling_results, - gt_bboxes, - gt_labels, - img_metas, - neg_label_weights=neg_label_weights) - losses.update(bbox_results['loss_bbox']) - - # mask head forward and loss - if self.with_mask: - mask_results = self._mask_forward_train(x, sampling_results, - bbox_results['bbox_feats'], - gt_masks, img_metas) - losses.update(mask_results['loss_mask']) - - return losses - - def _bbox_forward(self, x, rois): - """Box forward function used in both training and testing.""" - # TODO: a more flexible way to decide which feature maps to use - bbox_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - if self.with_shared_head: - bbox_feats = self.shared_head(bbox_feats) - cls_score, bbox_pred = self.bbox_head(bbox_feats) - - bbox_results = dict( - cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats) - return bbox_results - - def _bbox_forward_train(self, - x, - sampling_results, - gt_bboxes, - gt_labels, - img_metas, - neg_label_weights=None): - """Run forward function and calculate loss for box head in training.""" - rois = bbox2roi([res.bboxes for res in sampling_results]) - - bbox_results = self._bbox_forward(x, rois) - - bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes, - gt_labels, self.train_cfg) - - # neg_label_weights obtained by sampler is image-wise, mapping back to - # the corresponding location in label weights - if neg_label_weights[0] is not None: - label_weights = bbox_targets[1] - cur_num_rois = 0 - for i in range(len(sampling_results)): - num_pos = sampling_results[i].pos_inds.size(0) - num_neg = sampling_results[i].neg_inds.size(0) - label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos + - num_neg] = neg_label_weights[i] - cur_num_rois += num_pos + num_neg - - cls_score = bbox_results['cls_score'] - bbox_pred = bbox_results['bbox_pred'] - - # Apply ISR-P - isr_cfg = self.train_cfg.get('isr', None) - if isr_cfg is not None: - bbox_targets = isr_p( - cls_score, - bbox_pred, - bbox_targets, - rois, - sampling_results, - self.bbox_head.loss_cls, - self.bbox_head.bbox_coder, - **isr_cfg, - num_class=self.bbox_head.num_classes) - loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois, - *bbox_targets) - - # Add CARL Loss - carl_cfg = self.train_cfg.get('carl', None) - if carl_cfg is not None: - loss_carl = carl_loss( - cls_score, - bbox_targets[0], - bbox_pred, - bbox_targets[2], - self.bbox_head.loss_bbox, - **carl_cfg, - num_class=self.bbox_head.num_classes) - loss_bbox.update(loss_carl) - - 
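The two `train_cfg.get(...)` lookups above are the only switches for ISR-P and CARL. A hedged sketch of the corresponding config fragment follows; the `k`/`bias` values are the ones commonly quoted for PISA and are assumptions here, not read from this repository:

```python
# Illustrative fragment of a PISA R-CNN train_cfg; values are assumptions.
train_cfg = dict(
    rcnn=dict(
        # ... the usual assigner / sampler entries of a Faster R-CNN config ...
        isr=dict(k=2, bias=0),      # enables ISR-P reweighting via isr_p()
        carl=dict(k=1, bias=0.2)))  # enables the CARL regression loss term
```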
bbox_results.update(loss_bbox=loss_bbox) - return bbox_results diff --git a/spaces/tomofi/NEologd/README.md b/spaces/tomofi/NEologd/README.md deleted file mode 100644 index b465aec975ee1d1246fd015f02b9cb9638944621..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NEologd/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: NEologd -emoji: 🚀 -colorFrom: yellow -colorTo: yellow -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Boots Yakata Byd 99 BEST.md b/spaces/usbethFlerru/sovits-modelsV2/example/Boots Yakata Byd 99 BEST.md deleted file mode 100644 index 3abe864652cd518b35c87f8032839d1db4685a2e..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Boots Yakata Byd 99 BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -<h2>Boots Yakata Byd 99</h2><br /><p><b><b>DOWNLOAD</b> →→→ <a href="https://urlcod.com/2uyVu2">https://urlcod.com/2uyVu2</a></b></p><br /><br /> -<br /> - 4d29de3e1b<br /> -<br /> -<br /> -<p></p> diff --git a/spaces/user238921933/stable-diffusion-webui/webui-user.bat b/spaces/user238921933/stable-diffusion-webui/webui-user.bat deleted file mode 100644 index e5a257bef06f5bfcaff1c8b33c64a767eb8b3fe5..0000000000000000000000000000000000000000 --- a/spaces/user238921933/stable-diffusion-webui/webui-user.bat +++ /dev/null @@ -1,8 +0,0 @@ -@echo off - -set PYTHON= -set GIT= -set VENV_DIR= -set COMMANDLINE_ARGS= - -call webui.bat diff --git a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/README.md b/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/README.md deleted file mode 100644 index 26ec0c3246b3e2caf8c499fb476d7da6ede60386..0000000000000000000000000000000000000000 --- a/spaces/vaishanthr/Simultaneous-Segmented-Depth-Prediction/yolov8/ultralytics/tracker/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# Tracker - -## Supported Trackers - -- [x] ByteTracker -- [x] BoT-SORT - -## Usage - -### python interface: - -You can use the Python interface to track objects using the YOLO model. - -```python -from ultralytics import YOLO - -model = YOLO("yolov8n.pt") # or a segmentation model .i.e yolov8n-seg.pt -model.track( - source="video/streams", - stream=True, - tracker="botsort.yaml", # or 'bytetrack.yaml' - show=True, -) -``` - -You can get the IDs of the tracked objects using the following code: - -```python -from ultralytics import YOLO - -model = YOLO("yolov8n.pt") - -for result in model.track(source="video.mp4"): - print( - result.boxes.id.cpu().numpy().astype(int) - ) # this will print the IDs of the tracked objects in the frame -``` - -If you want to use the tracker with a folder of images or when you loop on the video frames, you should use the `persist` parameter to tell the model that these frames are related to each other so the IDs will be fixed for the same objects. Otherwise, the IDs will be different in each frame because in each loop, the model creates a new object for tracking, but the `persist` parameter makes it use the same object for tracking. 
- -```python -import cv2 -from ultralytics import YOLO - -cap = cv2.VideoCapture("video.mp4") -model = YOLO("yolov8n.pt") -while True: - ret, frame = cap.read() - if not ret: - break - results = model.track(frame, persist=True) - boxes = results[0].boxes.xyxy.cpu().numpy().astype(int) - ids = results[0].boxes.id.cpu().numpy().astype(int) - for box, id in zip(boxes, ids): - cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2) - cv2.putText( - frame, - f"Id {id}", - (box[0], box[1]), - cv2.FONT_HERSHEY_SIMPLEX, - 1, - (0, 0, 255), - 2, - ) - cv2.imshow("frame", frame) - if cv2.waitKey(1) & 0xFF == ord("q"): - break -``` - -## Change tracker parameters - -You can change the tracker parameters by eding the `tracker.yaml` file which is located in the ultralytics/tracker/cfg folder. - -## Command Line Interface (CLI) - -You can also use the command line interface to track objects using the YOLO model. - -```bash -yolo detect track source=... tracker=... -yolo segment track source=... tracker=... -yolo pose track source=... tracker=... -``` - -By default, trackers will use the configuration in `ultralytics/tracker/cfg`. -We also support using a modified tracker config file. Please refer to the tracker config files -in `ultralytics/tracker/cfg`.<br> diff --git a/spaces/vasistasaimagam/FoodVision_Big/app.py b/spaces/vasistasaimagam/FoodVision_Big/app.py deleted file mode 100644 index 0d74acb8015b6f25b1e0d94ca8398291722ef50c..0000000000000000000000000000000000000000 --- a/spaces/vasistasaimagam/FoodVision_Big/app.py +++ /dev/null @@ -1,81 +0,0 @@ -### 1. Imports and class names setup ### -import gradio as gr -import os -import torch - -from model import create_effnetb2_model -from timeit import default_timer as timer -from typing import Tuple, Dict - -# Setup class names -with open("class_names.txt", "r") as f: # reading them in from class_names.txt - class_names = [food_name.strip() for food_name in f.readlines()] - -### 2. Model and transforms preparation ### - -# Create model -effnetb2, effnetb2_transforms = create_effnetb2_model( - num_classes=101, # could also use len(class_names) -) - -# Load saved weights -effnetb2.load_state_dict( - torch.load( - f="09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth", - map_location=torch.device("cpu"), # load to CPU - ) -) - -### 3. Predict function ### - -# Create predict function -def predict(img) -> Tuple[Dict, float]: - """Transforms and performs a prediction on img and returns prediction and time taken. - """ - # Start the timer - start_time = timer() - - # Transform the target image and add a batch dimension - img = effnetb2_transforms(img).unsqueeze(0) - - # Put model into evaluation mode and turn on inference mode - effnetb2.eval() - with torch.inference_mode(): - # Pass the transformed image through the model and turn the prediction logits into prediction probabilities - pred_probs = torch.softmax(effnetb2(img), dim=1) - - # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter) - pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))} - - # Calculate the prediction time - pred_time = round(timer() - start_time, 5) - - # Return the prediction dictionary and prediction time - return pred_labels_and_probs, pred_time - -### 4. 
Gradio app ### - -# Create title, description and article strings -title = "FoodVision Big 🍔👁" -description = "An EfficientNetB2 feature extractor computer vision model to classify images of food into [101 different classes](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/food101_class_names.txt)." -##article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)." - -# Create examples list from "examples/" directory -example_list = [["examples/" + example] for example in os.listdir("examples")] - -# Create Gradio interface -demo = gr.Interface( - fn=predict, - inputs=gr.Image(type="pil"), - outputs=[ - gr.Label(num_top_classes=5, label="Predictions"), - gr.Number(label="Prediction time (s)"), - ], - examples=example_list, - title=title, - description=description, - ##article=article, -) - -# Launch the app! -demo.launch() diff --git a/spaces/videfikri/aicover/docs/README.ja.md b/spaces/videfikri/aicover/docs/README.ja.md deleted file mode 100644 index 492af2c454890f0e6e14e5c21f7b15052631e57a..0000000000000000000000000000000000000000 --- a/spaces/videfikri/aicover/docs/README.ja.md +++ /dev/null @@ -1,106 +0,0 @@ -<div align="center"> - -<h1>Retrieval-based-Voice-Conversion-WebUI</h1> -VITSに基づく使いやすい音声変換(voice changer)framework<br><br> - -[![madewithlove](https://forthebadge.com/images/badges/built-with-love.svg)](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI) - -<img src="https://counter.seku.su/cmoe?name=rvc&theme=r34" /><br> - -[![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/liujing04/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb) -[![Licence](https://img.shields.io/github/license/liujing04/Retrieval-based-Voice-Conversion-WebUI?style=for-the-badge)](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI/blob/main/%E4%BD%BF%E7%94%A8%E9%9C%80%E9%81%B5%E5%AE%88%E7%9A%84%E5%8D%8F%E8%AE%AE-LICENSE.txt) -[![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/) - -[![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk) - -</div> - ------- - -[**更新日誌**](https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI/blob/main/Changelog_CN.md) - -[**English**](./README.en.md) | [**中文简体**](../README.md) | [**日本語**](./README.ja.md) | [**한국어**](./README.ko.md) ([**韓國語**](./README.ko.han.md)) - -> デモ動画は[こちら](https://www.bilibili.com/video/BV1pm4y1z7Gm/)でご覧ください。 - -> RVCによるリアルタイム音声変換: [w-okada/voice-changer](https://github.com/w-okada/voice-changer) - -> 著作権侵害を心配することなく使用できるように、基底モデルは約50時間の高品質なオープンソースデータセットで訓練されています。 - -> 今後も、次々と使用許可のある高品質な歌声の資料集を追加し、基底モデルを訓練する予定です。 - -## はじめに -本リポジトリには下記の特徴があります。 - -+ Top1検索を用いることで、生の特徴量を訓練用データセット特徴量に変換し、トーンリーケージを削減します。 -+ 比較的貧弱なGPUでも、高速かつ簡単に訓練できます。 -+ 少量のデータセットからでも、比較的良い結果を得ることができます。(10分以上のノイズの少ない音声を推奨します。) -+ モデルを融合することで、音声を混ぜることができます。(ckpt processingタブの、ckpt mergeを使用します。) -+ 使いやすいWebUI。 -+ UVR5 Modelも含んでいるため、人の声とBGMを素早く分離できます。 - -## 環境構築 -Poetryで依存関係をインストールすることをお勧めします。 - -下記のコマンドは、Python3.8以上の環境で実行する必要があります: -```bash -# PyTorch関連の依存関係をインストール。インストール済の場合は省略。 -# 参照先: https://pytorch.org/get-started/locally/ -pip install torch torchvision torchaudio - -#Windows+ Nvidia Ampere Architecture(RTX30xx)の場合、 #21 
に従い、pytorchに対応するcuda versionを指定する必要があります。 -#pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117 - -# PyTorch関連の依存関係をインストール。インストール済の場合は省略。 -# 参照先: https://python-poetry.org/docs/#installation -curl -sSL https://install.python-poetry.org | python3 - - -# Poetry経由で依存関係をインストール -poetry install -``` - -pipでも依存関係のインストールが可能です: - -**注意**:`faiss 1.7.2`は`macOS`で`Segmentation Fault: 11`を起こすので、マニュアルインストールする場合は、 `pip install faiss-cpu==1.7.0`を実行してください。 - -```bash -pip install -r requirements.txt -``` - -## 基底modelsを準備 -RVCは推論/訓練のために、様々な事前訓練を行った基底モデルを必要とします。 - -modelsは[Hugging Face space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)からダウンロードできます。 - -以下は、RVCに必要な基底モデルやその他のファイルの一覧です。 -```bash -hubert_base.pt - -./pretrained - -./uvr5_weights - -# ffmpegがすでにinstallされている場合は省略 -./ffmpeg -``` -その後、下記のコマンドでWebUIを起動します。 -```bash -python infer-web.py -``` -Windowsをお使いの方は、直接`RVC-beta.7z`をダウンロード後に展開し、`go-web.bat`をクリックすることで、WebUIを起動することができます。(7zipが必要です。) - -また、リポジトリに[小白简易教程.doc](./小白简易教程.doc)がありますので、参考にしてください(中国語版のみ)。 - -## 参考プロジェクト -+ [ContentVec](https://github.com/auspicious3000/contentvec/) -+ [VITS](https://github.com/jaywalnut310/vits) -+ [HIFIGAN](https://github.com/jik876/hifi-gan) -+ [Gradio](https://github.com/gradio-app/gradio) -+ [FFmpeg](https://github.com/FFmpeg/FFmpeg) -+ [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui) -+ [audio-slicer](https://github.com/openvpi/audio-slicer) - -## 貢献者(contributor)の皆様の尽力に感謝します -<a href="https://github.com/liujing04/Retrieval-based-Voice-Conversion-WebUI/graphs/contributors" target="_blank"> - <img src="https://contrib.rocks/image?repo=liujing04/Retrieval-based-Voice-Conversion-WebUI" /> -</a> diff --git a/spaces/videfikri/aicover/i18n.py b/spaces/videfikri/aicover/i18n.py deleted file mode 100644 index 1d7fe71d0e443a90492ff033ee34460e3429379f..0000000000000000000000000000000000000000 --- a/spaces/videfikri/aicover/i18n.py +++ /dev/null @@ -1,25 +0,0 @@ -import locale -import json -import os - - -def load_language_list(language): - with open(f"./i18n/{language}.json", "r", encoding="utf-8") as f: - language_list = json.load(f) - return language_list - - -class I18nAuto: - def __init__(self, language=None): - if language in ["Auto", None]: - language = locale.getdefaultlocale()[ - 0 - ] # getlocale can't identify the system's language ((None, None)) - if not os.path.exists(f"./i18n/{language}.json"): - language = "en_US" - self.language = language - print("Use Language:", language) - self.language_map = load_language_list(language) - - def __call__(self, key): - return self.language_map.get(key, key) diff --git a/spaces/vishnu0001/text2mesh/shap_e/models/transmitter/base.py b/spaces/vishnu0001/text2mesh/shap_e/models/transmitter/base.py deleted file mode 100644 index cdfb143472dc98efb713c1298ce36a4b0af80e80..0000000000000000000000000000000000000000 --- a/spaces/vishnu0001/text2mesh/shap_e/models/transmitter/base.py +++ /dev/null @@ -1,198 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict, Optional, Tuple - -import torch.nn as nn -from torch import torch - -from shap_e.models.renderer import Renderer -from shap_e.util.collections import AttrDict - -from .bottleneck import latent_bottleneck_from_config, latent_warp_from_config -from .params_proj import flatten_param_shapes, params_proj_from_config - - -class Encoder(nn.Module, ABC): - def __init__(self, *, device: torch.device, param_shapes: Dict[str, Tuple[int]]): - """ - Instantiate the encoder with information 
about the renderer's input - parameters. This information can be used to create output layers to - generate the necessary latents. - """ - super().__init__() - self.param_shapes = param_shapes - self.device = device - - @abstractmethod - def forward(self, batch: AttrDict, options: Optional[AttrDict] = None) -> AttrDict: - """ - Encode a batch of data into a batch of latent information. - """ - - -class VectorEncoder(Encoder): - def __init__( - self, - *, - device: torch.device, - param_shapes: Dict[str, Tuple[int]], - params_proj: Dict[str, Any], - d_latent: int, - latent_bottleneck: Optional[Dict[str, Any]] = None, - latent_warp: Optional[Dict[str, Any]] = None, - ): - super().__init__(device=device, param_shapes=param_shapes) - if latent_bottleneck is None: - latent_bottleneck = dict(name="identity") - if latent_warp is None: - latent_warp = dict(name="identity") - self.d_latent = d_latent - self.params_proj = params_proj_from_config( - params_proj, device=device, param_shapes=param_shapes, d_latent=d_latent - ) - self.latent_bottleneck = latent_bottleneck_from_config( - latent_bottleneck, device=device, d_latent=d_latent - ) - self.latent_warp = latent_warp_from_config(latent_warp, device=device) - - def forward(self, batch: AttrDict, options: Optional[AttrDict] = None) -> AttrDict: - h = self.encode_to_bottleneck(batch, options=options) - return self.bottleneck_to_params(h, options=options) - - def encode_to_bottleneck( - self, batch: AttrDict, options: Optional[AttrDict] = None - ) -> torch.Tensor: - return self.latent_warp.warp( - self.latent_bottleneck(self.encode_to_vector(batch, options=options), options=options), - options=options, - ) - - @abstractmethod - def encode_to_vector(self, batch: AttrDict, options: Optional[AttrDict] = None) -> torch.Tensor: - """ - Encode the batch into a single latent vector. - """ - - def bottleneck_to_params( - self, vector: torch.Tensor, options: Optional[AttrDict] = None - ) -> AttrDict: - _ = options - return self.params_proj(self.latent_warp.unwarp(vector, options=options), options=options) - - -class ChannelsEncoder(VectorEncoder): - def __init__( - self, - *, - device: torch.device, - param_shapes: Dict[str, Tuple[int]], - params_proj: Dict[str, Any], - d_latent: int, - latent_bottleneck: Optional[Dict[str, Any]] = None, - latent_warp: Optional[Dict[str, Any]] = None, - ): - super().__init__( - device=device, - param_shapes=param_shapes, - params_proj=params_proj, - d_latent=d_latent, - latent_bottleneck=latent_bottleneck, - latent_warp=latent_warp, - ) - self.flat_shapes = flatten_param_shapes(param_shapes) - self.latent_ctx = sum(flat[0] for flat in self.flat_shapes.values()) - - @abstractmethod - def encode_to_channels( - self, batch: AttrDict, options: Optional[AttrDict] = None - ) -> torch.Tensor: - """ - Encode the batch into a per-data-point set of latents. 
- :return: [batch_size, latent_ctx, latent_width] - """ - - def encode_to_vector(self, batch: AttrDict, options: Optional[AttrDict] = None) -> torch.Tensor: - return self.encode_to_channels(batch, options=options).flatten(1) - - def bottleneck_to_channels( - self, vector: torch.Tensor, options: Optional[AttrDict] = None - ) -> torch.Tensor: - _ = options - return vector.view(vector.shape[0], self.latent_ctx, -1) - - def bottleneck_to_params( - self, vector: torch.Tensor, options: Optional[AttrDict] = None - ) -> AttrDict: - _ = options - return self.params_proj( - self.bottleneck_to_channels(self.latent_warp.unwarp(vector)), options=options - ) - - -class Transmitter(nn.Module): - def __init__(self, encoder: Encoder, renderer: Renderer): - super().__init__() - self.encoder = encoder - self.renderer = renderer - - def forward(self, batch: AttrDict, options: Optional[AttrDict] = None) -> AttrDict: - """ - Transmit the batch through the encoder and then the renderer. - """ - params = self.encoder(batch, options=options) - return self.renderer(batch, params=params, options=options) - - -class VectorDecoder(nn.Module): - def __init__( - self, - *, - device: torch.device, - param_shapes: Dict[str, Tuple[int]], - params_proj: Dict[str, Any], - d_latent: int, - latent_warp: Optional[Dict[str, Any]] = None, - renderer: Renderer, - ): - super().__init__() - self.device = device - self.param_shapes = param_shapes - - if latent_warp is None: - latent_warp = dict(name="identity") - self.d_latent = d_latent - self.params_proj = params_proj_from_config( - params_proj, device=device, param_shapes=param_shapes, d_latent=d_latent - ) - self.latent_warp = latent_warp_from_config(latent_warp, device=device) - self.renderer = renderer - - def bottleneck_to_params( - self, vector: torch.Tensor, options: Optional[AttrDict] = None - ) -> AttrDict: - _ = options - return self.params_proj(self.latent_warp.unwarp(vector, options=options), options=options) - - -class ChannelsDecoder(VectorDecoder): - def __init__( - self, - *, - latent_ctx: int, - **kwargs, - ): - super().__init__(**kwargs) - self.latent_ctx = latent_ctx - - def bottleneck_to_channels( - self, vector: torch.Tensor, options: Optional[AttrDict] = None - ) -> torch.Tensor: - _ = options - return vector.view(vector.shape[0], self.latent_ctx, -1) - - def bottleneck_to_params( - self, vector: torch.Tensor, options: Optional[AttrDict] = None - ) -> AttrDict: - _ = options - return self.params_proj( - self.bottleneck_to_channels(self.latent_warp.unwarp(vector)), options=options - ) diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/profiler.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/profiler.py deleted file mode 100644 index b70236997eec59c2209ef351ae38863b4112d0ec..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmcv/runner/hooks/profiler.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import warnings -from typing import Callable, List, Optional, Union - -import torch - -from ..dist_utils import master_only -from .hook import HOOKS, Hook - - -@HOOKS.register_module() -class ProfilerHook(Hook): - """Profiler to analyze performance during training. - - PyTorch Profiler is a tool that allows the collection of the performance - metrics during the training. 
More details on Profiler can be found at - https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile - - Args: - by_epoch (bool): Profile performance by epoch or by iteration. - Default: True. - profile_iters (int): Number of iterations for profiling. - If ``by_epoch=True``, profile_iters indicates that they are the - first profile_iters epochs at the beginning of the - training, otherwise it indicates the first profile_iters - iterations. Default: 1. - activities (list[str]): List of activity groups (CPU, CUDA) to use in - profiling. Default: ['cpu', 'cuda']. - schedule (dict, optional): Config of generating the callable schedule. - if schedule is None, profiler will not add step markers into the - trace and table view. Default: None. - on_trace_ready (callable, dict): Either a handler or a dict of generate - handler. Default: None. - record_shapes (bool): Save information about operator's input shapes. - Default: False. - profile_memory (bool): Track tensor memory allocation/deallocation. - Default: False. - with_stack (bool): Record source information (file and line number) - for the ops. Default: False. - with_flops (bool): Use formula to estimate the FLOPS of specific - operators (matrix multiplication and 2D convolution). - Default: False. - json_trace_path (str, optional): Exports the collected trace in Chrome - JSON format. Default: None. - - Example: - >>> runner = ... # instantiate a Runner - >>> # tensorboard trace - >>> trace_config = dict(type='tb_trace', dir_name='work_dir') - >>> profiler_config = dict(on_trace_ready=trace_config) - >>> runner.register_profiler_hook(profiler_config) - >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)]) - """ - - def __init__(self, - by_epoch: bool = True, - profile_iters: int = 1, - activities: List[str] = ['cpu', 'cuda'], - schedule: Optional[dict] = None, - on_trace_ready: Optional[Union[Callable, dict]] = None, - record_shapes: bool = False, - profile_memory: bool = False, - with_stack: bool = False, - with_flops: bool = False, - json_trace_path: Optional[str] = None) -> None: - try: - from torch import profiler # torch version >= 1.8.1 - except ImportError: - raise ImportError('profiler is the new feature of torch1.8.1, ' - f'but your version is {torch.__version__}') - - assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.' 
- self.by_epoch = by_epoch - - if profile_iters < 1: - raise ValueError('profile_iters should be greater than 0, but got ' - f'{profile_iters}') - self.profile_iters = profile_iters - - if not isinstance(activities, list): - raise ValueError( - f'activities should be list, but got {type(activities)}') - self.activities = [] - for activity in activities: - activity = activity.lower() - if activity == 'cpu': - self.activities.append(profiler.ProfilerActivity.CPU) - elif activity == 'cuda': - self.activities.append(profiler.ProfilerActivity.CUDA) - else: - raise ValueError( - f'activity should be "cpu" or "cuda", but got {activity}') - - if schedule is not None: - self.schedule = profiler.schedule(**schedule) - else: - self.schedule = None - - self.on_trace_ready = on_trace_ready - self.record_shapes = record_shapes - self.profile_memory = profile_memory - self.with_stack = with_stack - self.with_flops = with_flops - self.json_trace_path = json_trace_path - - @master_only - def before_run(self, runner): - if self.by_epoch and runner.max_epochs < self.profile_iters: - raise ValueError('self.profile_iters should not be greater than ' - f'{runner.max_epochs}') - - if not self.by_epoch and runner.max_iters < self.profile_iters: - raise ValueError('self.profile_iters should not be greater than ' - f'{runner.max_iters}') - - if callable(self.on_trace_ready): # handler - _on_trace_ready = self.on_trace_ready - elif isinstance(self.on_trace_ready, dict): # config of handler - trace_cfg = self.on_trace_ready.copy() - trace_type = trace_cfg.pop('type') # log_trace handler - if trace_type == 'log_trace': - - def _log_handler(prof): - print(prof.key_averages().table(**trace_cfg)) - - _on_trace_ready = _log_handler - elif trace_type == 'tb_trace': # tensorboard_trace handler - try: - import torch_tb_profiler # noqa: F401 - except ImportError: - raise ImportError('please run "pip install ' - 'torch-tb-profiler" to install ' - 'torch_tb_profiler') - _on_trace_ready = torch.profiler.tensorboard_trace_handler( - **trace_cfg) - else: - raise ValueError('trace_type should be "log_trace" or ' - f'"tb_trace", but got {trace_type}') - elif self.on_trace_ready is None: - _on_trace_ready = None # type: ignore - else: - raise ValueError('on_trace_ready should be handler, dict or None, ' - f'but got {type(self.on_trace_ready)}') - - if runner.max_epochs > 1: - warnings.warn(f'profiler will profile {runner.max_epochs} epochs ' - 'instead of 1 epoch. Since profiler will slow down ' - 'the training, it is recommended to train 1 epoch ' - 'with ProfilerHook and adjust your setting according' - ' to the profiler summary. 
During normal training ' - '(epoch > 1), you may disable the ProfilerHook.') - - self.profiler = torch.profiler.profile( - activities=self.activities, - schedule=self.schedule, - on_trace_ready=_on_trace_ready, - record_shapes=self.record_shapes, - profile_memory=self.profile_memory, - with_stack=self.with_stack, - with_flops=self.with_flops) - - self.profiler.__enter__() - runner.logger.info('profiler is profiling...') - - @master_only - def after_train_epoch(self, runner): - if self.by_epoch and runner.epoch == self.profile_iters - 1: - runner.logger.info('profiler may take a few minutes...') - self.profiler.__exit__(None, None, None) - if self.json_trace_path is not None: - self.profiler.export_chrome_trace(self.json_trace_path) - - @master_only - def after_train_iter(self, runner): - self.profiler.step() - if not self.by_epoch and runner.iter == self.profile_iters - 1: - runner.logger.info('profiler may take a few minutes...') - self.profiler.__exit__(None, None, None) - if self.json_trace_path is not None: - self.profiler.export_chrome_trace(self.json_trace_path) diff --git a/spaces/vumichien/canvas_controlnet/ldm/modules/midas/utils.py b/spaces/vumichien/canvas_controlnet/ldm/modules/midas/utils.py deleted file mode 100644 index 9a9d3b5b66370fa98da9e067ba53ead848ea9a59..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/ldm/modules/midas/utils.py +++ /dev/null @@ -1,189 +0,0 @@ -"""Utils for monoDepth.""" -import sys -import re -import numpy as np -import cv2 -import torch - - -def read_pfm(path): - """Read pfm file. - - Args: - path (str): path to file - - Returns: - tuple: (data, scale) - """ - with open(path, "rb") as file: - - color = None - width = None - height = None - scale = None - endian = None - - header = file.readline().rstrip() - if header.decode("ascii") == "PF": - color = True - elif header.decode("ascii") == "Pf": - color = False - else: - raise Exception("Not a PFM file: " + path) - - dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) - if dim_match: - width, height = list(map(int, dim_match.groups())) - else: - raise Exception("Malformed PFM header.") - - scale = float(file.readline().decode("ascii").rstrip()) - if scale < 0: - # little-endian - endian = "<" - scale = -scale - else: - # big-endian - endian = ">" - - data = np.fromfile(file, endian + "f") - shape = (height, width, 3) if color else (height, width) - - data = np.reshape(data, shape) - data = np.flipud(data) - - return data, scale - - -def write_pfm(path, image, scale=1): - """Write pfm file. - - Args: - path (str): pathto file - image (array): data - scale (int, optional): Scale. Defaults to 1. - """ - - with open(path, "wb") as file: - color = None - - if image.dtype.name != "float32": - raise Exception("Image dtype must be float32.") - - image = np.flipud(image) - - if len(image.shape) == 3 and image.shape[2] == 3: # color image - color = True - elif ( - len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1 - ): # greyscale - color = False - else: - raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") - - file.write("PF\n" if color else "Pf\n".encode()) - file.write("%d %d\n".encode() % (image.shape[1], image.shape[0])) - - endian = image.dtype.byteorder - - if endian == "<" or endian == "=" and sys.byteorder == "little": - scale = -scale - - file.write("%f\n".encode() % scale) - - image.tofile(file) - - -def read_image(path): - """Read image and output RGB image (0-1). 
- - Args: - path (str): path to file - - Returns: - array: RGB image (0-1) - """ - img = cv2.imread(path) - - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0 - - return img - - -def resize_image(img): - """Resize image and make it fit for network. - - Args: - img (array): image - - Returns: - tensor: data ready for network - """ - height_orig = img.shape[0] - width_orig = img.shape[1] - - if width_orig > height_orig: - scale = width_orig / 384 - else: - scale = height_orig / 384 - - height = (np.ceil(height_orig / scale / 32) * 32).astype(int) - width = (np.ceil(width_orig / scale / 32) * 32).astype(int) - - img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) - - img_resized = ( - torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float() - ) - img_resized = img_resized.unsqueeze(0) - - return img_resized - - -def resize_depth(depth, width, height): - """Resize depth map and bring to CPU (numpy). - - Args: - depth (tensor): depth - width (int): image width - height (int): image height - - Returns: - array: processed depth - """ - depth = torch.squeeze(depth[0, :, :, :]).to("cpu") - - depth_resized = cv2.resize( - depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC - ) - - return depth_resized - -def write_depth(path, depth, bits=1): - """Write depth map to pfm and png file. - - Args: - path (str): filepath without extension - depth (array): depth - """ - write_pfm(path + ".pfm", depth.astype(np.float32)) - - depth_min = depth.min() - depth_max = depth.max() - - max_val = (2**(8*bits))-1 - - if depth_max - depth_min > np.finfo("float").eps: - out = max_val * (depth - depth_min) / (depth_max - depth_min) - else: - out = np.zeros(depth.shape, dtype=depth.type) - - if bits == 1: - cv2.imwrite(path + ".png", out.astype("uint8")) - elif bits == 2: - cv2.imwrite(path + ".png", out.astype("uint16")) - - return diff --git a/spaces/vvv214/sdxldbooth/Dockerfile b/spaces/vvv214/sdxldbooth/Dockerfile deleted file mode 100644 index a4c8b4f88ec3000f75b1413a72ba55e294692201..0000000000000000000000000000000000000000 --- a/spaces/vvv214/sdxldbooth/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM huggingface/autotrain-advanced:latest -CMD autotrain setup && autotrain app --port 7860 diff --git a/spaces/weidacn/deepdanbooru/deepdanbooru/model/resnet.py b/spaces/weidacn/deepdanbooru/deepdanbooru/model/resnet.py deleted file mode 100644 index 5ec6d7677cfe56e6b2bcf6cdf60ce0dbe8f49734..0000000000000000000000000000000000000000 --- a/spaces/weidacn/deepdanbooru/deepdanbooru/model/resnet.py +++ /dev/null @@ -1,203 +0,0 @@ -import numpy as np -import tensorflow as tf -import deepdanbooru as dd - - -def resnet_bottleneck_block( - x, output_filters, inter_filters, activation=True, se=False -): - c1 = dd.model.layers.conv_bn_relu(x, inter_filters, (1, 1)) - c2 = dd.model.layers.conv_bn_relu(c1, inter_filters, (3, 3)) - c3 = dd.model.layers.conv_bn( - c2, output_filters, (1, 1), bn_gamma_initializer="zeros" - ) - - if se: - c3 = dd.model.layers.squeeze_excitation(c3) - - p = tf.keras.layers.Add()([c3, x]) - - if activation: - return tf.keras.layers.Activation("relu")(p) - else: - return p - - -def resnet_bottleneck_inc_block( - x, output_filters, inter_filters, strides1x1=(1, 1), strides2x2=(2, 2), se=False -): - c1 = dd.model.layers.conv_bn_relu(x, inter_filters, (1, 1), strides=strides1x1) - c2 = dd.model.layers.conv_bn_relu(c1, inter_filters, (3, 3), strides=strides2x2) - c3 = 
dd.model.layers.conv_bn( - c2, output_filters, (1, 1), bn_gamma_initializer="zeros" - ) - - if se: - c3 = dd.model.layers.squeeze_excitation(c3) - - strides = np.multiply(strides1x1, strides2x2) - s = dd.model.layers.conv_bn(x, output_filters, (1, 1), strides=strides) # shortcut - - p = tf.keras.layers.Add()([c3, s]) - - return tf.keras.layers.Activation("relu")(p) - - -def resnet_original_bottleneck_model( - x, filter_sizes, repeat_sizes, final_pool=True, se=False -): - """ - https://github.com/Microsoft/CNTK/blob/master/Examples/Image/Classification/ResNet/Python/resnet_models.py - """ - assert len(filter_sizes) == len(repeat_sizes) - - x = dd.model.layers.conv_bn_relu(x, filter_sizes[0] // 4, (7, 7), strides=(2, 2)) - x = tf.keras.layers.MaxPool2D((3, 3), strides=(2, 2), padding="same")(x) - - for i in range(len(repeat_sizes)): - x = resnet_bottleneck_inc_block( - x=x, - output_filters=filter_sizes[i], - inter_filters=filter_sizes[i] // 4, - strides2x2=(2, 2) if i > 0 else (1, 1), - se=se, - ) - x = dd.model.layers.repeat_blocks( - x=x, - block_delegate=resnet_bottleneck_block, - count=repeat_sizes[i], - output_filters=filter_sizes[i], - inter_filters=filter_sizes[i] // 4, - se=se, - ) - - if final_pool: - x = tf.keras.layers.AveragePooling2D((7, 7), name="ap_final")(x) - - return x - - -def resnet_longterm_bottleneck_model( - x, filter_sizes, repeat_sizes, final_pool=True, se=False -): - """ - Add long-term shortcut. - """ - assert len(filter_sizes) == len(repeat_sizes) - - x = dd.model.layers.conv_bn_relu(x, filter_sizes[0] // 4, (7, 7), strides=(2, 2)) - x = tf.keras.layers.MaxPool2D((3, 3), strides=(2, 2), padding="same")(x) - - for i in range(len(repeat_sizes)): - x = resnet_bottleneck_inc_block( - x=x, - output_filters=filter_sizes[i], - inter_filters=filter_sizes[i] // 4, - strides2x2=(2, 2) if i > 0 else (1, 1), - se=se, - ) - x_1 = dd.model.layers.repeat_blocks( - x=x, - block_delegate=resnet_bottleneck_block, - count=repeat_sizes[i] - 1, - output_filters=filter_sizes[i], - inter_filters=filter_sizes[i] // 4, - se=se, - ) - x_1 = resnet_bottleneck_block( - x_1, - output_filters=filter_sizes[i], - inter_filters=filter_sizes[i] // 4, - activation=False, - se=se, - ) - - x = tf.keras.layers.Add()([x_1, x]) # long-term shortcut - x = tf.keras.layers.Activation("relu")(x) - - if final_pool: - x = tf.keras.layers.AveragePooling2D((7, 7), name="ap_final")(x) - - return x - - -def create_resnet_152(x, output_dim): - """ - Original ResNet-152 Model. 
- """ - filter_sizes = [256, 512, 1024, 2048] - repeat_sizes = [2, 7, 35, 2] - - x = resnet_original_bottleneck_model( - x, filter_sizes=filter_sizes, repeat_sizes=repeat_sizes - ) - - x = tf.keras.layers.Flatten()(x) - x = tf.keras.layers.Dense(output_dim)(x) - x = tf.keras.layers.Activation("sigmoid", dtype="float32")(x) - - return x - - -def create_resnet_custom_v1(x, output_dim): - """ - DeepDanbooru web (until 2019/04/20) - Short, wide - """ - filter_sizes = [256, 512, 1024, 2048, 4096] - repeat_sizes = [2, 7, 35, 2, 2] - - x = resnet_original_bottleneck_model( - x, filter_sizes=filter_sizes, repeat_sizes=repeat_sizes, final_pool=False - ) - - x = dd.model.layers.conv_gap(x, output_dim) - x = tf.keras.layers.Activation("sigmoid", dtype="float32")(x) - - return x - - -def create_resnet_custom_v2(x, output_dim): - """ - Experimental (blazing-deep network) - Deep, narrow - """ - filter_sizes = [256, 512, 1024, 1024, 1024, 2048] - repeat_sizes = [2, 7, 40, 16, 16, 6] - - x = resnet_original_bottleneck_model( - x, filter_sizes=filter_sizes, repeat_sizes=repeat_sizes, final_pool=False - ) - - x = dd.model.layers.conv_gap(x, output_dim) - x = tf.keras.layers.Activation("sigmoid", dtype="float32")(x) - - return x - - -def create_resnet_custom_v3(x, output_dim): - filter_sizes = [256, 512, 1024, 1024, 2048, 4096] - repeat_sizes = [2, 7, 19, 19, 2, 2] - - x = resnet_original_bottleneck_model( - x, filter_sizes=filter_sizes, repeat_sizes=repeat_sizes, final_pool=False - ) - - x = dd.model.layers.conv_gap(x, output_dim) - x = tf.keras.layers.Activation("sigmoid", dtype="float32")(x) - - return x - - -def create_resnet_custom_v4(x, output_dim): - filter_sizes = [256, 512, 1024, 1024, 1024, 2048] - repeat_sizes = [2, 7, 10, 10, 10, 2] - - x = resnet_original_bottleneck_model( - x, filter_sizes=filter_sizes, repeat_sizes=repeat_sizes, final_pool=False - ) - - x = dd.model.layers.conv_gap(x, output_dim) - x = tf.keras.layers.Activation("sigmoid", dtype="float32")(x) - - return x diff --git a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/actions/test_design_api.py b/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/actions/test_design_api.py deleted file mode 100644 index e6a396ad008c0b890afeb196456c027a013e76de..0000000000000000000000000000000000000000 --- a/spaces/wffcyrus/MetaGPT-v1/tests/metagpt/actions/test_design_api.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""" -@Time : 2023/5/11 19:26 -@Author : alexanderwu -@File : test_design_api.py -""" -import pytest - -from metagpt.actions.design_api import WriteDesign -from metagpt.logs import logger -from tests.metagpt.actions.mock import PRD_SAMPLE - - -@pytest.mark.asyncio -async def test_design_api(): - prd = "我们需要一个音乐播放器,它应该有播放、暂停、上一曲、下一曲等功能。" - - design_api = WriteDesign("design_api") - - result = await design_api.run(prd) - logger.info(result) - assert len(result) > 0 - - -@pytest.mark.asyncio -async def test_design_api_calculator(): - prd = PRD_SAMPLE - - design_api = WriteDesign("design_api") - result = await design_api.run(prd) - logger.info(result) - - assert len(result) > 10 diff --git a/spaces/wing-nus/SciAssist/controlled_summarization.py b/spaces/wing-nus/SciAssist/controlled_summarization.py deleted file mode 100644 index 904785643f7585bdf9bb95073ae0716db84d22af..0000000000000000000000000000000000000000 --- a/spaces/wing-nus/SciAssist/controlled_summarization.py +++ /dev/null @@ -1,87 +0,0 @@ -from typing import List, Tuple -import torch -from SciAssist import Summarization -import os -import requests -device = 
"gpu" if torch.cuda.is_available() else "cpu" - -ctrlsum_pipeline = Summarization(os_name="nt",checkpoint="google/flan-t5-base",device=device) - - -def download_pdf(url, dest_folder): - """ - Download a PDF from a given URL and save it to a specified destination folder. - Parameters: - url (str): URL of the PDF - dest_folder (str): Destination folder to save the downloaded PDF - """ - - if not os.path.exists(dest_folder): - os.makedirs(dest_folder) - - response = requests.get(url, stream=True) - filename = os.path.join(dest_folder, url.split("/")[-1]) - - with open(filename, 'wb') as file: - for chunk in response.iter_content(chunk_size=1024): - if chunk: - file.write(chunk) - print(f"Downloaded {url} to {filename}") - return filename - - -def ctrlsum_for_str(input,length=None, keywords=None) -> List[Tuple[str, str]]: - - if keywords is not None: - keywords = keywords.strip().split(",") - if keywords[0] == "": - keywords = None - if length==0 or length is None: - length = None - results = ctrlsum_pipeline.predict(input, type="str", - length=length, keywords=keywords) - - output = [] - for res in results["summary"]: - output.append(f"{res}\n\n") - return "".join(output) - - -def ctrlsum_for_file(input=None, length=None, keywords="", text="", url="") -> List[Tuple[str, str, str]]: - if input == None and url == "": - if text=="": - return None,"Input cannot be left blank.",None - else: - return ctrlsum_for_str(text,length,keywords),text, None - else: - filename="" - if url != "": - if len(url) > 4: - filename = download_pdf(url, './cache/') - else: - filename = input.name - if keywords != "": - keywords = keywords.strip().split(",") - if keywords[0] == "": - keywords = None - if length==0: - length = None - # Identify the format of input and parse reference strings - if filename[-4:] == ".txt": - results = ctrlsum_pipeline.predict(filename, type="txt", - save_results=False, - length=length, keywords=keywords) - elif filename[-4:] == ".pdf": - results = ctrlsum_pipeline.predict(filename, - save_results=False, length=length, keywords=keywords) - else: - return "File Format Error !", None, filename - - output = [] - for res in results["summary"]: - output.append(f"{res}\n\n") - return "".join(output), results["raw_text"], filename - - - -ctrlsum_str_example = "Language model pre-training has been shown to be effective for improving many natural language processing tasks ( Dai and Le , 2015 ; Peters et al. , 2018a ; Radford et al. , 2018 ; Howard and Ruder , 2018 ) . These include sentence-level tasks such as natural language inference ( Bowman et al. , 2015 ; Williams et al. , 2018 ) and paraphrasing ( Dolan and Brockett , 2005 ) , which aim to predict the relationships between sentences by analyzing them holistically , as well as token-level tasks such as named entity recognition and question answering , where models are required to produce fine-grained output at the token level ( Tjong Kim Sang and De Meulder , 2003 ; Rajpurkar et al. , 2016 ) . There are two existing strategies for applying pre-trained language representations to downstream tasks : feature-based and fine-tuning . The feature-based approach , such as ELMo ( Peters et al. , 2018a ) , uses task-specific architectures that include the pre-trained representations as additional features . The fine-tuning approach , such as the Generative Pre-trained Transformer ( OpenAI GPT ) ( Radford et al. 
, 2018 ) , introduces minimal task-specific parameters , and is trained on the downstream tasks by simply fine-tuning all pretrained parameters . The two approaches share the same objective function during pre-training , where they use unidirectional language models to learn general language representations . We argue that current techniques restrict the power of the pre-trained representations , especially for the fine-tuning approaches . The major limitation is that standard language models are unidirectional , and this limits the choice of architectures that can be used during pre-training . For example , in OpenAI GPT , the authors use a left-toright architecture , where every token can only attend to previous tokens in the self-attention layers of the Transformer ( Vaswani et al. , 2017 ) . Such restrictions are sub-optimal for sentence-level tasks , and could be very harmful when applying finetuning based approaches to token-level tasks such as question answering , where it is crucial to incorporate context from both directions . In this paper , we improve the fine-tuning based approaches by proposing BERT : Bidirectional Encoder Representations from Transformers . BERT alleviates the previously mentioned unidirectionality constraint by using a `` masked language model '' ( MLM ) pre-training objective , inspired by the Cloze task ( Taylor , 1953 ) . The masked language model randomly masks some of the tokens from the input , and the objective is to predict the original vocabulary id of the masked arXiv:1810.04805v2 [ cs.CL ] 24 May 2019 word based only on its context . Unlike left-toright language model pre-training , the MLM objective enables the representation to fuse the left and the right context , which allows us to pretrain a deep bidirectional Transformer . In addition to the masked language model , we also use a `` next sentence prediction '' task that jointly pretrains text-pair representations . The contributions of our paper are as follows : • We demonstrate the importance of bidirectional pre-training for language representations . Unlike Radford et al . ( 2018 ) , which uses unidirectional language models for pre-training , BERT uses masked language models to enable pretrained deep bidirectional representations . This is also in contrast to Peters et al . ( 2018a ) , which uses a shallow concatenation of independently trained left-to-right and right-to-left LMs . • We show that pre-trained representations reduce the need for many heavily-engineered taskspecific architectures . BERT is the first finetuning based representation model that achieves state-of-the-art performance on a large suite of sentence-level and token-level tasks , outperforming many task-specific architectures . • BERT advances the state of the art for eleven NLP tasks . The code and pre-trained models are available at https : //github.com/ google-research/bert . 
" \ No newline at end of file diff --git a/spaces/wong26/faster-whisper-webui/src/source.py b/spaces/wong26/faster-whisper-webui/src/source.py deleted file mode 100644 index e304e278bfae8ef289c999fc76311ce01b547991..0000000000000000000000000000000000000000 --- a/spaces/wong26/faster-whisper-webui/src/source.py +++ /dev/null @@ -1,80 +0,0 @@ -# Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourself -import os -import pathlib -from typing import List -import zipfile - -import ffmpeg -from more_itertools import unzip - -from src.download import ExceededMaximumDuration, download_url - -MAX_FILE_PREFIX_LENGTH = 17 - -class AudioSource: - def __init__(self, source_path, source_name = None, audio_duration = None): - self.source_path = source_path - self.source_name = source_name - self._audio_duration = audio_duration - - # Load source name if not provided - if (self.source_name is None): - file_path = pathlib.Path(self.source_path) - self.source_name = file_path.name - - def get_audio_duration(self): - if self._audio_duration is None: - self._audio_duration = float(ffmpeg.probe(self.source_path)["format"]["duration"]) - - return self._audio_duration - - def get_full_name(self): - return self.source_name - - def get_short_name(self, max_length: int = MAX_FILE_PREFIX_LENGTH): - file_path = pathlib.Path(self.source_name) - short_name = file_path.stem[:max_length] + file_path.suffix - - return short_name - - def __str__(self) -> str: - return self.source_path - -class AudioSourceCollection: - def __init__(self, sources: List[AudioSource]): - self.sources = sources - - def __iter__(self): - return iter(self.sources) - -def get_audio_source_collection(urlData: str, multipleFiles: List, microphoneData: str, input_audio_max_duration: float = -1) -> List[AudioSource]: - output: List[AudioSource] = [] - - if urlData: - # Download from YouTube. This could also be a playlist or a channel. - output.extend([ AudioSource(x) for x in download_url(urlData, input_audio_max_duration, playlistItems=None) ]) - else: - # Add input files - if (multipleFiles is not None): - output.extend([ AudioSource(x.name) for x in multipleFiles ]) - if (microphoneData is not None): - output.append(AudioSource(microphoneData)) - - total_duration = 0 - - # Calculate total audio length. We do this even if input_audio_max_duration - # is disabled to ensure that all the audio files are valid. 
- for source in output: - audioDuration = ffmpeg.probe(source.source_path)["format"]["duration"] - total_duration += float(audioDuration) - - # Save audio duration - source._audio_duration = float(audioDuration) - - # Ensure the total duration of the audio is not too long - if input_audio_max_duration > 0: - if float(total_duration) > input_audio_max_duration: - raise ExceededMaximumDuration(videoDuration=total_duration, maxDuration=input_audio_max_duration, message="Video(s) is too long") - - # Return a list of audio sources - return output \ No newline at end of file diff --git a/spaces/xc9/VITS-Umamusume-voice-synthesizer/attentions.py b/spaces/xc9/VITS-Umamusume-voice-synthesizer/attentions.py deleted file mode 100644 index 86bc73b5fe98cc7b443e9078553920346c996707..0000000000000000000000000000000000000000 --- a/spaces/xc9/VITS-Umamusume-voice-synthesizer/attentions.py +++ /dev/null @@ -1,300 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -from modules import LayerNorm - - -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - 
self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." 
- scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. - x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # padd along column - x = F.pad(x, commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - x_flat = F.pad(x_flat, commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. 
- Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, commons.convert_pad_shape(padding)) - return x diff --git a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/tools/parse_test_res.py b/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/tools/parse_test_res.py deleted file mode 100644 index fd5b0189d4ef130b0e772d483cf4c1e5643dff06..0000000000000000000000000000000000000000 --- a/spaces/xfys/yolov5_tracking/trackers/strong_sort/deep/reid/tools/parse_test_res.py +++ /dev/null @@ -1,103 +0,0 @@ -""" -This script aims to automate the process of calculating average results -stored in the test.log files over multiple splits. - -How to use: -For example, you have done evaluation over 20 splits on VIPeR, leading to -the following file structure - -log/ - eval_viper/ - split_0/ - test.log-xxxx - split_1/ - test.log-xxxx - split_2/ - test.log-xxxx - ... 
- -You can run the following command in your terminal to get the average performance: -$ python tools/parse_test_res.py log/eval_viper -""" -import os -import re -import glob -import numpy as np -import argparse -from collections import defaultdict - -from torchreid.utils import check_isfile, listdir_nohidden - - -def parse_file(filepath, regex_mAP, regex_r1, regex_r5, regex_r10, regex_r20): - results = {} - - with open(filepath, 'r') as f: - lines = f.readlines() - - for line in lines: - line = line.strip() - - match_mAP = regex_mAP.search(line) - if match_mAP: - mAP = float(match_mAP.group(1)) - results['mAP'] = mAP - - match_r1 = regex_r1.search(line) - if match_r1: - r1 = float(match_r1.group(1)) - results['r1'] = r1 - - match_r5 = regex_r5.search(line) - if match_r5: - r5 = float(match_r5.group(1)) - results['r5'] = r5 - - match_r10 = regex_r10.search(line) - if match_r10: - r10 = float(match_r10.group(1)) - results['r10'] = r10 - - match_r20 = regex_r20.search(line) - if match_r20: - r20 = float(match_r20.group(1)) - results['r20'] = r20 - - return results - - -def main(args): - regex_mAP = re.compile(r'mAP: ([\.\deE+-]+)%') - regex_r1 = re.compile(r'Rank-1 : ([\.\deE+-]+)%') - regex_r5 = re.compile(r'Rank-5 : ([\.\deE+-]+)%') - regex_r10 = re.compile(r'Rank-10 : ([\.\deE+-]+)%') - regex_r20 = re.compile(r'Rank-20 : ([\.\deE+-]+)%') - - final_res = defaultdict(list) - - directories = listdir_nohidden(args.directory, sort=True) - num_dirs = len(directories) - for directory in directories: - fullpath = os.path.join(args.directory, directory) - filepath = glob.glob(os.path.join(fullpath, 'test.log*'))[0] - check_isfile(filepath) - print(f'Parsing {filepath}') - res = parse_file( - filepath, regex_mAP, regex_r1, regex_r5, regex_r10, regex_r20 - ) - for key, value in res.items(): - final_res[key].append(value) - - print('Finished parsing') - print(f'The average results over {num_dirs} splits are shown below') - - for key, values in final_res.items(): - mean_val = np.mean(values) - print(f'{key}: {mean_val:.1f}') - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('directory', type=str, help='Path to directory') - args = parser.parse_args() - main(args) diff --git a/spaces/xnetba/Chat_advance/modules/models/minimax.py b/spaces/xnetba/Chat_advance/modules/models/minimax.py deleted file mode 100644 index 2e1b50280fd2fbc43a69caaf660a0d64beaa405b..0000000000000000000000000000000000000000 --- a/spaces/xnetba/Chat_advance/modules/models/minimax.py +++ /dev/null @@ -1,161 +0,0 @@ -import json -import os - -import colorama -import requests -import logging - -from modules.models.base_model import BaseLLMModel -from modules.presets import STANDARD_ERROR_MSG, GENERAL_ERROR_MSG, TIMEOUT_STREAMING, TIMEOUT_ALL, i18n - -group_id = os.environ.get("MINIMAX_GROUP_ID", "") - - -class MiniMax_Client(BaseLLMModel): - """ - MiniMax Client - 接口文档见 https://api.minimax.chat/document/guides/chat - """ - - def __init__(self, model_name, api_key, user_name="", system_prompt=None): - super().__init__(model_name=model_name, user=user_name) - self.url = f'https://api.minimax.chat/v1/text/chatcompletion?GroupId={group_id}' - self.history = [] - self.api_key = api_key - self.system_prompt = system_prompt - self.headers = { - "Authorization": f"Bearer {api_key}", - "Content-Type": "application/json" - } - - def get_answer_at_once(self): - # minimax temperature is (0,1] and base model temperature is [0,2], and yuan 0.9 == base 1 so need to convert - temperature = self.temperature * 
0.9 if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - - request_body = { - "model": self.model_name.replace('minimax-', ''), - "temperature": temperature, - "skip_info_mask": True, - 'messages': [{"sender_type": "USER", "text": self.history[-1]['content']}] - } - if self.n_choices: - request_body['beam_width'] = self.n_choices - if self.system_prompt: - request_body['prompt'] = self.system_prompt - if self.max_generation_token: - request_body['tokens_to_generate'] = self.max_generation_token - if self.top_p: - request_body['top_p'] = self.top_p - - response = requests.post(self.url, headers=self.headers, json=request_body) - - res = response.json() - answer = res['reply'] - total_token_count = res["usage"]["total_tokens"] - return answer, total_token_count - - def get_answer_stream_iter(self): - response = self._get_response(stream=True) - if response is not None: - iter = self._decode_chat_response(response) - partial_text = "" - for i in iter: - partial_text += i - yield partial_text - else: - yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG - - def _get_response(self, stream=False): - minimax_api_key = self.api_key - history = self.history - logging.debug(colorama.Fore.YELLOW + - f"{history}" + colorama.Fore.RESET) - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {minimax_api_key}", - } - - temperature = self.temperature * 0.9 if self.temperature <= 1 else 0.9 + (self.temperature - 1) / 10 - - messages = [] - for msg in self.history: - if msg['role'] == 'user': - messages.append({"sender_type": "USER", "text": msg['content']}) - else: - messages.append({"sender_type": "BOT", "text": msg['content']}) - - request_body = { - "model": self.model_name.replace('minimax-', ''), - "temperature": temperature, - "skip_info_mask": True, - 'messages': messages - } - if self.n_choices: - request_body['beam_width'] = self.n_choices - if self.system_prompt: - lines = self.system_prompt.splitlines() - if lines[0].find(":") != -1 and len(lines[0]) < 20: - request_body["role_meta"] = { - "user_name": lines[0].split(":")[0], - "bot_name": lines[0].split(":")[1] - } - lines.pop() - request_body["prompt"] = "\n".join(lines) - if self.max_generation_token: - request_body['tokens_to_generate'] = self.max_generation_token - else: - request_body['tokens_to_generate'] = 512 - if self.top_p: - request_body['top_p'] = self.top_p - - if stream: - timeout = TIMEOUT_STREAMING - request_body['stream'] = True - request_body['use_standard_sse'] = True - else: - timeout = TIMEOUT_ALL - try: - response = requests.post( - self.url, - headers=headers, - json=request_body, - stream=stream, - timeout=timeout, - ) - except: - return None - - return response - - def _decode_chat_response(self, response): - error_msg = "" - for chunk in response.iter_lines(): - if chunk: - chunk = chunk.decode() - chunk_length = len(chunk) - print(chunk) - try: - chunk = json.loads(chunk[6:]) - except json.JSONDecodeError: - print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") - error_msg += chunk - continue - if chunk_length > 6 and "delta" in chunk["choices"][0]: - if "finish_reason" in chunk["choices"][0] and chunk["choices"][0]["finish_reason"] == "stop": - self.all_token_counts.append(chunk["usage"]["total_tokens"] - sum(self.all_token_counts)) - break - try: - yield chunk["choices"][0]["delta"] - except Exception as e: - logging.error(f"Error: {e}") - continue - if error_msg: - try: - error_msg = json.loads(error_msg) - if 'base_resp' in error_msg: - status_code = error_msg['base_resp']['status_code'] - 
status_msg = error_msg['base_resp']['status_msg'] - raise Exception(f"{status_code} - {status_msg}") - except json.JSONDecodeError: - pass - raise Exception(error_msg) diff --git a/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/replicate.py b/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/replicate.py deleted file mode 100644 index b71c7b8ed51a1d6c55b1f753bdd8d90bad79bd06..0000000000000000000000000000000000000000 --- a/spaces/xp3857/Image_Restoration_Colorization/Global/detection_models/Synchronized-BatchNorm-PyTorch/sync_batchnorm/replicate.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- -# File : replicate.py -# Author : Jiayuan Mao -# Email : maojiayuan@gmail.com -# Date : 27/01/2018 -# -# This file is part of Synchronized-BatchNorm-PyTorch. -# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch -# Distributed under MIT License. - -import functools - -from torch.nn.parallel.data_parallel import DataParallel - -__all__ = [ - 'CallbackContext', - 'execute_replication_callbacks', - 'DataParallelWithCallback', - 'patch_replication_callback' -] - - -class CallbackContext(object): - pass - - -def execute_replication_callbacks(modules): - """ - Execute an replication callback `__data_parallel_replicate__` on each module created by original replication. - - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Note that, as all modules are isomorphism, we assign each sub-module with a context - (shared among multiple copies of this module on different devices). - Through this context, different copies can share some information. - - We guarantee that the callback on the master copy (the first copy) will be called ahead of calling the callback - of any slave copies. - """ - master_copy = modules[0] - nr_modules = len(list(master_copy.modules())) - ctxs = [CallbackContext() for _ in range(nr_modules)] - - for i, module in enumerate(modules): - for j, m in enumerate(module.modules()): - if hasattr(m, '__data_parallel_replicate__'): - m.__data_parallel_replicate__(ctxs[j], i) - - -class DataParallelWithCallback(DataParallel): - """ - Data Parallel with a replication callback. - - An replication callback `__data_parallel_replicate__` of each module will be invoked after being created by - original `replicate` function. - The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)` - - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - # sync_bn.__data_parallel_replicate__ will be invoked. - """ - - def replicate(self, module, device_ids): - modules = super(DataParallelWithCallback, self).replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - -def patch_replication_callback(data_parallel): - """ - Monkey-patch an existing `DataParallel` object. Add the replication callback. - Useful when you have customized `DataParallel` implementation. 
- - Examples: - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallel(sync_bn, device_ids=[0, 1]) - > patch_replication_callback(sync_bn) - # this is equivalent to - > sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False) - > sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1]) - """ - - assert isinstance(data_parallel, DataParallel) - - old_replicate = data_parallel.replicate - - @functools.wraps(old_replicate) - def new_replicate(module, device_ids): - modules = old_replicate(module, device_ids) - execute_replication_callbacks(modules) - return modules - - data_parallel.replicate = new_replicate diff --git a/spaces/xuxw98/TAPA/generate/lora.py b/spaces/xuxw98/TAPA/generate/lora.py deleted file mode 100644 index 38a3cf63e96b4a938c8fddc6bcd450a8a5c2d0ce..0000000000000000000000000000000000000000 --- a/spaces/xuxw98/TAPA/generate/lora.py +++ /dev/null @@ -1,118 +0,0 @@ -import sys -import time -import warnings -from pathlib import Path -from typing import Optional - -import lightning as L -import torch - -# support running without installing as a package -wd = Path(__file__).parent.parent.resolve() -sys.path.append(str(wd)) - -from generate import generate -from lit_llama import Tokenizer, LLaMA -from lit_llama.lora import lora -from lit_llama.utils import lazy_load, llama_model_lookup -from scripts.prepare_alpaca import generate_prompt - -lora_r = 8 -lora_alpha = 16 -lora_dropout = 0.05 - - -def main( - prompt: str = "What food do lamas eat?", - input: str = "", - lora_path: Path = Path("out/lora/alpaca/lit-llama-lora-finetuned.pth"), - pretrained_path: Path = Path("checkpoints/lit-llama/7B/lit-llama.pth"), - tokenizer_path: Path = Path("checkpoints/lit-llama/tokenizer.model"), - quantize: Optional[str] = None, - max_new_tokens: int = 100, - top_k: int = 200, - temperature: float = 0.8, -) -> None: - """Generates a response based on a given instruction and an optional input. - This script will only work with checkpoints from the instruction-tuned LoRA model. - See `finetune_lora.py`. - - Args: - prompt: The prompt/instruction (Alpaca style). - lora_path: Path to the checkpoint with trained LoRA weights, which are the output of - `finetune_lora.py`. - input: Optional input (Alpaca style). - pretrained_path: The path to the checkpoint with pretrained LLaMA weights. - tokenizer_path: The tokenizer path to load. - quantize: Whether to quantize the model and using which method: - ``"llm.int8"``: LLM.int8() mode, - ``"gptq.int4"``: GPTQ 4-bit mode. - max_new_tokens: The number of generation steps to take. - top_k: The number of top most probable tokens to consider in the sampling process. - temperature: A value controlling the randomness of the sampling process. Higher values result in more random - samples. - """ - assert lora_path.is_file() - assert pretrained_path.is_file() - assert tokenizer_path.is_file() - - if quantize is not None: - raise NotImplementedError("Quantization in LoRA is not supported yet") - - precision = "bf16-true" if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else "32-true" - fabric = L.Fabric(devices=1, precision=precision) - - print("Loading model ...", file=sys.stderr) - t0 = time.time() - - with lazy_load(pretrained_path) as pretrained_checkpoint, lazy_load(lora_path) as lora_checkpoint: - name = llama_model_lookup(pretrained_checkpoint) - - with fabric.init_module(empty_init=True), lora(r=lora_r, alpha=lora_alpha, dropout=lora_dropout, enabled=True): - model = LLaMA.from_name(name) - - # 1. 
Load the pretrained weights - model.load_state_dict(pretrained_checkpoint, strict=False) - # 2. Load the fine-tuned lora weights - model.load_state_dict(lora_checkpoint, strict=False) - - print(f"Time to load model: {time.time() - t0:.02f} seconds.", file=sys.stderr) - - model.eval() - model = fabric.setup(model) - - tokenizer = Tokenizer(tokenizer_path) - sample = {"instruction": prompt, "input": input} - prompt = generate_prompt(sample) - encoded = tokenizer.encode(prompt, bos=True, eos=False, device=model.device) - - t0 = time.perf_counter() - output = generate( - model, - idx=encoded, - max_new_tokens=max_new_tokens, - temperature=temperature, - top_k=top_k, - eos_id=tokenizer.eos_id - ) - t = time.perf_counter() - t0 - - output = tokenizer.decode(output) - output = output.split("### Response:")[1].strip() - print(output) - - print(f"\n\nTime for inference: {t:.02f} sec total, {max_new_tokens / t:.02f} tokens/sec", file=sys.stderr) - if fabric.device.type == "cuda": - print(f"Memory used: {torch.cuda.max_memory_reserved() / 1e9:.02f} GB", file=sys.stderr) - - -if __name__ == "__main__": - from jsonargparse import CLI - - torch.set_float32_matmul_precision("high") - warnings.filterwarnings( - # Triggered internally at ../aten/src/ATen/EmptyTensor.cpp:31 - "ignore", - message="ComplexHalf support is experimental and many operators don't support it yet" - ) - CLI(main) diff --git a/spaces/yanli01/gpt01/modules/shared.py b/spaces/yanli01/gpt01/modules/shared.py deleted file mode 100644 index a9e72580aa7ae48f907e923a09099513570a9ad8..0000000000000000000000000000000000000000 --- a/spaces/yanli01/gpt01/modules/shared.py +++ /dev/null @@ -1,55 +0,0 @@ -from modules.presets import COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST -import os -import queue - -class State: - interrupted = False - multi_api_key = False - completion_url = COMPLETION_URL - balance_api_url = BALANCE_API_URL - usage_api_url = USAGE_API_URL - - def interrupt(self): - self.interrupted = True - - def recover(self): - self.interrupted = False - - def set_api_host(self, api_host): - self.completion_url = f"https://{api_host}/v1/chat/completions" - self.balance_api_url = f"https://{api_host}/dashboard/billing/credit_grants" - self.usage_api_url = f"https://{api_host}/dashboard/billing/usage" - os.environ["OPENAI_API_BASE"] = f"https://{api_host}/v1" - - def reset_api_host(self): - self.completion_url = COMPLETION_URL - self.balance_api_url = BALANCE_API_URL - self.usage_api_url = USAGE_API_URL - os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}/v1" - return API_HOST - - def reset_all(self): - self.interrupted = False - self.completion_url = COMPLETION_URL - - def set_api_key_queue(self, api_key_list): - self.multi_api_key = True - self.api_key_queue = queue.Queue() - for api_key in api_key_list: - self.api_key_queue.put(api_key) - - def switching_api_key(self, func): - if not hasattr(self, "api_key_queue"): - return func - - def wrapped(*args, **kwargs): - api_key = self.api_key_queue.get() - args[0].api_key = api_key - ret = func(*args, **kwargs) - self.api_key_queue.put(api_key) - return ret - - return wrapped - - -state = State() diff --git a/spaces/yderre-aubay/midi-player-demo/src/components/TextField.tsx b/spaces/yderre-aubay/midi-player-demo/src/components/TextField.tsx deleted file mode 100644 index 1e07d8c31bb7c0133ed168709db852cf5a55638b..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/components/TextField.tsx +++ /dev/null @@ -1,23 +0,0 @@ -import styled from 
"@emotion/styled" - -export const TextField = styled.input` - display: block; - appearance: none; - border: none; - background: inherit; - border: 1px solid ${({ theme }) => theme.dividerColor}; - border-radius: 0.25rem; - height: 3rem; - padding: 0 1rem; - align-items: center; - justify-content: center; - box-sizing: border-box; - color: inherit; - font-size: 1rem; - font-family: inherit; - outline: none; - - &:focus { - border-color: ${({ theme }) => theme.themeColor}; - } -` diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/KeyboardShortcut/GlobalKeyboardShortcut.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/KeyboardShortcut/GlobalKeyboardShortcut.tsx deleted file mode 100644 index 154d97236c5e4485beb212e410e86cb6dbdc17e9..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/KeyboardShortcut/GlobalKeyboardShortcut.tsx +++ /dev/null @@ -1,100 +0,0 @@ -import { FC, useEffect } from "react" -import { - fastForwardOneBar, - nextTrack, - playOrPause, - previousTrack, - rewindOneBar, - stop, - toggleGhost, - toggleMute, - toggleRecording, - toggleSolo, -} from "../../actions" -import { redo, undo } from "../../actions/history" -import { useStores } from "../../hooks/useStores" -import { KeyboardShortcut } from "./KeyboardShortcut" - -export const GlobalKeyboardShortcut: FC = () => { - const rootStore = useStores() - const { rootViewStore, router } = rootStore - - useEffect(() => { - // prevent zooming - const onWheel = (e: WheelEvent) => { - // Touchpad pinches are translated into wheel with ctrl event - if (e.ctrlKey) { - e.preventDefault() - } - } - - document.addEventListener("wheel", onWheel, { passive: false }) - - // disable bounce scroll (Safari does not support overscroll-behavior CSS) - const onTouchMove = (e: TouchEvent) => { - e.preventDefault - } - - document.addEventListener("touchmove", onTouchMove, { passive: false }) - - // do not allow to open the default context menu - document.oncontextmenu = (e) => e.preventDefault() - - return () => { - document.removeEventListener("wheel", onWheel) - document.removeEventListener("touchmove", onTouchMove) - document.oncontextmenu = null - } - }, []) - - return ( - <KeyboardShortcut - actions={[ - { code: "Space", run: () => playOrPause(rootStore)() }, - { - code: "KeyZ", - metaKey: true, - shiftKey: true, - run: () => redo(rootStore)(), - }, - { - code: "KeyZ", - metaKey: true, - shiftKey: false, - run: () => undo(rootStore)(), - }, - { code: "KeyY", metaKey: true, run: () => redo(rootStore)() }, - { - // Press ? 
- code: "Slash", - shiftKey: true, - run: () => (rootViewStore.openHelp = true), - }, - { code: "Enter", run: () => stop(rootStore)() }, - { code: "KeyA", run: () => rewindOneBar(rootStore)() }, - { code: "KeyD", run: () => fastForwardOneBar(rootStore)() }, - { code: "KeyS", run: () => nextTrack(rootStore)() }, - { code: "KeyW", run: () => previousTrack(rootStore)() }, - { code: "KeyN", run: () => toggleSolo(rootStore)() }, - { code: "KeyM", run: () => toggleMute(rootStore)() }, - { code: "KeyR", run: () => toggleRecording(rootStore)() }, - { code: "Comma", run: () => toggleGhost(rootStore)() }, - { - code: "Digit1", - metaKey: true, - run: () => (router.path = "/track"), - }, - { - code: "Digit2", - metaKey: true, - run: () => (router.path = "/arrange"), - }, - { - code: "Digit3", - metaKey: true, - run: () => (router.path = "/tempo"), - }, - ]} - /> - ) -} diff --git a/spaces/yhevis/Real-CUGAN2/upcunet_v3.py b/spaces/yhevis/Real-CUGAN2/upcunet_v3.py deleted file mode 100644 index f7919a6cc9efe3b8af73a73e30825a4c7d7d76da..0000000000000000000000000000000000000000 --- a/spaces/yhevis/Real-CUGAN2/upcunet_v3.py +++ /dev/null @@ -1,714 +0,0 @@ -import torch -from torch import nn as nn -from torch.nn import functional as F -import os, sys -import numpy as np - -root_path = os.path.abspath('.') -sys.path.append(root_path) - - -class SEBlock(nn.Module): - def __init__(self, in_channels, reduction=8, bias=False): - super(SEBlock, self).__init__() - self.conv1 = nn.Conv2d(in_channels, in_channels // reduction, 1, 1, 0, bias=bias) - self.conv2 = nn.Conv2d(in_channels // reduction, in_channels, 1, 1, 0, bias=bias) - - def forward(self, x): - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - x0 = torch.mean(x.float(), dim=(2, 3), keepdim=True).half() - else: - x0 = torch.mean(x, dim=(2, 3), keepdim=True) - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - def forward_mean(self, x, x0): - x0 = self.conv1(x0) - x0 = F.relu(x0, inplace=True) - x0 = self.conv2(x0) - x0 = torch.sigmoid(x0) - x = torch.mul(x, x0) - return x - - -class UNetConv(nn.Module): - def __init__(self, in_channels, mid_channels, out_channels, se): - super(UNetConv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d(in_channels, mid_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - nn.Conv2d(mid_channels, out_channels, 3, 1, 0), - nn.LeakyReLU(0.1, inplace=True), - ) - if se: - self.seblock = SEBlock(out_channels, reduction=8, bias=True) - else: - self.seblock = None - - def forward(self, x): - z = self.conv(x) - if self.seblock is not None: - z = self.seblock(z) - return z - - -class UNet1(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = 
self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet1x3(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet1x3, self).__init__() - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 128, 64, se=True) - self.conv2_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv3 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 5, 3, 2) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - def forward_a(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x1, x2): - x2 = self.conv2_up(x2) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - - x1 = F.pad(x1, (-4, -4, -4, -4)) - x3 = self.conv3(x1 + x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - z = self.conv_bottom(x3) - return z - - -class UNet2(nn.Module): - def __init__(self, in_channels, out_channels, deconv): - super(UNet2, self).__init__() - - self.conv1 = UNetConv(in_channels, 32, 64, se=False) - self.conv1_down = nn.Conv2d(64, 64, 2, 2, 0) - self.conv2 = UNetConv(64, 64, 128, se=True) - self.conv2_down = nn.Conv2d(128, 128, 2, 2, 0) - self.conv3 = UNetConv(128, 256, 128, se=True) - self.conv3_up = nn.ConvTranspose2d(128, 128, 2, 2, 0) - self.conv4 = UNetConv(128, 64, 64, se=True) - self.conv4_up = nn.ConvTranspose2d(64, 64, 2, 2, 0) - self.conv5 = nn.Conv2d(64, 64, 3, 1, 0) - - if deconv: - self.conv_bottom = nn.ConvTranspose2d(64, out_channels, 4, 2, 3) - else: - self.conv_bottom = nn.Conv2d(64, out_channels, 3, 1, 0) - - for m in self.modules(): - if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.Linear): - nn.init.normal_(m.weight, 0, 0.01) - if m.bias is not None: - nn.init.constant_(m.bias, 0) - - def forward(self, x): - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2(x2) - - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3(x3) - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) 
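# Note: throughout UNet1/UNet1x3 above (and in the remainder of UNet2.forward just below),
# F.pad with negative values is used as a crop, so the skip-connection branches line up with
# the valid-convolution outputs before they are summed. A minimal standalone sketch of that
# behaviour (shapes are arbitrary, for illustration only):
import torch
import torch.nn.functional as F

x = torch.randn(1, 64, 100, 100)
cropped = F.pad(x, (-4, -4, -4, -4))  # negative padding trims 4 pixels from each border
print(cropped.shape)                  # torch.Size([1, 64, 92, 92])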
- - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4(x2 + x3) - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - def forward_a(self, x): # conv234结尾有se - x1 = self.conv1(x) - x2 = self.conv1_down(x1) - x2 = F.leaky_relu(x2, 0.1, inplace=True) - x2 = self.conv2.conv(x2) - return x1, x2 - - def forward_b(self, x2): # conv234结尾有se - x3 = self.conv2_down(x2) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - x3 = self.conv3.conv(x3) - return x3 - - def forward_c(self, x2, x3): # conv234结尾有se - x3 = self.conv3_up(x3) - x3 = F.leaky_relu(x3, 0.1, inplace=True) - - x2 = F.pad(x2, (-4, -4, -4, -4)) - x4 = self.conv4.conv(x2 + x3) - return x4 - - def forward_d(self, x1, x4): # conv234结尾有se - x4 = self.conv4_up(x4) - x4 = F.leaky_relu(x4, 0.1, inplace=True) - - x1 = F.pad(x1, (-16, -16, -16, -16)) - x5 = self.conv5(x1 + x4) - x5 = F.leaky_relu(x5, 0.1, inplace=True) - - z = self.conv_bottom(x5) - return z - - -class UpCunet2x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet2x, self).__init__() - self.unet1 = UNet1(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 2, :w0 * 2] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (18, 18 + pw - w0, 18, 18 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 36, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 36, j:j + crop_size[1] + 36] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, 
crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 36, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 36, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 2 - 72, w * 2 - 72)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 36, crop_size[0]): - for j in range(0, w - 36, crop_size[1]): - res[:, :, i * 2:i * 2 + h1 * 2 - 72, j * 2:j * 2 + w1 * 2 - 72] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 2, :w0 * 2] - return res # - - -class UpCunet3x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet3x, self).__init__() - self.unet1 = UNet1x3(in_channels, out_channels, deconv=True) - self.unet2 = UNet2(in_channels, out_channels, deconv=False) - - def forward(self, x, tile_mode): # 1.7G - n, c, h0, w0 = x.shape - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 4 + 1) * 4 - pw = ((w0 - 1) // 4 + 1) * 4 - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 3, :w0 * 3] - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_h = (h0 - 1) // 
4 * 4 + 4 # 能被4整除 - else: - crop_size_h = ((h0 - 1) // 8 * 8 + 8) // 2 # 减半后能被4整除,所以要先被8整除 - crop_size_w = (w0 - 1) // 4 * 4 + 4 # 能被4整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 2, ((w0 - 1) // 8 * 8 + 8) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 12 * 12 + 12) // 3, ((w0 - 1) // 12 * 12 + 12) // 3) # 4.2G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 16 * 16 + 16) // 4, ((w0 - 1) // 16 * 16 + 16) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (14, 14 + pw - w0, 14, 14 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 28, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 28, j:j + crop_size[1] + 28] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i 
in range(0, h - 28, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 28, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - opt_res_dict[i][j] = x_crop # - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 3 - 84, w * 3 - 84)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 28, crop_size[0]): - for j in range(0, w - 28, crop_size[1]): - res[:, :, i * 3:i * 3 + h1 * 3 - 84, j * 3:j * 3 + w1 * 3 - 84] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 3, :w0 * 3] - return res - - -class UpCunet4x(nn.Module): # 完美tile,全程无损 - def __init__(self, in_channels=3, out_channels=3): - super(UpCunet4x, self).__init__() - self.unet1 = UNet1(in_channels, 64, deconv=True) - self.unet2 = UNet2(64, 64, deconv=False) - self.ps = nn.PixelShuffle(2) - self.conv_final = nn.Conv2d(64, 12, 3, 1, padding=0, bias=True) - - def forward(self, x, tile_mode): - n, c, h0, w0 = x.shape - x00 = x - if (tile_mode == 0): # 不tile - ph = ((h0 - 1) // 2 + 1) * 2 - pw = ((w0 - 1) // 2 + 1) * 2 - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') # 需要保证被2整除 - x = self.unet1.forward(x) - x0 = self.unet2.forward(x) - x1 = F.pad(x, (-20, -20, -20, -20)) - x = torch.add(x0, x1) - x = self.conv_final(x) - x = F.pad(x, (-1, -1, -1, -1)) - x = self.ps(x) - if (w0 != pw or h0 != ph): x = x[:, :, :h0 * 4, :w0 * 4] - x += F.interpolate(x00, scale_factor=4, mode='nearest') - return x - elif (tile_mode == 1): # 对长边减半 - if (w0 >= h0): - crop_size_w = ((w0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_h = (h0 - 1) // 2 * 2 + 2 # 能被2整除 - else: - crop_size_h = ((h0 - 1) // 4 * 4 + 4) // 2 # 减半后能被2整除,所以要先被4整除 - crop_size_w = (w0 - 1) // 2 * 2 + 2 # 能被2整除 - crop_size = (crop_size_h, crop_size_w) # 6.6G - elif (tile_mode == 2): # hw都减半 - crop_size = (((h0 - 1) // 4 * 4 + 4) // 2, ((w0 - 1) // 4 * 4 + 4) // 2) # 5.6G - elif (tile_mode == 3): # hw都三分之一 - crop_size = (((h0 - 1) // 6 * 6 + 6) // 3, ((w0 - 1) // 6 * 6 + 6) // 3) # 4.1G - elif (tile_mode == 4): # hw都四分之一 - crop_size = (((h0 - 1) // 8 * 8 + 8) // 4, ((w0 - 1) // 8 * 8 + 8) // 4) # 3.7G - ph = ((h0 - 1) // crop_size[0] + 1) * crop_size[0] - pw = ((w0 - 1) // crop_size[1] + 1) * crop_size[1] - x = F.pad(x, (19, 19 + pw - w0, 19, 19 + ph - h0), 'reflect') - n, c, h, w = x.shape - se_mean0 = torch.zeros((n, 64, 1, 1)).to(x.device) - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - n_patch = 0 - tmp_dict = {} - opt_res_dict = {} - for i in range(0, h - 38, crop_size[0]): - tmp_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - x_crop = x[:, :, i:i + crop_size[0] + 38, j:j + crop_size[1] + 38] - n, c1, h1, w1 = x_crop.shape - tmp0, x_crop = self.unet1.forward_a(x_crop) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(x_crop.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(x_crop, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - n_patch += 1 - tmp_dict[i][j] = (tmp0, x_crop) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - tmp0, x_crop = tmp_dict[i][j] - 
x_crop = self.unet1.conv2.seblock.forward_mean(x_crop, se_mean0) - opt_unet1 = self.unet1.forward_b(tmp0, x_crop) - tmp_x1, tmp_x2 = self.unet2.forward_a(opt_unet1) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x2.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x2, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2) - se_mean1 /= n_patch - se_mean0 = torch.zeros((n, 128, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean0 = se_mean0.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2 = tmp_dict[i][j] - tmp_x2 = self.unet2.conv2.seblock.forward_mean(tmp_x2, se_mean1) - tmp_x3 = self.unet2.forward_b(tmp_x2) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x3.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x3, dim=(2, 3), keepdim=True) - se_mean0 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x2, tmp_x3) - se_mean0 /= n_patch - se_mean1 = torch.zeros((n, 64, 1, 1)).to(x.device) # 64#128#128#64 - if ("Half" in x.type()): - se_mean1 = se_mean1.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x2, tmp_x3 = tmp_dict[i][j] - tmp_x3 = self.unet2.conv3.seblock.forward_mean(tmp_x3, se_mean0) - tmp_x4 = self.unet2.forward_c(tmp_x2, tmp_x3) - if ("Half" in x.type()): # torch.HalfTensor/torch.cuda.HalfTensor - tmp_se_mean = torch.mean(tmp_x4.float(), dim=(2, 3), keepdim=True).half() - else: - tmp_se_mean = torch.mean(tmp_x4, dim=(2, 3), keepdim=True) - se_mean1 += tmp_se_mean - tmp_dict[i][j] = (opt_unet1, tmp_x1, tmp_x4) - se_mean1 /= n_patch - for i in range(0, h - 38, crop_size[0]): - opt_res_dict[i] = {} - for j in range(0, w - 38, crop_size[1]): - opt_unet1, tmp_x1, tmp_x4 = tmp_dict[i][j] - tmp_x4 = self.unet2.conv4.seblock.forward_mean(tmp_x4, se_mean1) - x0 = self.unet2.forward_d(tmp_x1, tmp_x4) - x1 = F.pad(opt_unet1, (-20, -20, -20, -20)) - x_crop = torch.add(x0, x1) # x0是unet2的最终输出 - x_crop = self.conv_final(x_crop) - x_crop = F.pad(x_crop, (-1, -1, -1, -1)) - x_crop = self.ps(x_crop) - opt_res_dict[i][j] = x_crop - del tmp_dict - torch.cuda.empty_cache() - res = torch.zeros((n, c, h * 4 - 152, w * 4 - 152)).to(x.device) - if ("Half" in x.type()): - res = res.half() - for i in range(0, h - 38, crop_size[0]): - for j in range(0, w - 38, crop_size[1]): - # print(opt_res_dict[i][j].shape,res[:, :, i * 4:i * 4 + h1 * 4 - 144, j * 4:j * 4 + w1 * 4 - 144].shape) - res[:, :, i * 4:i * 4 + h1 * 4 - 152, j * 4:j * 4 + w1 * 4 - 152] = opt_res_dict[i][j] - del opt_res_dict - torch.cuda.empty_cache() - if (w0 != pw or h0 != ph): res = res[:, :, :h0 * 4, :w0 * 4] - res += F.interpolate(x00, scale_factor=4, mode='nearest') - return res # - - -class RealWaifuUpScaler(object): - def __init__(self, scale, weight_path, half, device): - weight = torch.load(weight_path, map_location="cpu") - self.model = eval("UpCunet%sx" % scale)() - if (half == True): - self.model = self.model.half().to(device) - else: - self.model = self.model.to(device) - self.model.load_state_dict(weight, strict=True) - self.model.eval() - self.half = half - self.device = device - - def np2tensor(self, np_frame): - if (self.half == False): - return torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).float() / 255 - else: - return 
torch.from_numpy(np.transpose(np_frame, (2, 0, 1))).unsqueeze(0).to(self.device).half() / 255 - - def tensor2np(self, tensor): - if (self.half == False): - return ( - np.transpose((tensor.data.squeeze() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), (1, 2, 0))) - else: - return (np.transpose((tensor.data.squeeze().float() * 255.0).round().clamp_(0, 255).byte().cpu().numpy(), - (1, 2, 0))) - - def __call__(self, frame, tile_mode): - with torch.no_grad(): - tensor = self.np2tensor(frame) - result = self.tensor2np(self.model(tensor, tile_mode)) - return result - - -if __name__ == "__main__": - ###########inference_img - import time, cv2, sys - from time import time as ttime - - for weight_path, scale in [("weights_v3/up2x-latest-denoise3x.pth", 2), ("weights_v3/up3x-latest-denoise3x.pth", 3), - ("weights_v3/up4x-latest-denoise3x.pth", 4)]: - for tile_mode in [0, 1, 2, 3, 4]: - upscaler2x = RealWaifuUpScaler(scale, weight_path, half=True, device="cuda:0") - input_dir = "%s/input_dir1" % root_path - output_dir = "%s/opt-dir-all-test" % root_path - os.makedirs(output_dir, exist_ok=True) - for name in os.listdir(input_dir): - print(name) - tmp = name.split(".") - inp_path = os.path.join(input_dir, name) - suffix = tmp[-1] - prefix = ".".join(tmp[:-1]) - tmp_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - print(inp_path, tmp_path) - # 支持中文路径 - # os.link(inp_path, tmp_path)#win用硬链接 - os.symlink(inp_path, tmp_path) # linux用软链接 - frame = cv2.imread(tmp_path)[:, :, [2, 1, 0]] - t0 = ttime() - result = upscaler2x(frame, tile_mode=tile_mode)[:, :, ::-1] - t1 = ttime() - print(prefix, "done", t1 - t0) - tmp_opt_path = os.path.join(root_path, "tmp", "%s.%s" % (int(time.time() * 1000000), suffix)) - cv2.imwrite(tmp_opt_path, result) - n = 0 - while (1): - if (n == 0): - suffix = "_%sx_tile%s.png" % (scale, tile_mode) - else: - suffix = "_%sx_tile%s_%s.png" % (scale, tile_mode, n) # - if (os.path.exists(os.path.join(output_dir, prefix + suffix)) == False): - break - else: - n += 1 - final_opt_path = os.path.join(output_dir, prefix + suffix) - os.rename(tmp_opt_path, final_opt_path) - os.remove(tmp_path) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blip/convert_blip_original_pytorch_to_hf.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blip/convert_blip_original_pytorch_to_hf.py deleted file mode 100644 index 7609b4a40e857fd3909fe93a8a1b49858e838bbe..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/blip/convert_blip_original_pytorch_to_hf.py +++ /dev/null @@ -1,191 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
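# For reference, a minimal usage sketch of the RealWaifuUpScaler wrapper defined at the end of
# upcunet_v3.py above (file names and the CUDA device are placeholders, and the module is
# assumed to be importable as upcunet_v3):
import cv2
from upcunet_v3 import RealWaifuUpScaler

upscaler = RealWaifuUpScaler(2, "weights_v3/up2x-latest-denoise3x.pth", half=True, device="cuda:0")
frame = cv2.imread("input.png")[:, :, ::-1]        # BGR -> RGB, as in the demo script above
result = upscaler(frame, tile_mode=2)[:, :, ::-1]  # back to BGR for cv2.imwrite
cv2.imwrite("output.png", result)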
- -import argparse -import re - -import requests -import torch - -# git clone https://github.com/salesforce/BLIP.git -from models.blip import blip_decoder -from models.blip_itm import blip_itm -from models.blip_vqa import blip_vqa -from PIL import Image -from torchvision import transforms -from torchvision.transforms.functional import InterpolationMode - -from transformers import ( - BertTokenizer, - BlipConfig, - BlipForConditionalGeneration, - BlipForImageTextRetrieval, - BlipForQuestionAnswering, -) - - -def load_demo_image(image_size, device): - img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" - raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") - - transform = transforms.Compose( - [ - transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC), - transforms.ToTensor(), - transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), - ] - ) - image = transform(raw_image).unsqueeze(0).to(device) - return image - - -def rename_key(key): - if "visual_encoder" in key: - key = re.sub("visual_encoder*", "vision_model.encoder", key) - if "blocks" in key: - key = re.sub(r"blocks", "layers", key) - if "attn" in key: - key = re.sub(r"attn", "self_attn", key) - if "norm1" in key: - key = re.sub(r"norm1", "layer_norm1", key) - if "norm2" in key: - key = re.sub(r"norm2", "layer_norm2", key) - if "encoder.norm" in key: - key = re.sub(r"encoder.norm", "post_layernorm", key) - if "encoder.patch_embed.proj" in key: - key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key) - - if "encoder.pos_embed" in key: - key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key) - if "encoder.cls_token" in key: - key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key) - - if "self_attn" in key: - key = re.sub(r"self_attn.proj", "self_attn.projection", key) - - return key - - -@torch.no_grad() -def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None): - """ - Copy/paste/tweak model's weights to transformers design. 
- """ - if config_path is not None: - config = BlipConfig.from_pretrained(config_path) - else: - config = BlipConfig(projection_dim=512, text_config={}, vision_config={}) - - hf_model = BlipForConditionalGeneration(config).eval() - - model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth" - - pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base") - pt_model = pt_model.eval() - - modified_state_dict = pt_model.state_dict() - for key in modified_state_dict.copy(): - value = modified_state_dict.pop(key) - renamed_key = rename_key(key) - modified_state_dict[renamed_key] = value - - hf_model.load_state_dict(modified_state_dict) - - image_size = 384 - image = load_demo_image(image_size=image_size, device="cpu") - tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") - input_ids = tokenizer(["a picture of"]).input_ids - - out = hf_model.generate(image, input_ids) - - assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] - - out = hf_model.generate(image) - - assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] - - if pytorch_dump_folder_path is not None: - hf_model.save_pretrained(pytorch_dump_folder_path) - - # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' - model_url = ( - "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth" - ) - - vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base") - vqa_model.eval() - - modified_state_dict = vqa_model.state_dict() - for key in modified_state_dict.copy(): - value = modified_state_dict.pop(key) - renamed_key = rename_key(key) - modified_state_dict[renamed_key] = value - - hf_vqa_model = BlipForQuestionAnswering(config) - - hf_vqa_model.load_state_dict(modified_state_dict) - - question = ["How many dogs are in this image?"] - question_input_ids = tokenizer(question, return_tensors="pt").input_ids - - answer = hf_vqa_model.generate(question_input_ids, image) - print(tokenizer.decode(answer[0])) - - assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]" - if pytorch_dump_folder_path is not None: - hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa") - - model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth" - - itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base") - itm_model.eval() - - modified_state_dict = itm_model.state_dict() - for key in modified_state_dict.copy(): - value = modified_state_dict.pop(key) - renamed_key = rename_key(key) - modified_state_dict[renamed_key] = value - - hf_itm_model = BlipForImageTextRetrieval(config) - - question = ["A picture of a woman with a dog sitting in a beach"] - question_input_ids = tokenizer( - question, - return_tensors="pt", - padding="max_length", - truncation=True, - max_length=35, - ).input_ids - - hf_itm_model.load_state_dict(modified_state_dict) - hf_itm_model.eval() - - out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True) - out = hf_itm_model(question_input_ids, image, use_itm_head=False) - - assert out[0].item() == 0.2110687494277954 - assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127 - - if pytorch_dump_folder_path is not None: - hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm") - - -if __name__ == "__main__": - parser = 
argparse.ArgumentParser() - parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") - parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") - args = parser.parse_args() - - convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deta/convert_deta_swin_to_pytorch.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deta/convert_deta_swin_to_pytorch.py deleted file mode 100644 index 911bc434e14265f9fe21dc8166b4d9eafb0d9cc0..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/deta/convert_deta_swin_to_pytorch.py +++ /dev/null @@ -1,327 +0,0 @@ -# coding=utf-8 -# Copyright 2022 The HuggingFace Inc. team. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Convert DETA checkpoints from the original repository. - -URL: https://github.com/jozhang97/DETA/tree/master""" - - -import argparse -import json -from pathlib import Path - -import requests -import torch -from huggingface_hub import cached_download, hf_hub_download, hf_hub_url -from PIL import Image - -from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig -from transformers.utils import logging - - -logging.set_verbosity_info() -logger = logging.get_logger(__name__) - - -def get_deta_config(model_name): - backbone_config = SwinConfig( - embed_dim=192, - depths=(2, 2, 18, 2), - num_heads=(6, 12, 24, 48), - window_size=12, - out_features=["stage2", "stage3", "stage4"], - ) - - config = DetaConfig( - backbone_config=backbone_config, - num_queries=900, - encoder_ffn_dim=2048, - decoder_ffn_dim=2048, - num_feature_levels=5, - assign_first_stage=True, - with_box_refine=True, - two_stage=True, - ) - - # set labels - repo_id = "huggingface/label-files" - if "o365" in model_name: - num_labels = 366 - filename = "object365-id2label.json" - else: - num_labels = 91 - filename = "coco-detection-id2label.json" - - config.num_labels = num_labels - id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) - id2label = {int(k): v for k, v in id2label.items()} - config.id2label = id2label - config.label2id = {v: k for k, v in id2label.items()} - - return config - - -# here we list all keys to be renamed (original name on the left, our name on the right) -def create_rename_keys(config): - rename_keys = [] - - # stem - # fmt: off - rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight")) - rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias")) - rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight")) - 
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias")) - # stages - for i in range(len(config.backbone_config.depths)): - for j in range(config.backbone_config.depths[i]): - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias")) - - if i < 3: - rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight")) - rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias")) - - rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight")) - rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias")) - rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight")) - rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias")) - rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight")) - rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias")) - - # transformer encoder - for i in range(config.encoder_layers): - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight")) - 
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias")) - rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight")) - rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias")) - - # transformer decoder - for i in range(config.decoder_layers): - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias")) - 
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias")) - rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight")) - rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias")) - - # fmt: on - - return rename_keys - - -def rename_key(dct, old, new): - val = dct.pop(old) - dct[new] = val - - -# we split up the matrix of each encoder layer into queries, keys and values -def read_in_swin_q_k_v(state_dict, backbone_config): - num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))] - for i in range(len(backbone_config.depths)): - dim = num_features[i] - for j in range(backbone_config.depths[i]): - # fmt: off - # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) - in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight") - in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias") - # next, add query, keys and values (in that order) to the state dict - state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :] - state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim] - state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[ - dim : dim * 2, : - ] - state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[ - dim : dim * 2 - ] - state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[ - -dim :, : - ] - state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :] - # fmt: on - - -def read_in_decoder_q_k_v(state_dict, config): - # transformer decoder self-attention layers - hidden_size = config.d_model - for i in range(config.decoder_layers): - # read in weights + bias of input projection layer of self-attention - in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight") - in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias") - # next, add query, keys and values (in that order) to the state dict - state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :] - state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size] - 
state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[ - hidden_size : hidden_size * 2, : - ] - state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2] - state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :] - state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:] - - -# We will verify our results on an image of cute cats -def prepare_img(): - url = "http://images.cocodataset.org/val2017/000000039769.jpg" - im = Image.open(requests.get(url, stream=True).raw) - - return im - - -@torch.no_grad() -def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub): - """ - Copy/paste/tweak model's weights to our DETA structure. - """ - - # load config - config = get_deta_config(model_name) - - # load original state dict - if model_name == "deta-swin-large": - checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth") - elif model_name == "deta-swin-large-o365": - checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth") - else: - raise ValueError(f"Model name {model_name} not supported") - - state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] - - # original state dict - for name, param in state_dict.items(): - print(name, param.shape) - - # rename keys - rename_keys = create_rename_keys(config) - for src, dest in rename_keys: - rename_key(state_dict, src, dest) - read_in_swin_q_k_v(state_dict, config.backbone_config) - read_in_decoder_q_k_v(state_dict, config) - - # fix some prefixes - for key in state_dict.copy().keys(): - if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: - val = state_dict.pop(key) - state_dict[key.replace("transformer.decoder", "model.decoder")] = val - if "input_proj" in key: - val = state_dict.pop(key) - state_dict["model." 
+ key] = val - if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: - val = state_dict.pop(key) - state_dict[key.replace("transformer", "model")] = val - - # finally, create HuggingFace model and load state dict - model = DetaForObjectDetection(config) - model.load_state_dict(state_dict) - model.eval() - - device = "cuda" if torch.cuda.is_available() else "cpu" - model.to(device) - - # load image processor - processor = DetaImageProcessor(format="coco_detection") - - # verify our conversion on image - img = prepare_img() - encoding = processor(images=img, return_tensors="pt") - pixel_values = encoding["pixel_values"] - outputs = model(pixel_values.to(device)) - - # verify logits - print("Logits:", outputs.logits[0, :3, :3]) - print("Boxes:", outputs.pred_boxes[0, :3, :3]) - if model_name == "deta-swin-large": - expected_logits = torch.tensor( - [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] - ) - expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]]) - elif model_name == "deta-swin-large-o365": - expected_logits = torch.tensor( - [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] - ) - expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]]) - assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4) - assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4) - print("Everything ok!") - - if pytorch_dump_folder_path: - # Save model and processor - logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...") - Path(pytorch_dump_folder_path).mkdir(exist_ok=True) - model.save_pretrained(pytorch_dump_folder_path) - processor.save_pretrained(pytorch_dump_folder_path) - - # Push to hub - if push_to_hub: - print("Pushing model and processor to hub...") - model.push_to_hub(f"jozhang97/{model_name}") - processor.push_to_hub(f"jozhang97/{model_name}") - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - - parser.add_argument( - "--model_name", - type=str, - default="deta-swin-large", - choices=["deta-swin-large", "deta-swin-large-o365"], - help="Name of the model you'd like to convert.", - ) - parser.add_argument( - "--pytorch_dump_folder_path", - default=None, - type=str, - help="Path to the folder to output PyTorch model.", - ) - parser.add_argument( - "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." - ) - args = parser.parse_args() - convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/git/modeling_git.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/git/modeling_git.py deleted file mode 100644 index 00707e42dd085ab3a5c89e61cd7d433d41269f46..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/git/modeling_git.py +++ /dev/null @@ -1,1574 +0,0 @@ -# coding=utf-8 -# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""PyTorch GIT model.""" - - -import math -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import torch -import torch.utils.checkpoint -from torch import nn -from torch.nn import CrossEntropyLoss - -from ...activations import ACT2FN -from ...file_utils import ModelOutput -from ...modeling_outputs import ( - BaseModelOutput, - BaseModelOutputWithPast, - BaseModelOutputWithPooling, - CausalLMOutputWithPast, -) -from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer -from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings -from .configuration_git import GitConfig, GitVisionConfig - - -logger = logging.get_logger(__name__) - -_CHECKPOINT_FOR_DOC = "microsoft/git-base" -_CONFIG_FOR_DOC = "GitConfig" - -GIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "microsoft/git-base", - # See all GIT models at https://huggingface.co/models?filter=git -] - - -@dataclass -# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Git -class GitVisionModelOutput(ModelOutput): - """ - Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. - - Args: - image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): - The image embeddings obtained by applying the projection layer to the pooler_output. - last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Sequence of hidden-states at the output of the last layer of the model. - hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): - Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + - one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. - - Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. - attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): - Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, - sequence_length)`. - - Attentions weights after the attention softmax, used to compute the weighted average in the self-attention - heads. - """ - - image_embeds: Optional[torch.FloatTensor] = None - last_hidden_state: torch.FloatTensor = None - hidden_states: Optional[Tuple[torch.FloatTensor]] = None - attentions: Optional[Tuple[torch.FloatTensor]] = None - - -# Copied from transformers.models.bart.modeling_bart._expand_mask -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): - """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
- """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len - - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) - - inverted_mask = 1.0 - expanded_mask - - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) - - -class GitEmbeddings(nn.Module): - """Construct the embeddings from word and position embeddings.""" - - def __init__(self, config): - super().__init__() - self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) - self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) - - # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load - # any TensorFlow checkpoint file - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - # position_ids (1, len position emb) is contiguous in memory and exported when serialized - self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) - - def forward( - self, - input_ids: Optional[torch.LongTensor] = None, - position_ids: Optional[torch.LongTensor] = None, - inputs_embeds: Optional[torch.FloatTensor] = None, - past_key_values_length: int = 0, - ) -> torch.Tensor: - if input_ids is not None: - input_shape = input_ids.size() - else: - input_shape = inputs_embeds.size()[:-1] - - seq_length = input_shape[1] - - if position_ids is None: - position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] - - if inputs_embeds is None: - embeddings = self.word_embeddings(input_ids) - else: - embeddings = inputs_embeds - - if self.position_embedding_type == "absolute": - position_embeddings = self.position_embeddings(position_ids) - embeddings += position_embeddings - embeddings = self.LayerNorm(embeddings) - embeddings = self.dropout(embeddings) - return embeddings - - -class GitSelfAttention(nn.Module): - def __init__(self, config, position_embedding_type=None): - super().__init__() - if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): - raise ValueError( - f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " - f"heads ({config.num_attention_heads})" - ) - - self.num_attention_heads = config.num_attention_heads - self.attention_head_size = int(config.hidden_size / config.num_attention_heads) - self.all_head_size = self.num_attention_heads * self.attention_head_size - self.image_patch_tokens = int((config.vision_config.image_size / config.vision_config.patch_size) ** 2 + 1) - if config.num_image_with_embedding is not None: - self.image_patch_tokens *= config.num_image_with_embedding - - self.query = nn.Linear(config.hidden_size, self.all_head_size) - self.key = nn.Linear(config.hidden_size, self.all_head_size) - self.value = nn.Linear(config.hidden_size, self.all_head_size) - - self.dropout = nn.Dropout(config.attention_probs_dropout_prob) - self.position_embedding_type = position_embedding_type or getattr( - config, "position_embedding_type", "absolute" - ) - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - self.max_position_embeddings = config.max_position_embeddings - self.distance_embedding = nn.Embedding(2 * 
config.max_position_embeddings - 1, self.attention_head_size) - - def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: - new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) - x = x.view(new_x_shape) - return x.permute(0, 2, 1, 3) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - pixel_values_present: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - mixed_query_layer = self.query(hidden_states) - - cutoff = self.image_patch_tokens if pixel_values_present else 0 - if past_key_value is not None: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - key_layer = torch.cat([key_layer[:, :, :cutoff, :], past_key_value[0], key_layer[:, :, -1:, :]], dim=2) - value_layer = torch.cat( - [value_layer[:, :, :cutoff, :], past_key_value[1], value_layer[:, :, -1:, :]], dim=2 - ) - else: - key_layer = self.transpose_for_scores(self.key(hidden_states)) - value_layer = self.transpose_for_scores(self.value(hidden_states)) - - query_layer = self.transpose_for_scores(mixed_query_layer) - - use_cache = past_key_value is not None - # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. - # Further calls to cross_attention layer can then reuse all cross-attention - # key/value_states (first "if" case) - # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of - # all previous decoder key/value_states. Further calls to uni-directional self-attention - # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) - # if encoder bi-directional self-attention `past_key_value` is always `None` - # NOTE: like in other caches, we store the text component. In GIT it means we discard the image component. - past_key_value = ( - key_layer[:, :, cutoff:, :], - value_layer[:, :, cutoff:, :], - ) - - # Take the dot product between "query" and "key" to get the raw attention scores. 
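# The cache handling above stores only the text part of the keys/values (everything after
# `cutoff`); the image block is recomputed at each generation step and the cached text keys are
# spliced back in between the image block and the newly projected token. A shape-level sketch
# with made-up sizes (not part of the model):
import torch

batch, heads, head_dim = 1, 12, 64
cutoff, cached_text_len = 197, 5  # image patch tokens, previously generated text tokens

key_layer = torch.randn(batch, heads, cutoff + 1, head_dim)         # image tokens + one new text token
cached_keys = torch.randn(batch, heads, cached_text_len, head_dim)  # plays the role of past_key_value[0]
key_layer = torch.cat(
    [key_layer[:, :, :cutoff, :], cached_keys, key_layer[:, :, -1:, :]], dim=2
)
print(key_layer.shape)  # torch.Size([1, 12, 203, 64]): image + cached text + new token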
- attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) - - if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": - query_length, key_length = query_layer.shape[2], key_layer.shape[2] - if use_cache: - position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( - -1, 1 - ) - else: - position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) - position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) - distance = position_ids_l - position_ids_r - - positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) - positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility - - if self.position_embedding_type == "relative_key": - relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores - elif self.position_embedding_type == "relative_key_query": - relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) - relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) - attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key - - attention_scores = attention_scores / math.sqrt(self.attention_head_size) - if attention_mask is not None: - # Apply the attention mask (precomputed for all layers in GitModel forward() function) - attention_scores = attention_scores + attention_mask - - # Normalize the attention scores to probabilities. - attention_probs = nn.functional.softmax(attention_scores, dim=-1) - - # This is actually dropping out entire tokens to attend to, which might - # seem a bit unusual, but is taken from the original Transformer paper. 
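- # Added note: nn.Dropout zeroes individual attention weights (and rescales the rest by 1/(1 - p)
- # during training), so a query can temporarily lose access to entire key positions.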
- attention_probs = self.dropout(attention_probs) - - # Mask heads if we want to - if head_mask is not None: - attention_probs = attention_probs * head_mask - - context_layer = torch.matmul(attention_probs, value_layer) - - context_layer = context_layer.permute(0, 2, 1, 3).contiguous() - new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) - context_layer = context_layer.view(new_context_layer_shape) - - outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) - - outputs = outputs + (past_key_value,) - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertSelfOutput -class GitSelfOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class GitAttention(nn.Module): - # Copied from transformers.models.bert.modeling_bert.BertAttention.__init__ with Bert->Git - def __init__(self, config, position_embedding_type=None): - super().__init__() - self.self = GitSelfAttention(config, position_embedding_type=position_embedding_type) - self.output = GitSelfOutput(config) - self.pruned_heads = set() - - # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads - def prune_heads(self, heads): - if len(heads) == 0: - return - heads, index = find_pruneable_heads_and_indices( - heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads - ) - - # Prune linear layers - self.self.query = prune_linear_layer(self.self.query, index) - self.self.key = prune_linear_layer(self.self.key, index) - self.self.value = prune_linear_layer(self.self.value, index) - self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) - - # Update hyper params and store pruned heads - self.self.num_attention_heads = self.self.num_attention_heads - len(heads) - self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads - self.pruned_heads = self.pruned_heads.union(heads) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - pixel_values_present: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - self_outputs = self.self( - hidden_states, - attention_mask, - head_mask, - past_key_value, - output_attentions, - pixel_values_present, - ) - attention_output = self.output(self_outputs[0], hidden_states) - outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them - return outputs - - -# Copied from transformers.models.bert.modeling_bert.BertIntermediate -class GitIntermediate(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.hidden_size, config.intermediate_size) - if isinstance(config.hidden_act, str): - self.intermediate_act_fn = ACT2FN[config.hidden_act] - else: - self.intermediate_act_fn = config.hidden_act - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = 
self.dense(hidden_states) - hidden_states = self.intermediate_act_fn(hidden_states) - return hidden_states - - -# Copied from transformers.models.bert.modeling_bert.BertOutput -class GitOutput(nn.Module): - def __init__(self, config): - super().__init__() - self.dense = nn.Linear(config.intermediate_size, config.hidden_size) - self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - self.dropout = nn.Dropout(config.hidden_dropout_prob) - - def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: - hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.LayerNorm(hidden_states + input_tensor) - return hidden_states - - -class GitLayer(nn.Module): - def __init__(self, config): - super().__init__() - self.chunk_size_feed_forward = config.chunk_size_feed_forward - self.seq_len_dim = 1 - self.attention = GitAttention(config) - self.intermediate = GitIntermediate(config) - self.output = GitOutput(config) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - output_attentions: Optional[bool] = False, - pixel_values_present: Optional[bool] = False, - ) -> Tuple[torch.Tensor]: - # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 - self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None - self_attention_outputs = self.attention( - hidden_states, - attention_mask, - head_mask, - output_attentions=output_attentions, - past_key_value=self_attn_past_key_value, - pixel_values_present=pixel_values_present, - ) - attention_output = self_attention_outputs[0] - - # if decoder, the last output is tuple of self-attn cache - outputs = self_attention_outputs[1:-1] - present_key_value = self_attention_outputs[-1] - - layer_output = apply_chunking_to_forward( - self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output - ) - outputs = (layer_output,) + outputs - - # if decoder, return the attn key/values as the last output - outputs = outputs + (present_key_value,) - - return outputs - - def feed_forward_chunk(self, attention_output): - intermediate_output = self.intermediate(attention_output) - layer_output = self.output(intermediate_output, attention_output) - return layer_output - - -class GitEncoder(nn.Module): - # Copied from transformers.models.bert.modeling_bert.BertEncoder.__init__ with Bert->Git - def __init__(self, config): - super().__init__() - self.config = config - self.layer = nn.ModuleList([GitLayer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.FloatTensor] = None, - head_mask: Optional[torch.FloatTensor] = None, - past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = False, - output_hidden_states: Optional[bool] = False, - pixel_values_present: Optional[bool] = False, - return_dict: Optional[bool] = True, - ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPast]: - if self.gradient_checkpointing and self.training: - if use_cache: - logger.warning_once( - "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
- ) - use_cache = False - - all_hidden_states = () if output_hidden_states else None - all_self_attentions = () if output_attentions else None - - next_decoder_cache = () if use_cache else None - for i, layer_module in enumerate(self.layer): - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - layer_head_mask = head_mask[i] if head_mask is not None else None - past_key_value = past_key_values[i] if past_key_values is not None else None - - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, past_key_value, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(layer_module), - hidden_states, - attention_mask, - layer_head_mask, - ) - else: - layer_outputs = layer_module( - hidden_states, - attention_mask, - layer_head_mask, - past_key_value, - output_attentions, - pixel_values_present, - ) - - hidden_states = layer_outputs[0] - if use_cache: - next_decoder_cache += (layer_outputs[-1],) - if output_attentions: - all_self_attentions = all_self_attentions + (layer_outputs[1],) - - if output_hidden_states: - all_hidden_states = all_hidden_states + (hidden_states,) - - if not return_dict: - return tuple( - v - for v in [ - hidden_states, - next_decoder_cache, - all_hidden_states, - all_self_attentions, - ] - if v is not None - ) - return BaseModelOutputWithPast( - last_hidden_state=hidden_states, - past_key_values=next_decoder_cache, - hidden_states=all_hidden_states, - attentions=all_self_attentions, - ) - - -class GitPreTrainedModel(PreTrainedModel): - """ - An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained - models. - """ - - config_class = GitConfig - base_model_prefix = "git" - supports_gradient_checkpointing = True - - def _init_weights(self, module): - """Initialize the weights""" - if isinstance(module, GitVisionEmbeddings): - nn.init.normal_(module.class_embedding, mean=0.0, std=self.config.initializer_range) - nn.init.normal_(module.patch_embedding.weight, std=self.config.initializer_range) - nn.init.normal_(module.position_embedding.weight, std=self.config.initializer_range) - if isinstance(module, nn.Linear): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.bias is not None: - module.bias.data.zero_() - elif isinstance(module, nn.Embedding): - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) - if module.padding_idx is not None: - module.weight.data[module.padding_idx].zero_() - elif isinstance(module, nn.LayerNorm): - module.bias.data.zero_() - module.weight.data.fill_(1.0) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (GitEncoder, GitVisionEncoder)): - module.gradient_checkpointing = value - - -GIT_START_DOCSTRING = r""" - - This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the - library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads - etc.) - - This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage - and behavior. - - Parameters: - config ([`GitConfig`]): Model configuration class with all the parameters of the model. - Initializing with a config file does not load the weights associated with the model, only the - configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. -""" - -GIT_INPUTS_DOCSTRING = r""" - Args: - input_ids (`torch.LongTensor` of shape `({0})`): - Indices of input sequence tokens in the vocabulary. - - Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and - [`PreTrainedTokenizer.__call__`] for details. - - [What are input IDs?](../glossary#input-ids) - attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - - position_ids (`torch.LongTensor` of shape `({0})`, *optional*): - Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, - config.max_position_embeddings - 1]`. - - [What are position IDs?](../glossary#position-ids) - - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See - [`CLIPImageProcessor.__call__`] for details. - - head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): - Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - - - 1 indicates the head is **not masked**, - - 0 indicates the head is **masked**. - - inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This - is useful if you want more control over how to convert `input_ids` indices into associated vectors than the - model's internal embedding lookup matrix. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
-""" - - -# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Git -class GitVisionEmbeddings(nn.Module): - def __init__(self, config: GitVisionConfig): - super().__init__() - self.config = config - self.embed_dim = config.hidden_size - self.image_size = config.image_size - self.patch_size = config.patch_size - - self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) - - self.patch_embedding = nn.Conv2d( - in_channels=config.num_channels, - out_channels=self.embed_dim, - kernel_size=self.patch_size, - stride=self.patch_size, - bias=False, - ) - - self.num_patches = (self.image_size // self.patch_size) ** 2 - self.num_positions = self.num_patches + 1 - self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) - self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) - - def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: - batch_size = pixel_values.shape[0] - target_dtype = self.patch_embedding.weight.dtype - patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] - patch_embeds = patch_embeds.flatten(2).transpose(1, 2) - - class_embeds = self.class_embedding.expand(batch_size, 1, -1) - embeddings = torch.cat([class_embeds, patch_embeds], dim=1) - embeddings = embeddings + self.position_embedding(self.position_ids) - return embeddings - - -# Copied from transformers.models.clip.modeling_clip.CLIPMLP -class GitVisionMLP(nn.Module): - def __init__(self, config): - super().__init__() - self.config = config - self.activation_fn = ACT2FN[config.hidden_act] - self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) - self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) - - def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: - hidden_states = self.fc1(hidden_states) - hidden_states = self.activation_fn(hidden_states) - hidden_states = self.fc2(hidden_states) - return hidden_states - - -# Copied from transformers.models.clip.modeling_clip.CLIPAttention -class GitVisionAttention(nn.Module): - """Multi-headed attention from 'Attention Is All You Need' paper""" - - def __init__(self, config): - super().__init__() - self.config = config - self.embed_dim = config.hidden_size - self.num_heads = config.num_attention_heads - self.head_dim = self.embed_dim // self.num_heads - if self.head_dim * self.num_heads != self.embed_dim: - raise ValueError( - f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" - f" {self.num_heads})." 
- ) - self.scale = self.head_dim**-0.5 - self.dropout = config.attention_dropout - - self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) - self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) - self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) - self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) - - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - causal_attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: - """Input shape: Batch x Time x Channel""" - - bsz, tgt_len, embed_dim = hidden_states.size() - - # get query proj - query_states = self.q_proj(hidden_states) * self.scale - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states), -1, bsz) - - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) - key_states = key_states.view(*proj_shape) - value_states = value_states.view(*proj_shape) - - src_len = key_states.size(1) - attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): - raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {attn_weights.size()}" - ) - - # apply the causal_attention_mask first - if causal_attention_mask is not None: - if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {causal_attention_mask.size()}" - ) - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - if attention_mask is not None: - if attention_mask.size() != (bsz, 1, tgt_len, src_len): - raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" - ) - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - - attn_weights = nn.functional.softmax(attn_weights, dim=-1) - - if output_attentions: - # this operation is a bit awkward, but it's required to - # make sure that attn_weights keeps its gradient. 
- # In order to do so, attn_weights have to be reshaped - # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) - else: - attn_weights_reshaped = None - - attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) - - attn_output = torch.bmm(attn_probs, value_states) - - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): - raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {attn_output.size()}" - ) - - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) - attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) - - attn_output = self.out_proj(attn_output) - - return attn_output, attn_weights_reshaped - - -# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GitVision -class GitVisionEncoderLayer(nn.Module): - def __init__(self, config: GitVisionConfig): - super().__init__() - self.embed_dim = config.hidden_size - self.self_attn = GitVisionAttention(config) - self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) - self.mlp = GitVisionMLP(config) - self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) - - def forward( - self, - hidden_states: torch.Tensor, - attention_mask: torch.Tensor, - causal_attention_mask: torch.Tensor, - output_attentions: Optional[bool] = False, - ) -> Tuple[torch.FloatTensor]: - """ - Args: - hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` - attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - """ - residual = hidden_states - - hidden_states = self.layer_norm1(hidden_states) - hidden_states, attn_weights = self.self_attn( - hidden_states=hidden_states, - attention_mask=attention_mask, - causal_attention_mask=causal_attention_mask, - output_attentions=output_attentions, - ) - hidden_states = residual + hidden_states - - residual = hidden_states - hidden_states = self.layer_norm2(hidden_states) - hidden_states = self.mlp(hidden_states) - hidden_states = residual + hidden_states - - outputs = (hidden_states,) - - if output_attentions: - outputs += (attn_weights,) - - return outputs - - -# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->GitVision, CLIPConfig -class GitVisionEncoder(nn.Module): - """ - Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a - [`GitVisionEncoderLayer`]. 
- - Args: - config: GitVisionConfig - """ - - def __init__(self, config: GitVisionConfig): - super().__init__() - self.config = config - self.layers = nn.ModuleList([GitVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) - self.gradient_checkpointing = False - - def forward( - self, - inputs_embeds, - attention_mask: Optional[torch.Tensor] = None, - causal_attention_mask: Optional[torch.Tensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutput]: - r""" - Args: - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): - Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. - This is useful if you want more control over how to convert `input_ids` indices into associated vectors - than the model's internal embedding lookup matrix. - attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): - Causal mask for the text model. Mask values selected in `[0, 1]`: - - - 1 for tokens that are **not masked**, - - 0 for tokens that are **masked**. - - [What are attention masks?](../glossary#attention-mask) - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under - returned tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors - for more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
- """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - encoder_states = () if output_hidden_states else None - all_attentions = () if output_attentions else None - - hidden_states = inputs_embeds - for idx, encoder_layer in enumerate(self.layers): - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - if self.gradient_checkpointing and self.training: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs, output_attentions) - - return custom_forward - - layer_outputs = torch.utils.checkpoint.checkpoint( - create_custom_forward(encoder_layer), - hidden_states, - attention_mask, - causal_attention_mask, - ) - else: - layer_outputs = encoder_layer( - hidden_states, - attention_mask, - causal_attention_mask, - output_attentions=output_attentions, - ) - - hidden_states = layer_outputs[0] - - if output_attentions: - all_attentions = all_attentions + (layer_outputs[1],) - - if output_hidden_states: - encoder_states = encoder_states + (hidden_states,) - - if not return_dict: - return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) - return BaseModelOutput( - last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions - ) - - -GIT_VISION_INPUTS_DOCSTRING = r""" - Args: - pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using - [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. - output_attentions (`bool`, *optional*): - Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned - tensors for more detail. - output_hidden_states (`bool`, *optional*): - Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for - more detail. - return_dict (`bool`, *optional*): - Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
-""" - - -class GitVisionTransformer(nn.Module): - # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.__init__ with CLIPEncoder->GitVisionEncoder, CLIP->Git - def __init__(self, config: GitVisionConfig): - super().__init__() - self.config = config - embed_dim = config.hidden_size - - self.embeddings = GitVisionEmbeddings(config) - self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) - self.encoder = GitVisionEncoder(config) - self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) - - @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig) - def forward( - self, - pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutput]: - r""" - Returns: - - """ - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if pixel_values is None: - raise ValueError("You have to specify pixel_values") - - hidden_states = self.embeddings(pixel_values) - hidden_states = self.pre_layrnorm(hidden_states) - - encoder_outputs = self.encoder( - inputs_embeds=hidden_states, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - last_hidden_state = encoder_outputs[0] - - last_hidden_state = self.post_layernorm(last_hidden_state) - - if not return_dict: - return (last_hidden_state,) + encoder_outputs[1:] - - return BaseModelOutput( - last_hidden_state=last_hidden_state, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings( - """The vision model from CLIP, used in GIT, without any head or projection on top.""", - GIT_START_DOCSTRING, -) -class GitVisionModel(GitPreTrainedModel): - config_class = GitVisionConfig - main_input_name = "pixel_values" - - # Copied from transformers.models.clip.modeling_clip.CLIPVisionModel.__init__ with CLIP->Git - def __init__(self, config: GitVisionConfig): - super().__init__(config) - self.vision_model = GitVisionTransformer(config) - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self) -> nn.Module: - return self.vision_model.embeddings.patch_embedding - - @add_start_docstrings_to_model_forward(GIT_VISION_INPUTS_DOCSTRING) - @replace_return_docstrings(output_type=BaseModelOutput, config_class=GitVisionConfig) - def forward( - self, - pixel_values: Optional[torch.FloatTensor] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple, BaseModelOutput]: - r""" - Returns: - - Examples: - - ```python - >>> from PIL import Image - >>> import requests - >>> from transformers import AutoProcessor, GitVisionModel - - >>> processor = AutoProcessor.from_pretrained("microsoft/git-base") - >>> model = GitVisionModel.from_pretrained("microsoft/git-base") - - >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> inputs = processor(images=image, return_tensors="pt") - - >>> 
outputs = model(**inputs) - >>> last_hidden_state = outputs.last_hidden_state - ```""" - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - return self.vision_model( - pixel_values=pixel_values, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - -class GitProjection(nn.Module): - def __init__(self, config: GitConfig): - super().__init__() - self.config = config - self.visual_projection = nn.Sequential( - nn.Linear(config.vision_config.hidden_size, config.hidden_size), - nn.LayerNorm(config.hidden_size, eps=config.vision_config.layer_norm_eps), - ) - - def forward(self, embeddings: torch.Tensor) -> torch.Tensor: - return self.visual_projection(embeddings) - - -@add_start_docstrings( - "The bare GIT Model transformer consisting of a CLIP image encoder and text decoder outputting raw hidden-states" - " without any specific head on top.", - GIT_START_DOCSTRING, -) -class GitModel(GitPreTrainedModel): - def __init__(self, config): - super().__init__(config) - self.config = config - - self.embeddings = GitEmbeddings(config) - self.image_encoder = GitVisionModel(config.vision_config) - self.encoder = GitEncoder(config) - - self.visual_projection = GitProjection(config) - - if config.num_image_with_embedding is not None: - self.img_temperal_embedding = nn.ParameterList( - nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size)) - for _ in range(config.num_image_with_embedding) - ) - - # Initialize weights and apply final processing - self.post_init() - - def get_input_embeddings(self): - return self.embeddings.word_embeddings - - def set_input_embeddings(self, value): - self.embeddings.word_embeddings = value - - def _prune_heads(self, heads_to_prune): - """ - Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base - class PreTrainedModel - """ - for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) - - def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor: - # Default mask is for forward direction. Flip for backward direction. - mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1) - mask = mask.masked_fill(mask == 1, float("-inf")) - return mask - - def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None): - num_tgt = tgt.shape[1] - num_memory = memory.shape[1] - device = tgt.device - dtype = tgt.dtype - top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype) - top_right = torch.full( - (num_memory, num_tgt + past_key_values_length), - float("-inf"), - device=tgt.device, - dtype=dtype, - ) - bottom_left = torch.zeros( - (num_tgt, num_memory), - dtype=dtype, - device=tgt_mask.device, - ) - - if past_key_values_length > 0: - tgt_mask = torch.zeros( - (tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length), - dtype=dtype, - device=tgt_mask.device, - ) - - left = torch.cat((top_left, bottom_left), dim=0) - right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0) - - full_attention_mask = torch.cat((left, right), dim=1)[None, :] - - if memory_key_padding_mask is None: - memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device) - # if it is False, it means valid. 
That is, it is not a padding - if memory_key_padding_mask.dtype != torch.bool: - raise ValueError("Memory key padding mask must be a boolean tensor.") - zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype) - zero_negative_infinity[memory_key_padding_mask] = float("-inf") - full_attention_mask = full_attention_mask.expand( - (memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt) - ) - full_attention_mask = full_attention_mask.clone() - origin_left = full_attention_mask[:, :, :num_memory] - update = zero_negative_infinity[:, None, :] - full_attention_mask[:, :, :num_memory] = origin_left + update - - # add axis for multi-head - full_attention_mask = full_attention_mask[:, None, :, :] - - return full_attention_mask - - @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - pixel_values: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.FloatTensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]: - r""" - past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). 
- - Returns: - - Examples: - - ```python - >>> from transformers import AutoProcessor, AutoModel - >>> import requests - >>> from PIL import Image - - >>> processor = AutoProcessor.from_pretrained("microsoft/git-base") - >>> model = AutoModel.from_pretrained("microsoft/git-base") - - >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> text = "this is an image of two cats" - - >>> inputs = processor(text, images=image, return_tensors="pt") - - >>> outputs = model(**inputs) - >>> last_hidden_state = outputs.last_hidden_state - ```""" - output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions - output_hidden_states = ( - output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states - ) - use_cache = use_cache if use_cache is not None else self.config.use_cache - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - - if input_ids is not None and inputs_embeds is not None: - raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") - elif input_ids is not None: - self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) - input_shape = input_ids.size() - elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] - else: - raise ValueError("You have to specify either input_ids or inputs_embeds") - - seq_length = input_shape[1] - - # past_key_values_length - past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 - - # Prepare head mask if needed - # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_heads x N x N - # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] - # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] - head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) - - projected_visual_features = None - if pixel_values is not None: - if pixel_values.ndim == 4: - # here we assume pixel_values is of shape (batch_size, num_channels, height, width) - visual_features = self.image_encoder(pixel_values).last_hidden_state - - elif pixel_values.ndim == 5: - # here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width) - visual_features = [] - for frame_idx in range(pixel_values.shape[1]): - visual_features_frame = self.image_encoder(pixel_values[:, frame_idx, :, :]).last_hidden_state - visual_features_frame += self.img_temperal_embedding[frame_idx] - visual_features.append(visual_features_frame) - - # finally, concatenate all features along sequence dimension - visual_features = torch.cat(visual_features, dim=1) - - else: - raise ValueError("pixel_values must be of rank 4 or 5") - - projected_visual_features = self.visual_projection(visual_features) - - embedding_output = self.embeddings( - input_ids=input_ids, - position_ids=position_ids, - inputs_embeds=inputs_embeds, - past_key_values_length=past_key_values_length, - ) - - if projected_visual_features is None: - projected_visual_features = torch.zeros( - (embedding_output.shape[0], 0, embedding_output.shape[2]), - dtype=embedding_output.dtype, - device=embedding_output.device, - ) - - # Repeat visual features to match embedding batch size. 
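- # Added note: the text batch can be an integer multiple of the image batch (e.g. several candidate
- # sequences per image during generation); tiling the visual features along the batch dimension
- # keeps the two aligned for the concatenation below.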
- projected_visual_features = projected_visual_features.repeat( - embedding_output.size(0) // projected_visual_features.size(0), 1, 1 - ) - - # concatenate patch token and text token embeddings - hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1) - - # By default, an additive causal mask is created - # for masking the future (one direction). - tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device) - - # Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len) - combined_attention_mask = self.create_attention_mask( - tgt=embedding_output, - memory=projected_visual_features, - tgt_mask=tgt_mask, - past_key_values_length=past_key_values_length, - ) - - if attention_mask is not None: - # if the user provides an attention mask, we add it to the default one - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]).to( - embedding_output.device - ) - if past_key_values_length > 0: - expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :] - else: - combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask - - encoder_outputs = self.encoder( - hidden_states, - attention_mask=combined_attention_mask, - head_mask=head_mask, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - pixel_values_present=pixel_values is not None, - ) - sequence_output = encoder_outputs[0] - - if not return_dict: - return (sequence_output,) + encoder_outputs[1:] - - return BaseModelOutputWithPast( - last_hidden_state=sequence_output, - past_key_values=encoder_outputs.past_key_values, - hidden_states=encoder_outputs.hidden_states, - attentions=encoder_outputs.attentions, - ) - - -@add_start_docstrings( - """GIT Model with a `language modeling` head on top for autoregressive language modeling.""", GIT_START_DOCSTRING -) -class GitForCausalLM(GitPreTrainedModel): - _tied_weights_keys = ["output.weight"] - - def __init__(self, config): - super().__init__(config) - - self.git = GitModel(config) - self.output = nn.Linear(config.hidden_size, config.vocab_size) - - # Initialize weights and apply final processing - self.post_init() - - def get_output_embeddings(self): - return self.output - - def set_output_embeddings(self, new_embeddings): - self.output = new_embeddings - - @add_start_docstrings_to_model_forward(GIT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) - @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) - def forward( - self, - input_ids: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - position_ids: Optional[torch.Tensor] = None, - pixel_values: Optional[torch.Tensor] = None, - head_mask: Optional[torch.Tensor] = None, - inputs_embeds: Optional[torch.Tensor] = None, - labels: Optional[torch.Tensor] = None, - past_key_values: Optional[List[torch.Tensor]] = None, - use_cache: Optional[bool] = None, - output_attentions: Optional[bool] = None, - output_hidden_states: Optional[bool] = None, - return_dict: Optional[bool] = None, - ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: - r""" - labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): - Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in - `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are - ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` - past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): - Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. - - If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that - don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all - `decoder_input_ids` of shape `(batch_size, sequence_length)`. - use_cache (`bool`, *optional*): - If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see - `past_key_values`). - - Returns: - - Examples: - - Image captioning example: - - ```python - >>> from transformers import AutoProcessor, AutoModelForCausalLM - >>> import requests - >>> from PIL import Image - - >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-coco") - >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco") - - >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" - >>> image = Image.open(requests.get(url, stream=True).raw) - - >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values - - >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50) - >>> generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] - >>> print(generated_caption) - two cats sleeping on a pink blanket next to remotes. - ``` - - Visual question answering (VQA) example: - - ```python - >>> from transformers import AutoProcessor, AutoModelForCausalLM - >>> from huggingface_hub import hf_hub_download - >>> from PIL import Image - - >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa") - >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa") - - >>> file_path = hf_hub_download(repo_id="nielsr/textvqa-sample", filename="bus.png", repo_type="dataset") - >>> image = Image.open(file_path).convert("RGB") - - >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values - - >>> question = "what does the front of the bus say at the top?" - - >>> input_ids = processor(text=question, add_special_tokens=False).input_ids - >>> input_ids = [processor.tokenizer.cls_token_id] + input_ids - >>> input_ids = torch.tensor(input_ids).unsqueeze(0) - - >>> generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50) - >>> print(processor.batch_decode(generated_ids, skip_special_tokens=True)) - ['what does the front of the bus say at the top? special'] - ``` - - Video captioning example: - - ```python - >>> import av - >>> import numpy as np - >>> from PIL import Image - >>> from huggingface_hub import hf_hub_download - >>> from transformers import AutoProcessor, AutoModelForCausalLM - - >>> processor = AutoProcessor.from_pretrained("microsoft/git-base-vatex") - >>> model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-vatex") - - >>> # set seed for reproducability - >>> np.random.seed(45) - - - >>> def read_video_pyav(container, indices): - ... ''' - ... Decode the video with PyAV decoder. - ... Args: - ... 
container (`av.container.input.InputContainer`): PyAV container. - ... indices (`List[int]`): List of frame indices to decode. - ... Returns: - ... result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3). - ... ''' - ... frames = [] - ... container.seek(0) - ... start_index = indices[0] - ... end_index = indices[-1] - ... for i, frame in enumerate(container.decode(video=0)): - ... if i > end_index: - ... break - ... if i >= start_index and i in indices: - ... frames.append(frame) - ... return np.stack([x.to_ndarray(format="rgb24") for x in frames]) - - - >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): - ... ''' - ... Sample a given number of frame indices from the video. - ... Args: - ... clip_len (`int`): Total number of frames to sample. - ... frame_sample_rate (`int`): Sample every n-th frame. - ... seg_len (`int`): Maximum allowed index of sample's last frame. - ... Returns: - ... indices (`List[int]`): List of sampled frame indices - ... ''' - ... converted_len = int(clip_len * frame_sample_rate) - ... end_idx = np.random.randint(converted_len, seg_len) - ... start_idx = end_idx - converted_len - ... indices = np.linspace(start_idx, end_idx, num=clip_len) - ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) - ... return indices - - - >>> # load video - >>> file_path = hf_hub_download( - ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" - ... ) - >>> container = av.open(file_path) - - >>> # sample frames - >>> num_frames = model.config.num_image_with_embedding - >>> indices = sample_frame_indices( - ... clip_len=num_frames, frame_sample_rate=4, seg_len=container.streams.video[0].frames - ... ) - >>> frames = read_video_pyav(container, indices) - - >>> pixel_values = processor(images=list(frames), return_tensors="pt").pixel_values - - >>> generated_ids = model.generate(pixel_values=pixel_values, max_length=50) - - >>> print("Generated caption:", processor.batch_decode(generated_ids, skip_special_tokens=True)) - Generated caption: ['a woman is sitting at a table and she is talking about the food she is holding.'] - ``` - """ - return_dict = return_dict if return_dict is not None else self.config.use_return_dict - if labels is not None: - use_cache = False - - outputs = self.git( - input_ids, - attention_mask=attention_mask, - position_ids=position_ids, - pixel_values=pixel_values, - head_mask=head_mask, - inputs_embeds=inputs_embeds, - past_key_values=past_key_values, - use_cache=use_cache, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - return_dict=return_dict, - ) - - sequence_output = outputs[0] - logits = self.output(sequence_output) - - loss = None - if labels is not None: - # we are doing next-token prediction; shift prediction scores and input ids by one - num_image_tokens = self.git.encoder.layer[0].attention.self.image_patch_tokens - shifted_logits = logits[:, num_image_tokens:-1, :].contiguous() - labels = labels[:, 1:].contiguous() - loss_fct = CrossEntropyLoss() - loss = loss_fct(shifted_logits.view(-1, self.config.vocab_size), labels.view(-1)) - - if not return_dict: - output = (logits,) + outputs[1:] - return ((loss,) + output) if loss is not None else output - - return CausalLMOutputWithPast( - loss=loss, - logits=logits, - past_key_values=outputs.past_key_values, - hidden_states=outputs.hidden_states, - attentions=outputs.attentions, - ) - - def prepare_inputs_for_generation( - self, input_ids, past_key_values=None, 
attention_mask=None, use_cache=None, **kwargs - ): - # cut decoder_input_ids if past_key_values is used - if past_key_values is not None: - input_ids = input_ids[:, -1:] - - # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly - input_shape = input_ids.shape - if attention_mask is None: - attention_mask = input_ids.new_ones(input_shape) - - return { - "input_ids": input_ids, - "attention_mask": attention_mask, - "pixel_values": kwargs.get("pixel_values", None), - "past_key_values": past_key_values, - "use_cache": use_cache, - } - - def _reorder_cache(self, past_key_values, beam_idx): - reordered_past = () - for layer_past in past_key_values: - reordered_past += ( - tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), - ) - return reordered_past diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/F0Predictor/CrepeF0Predictor.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/F0Predictor/CrepeF0Predictor.py deleted file mode 100644 index e0052881b9b7b3aa373ebf69eb553815a564f610..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/F0Predictor/CrepeF0Predictor.py +++ /dev/null @@ -1,31 +0,0 @@ -from modules.F0Predictor.F0Predictor import F0Predictor -from modules.F0Predictor.crepe import CrepePitchExtractor -import torch - -class CrepeF0Predictor(F0Predictor): - def __init__(self,hop_length=512,f0_min=50,f0_max=1100,device=None,sampling_rate=44100,threshold=0.05,model="full"): - self.F0Creper = CrepePitchExtractor(hop_length=hop_length,f0_min=f0_min,f0_max=f0_max,device=device,threshold=threshold,model=model) - self.hop_length = hop_length - self.f0_min = f0_min - self.f0_max = f0_max - self.device = device - self.threshold = threshold - self.sampling_rate = sampling_rate - - def compute_f0(self,wav,p_len=None): - x = torch.FloatTensor(wav).to(self.device) - if p_len is None: - p_len = x.shape[0]//self.hop_length - else: - assert abs(p_len-x.shape[0]//self.hop_length) < 4, "pad length error" - f0,uv = self.F0Creper(x[None,:].float(),self.sampling_rate,pad_to=p_len) - return f0 - - def compute_f0_uv(self,wav,p_len=None): - x = torch.FloatTensor(wav).to(self.device) - if p_len is None: - p_len = x.shape[0]//self.hop_length - else: - assert abs(p_len-x.shape[0]//self.hop_length) < 4, "pad length error" - f0,uv = self.F0Creper(x[None,:].float(),self.sampling_rate,pad_to=p_len) - return f0,uv \ No newline at end of file diff --git a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/CNHubertLarge.py b/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/CNHubertLarge.py deleted file mode 100644 index 9db93781c36884c4096fa6fa5a12a95d385e80b8..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Kitasan-Black/vencoder/CNHubertLarge.py +++ /dev/null @@ -1,33 +0,0 @@ -from vencoder.encoder import SpeechEncoder -import torch -from fairseq import checkpoint_utils - -class CNHubertLarge(SpeechEncoder): - def __init__(self,vec_path = "pretrain/chinese-hubert-large-fairseq-ckpt.pt",device=None): - print("load model(s) from {}".format(vec_path)) - self.hidden_dim = 1024 - models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( - [vec_path], - suffix="", - ) - if device is None: - self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") - else: - self.dev = torch.device(device) - self.model = models[0].to(self.dev) - self.model.eval() - - def encoder(self, wav): - feats = wav - if feats.dim() == 2: # double 
channels - feats = feats.mean(-1) - assert feats.dim() == 1, feats.dim() - feats = feats.view(1, -1) - padding_mask = torch.BoolTensor(feats.shape).fill_(False) - inputs = { - "source": feats.to(wav.device), - "padding_mask": padding_mask.to(wav.device) - } - with torch.no_grad(): - logits = self.model.extract_features(**inputs) - return logits[0].transpose(1, 2) \ No newline at end of file diff --git a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/app.py b/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/app.py deleted file mode 100644 index 174b4b26cd9997bf0fa108fbde14ac236481d6dd..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Matikanefukukitaru/app.py +++ /dev/null @@ -1,1066 +0,0 @@ -import multiprocessing -import os -import re -import torch -import glob -import gradio as gr -import librosa -import numpy as np -import soundfile as sf -from inference.infer_tool import Svc -import logging -import json -import yaml -import time -import subprocess -import shutil -import utils -import datetime -import traceback -from utils import mix_model -from onnxexport.model_onnx import SynthesizerTrn -from itertools import chain -from compress_model import removeOptimizer -from auto_slicer import AutoSlicer - -logging.getLogger('numba').setLevel(logging.WARNING) -logging.getLogger('markdown_it').setLevel(logging.WARNING) -logging.getLogger('urllib3').setLevel(logging.WARNING) -logging.getLogger('matplotlib').setLevel(logging.WARNING) - -workdir = "logs/44k" -diff_workdir = "logs/44k/diffusion" -config_dir = "configs/" -raw_path = "dataset_raw" -raw_wavs_path = "raw" -models_backup_path = 'models_backup' -root_dir = "checkpoints" -debug = False -sovits_params = {} -diff_params = {} - -loaded = None - -def debug_change(): - global debug - debug = debug_button.value - -def get_default_settings(): - global sovits_params, diff_params - yaml_path = "settings.yaml" - with open(yaml_path, 'r') as f: - default_settings = yaml.safe_load(f) - sovits_params = default_settings['sovits_params'] - diff_params = default_settings['diff_params'] - return sovits_params, diff_params - -def save_default_settings(log_interval,eval_interval,keep_ckpts,batch_size,learning_rate,fp16_run,all_in_mem,num_workers,cache_all_data,cache_device,amp_dtype,diff_batch_size,diff_lr,diff_interval_log,diff_interval_val,diff_force_save): - yaml_path = "settings.yaml" - with open(yaml_path, 'r') as f: - default_settings = yaml.safe_load(f) - default_settings['sovits_params']['log_interval'] = int(log_interval) - default_settings['sovits_params']['eval_interval'] = int(eval_interval) - default_settings['sovits_params']['keep_ckpts'] = int(keep_ckpts) - default_settings['sovits_params']['batch_size'] = int(batch_size) - default_settings['sovits_params']['learning_rate'] = float(learning_rate) - default_settings['sovits_params']['fp16_run'] = fp16_run - default_settings['sovits_params']['all_in_mem'] = all_in_mem - default_settings['diff_params']['num_workers'] = int(num_workers) - default_settings['diff_params']['cache_all_data'] = cache_all_data - default_settings['diff_params']['cache_device'] = str(cache_device) - default_settings['diff_params']['amp_dtype'] = str(amp_dtype) - default_settings['diff_params']['diff_batch_size'] = int(diff_batch_size) - default_settings['diff_params']['diff_lr'] = float(diff_lr) - default_settings['diff_params']['diff_interval_log'] = int(diff_interval_log) - default_settings['diff_params']['diff_interval_val'] = int(diff_interval_val) - 
default_settings['diff_params']['diff_force_save'] = int(diff_force_save) - with open(yaml_path, 'w') as y: - yaml.safe_dump(default_settings, y, default_flow_style=False, sort_keys=False) - return "成功保存默认配置" - -def get_model_info(choice_ckpt): - pthfile = os.path.join(workdir, choice_ckpt) - net = torch.load(pthfile, map_location=torch.device('cpu')) #cpu load - spk_emb = net["model"].get("emb_g.weight") - if spk_emb is None: - return "所选模型缺少emb_g.weight,你可能选择了一个底模" - _dim, _layer = spk_emb.size() - model_type = { - 768: "Vec768-Layer12", - 256: "Vec256-Layer9 / HubertSoft", - 1024: "Whisper-PPG" - } - return model_type.get(_layer, "不受支持的模型") - -def load_json_encoder(config_choice): - config_file = os.path.join(config_dir + config_choice) - with open(config_file, 'r') as f: - config = json.load(f) - try: - config_encoder = str(config["model"]["speech_encoder"]) - return config_encoder - except Exception as e: - if "speech_encoder" in str(e): - return "你的配置文件似乎是未作兼容的旧版,请根据文档指示对你的配置文件进行修改" - else: - return f"出错了: {e}" - -def load_model_func(ckpt_name,cluster_name,config_name,enhance,diff_model_name,diff_config_name,only_diffusion,encoder,using_device): - global model - config_path = os.path.join(config_dir, config_name) - diff_config_path = os.path.join(config_dir, diff_config_name) if diff_config_name != "no_diff_config" else "configs/diffusion.yaml" - with open(config_path, 'r') as f: - config = json.load(f) - spk_dict = config["spk"] - spk_name = config.get('spk', None) - spk_choice = next(iter(spk_name)) if spk_name else "未检测到音色" - ckpt_path = os.path.join(workdir, ckpt_name) - _, _suffix = os.path.splitext(cluster_name) - fr = True if _suffix == ".pkl" else False #如果是pkl后缀就启用特征检索 - cluster_path = os.path.join(workdir, cluster_name) - diff_model_path = os.path.join(diff_workdir, diff_model_name) - shallow_diffusion = True if diff_model_name != "no_diff" else False - use_spk_mix = False - device = None if using_device == "Auto" else using_device - model = Svc(ckpt_path, - config_path, - device, - cluster_path, - enhance, - diff_model_path, - diff_config_path, - shallow_diffusion, - only_diffusion, - use_spk_mix, - fr) - spk_list = list(spk_dict.keys()) - clip = 25 if encoder == "Whisper-PPG" else 0 #Whisper必须强制切片25秒 - device_name = torch.cuda.get_device_properties(model.dev).name if "cuda" in str(model.dev) else str(model.dev) - index_or_kmeans = "特征索引" if fr is True else "聚类模型" - clu_load = "未加载" if cluster_name == "no_clu" else cluster_name - diff_load = "未加载" if diff_model_name == "no_diff" else diff_model_name - output_msg = f"模型被成功加载到了{device_name}上\n{index_or_kmeans}:{clu_load}\n扩散模型:{diff_load}" - return output_msg, gr.Dropdown.update(choices=spk_list, value=spk_choice), clip - -def Newload_model_func(ckpt_name,cluster_name,config_name2,enhance2,diff_model_name2,diff_config_name2,only_diffusion2,encoder2,using_device2): - global model, loaded - config_name = config_name2.value - enhance = enhance2.value - diff_model_name = diff_model_name2.value - diff_config_name = (diff_config_name2).value - only_diffusion = (only_diffusion2).value - encoder = (encoder2).value - using_device = (using_device2).value - config_path = os.path.join(config_dir, config_name) - diff_config_path = os.path.join(config_dir, diff_config_name) if diff_config_name != "no_diff_config" else "configs/diffusion.yaml" - with open(config_path, 'r') as f: - config = json.load(f) - spk_dict = config["spk"] - spk_name = config.get('spk', None) - spk_choice = next(iter(spk_name)) if spk_name else "未检测到音色" - ckpt_path = 
os.path.join(workdir, ckpt_name) - _, _suffix = os.path.splitext(cluster_name) - fr = True if _suffix == ".pkl" else False #如果是pkl后缀就启用特征检索 - cluster_path = os.path.join(workdir, cluster_name) - diff_model_path = os.path.join(diff_workdir, diff_model_name) - shallow_diffusion = True if diff_model_name != "no_diff" else False - use_spk_mix = False - device = None if using_device == "Auto" else using_device - model = Svc(ckpt_path, - config_path, - device, - cluster_path, - enhance, - diff_model_path, - diff_config_path, - shallow_diffusion, - only_diffusion, - use_spk_mix, - fr) - spk_list = list(spk_dict.keys()) - clip = 25 if encoder == "Whisper-PPG" else 0 #Whisper必须强制切片25秒 - device_name = torch.cuda.get_device_properties(model.dev).name if "cuda" in str(model.dev) else str(model.dev) - index_or_kmeans = "特征索引" if fr is True else "聚类模型" - clu_load = "未加载" if cluster_name == "no_clu" else cluster_name - diff_load = "未加载" if diff_model_name == "no_diff" else diff_model_name - loaded = cluster_name - #output_msg = f"模型被成功加载到了{device_name}上\n{index_or_kmeans}:{clu_load}\n扩散模型:{diff_load}" - #return output_msg, gr.Dropdown.update(choices=spk_list, value=spk_choice), clip - -def get_file_options(directory, extension): - return [file for file in os.listdir(directory) if file.endswith(extension)] - -def load_options(): - ckpt_list = [file for file in get_file_options(workdir, ".pth") if not file.startswith("D_")] - config_list = get_file_options(config_dir, ".json") - cluster_list = ["no_clu"] + get_file_options(workdir, ".pt") + get_file_options(workdir, ".pkl") # 聚类和特征检索模型 - diff_list = ["no_diff"] + get_file_options(diff_workdir, ".pt") - diff_config_list = get_file_options(config_dir, ".yaml") - return ckpt_list, config_list, cluster_list, diff_list, diff_config_list - -def refresh_options(): - ckpt_list, config_list, cluster_list, diff_list, diff_config_list = load_options() - return ( - choice_ckpt.update(choices=ckpt_list), - config_choice.update(choices=config_list), - cluster_choice.update(choices=cluster_list), - diff_choice.update(choices=diff_list), - diff_config_choice.update(choices=diff_config_list) - ) - -def vc_infer(sid, input_audio, input_audio_path, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment): - if np.issubdtype(input_audio.dtype, np.integer): - input_audio = (input_audio / np.iinfo(input_audio.dtype).max).astype(np.float32) - if len(input_audio.shape) > 1: - input_audio = librosa.to_mono(input_audio.transpose(1, 0)) - _audio = model.slice_inference( - input_audio_path, - sid, - vc_transform, - slice_db, - cluster_ratio, - auto_f0, - noise_scale, - pad_seconds, - cl_num, - lg_num, - lgr_num, - f0_predictor, - enhancer_adaptive_key, - cr_threshold, - k_step, - use_spk_mix, - second_encoding, - loudness_envelope_adjustment - ) - model.clear_empty() - timestamp = str(int(time.time())) - if not os.path.exists("results"): - os.makedirs("results") - output_file_name = os.path.splitext(os.path.basename(input_audio_path))[0] + "_" + sid + "_" + timestamp + ".wav" - output_file_path = os.path.join("results", output_file_name) - sf.write(output_file_path, _audio, model.target_sample, format="wav") - return output_file_path - -def vc_fn(sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, 
use_spk_mix, second_encoding, loudness_envelope_adjustment): - global model - try: - if input_audio is None: - return "You need to upload an audio", None - if model is None: - return "You need to upload an model", None - sampling_rate, audio = input_audio - temp_path = "temp.wav" - sf.write(temp_path, audio, sampling_rate, format="wav") - output_file_path = vc_infer(sid, audio, temp_path, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment) - os.remove(temp_path) - return "Success", output_file_path - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def vc_batch_fn(sid, input_audio_files, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment): - global model - try: - if input_audio_files is None or len(input_audio_files) == 0: - return "You need to upload at least one audio file" - if model is None: - return "You need to upload a model" - for file_obj in input_audio_files: - input_audio_path = file_obj.name - audio, sampling_rate = sf.read(input_audio_path) - vc_infer(sid, audio, input_audio_path, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment) - return "批量推理完成,音频已经被保存到results文件夹" - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def tts_fn(_text, _speaker, sid, vc_transform, auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold, k_step,use_spk_mix,second_encoding,loudness_envelope_adjustment): - global model - try: - subprocess.run([r"python", "tts.py", _text, _speaker]) - sr = 44100 - y, sr = librosa.load("tts.wav") - resampled_y = librosa.resample(y, orig_sr=sr, target_sr=sr) - sf.write("tts.wav", resampled_y, sr, subtype = "PCM_16") - input_audio = "tts.wav" - audio, sampling_rate = sf.read(input_audio) - if model is None: - return "You need to upload a model", None - output_file_path = vc_infer(sid, audio, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment) - return "Success", output_file_path - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def load_raw_dirs(): - illegal_files = [] - #检查文件名 - allowed_pattern = re.compile(r'^[a-zA-Z0-9_@#$%^&()_+\-=\s\.]*$') - for root, dirs, files in os.walk(raw_path): - if root != raw_path: # 只处理子文件夹内的文件 - for file in files: - file_name, _ = os.path.splitext(file) - if not allowed_pattern.match(file_name): - illegal_files.append(file) - if len(illegal_files)!=0: - return f"数据集文件名只能包含数字、字母、下划线,以下文件不符合要求,请改名后再试:{illegal_files}" - #检查有没有小可爱不用wav文件当数据集 - for root, dirs, files in os.walk(raw_path): - if root != raw_path: # 只处理子文件夹内的文件 - for file in files: - if not file.lower().endswith('.wav'): - illegal_files.append(file) - if len(illegal_files)!=0: - return f"以下文件为非wav格式文件,请删除后再试:{illegal_files}" - spk_dirs = [] - with os.scandir(raw_path) as entries: - for entry in entries: - if entry.is_dir(): - 
spk_dirs.append(entry.name) - if len(spk_dirs) != 0: - return raw_dirs_list.update(value=spk_dirs) - else: - return raw_dirs_list.update(value="未找到数据集,请检查dataset_raw文件夹") - -def dataset_preprocess(encoder, f0_predictor, use_diff, vol_aug, skip_loudnorm, num_processes): - diff_arg = "--use_diff" if use_diff else "" - vol_aug_arg = "--vol_aug" if vol_aug else "" - skip_loudnorm_arg = "--skip_loudnorm" if skip_loudnorm else "" - preprocess_commands = [ - r"python resample.py %s" % (skip_loudnorm_arg), - r"python preprocess_flist_config.py --speech_encoder %s %s" % (encoder, vol_aug_arg), - r"python preprocess_hubert_f0.py --num_processes %s --f0_predictor %s %s" % (num_processes ,f0_predictor, diff_arg) - ] - accumulated_output = "" - #清空dataset - dataset = os.listdir("dataset/44k") - if len(dataset) != 0: - for dir in dataset: - dataset_dir = "dataset/44k/" + str(dir) - if os.path.isdir(dataset_dir): - shutil.rmtree(dataset_dir) - accumulated_output += f"Deleting previous dataset: {dir}\n" - for command in preprocess_commands: - try: - result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, text=True) - accumulated_output += f"Command: {command}, Using Encoder: {encoder}, Using f0 Predictor: {f0_predictor}\n" - yield accumulated_output, None - progress_line = None - for line in result.stdout: - if r"it/s" in line or r"s/it" in line: #防止进度条刷屏 - progress_line = line - else: - accumulated_output += line - if progress_line is None: - yield accumulated_output, None - else: - yield accumulated_output + progress_line, None - result.communicate() - except subprocess.CalledProcessError as e: - result = e.output - accumulated_output += f"Error: {result}\n" - yield accumulated_output, None - if progress_line is not None: - accumulated_output += progress_line - accumulated_output += '-' * 50 + '\n' - yield accumulated_output, None - config_path = "configs/config.json" - with open(config_path, 'r') as f: - config = json.load(f) - spk_name = config.get('spk', None) - yield accumulated_output, gr.Textbox.update(value=spk_name) - -def regenerate_config(encoder, vol_aug): - vol_aug_arg = "--vol_aug" if vol_aug else "" - cmd = r"python preprocess_flist_config.py --speech_encoder %s %s" % (encoder, vol_aug_arg) - output = "" - try: - result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, text=True) - for line in result.stdout: - output += line - output += "Regenerate config file successfully." 
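    # Note: subprocess.Popen() never raises CalledProcessError itself (that exception comes from
    # subprocess.run()/check_call() with check enabled), so the except branch below is effectively
    # unreachable; child-process errors surface through the captured stdout instead, since stderr
    # is redirected to STDOUT.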
- except subprocess.CalledProcessError as e: - result = e.output - output += f"Error: {result}\n" - return output - -def clear_output(): - return gr.Textbox.update(value="Cleared!>_<") - -def read_config(config_path): - with open(config_path, 'r') as config_file: - config_data = json.load(config_file) - return config_data - -def config_fn(log_interval, eval_interval, keep_ckpts, batch_size, lr, fp16_run, all_in_mem, diff_num_workers, diff_cache_all_data, diff_batch_size, diff_lr, diff_interval_log, diff_interval_val, diff_cache_device, diff_amp_dtype, diff_force_save): - config_origin = "configs/config.json" - diff_config = "configs/diffusion.yaml" - config_data = read_config(config_origin) - config_data['train']['log_interval'] = int(log_interval) - config_data['train']['eval_interval'] = int(eval_interval) - config_data['train']['keep_ckpts'] = int(keep_ckpts) - config_data['train']['batch_size'] = int(batch_size) - config_data['train']['learning_rate'] = float(lr) - config_data['train']['fp16_run'] = fp16_run - config_data['train']['all_in_mem'] = all_in_mem - with open(config_origin, 'w') as config_file: - json.dump(config_data, config_file, indent=4) - with open(diff_config, 'r') as diff_yaml: - diff_config_data = yaml.safe_load(diff_yaml) - diff_config_data['train']['num_workers'] = int(diff_num_workers) - diff_config_data['train']['cache_all_data'] = diff_cache_all_data - diff_config_data['train']['batch_size'] = int(diff_batch_size) - diff_config_data['train']['lr'] = float(diff_lr) - diff_config_data['train']['interval_log'] = int(diff_interval_log) - diff_config_data['train']['interval_val'] = int(diff_interval_val) - diff_config_data['train']['cache_device'] = str(diff_cache_device) - diff_config_data['train']['amp_dtype'] = str(diff_amp_dtype) - diff_config_data['train']['interval_force_save'] = int(diff_force_save) - with open(diff_config, 'w') as diff_yaml: - yaml.safe_dump(diff_config_data, diff_yaml, default_flow_style=False, sort_keys=False) - return "配置文件写入完成" - -def check_dataset(dataset_path): - if not os.listdir(dataset_path): - return "数据集不存在,请检查dataset文件夹" - no_npy_pt_files = True - for root, dirs, files in os.walk(dataset_path): - for file in files: - if file.endswith('.npy') or file.endswith('.pt'): - no_npy_pt_files = False - break - if no_npy_pt_files: - return "数据集中未检测到f0和hubert文件,可能是预处理未完成" - return None - -def training(gpu_selection, encoder): - config_data = read_config("configs/config.json") - vol_emb = config_data["model"]["vol_embedding"] - dataset_warn = check_dataset("dataset/44k") - if dataset_warn is not None: - return dataset_warn - encoder_models = { #编码器好多,要塞不下了 - "vec256l9": ("D_0.pth", "G_0.pth", "pre_trained_model"), - "vec768l12": ("D_0.pth", "G_0.pth", "pre_trained_model/768l12/vol_emb" if vol_emb else "pre_trained_model/768l12"), - "hubertsoft": ("D_0.pth", "G_0.pth", "pre_trained_model/hubertsoft"), - "whisper-ppg": ("D_0.pth", "G_0.pth", "pre_trained_model/whisper-ppg"), - "cnhubertlarge": ("D_0.pth", "G_0.pth", "pre_trained_model/cnhubertlarge"), - "dphubert": ("D_0.pth", "G_0.pth", "pre_trained_model/dphubert"), - "whisper-ppg-large": ("D_0.pth", "G_0.pth", "pre_trained_model/whisper-ppg-large") - } - if encoder not in encoder_models: - return "未知编码器" - d_0_file, g_0_file, encoder_model_path = encoder_models[encoder] - d_0_path = os.path.join(encoder_model_path, d_0_file) - g_0_path = os.path.join(encoder_model_path, g_0_file) - timestamp = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M') - new_backup_folder = 
os.path.join(models_backup_path, str(timestamp)) - if os.listdir(workdir) != ['diffusion']: - os.makedirs(new_backup_folder, exist_ok=True) - for file in os.listdir(workdir): - if file != "diffusion": - shutil.move(os.path.join(workdir, file), os.path.join(new_backup_folder, file)) - shutil.copy(d_0_path, os.path.join(workdir, "D_0.pth")) - shutil.copy(g_0_path, os.path.join(workdir, "G_0.pth")) - cmd = r"set CUDA_VISIBLE_DEVICES=%s && python train.py -c configs/config.json -m 44k" % (gpu_selection) - subprocess.Popen(["cmd", "/c", "start", "cmd", "/k", cmd]) - return "已经在新的终端窗口开始训练,请监看终端窗口的训练日志。在终端中按Ctrl+C可暂停训练。" - -def continue_training(gpu_selection, encoder): - dataset_warn = check_dataset("dataset/44k") - if dataset_warn is not None: - return dataset_warn - if encoder == "": - return "请先选择预处理对应的编码器" - all_files = os.listdir(workdir) - model_files = [f for f in all_files if f.startswith('G_') and f.endswith('.pth')] - if len(model_files) == 0: - return "你还没有已开始的训练" - cmd = r"set CUDA_VISIBLE_DEVICES=%s && python train.py -c configs/config.json -m 44k" % (gpu_selection) - subprocess.Popen(["cmd", "/c", "start", "cmd", "/k", cmd]) - return "已经在新的终端窗口开始训练,请监看终端窗口的训练日志。在终端中按Ctrl+C可暂停训练。" - -def kmeans_training(kmeans_gpu): - if not os.listdir(r"dataset/44k"): - return "数据集不存在,请检查dataset文件夹" - cmd = r"python cluster/train_cluster.py --gpu" if kmeans_gpu else r"python cluster/train_cluster.py" - subprocess.Popen(["cmd", "/c", "start", "cmd", "/k", cmd]) - return "已经在新的终端窗口开始训练,训练聚类模型不会输出日志,CPU训练一般需要5-10分钟左右" - -def index_training(): - if not os.listdir(r"dataset/44k"): - return "数据集不存在,请检查dataset文件夹" - cmd = r"python train_index.py -c configs/config.json" - subprocess.Popen(["cmd", "/c", "start", "cmd", "/k", cmd]) - return "已经在新的终端窗口开始训练" - -def diff_training(encoder): - if not os.listdir(r"dataset/44k"): - return "数据集不存在,请检查dataset文件夹" - pre_trained_model_768l12 = "pre_trained_model/diffusion/768l12/model_0.pt" - pre_trained_model_hubertsoft = "pre_trained_model/diffusion/hubertsoft/model_0.pt" - timestamp = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M') - new_backup_folder = os.path.join(models_backup_path, "diffusion", str(timestamp)) - if len(os.listdir(diff_workdir)) != 0: - os.makedirs(new_backup_folder, exist_ok=True) - for file in os.listdir(diff_workdir): - shutil.move(os.path.join(diff_workdir, file), os.path.join(new_backup_folder, file)) - if encoder == "vec256l9" or encoder == "whisper-ppg": - return "你所选的编码器暂时不支持训练扩散模型" - elif encoder == "vec768l12": - shutil.copy(pre_trained_model_768l12, os.path.join(diff_workdir, "model_0.pt")) - elif encoder == "hubertsoft": - shutil.copy(pre_trained_model_hubertsoft, os.path.join(diff_workdir, "model_0.pt")) - else: - return "请先选择编码器" - subprocess.Popen(["cmd", "/c", "start", "cmd", "/k", r"python train_diff.py -c configs/diffusion.yaml"]) - return "已经在新的终端窗口开始训练,请监看终端窗口的训练日志。在终端中按Ctrl+C可暂停训练。" - -def diff_continue_training(encoder): - if not os.listdir(r"dataset/44k"): - return "数据集不存在,请检查dataset文件夹" - if encoder == "": - return "请先选择预处理对应的编码器" - all_files = os.listdir(diff_workdir) - model_files = [f for f in all_files if f.endswith('.pt')] - if len(model_files) == 0: - return "你还没有已开始的训练" - subprocess.Popen(["cmd", "/c", "start", "cmd", "/k", r"python train_diff.py -c configs/diffusion.yaml"]) - return "已经在新的终端窗口开始训练,请监看终端窗口的训练日志。在终端中按Ctrl+C可暂停训练。" - -def upload_mix_append_file(files,sfiles): - try: - if(sfiles == None): - file_paths = [file.name for file in files] - else: - file_paths = [file.name for file in chain(files,sfiles)] 
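    # Each selected checkpoint starts with a default mix weight of 100. The textbox below holds a
    # JSON object mapping model path -> weight, which mix_submit_click() later parses with
    # json.loads() and hands to utils.mix_model() as parallel (path, rate) sequences.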
- p = {file:100 for file in file_paths} - return file_paths,mix_model_output1.update(value=json.dumps(p,indent=2)) - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def mix_submit_click(js,mode): - try: - assert js.lstrip()!="" - modes = {"凸组合":0, "线性组合":1} - mode = modes[mode] - data = json.loads(js) - data = list(data.items()) - model_path,mix_rate = zip(*data) - path = mix_model(model_path,mix_rate,mode) - return f"成功,文件被保存在了{path}" - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def updata_mix_info(files): - try: - if files == None : return mix_model_output1.update(value="") - p = {file.name:100 for file in files} - return mix_model_output1.update(value=json.dumps(p,indent=2)) - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - -def pth_identify(): - if not os.path.exists(root_dir): - return f"未找到{root_dir}文件夹,请先创建一个{root_dir}文件夹并按第一步流程操作" - model_dirs = [d for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d))] - if not model_dirs: - return f"未在{root_dir}文件夹中找到模型文件夹,请确保每个模型和配置文件都被放置在单独的文件夹中" - valid_model_dirs = [] - for path in model_dirs: - pth_files = glob.glob(f"{root_dir}/{path}/*.pth") - json_files = glob.glob(f"{root_dir}/{path}/*.json") - if len(pth_files) != 1 or len(json_files) != 1: - return f"错误: 在{root_dir}/{path}中找到了{len(pth_files)}个.pth文件和{len(json_files)}个.json文件。应当确保每个文件夹内有且只有一个.pth文件和.json文件" - valid_model_dirs.append(path) - - return f"成功识别了{len(valid_model_dirs)}个模型:{valid_model_dirs}" - -def onnx_export(): - model_dirs = [d for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d))] - try: - for path in model_dirs: - pth_files = glob.glob(f"{root_dir}/{path}/*.pth") - json_files = glob.glob(f"{root_dir}/{path}/*.json") - model_file = pth_files[0] - json_file = json_files[0] - with open(json_file, 'r') as config_file: - config_data = json.load(config_file) - channels = config_data["model"]["gin_channels"] - if str(channels) == "256": - para1 = 1 - if str(channels) == "768": - para1 = 192 - device = torch.device("cpu") - hps = utils.get_hparams_from_file(json_file) - SVCVITS = SynthesizerTrn( - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model) - _ = utils.load_checkpoint(model_file, SVCVITS, None) - _ = SVCVITS.eval().to(device) - for i in SVCVITS.parameters(): - i.requires_grad = False - n_frame = 10 - test_hidden_unit = torch.rand(para1, n_frame, channels) - test_pitch = torch.rand(1, n_frame) - test_mel2ph = torch.arange(0, n_frame, dtype=torch.int64)[None] # torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).unsqueeze(0) - test_uv = torch.ones(1, n_frame, dtype=torch.float32) - test_noise = torch.randn(1, 192, n_frame) - test_sid = torch.LongTensor([0]) - input_names = ["c", "f0", "mel2ph", "uv", "noise", "sid"] - output_names = ["audio", ] - onnx_file = os.path.splitext(model_file)[0] + ".onnx" - torch.onnx.export(SVCVITS, - ( - test_hidden_unit.to(device), - test_pitch.to(device), - test_mel2ph.to(device), - test_uv.to(device), - test_noise.to(device), - test_sid.to(device) - ), - onnx_file, - dynamic_axes={ - "c": [0, 1], - "f0": [1], - "mel2ph": [1], - "uv": [1], - "noise": [2], - }, - do_constant_folding=False, - opset_version=16, - verbose=False, - input_names=input_names, - output_names=output_names) - return "转换成功,模型被保存在了checkpoints下的对应目录" - except Exception as e: - if debug: traceback.print_exc() - return "转换错误:"+str(e) - -def load_raw_audio(audio_path): - if not 
os.path.isdir(audio_path): - return "请输入正确的目录", None - files = os.listdir(audio_path) - wav_files = [file for file in files if file.lower().endswith('.wav')] - if not wav_files: - return "未在目录中找到.wav音频文件", None - return "成功加载", wav_files - -def slicer_fn(input_dir, output_dir, process_method, max_sec, min_sec): - if output_dir == "": - return "请先选择输出的文件夹" - slicer = AutoSlicer() - if not os.path.exists(output_dir): - os.makedirs(output_dir) - for filename in os.listdir(input_dir): - if filename.lower().endswith(".wav"): - slicer.auto_slice(filename, input_dir, output_dir, max_sec) - if process_method == "丢弃": - for filename in os.listdir(output_dir): - if filename.endswith(".wav"): - filepath = os.path.join(output_dir, filename) - audio, sr = librosa.load(filepath, sr=None, mono=False) - if librosa.get_duration(y=audio, sr=sr) < min_sec: - os.remove(filepath) - elif process_method == "将过短音频整合为长音频": - slicer.merge_short(output_dir, max_sec, min_sec) - file_count, max_duration, min_duration, orig_duration, final_duration = slicer.slice_count(input_dir, output_dir) - hrs = int(final_duration / 3600) - mins = int((final_duration % 3600) / 60) - sec = format(float(final_duration % 60), '.2f') - rate = format(100 * (final_duration / orig_duration), '.2f') - return f"成功将音频切分为{file_count}条片段,其中最长{max_duration}秒,最短{min_duration}秒,切片后的音频总时长{hrs:02d}小时{mins:02d}分{sec}秒,为原始音频时长的{rate}%" - -def model_compression(_model): - if _model == "": - return "请先选择要压缩的模型" - else: - model_path = os.path.join(workdir, _model) - filename, extension = os.path.splitext(_model) - output_model_name = f"{filename}_compressed{extension}" - output_path = os.path.join(workdir, output_model_name) - removeOptimizer(model_path, output_path) - return f"模型已成功被保存在了{output_path}" - -# read ckpt list -ckpt_list, config_list, cluster_list, diff_list, diff_config_list = load_options() - -#read GPU info -ngpu=torch.cuda.device_count() -gpu_infos=[] -if(torch.cuda.is_available()==False or ngpu==0):if_gpu_ok=False -else: - if_gpu_ok = False - for i in range(ngpu): - gpu_name=torch.cuda.get_device_name(i) - if("MX"in gpu_name):continue - if("10"in gpu_name or "16"in gpu_name or "20"in gpu_name or "30"in gpu_name or "40"in gpu_name or "A50"in gpu_name.upper() or "70"in gpu_name or "80"in gpu_name or "90"in gpu_name or "M4"in gpu_name or"P4"in gpu_name or "T4"in gpu_name or "TITAN"in gpu_name.upper()):#A10#A100#V100#A40#P40#M40#K80 - if_gpu_ok=True#至少有一张能用的N卡 - gpu_infos.append("%s\t%s"%(i,gpu_name)) -gpu_info="\n".join(gpu_infos)if if_gpu_ok==True and len(gpu_infos)>0 else "很遗憾您这没有能用的显卡来支持您训练" -gpus="-".join([i[0]for i in gpu_infos]) - -#read default params -sovits_params, diff_params = get_default_settings() - -app = gr.Blocks() - -def Newget_model_info(choice_ckpt2): - choice_ckpt = str(choice_ckpt2) - pthfile = os.path.join(workdir, choice_ckpt) - net = torch.load(pthfile, map_location=torch.device('cpu')) #cpu load - spk_emb = net["model"].get("emb_g.weight") - if spk_emb is None: - return "所选模型缺少emb_g.weight,你可能选择了一个底模" - _dim, _layer = spk_emb.size() - model_type = { - 768: "Vec768-Layer12", - 256: "Vec256-Layer9 / HubertSoft", - 1024: "Whisper-PPG" - } - return gr.Textbox(visible=False, value=model_type.get(_layer, "不受支持的模型")) - -with app: - gr.Markdown(value=""" - ### So-VITS-SVC 4.1-Stable - - 修改自原项目及bilibili@麦哲云 - - 仅供个人娱乐和非商业用途,禁止用于血腥、暴力、性相关、政治相关内容 - - weiui来自:bilibili@羽毛布団,交流③群:416656175 - - 镜像作者:bilibili@kiss丿冷鸟鸟,交流群:829974025 - - """) - with gr.Tabs(): - with gr.TabItem("FC"): - #with gr.Row(): - # choice_ckpt = 
gr.Dropdown(label="模型选择", choices=ckpt_list, value="no_model") - # model_branch = gr.Textbox(label="模型编码器", placeholder="请先选择模型", interactive=False) - #choice_ckpt = gr.Dropdown(value="G_388000.pth", visible=False) - #with gr.Row(): - # config_choice = gr.Dropdown(label="配置文件", choices=config_list, value="no_config") - # config_info = gr.Textbox(label="配置文件编码器", placeholder="请选择配置文件") - config_choice = gr.Dropdown(value="config.json", visible=False) - #gr.Markdown(value="""**请检查模型和配置文件的编码器是否匹配**""") - #with gr.Row(): - # diff_choice = gr.Dropdown(label="(可选)选择扩散模型", choices=diff_list, value="no_diff", interactive=True) - # diff_config_choice = gr.Dropdown(label="扩散模型配置文件", choices=diff_config_list, value="no_diff_config", interactive=True) - diff_choice = gr.Dropdown(value="no_diff", visible=False) - diff_config_choice = gr.Dropdown(value="no_diff_config", visible=False) - with gr.Row(): - cluster_choice = gr.Dropdown(label="(可选)选择聚类模型/特征检索模型", choices=cluster_list, value="no_clu") - with gr.Row(): - enhance = gr.Checkbox(label="是否使用NSF_HIFIGAN增强,该选项对部分训练集少的模型有一定的音质增强效果,但是对训练好的模型有反面效果,默认关闭", value=False) - #only_diffusion = gr.Checkbox(label="是否使用全扩散推理,开启后将不使用So-VITS模型,仅使用扩散模型进行完整扩散推理,默认关闭", value=False) - only_diffusion = gr.Checkbox(value=False, visible=False) - #using_device = gr.Dropdown(label="推理设备,默认为自动选择", choices=["Auto","cuda","cpu"], value="Auto") - using_device = gr.Dropdown(value='Auto', visible=False) - #refresh = gr.Button("刷新选项") - #loadckpt = gr.Button("加载模型", variant="primary") - #with gr.Row(): - # model_message = gr.Textbox(label="Output Message") - # sid = gr.Dropdown(label="So-VITS说话人", value="speaker0") - sid = gr.Dropdown(value="1056", visible=False) - - #choice_ckpt.change(get_model_info, [choice_ckpt], [model_branch]) - model_branch = Newget_model_info("G_388000.pth") - #config_choice.change(load_json_encoder, [config_choice], [config_info]) - #refresh.click(refresh_options,[],[choice_ckpt,config_choice,cluster_choice,diff_choice,diff_config_choice]) - - gr.Markdown(value=""" - 请稍等片刻,模型加载大约需要10秒。后续操作不需要重新加载模型 - """) - with gr.Tabs(): - with gr.TabItem("单个音频上传"): - vc_input3 = gr.Audio(label="单个音频上传") - with gr.TabItem("批量音频上传"): - vc_batch_files = gr.Files(label="批量音频上传", file_types=["audio"], file_count="multiple") - with gr.TabItem("文字转语音(实验性)"): - gr.Markdown(""" - 文字转语音(TTS)说明:使用edge_tts服务生成音频,并转换为So-VITS模型音色。可以在输入文字中使用标点符号简单控制情绪 - zh-CN-XiaoyiNeural:中文女声 - zh-CN-YunxiNeural: 中文男声 - ja-JP-NanamiNeural:日文女声 - ja-JP-KeitaNeural:日文男声 - zh-CN-liaoning-XiaobeiNeural:东北话女声 - zh-CN-shaanxi-XiaoniNeural: 陕西话女声 - zh-HK-HiuMaanNeural: 粤语女声 - zh-HK-WanLungNeural: 粤语男声 - """) - with gr.Row(): - text_input = gr.Textbox(label = "在此输入需要转译的文字(建议打开自动f0预测)",) - tts_spk = gr.Dropdown(label = "选择原始音频音色(来自微软TTS)", choices=["zh-CN-XiaoyiNeural", "zh-CN-YunxiNeural", "zh-CN-liaoning-XiaobeiNeural", "zh-CN-shaanxi-XiaoniNeural", "zh-HK-HiuMaanNeural", "zh-HK-WanLungNeural", "ja-JP-NanamiNeural", "ja-JP-KeitaNeural"], value = "zh-CN-XiaoyiNeural") - #with gr.Row(): - # tts_rate = gr.Slider(label = "TTS语音变速(倍速)", minimum = 0, maximum = 3, value = 1) - # tts_volume = gr.Slider(label = "TTS语音音量(相对值)", minimum = 0, maximum = 1.5, value = 1) - - with gr.Row(): - auto_f0 = gr.Checkbox(label="自动f0预测,配合聚类模型f0预测效果更好,会导致变调功能失效(仅限转换语音,歌声不要勾选此项会跑调)", value=False) - f0_predictor = gr.Radio(label="f0预测器选择(如遇哑音可以更换f0预测器解决,crepe为原F0使用均值滤波器)", choices=["pm","crepe","harvest","dio"], value="pm") - cr_threshold = gr.Number(label="F0过滤阈值,只有使用crepe时有效. 数值范围从0-1. 
降低该值可减少跑调概率,但会增加哑音", value=0.05) - with gr.Row(): - vc_transform = gr.Number(label="变调(整数,可以正负,半音数量,升高八度就是12)", value=0) - cluster_ratio = gr.Number(label="聚类模型/特征检索混合比例,0-1之间,默认为0不启用聚类或特征检索,能提升音色相似度,但会导致咬字下降", value=0) - k_step = gr.Slider(label="浅扩散步数,只有使用了扩散模型才有效,步数越大越接近扩散模型的结果", value=100, minimum = 1, maximum = 1000) - with gr.Row(): - enhancer_adaptive_key = gr.Number(label="使NSF-HIFIGAN增强器适应更高的音域(单位为半音数)|默认为0", value=0,interactive=True) - slice_db = gr.Number(label="切片阈值", value=-50) - cl_num = gr.Number(label="音频自动切片,0为按默认方式切片,单位为秒/s,爆显存可以设置此处强制切片", value=0) - with gr.Accordion("高级设置(一般不需要动)", open=False): - noise_scale = gr.Number(label="noise_scale 建议不要动,会影响音质,玄学参数", value=0.4) - pad_seconds = gr.Number(label="推理音频pad秒数,由于未知原因开头结尾会有异响,pad一小段静音段后就不会出现", value=0.5) - lg_num = gr.Number(label="两端音频切片的交叉淡入长度,如果自动切片后出现人声不连贯可调整该数值,如果连贯建议采用默认值0,注意,该设置会影响推理速度,单位为秒/s", value=1) - lgr_num = gr.Number(label="自动音频切片后,需要舍弃每段切片的头尾。该参数设置交叉长度保留的比例,范围0-1,左开右闭", value=0.75,interactive=True) - second_encoding = gr.Checkbox(label = "二次编码,浅扩散前会对原始音频进行二次编码,玄学选项,效果时好时差,默认关闭", value=False) - loudness_envelope_adjustment = gr.Number(label="输入源响度包络替换输出响度包络融合比例,越靠近1越使用输出响度包络", value = 0) - use_spk_mix = gr.Checkbox(label="动态声线融合,暂时没做完", value=False, interactive=False) - with gr.Row(): - vc_submit = gr.Button("音频转换", variant="primary") - vc_batch_submit = gr.Button("批量转换", variant="primary") - vc_tts_submit = gr.Button("文本转语音", variant="primary") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - - def Newvc_fn(sid, input_audio, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment, clus2): - global model, loaded - if loaded != clus2: - Newload_model_func("G_388000.pth",clus2,config_choice,enhance,diff_choice,diff_config_choice,only_diffusion,model_branch,using_device) - loaded = clus2 - try: - if input_audio is None: - return "You need to upload an audio", None - if model is None: - return "You need to upload an model", None - sampling_rate, audio = input_audio - temp_path = "temp.wav" - sf.write(temp_path, audio, sampling_rate, format="wav") - output_file_path = vc_infer(sid, audio, temp_path, vc_transform, auto_f0, cluster_ratio, slice_db, noise_scale, pad_seconds, cl_num, lg_num, lgr_num, f0_predictor, enhancer_adaptive_key, cr_threshold, k_step, use_spk_mix, second_encoding, loudness_envelope_adjustment) - os.remove(temp_path) - return "Success", output_file_path - except Exception as e: - if debug: traceback.print_exc() - raise gr.Error(e) - - #loadckpt.click(load_model_func,[choice_ckpt,cluster_choice,config_choice,enhance,diff_choice,diff_config_choice,only_diffusion,model_branch,using_device],[model_message, sid, cl_num]) - vc_submit.click(Newvc_fn, [sid, vc_input3, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold,k_step,use_spk_mix,second_encoding,loudness_envelope_adjustment,cluster_choice], [vc_output1, vc_output2]) - vc_batch_submit.click(vc_batch_fn, [sid, vc_batch_files, vc_transform,auto_f0,cluster_ratio, slice_db, noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold,k_step,use_spk_mix,second_encoding,loudness_envelope_adjustment], [vc_output1]) - vc_tts_submit.click(tts_fn, [text_input, tts_spk, sid, vc_transform,auto_f0,cluster_ratio, slice_db, 
noise_scale,pad_seconds,cl_num,lg_num,lgr_num,f0_predictor,enhancer_adaptive_key,cr_threshold,k_step,use_spk_mix,second_encoding,loudness_envelope_adjustment], [vc_output1, vc_output2]) - ''' - with gr.TabItem("训练"): - gr.Markdown(value="""请将数据集文件夹放置在dataset_raw文件夹下,确认放置正确后点击下方获取数据集名称""") - raw_dirs_list=gr.Textbox(label="Raw dataset directory(s):") - get_raw_dirs=gr.Button("识别数据集", variant="primary") - gr.Markdown(value="""确认数据集正确识别后请选择训练使用的特征编码器和f0预测器,**如果要训练扩散模型,请选择Vec768l12或hubertsoft,并确保So-VITS和扩散模型使用同一个编码器**""") - with gr.Row(): - gr.Markdown(value="""**vec256l9**: ContentVec(256Layer9),旧版本叫v1,So-VITS-SVC 4.0的基础版本,**暂不支持扩散模型** - **vec768l12**: 特征输入更换为ContentVec的第12层Transformer输出,模型理论上会更加还原训练集音色 - **hubertsoft**: So-VITS-SVC 3.0使用的编码器,咬字更为准确,但可能存在多说话人音色泄露问题 - **whisper-ppg**: 来自OpenAI,咬字最为准确,但和Hubertsoft一样存在多说话人音色泄露,且显存占用和训练时间有明显增加。**暂不支持扩散模型** - """) - gr.Markdown(value="""**crepe**: 抗噪能力最强,但预处理速度慢(不过如果你的显卡很强的话速度会很快) - **pm**: 预处理速度快,但抗噪能力较弱 - **dio**: 先前版本预处理默认使用的f0预测器 - **harvest**: 有一定抗噪能力,预处理显存占用友好,速度比较慢 - """) - with gr.Row(): - branch_selection = gr.Radio(label="选择训练使用的编码器", choices=["vec256l9","vec768l12","hubertsoft","whisper-ppg"], value="vec768l12", interactive=True) - f0_predictor_selection = gr.Radio(label="选择训练使用的f0预测器", choices=["crepe","pm","dio","harvest"], value="crepe", interactive=True) - use_diff = gr.Checkbox(label="是否使用浅扩散模型,如要训练浅扩散模型请勾选此项", value=True) - vol_aug=gr.Checkbox(label="是否启用响度嵌入和音量增强,启用后可以根据输入源控制输出响度,但对数据集质量的要求更高。**仅支持vec768l12编码器**", value=False) - with gr.Row(): - skip_loudnorm = gr.Checkbox(label="是否跳过响度匹配,如果你已经用音频处理软件做过响度匹配,请勾选此处") - num_processes = gr.Slider(label="预处理使用的CPU线程数,可以大幅加快预处理速度,但线程数过大容易爆显存,建议12G显存设置为2", minimum=1, maximum=multiprocessing.cpu_count(), value=1, step=1) - with gr.Row(): - raw_preprocess=gr.Button("数据预处理", variant="primary") - regenerate_config_btn=gr.Button("重新生成配置文件", variant="primary") - preprocess_output=gr.Textbox(label="预处理输出信息,完成后请检查一下是否有报错信息,如无则可以进行下一步", max_lines=999) - clear_preprocess_output=gr.Button("清空输出信息") - with gr.Group(): - gr.Markdown(value="""填写训练设置和超参数""") - with gr.Row(): - gr.Textbox(label="当前使用显卡信息", value=gpu_info) - gpu_selection=gr.Textbox(label="多卡用户请指定希望训练使用的显卡ID(0,1,2...)", value=gpus, interactive=True) - with gr.Row(): - log_interval=gr.Textbox(label="每隔多少步(steps)生成一次评估日志", value=sovits_params['log_interval']) - eval_interval=gr.Textbox(label="每隔多少步(steps)验证并保存一次模型", value=sovits_params['eval_interval']) - keep_ckpts=gr.Textbox(label="仅保留最新的X个模型,超出该数字的旧模型会被删除。设置为0则永不删除", value=sovits_params['keep_ckpts']) - with gr.Row(): - batch_size=gr.Textbox(label="批量大小,每步取多少条数据进行训练,大batch有助于训练但显著增加显存占用。6G显存建议设定为4", value=sovits_params['batch_size']) - lr=gr.Textbox(label="学习率,一般不用动,批量大小较大时可以适当增大学习率,但强烈不建议超过0.0002,有炸炉风险", value=sovits_params['learning_rate']) - fp16_run=gr.Checkbox(label="是否使用fp16混合精度训练,fp16训练可能降低显存占用和训练时间,但对模型质量的影响尚未查证", value=sovits_params['fp16_run']) - all_in_mem=gr.Checkbox(label="是否加载所有数据集到内存中,硬盘IO过于低下、同时内存容量远大于数据集体积时可以启用,能显著加快训练速度", value=sovits_params['all_in_mem']) - with gr.Row(): - gr.Markdown("请检查右侧的说话人列表是否和你要训练的目标说话人一致,确认无误后点击写入配置文件,然后就可以开始训练了") - speakers=gr.Textbox(label="说话人列表") - with gr.Accordion(label = "扩散模型配置(训练扩散模型需要写入此处)", open=True): - with gr.Row(): - diff_num_workers = gr.Number(label="num_workers, 如果你的电脑配置较高,可以将这里设置为0加快训练速度", value=diff_params['num_workers']) - diff_cache_all_data = gr.Checkbox(label="是否缓存数据,启用后可以加快训练速度,关闭后可以节省显存或内存,但会减慢训练速度", value=diff_params['cache_all_data']) - diff_cache_device = 
gr.Radio(label="若启用缓存数据,使用显存(cuda)还是内存(cpu)缓存,如果显卡显存充足,选择cuda以加快训练速度", choices=["cuda","cpu"], value=diff_params['cache_device']) - diff_amp_dtype = gr.Radio(label="训练数据类型,fp16可能会有更快的训练速度,前提是你的显卡支持", choices=["fp32","fp16"], value=diff_params['amp_dtype']) - with gr.Row(): - diff_batch_size = gr.Number(label="批量大小(batch_size),根据显卡显存设置,小显存适当降低该项,6G显存可以设定为48,但该数值不要超过数据集总数量的1/4", value=diff_params['diff_batch_size']) - diff_lr = gr.Number(label="学习率(一般不需要动)", value=diff_params['diff_lr']) - diff_interval_log = gr.Number(label="每隔多少步(steps)生成一次评估日志", value = diff_params['diff_interval_log']) - diff_interval_val = gr.Number(label="每隔多少步(steps)验证并保存一次模型,如果你的批量大小较大,可以适当减少这里的数字,但不建议设置为1000以下", value=diff_params['diff_interval_val']) - diff_force_save = gr.Number(label="每隔多少步强制保留模型,只有该步数的倍数保存的模型会被保留,其余会被删除。设置为与验证步数相同的值则每个模型都会被保留", value=diff_params['diff_force_save']) - with gr.Row(): - save_params=gr.Button("将当前设置保存为默认设置", variant="primary") - write_config=gr.Button("写入配置文件", variant="primary") - write_config_output=gr.Textbox(label="输出信息") - - gr.Markdown(value="""**点击从头开始训练**将会自动将已有的训练进度保存到models_backup文件夹,并自动装载预训练模型。 - **继续上一次的训练进度**将从上一个保存模型的进度继续训练。继续训练进度无需重新预处理和写入配置文件。 - 关于扩散、聚类和特征检索的详细说明请看[此处](https://www.yuque.com/umoubuton/ueupp5/kmui02dszo5zrqkz)。 - """) - with gr.Row(): - with gr.Column(): - start_training=gr.Button("从头开始训练", variant="primary") - training_output=gr.Textbox(label="训练输出信息") - with gr.Column(): - continue_training_btn=gr.Button("继续上一次的训练进度", variant="primary") - continue_training_output=gr.Textbox(label="训练输出信息") - with gr.Row(): - with gr.Column(): - diff_training_btn=gr.Button("从头训练扩散模型", variant="primary") - diff_training_output=gr.Textbox(label="训练输出信息") - with gr.Column(): - diff_continue_training_btn=gr.Button("继续训练扩散模型", variant="primary") - diff_continue_training_output=gr.Textbox(label="训练输出信息") - with gr.Accordion(label = "聚类、特征检索训练", open=False): - with gr.Row(): - with gr.Column(): - kmeans_button=gr.Button("训练聚类模型", variant="primary") - kmeans_gpu = gr.Checkbox(label="使用GPU训练", value=True) - kmeans_output=gr.Textbox(label="训练输出信息") - with gr.Column(): - index_button=gr.Button("训练特征检索模型", variant="primary") - index_output=gr.Textbox(label="训练输出信息") - ''' - with gr.TabItem("小工具/实验室特性"): - gr.Markdown(value=""" - ### So-vits-svc 4.1 小工具/实验室特性 - 提供了一些有趣或实用的小工具,可以自行探索 - """) - with gr.Tabs(): - with gr.TabItem("静态声线融合"): - gr.Markdown(value=""" - <font size=2> 介绍:该功能可以将多个声音模型合成为一个声音模型(多个模型参数的凸组合或线性组合),从而制造出现实中不存在的声线 - 注意: - 1.该功能仅支持单说话人的模型 - 2.如果强行使用多说话人模型,需要保证多个模型的说话人数量相同,这样可以混合同一个SpaekerID下的声音 - 3.保证所有待混合模型的config.json中的model字段是相同的 - 4.输出的混合模型可以使用待合成模型的任意一个config.json,但聚类模型将不能使用 - 5.批量上传模型的时候最好把模型放到一个文件夹选中后一起上传 - 6.混合比例调整建议大小在0-100之间,也可以调为其他数字,但在线性组合模式下会出现未知的效果 - 7.混合完毕后,文件将会保存在项目根目录中,文件名为output.pth - 8.凸组合模式会将混合比例执行Softmax使混合比例相加为1,而线性组合模式不会 - </font> - """) - mix_model_path = gr.Files(label="选择需要混合模型文件") - mix_model_upload_button = gr.UploadButton("选择/追加需要混合模型文件", file_count="multiple") - mix_model_output1 = gr.Textbox( - label="混合比例调整,单位/%", - interactive = True - ) - mix_mode = gr.Radio(choices=["凸组合", "线性组合"], label="融合模式",value="凸组合",interactive = True) - mix_submit = gr.Button("声线融合启动", variant="primary") - mix_model_output2 = gr.Textbox( - label="Output Message" - ) - with gr.TabItem("onnx转换"): - gr.Markdown(value=""" - 提供了将.pth模型(批量)转换为.onnx模型的功能 - 源项目本身自带转换的功能,但不支持批量,操作也不够简单,这个工具可以支持在WebUI中以可视化的操作方式批量转换.onnx模型 - 有人可能会问,转.onnx模型有什么作用呢?相信我,如果你问出了这个问题,说明这个工具你应该用不上 - - ### Step 1: - 在整合包根目录下新建一个"checkpoints"文件夹,将pth模型和对应的json配置文件按目录分别放置到checkpoints文件夹下 - 
看起来应该像这样: - checkpoints - ├───xxxx - │ ├───xxxx.pth - │ └───xxxx.json - ├───xxxx - │ ├───xxxx.pth - │ └───xxxx.json - └───…… - """) - pth_dir_msg = gr.Textbox(label="识别待转换模型", placeholder="请将模型和配置文件按上述说明放置在正确位置") - pth_dir_identify_btn = gr.Button("识别", variant="primary") - gr.Markdown(value=""" - ### Step 2: - 识别正确后点击下方开始转换,转换一个模型可能需要一分钟甚至更久 - """) - pth2onnx_btn = gr.Button("开始转换", variant="primary") - pth2onnx_msg = gr.Textbox(label="输出信息") - - with gr.TabItem("智能音频切片"): - gr.Markdown(value=""" - 该工具可以实现对音频的切片,无需调整参数即可完成符合要求的数据集制作。 - 数据集要求的音频切片约在2-15秒内,用传统的Slicer-GUI切片工具需要精准调参和二次切片才能符合要求,该工具省去了上述繁琐的操作,只要上传原始音频即可一键制作数据集。 - """) - with gr.Row(): - raw_audio_path = gr.Textbox(label="原始音频文件夹", placeholder="包含所有待切片音频的文件夹,示例: D:\干声\speakers") - load_raw_audio_btn = gr.Button("加载原始音频", variant = "primary") - load_raw_audio_output = gr.Textbox(label = "输出信息") - raw_audio_dataset = gr.Textbox(label = "音频列表", value = "") - slicer_output_dir = gr.Textbox(label = "输出目录", placeholder = "选择输出目录") - with gr.Row(): - process_method = gr.Radio(label = "对过短音频的处理方式", choices = ["丢弃","将过短音频整合为长音频"], value = "丢弃") - max_sec = gr.Number(label = "切片的最长秒数", value = 15) - min_sec = gr.Number(label = "切片的最短秒数", value = 2) - slicer_btn = gr.Button("开始切片", variant = "primary") - slicer_output_msg = gr.Textbox(label = "输出信息") - - mix_model_path.change(updata_mix_info,[mix_model_path],[mix_model_output1]) - mix_model_upload_button.upload(upload_mix_append_file, [mix_model_upload_button,mix_model_path], [mix_model_path,mix_model_output1]) - mix_submit.click(mix_submit_click, [mix_model_output1,mix_mode], [mix_model_output2]) - pth_dir_identify_btn.click(pth_identify, [], [pth_dir_msg]) - pth2onnx_btn.click(onnx_export, [], [pth2onnx_msg]) - load_raw_audio_btn.click(load_raw_audio, [raw_audio_path], [load_raw_audio_output, raw_audio_dataset]) - slicer_btn.click(slicer_fn, [raw_audio_path, slicer_output_dir, process_method, max_sec, min_sec], [slicer_output_msg]) - - with gr.TabItem("模型压缩工具"): - gr.Markdown(value=""" - 该工具可以实现对模型的体积压缩,在**不影响模型推理功能**的情况下,将原本约600M的So-VITS模型压缩至约200M, 大大减少了硬盘的压力。 - **注意:压缩后的模型将无法继续训练,请在确认封炉后再压缩。** - 将模型文件放置在logs/44k下,然后选择需要压缩的模型 - """) - model_to_compress = gr.Dropdown(label="模型选择", choices=ckpt_list, value="") - compress_model_btn = gr.Button("压缩模型", variant="primary") - compress_model_output = gr.Textbox(label="输出信息", value="") - - compress_model_btn.click(model_compression, [model_to_compress], [compress_model_output]) - """ - get_raw_dirs.click(load_raw_dirs,[],[raw_dirs_list]) - raw_preprocess.click(dataset_preprocess,[branch_selection, f0_predictor_selection, use_diff, vol_aug, skip_loudnorm, num_processes],[preprocess_output, speakers]) - regenerate_config_btn.click(regenerate_config,[branch_selection, vol_aug],[preprocess_output]) - clear_preprocess_output.click(clear_output,[],[preprocess_output]) - save_params.click(save_default_settings, [log_interval,eval_interval,keep_ckpts,batch_size,lr,fp16_run,all_in_mem,diff_num_workers,diff_cache_all_data,diff_cache_device,diff_amp_dtype,diff_batch_size,diff_lr,diff_interval_log,diff_interval_val,diff_force_save], [write_config_output]) - write_config.click(config_fn,[log_interval, eval_interval, keep_ckpts, batch_size, lr, fp16_run, all_in_mem, diff_num_workers, diff_cache_all_data, diff_batch_size, diff_lr, diff_interval_log, diff_interval_val, diff_cache_device, diff_amp_dtype, diff_force_save],[write_config_output]) - start_training.click(training,[gpu_selection, branch_selection],[training_output]) - 
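    # Note: these event bindings, like the training TabItem above (wrapped in '''), sit inside a
    # bare string literal rather than real comments, so the whole training UI is left unwired in
    # this build of the Space.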
diff_training_btn.click(diff_training,[branch_selection],[diff_training_output]) - continue_training_btn.click(continue_training,[gpu_selection, branch_selection],[continue_training_output]) - diff_continue_training_btn.click(diff_continue_training,[branch_selection],[diff_continue_training_output]) - kmeans_button.click(kmeans_training,[kmeans_gpu],[kmeans_output]) - index_button.click(index_training, [], [index_output]) - """ - with gr.Tabs(): - with gr.Row(variant="panel"): - with gr.Column(): - gr.Markdown(value=""" - <font size=2> WebUI设置</font> - """) - debug_button = gr.Checkbox(label="Debug模式,反馈BUG需要打开,打开后控制台可以显示具体错误提示", value=debug) - - debug_button.change(debug_change,[],[]) - - app.queue(concurrency_count=1022, max_size=2044).launch() diff --git a/spaces/yoon-gu/pokemon-quiz/utils.py b/spaces/yoon-gu/pokemon-quiz/utils.py deleted file mode 100644 index 3a5f7267f757f8d81e885e3552e84d34daef6427..0000000000000000000000000000000000000000 --- a/spaces/yoon-gu/pokemon-quiz/utils.py +++ /dev/null @@ -1,24 +0,0 @@ -from pydantic import BaseModel -from typing import Optional - -initial_info = {"done" : True, - "score": 0, "count": 0, - "best_score": 0, "best_time": float("inf"), - "time": 0.0, "comment": "", - "history": [], - "candidates": ['1번', '2번', '3번', '4번']} - -class Info(BaseModel): - done: bool = True - score: int = 0 - count: int = 0 - answer: Optional[str] = None - best_score: int = 0 - best_time: float = float("inf") - time: float = 0.0 - comment: str = "" - history: list = [] - candidates: list = ['1번', '2번', '3번', '4번'] - name: str = None - generations: list = [] - types: list = [] diff --git a/spaces/ysharma/GPT-JT-copy/README.md b/spaces/ysharma/GPT-JT-copy/README.md deleted file mode 100644 index 83d602e1cc0f6a6aca2cdd116056e3913c822dba..0000000000000000000000000000000000000000 --- a/spaces/ysharma/GPT-JT-copy/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: GPT-JT -emoji: 🚀 -colorFrom: blue -colorTo: pink -sdk: streamlit -sdk_version: 1.10.0 -app_file: app.py -pinned: false -duplicated_from: togethercomputer/GPT-JT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ysheng/SSN-Soft-Shadow-Network-for-Image-Composition/models/SSN_v1.py b/spaces/ysheng/SSN-Soft-Shadow-Network-for-Image-Composition/models/SSN_v1.py deleted file mode 100644 index 33dd8fb117e250161ac8daf52690be81b20d516b..0000000000000000000000000000000000000000 --- a/spaces/ysheng/SSN-Soft-Shadow-Network-for-Image-Composition/models/SSN_v1.py +++ /dev/null @@ -1,290 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from torchvision import transforms -import numpy as np - -def get_activation(activation_func): - act_func = { - "relu":nn.ReLU(), - "sigmoid":nn.Sigmoid(), - "prelu":nn.PReLU(num_parameters=1), - "leaky_relu": nn.LeakyReLU(negative_slope=0.2, inplace=False), - "gelu":nn.GELU() - } - - if activation_func is None: - return nn.Identity() - - if activation_func not in act_func.keys(): - raise ValueError("activation function({}) is not found".format(activation_func)) - - activation = act_func[activation_func] - return activation - - -def get_layer_info(out_channels, activation_func='relu'): - #act_func = {"relu":nn.ReLU(), "sigmoid":nn.Sigmoid(), "prelu":nn.PReLU(num_parameters=out_channels)} - - # norm_layer = nn.BatchNorm2d(out_channels, momentum=0.9) - if out_channels >= 32: - groups = 32 - else: - groups = 1 - - norm_layer = nn.GroupNorm(groups, out_channels) - activation = 
get_activation(activation_func) - return norm_layer, activation - - -class Conv(nn.Module): - """ (convolution => [BN] => ReLU) """ - def __init__(self, - in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1, - bias=True, - activation='leaky', - style=False, - resnet=True): - super().__init__() - - self.style = style - norm_layer, act_func = get_layer_info(in_channels, activation) - - if resnet and in_channels == out_channels: - self.resnet = True - else: - self.resnet = False - - if style: - self.styleconv = Conv2DMod(in_channels, out_channels, kernel_size) - self.relu = nn.LeakyReLU(0.2, inplace=True) - else: - self.norm = norm_layer - self.conv = nn.Conv2d(in_channels, out_channels, stride=stride, kernel_size=kernel_size, padding=padding, bias=bias) - self.act = act_func - - def forward(self, x, style_fea=None): - if self.style: - res = self.styleconv(x, style_fea) - res = self.relu(res) - else: - h = self.conv(self.act(self.norm(x))) - if self.resnet: - res = h + x - else: - res = h - - return res - - -class Conv2DMod(nn.Module): - def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps=1e-8, **kwargs): - super().__init__() - self.filters = out_chan - self.demod = demod - self.kernel = kernel - self.stride = stride - self.dilation = dilation - self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel))) - self.eps = eps - nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu') - - def _get_same_padding(self, size, kernel, dilation, stride): - return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2 - - def forward(self, x, y): - b, c, h, w = x.shape - - w1 = y[:, None, :, None, None] - w2 = self.weight[None, :, :, :, :] - weights = w2 * (w1 + 1) - - if self.demod: - d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps) - weights = weights * d - - x = x.reshape(1, -1, h, w) - - _, _, *ws = weights.shape - weights = weights.reshape(b * self.filters, *ws) - - padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride) - x = F.conv2d(x, weights, padding=padding, groups=b) - - x = x.reshape(-1, self.filters, h, w) - return x - - -class Up(nn.Module): - """ Upscaling then conv """ - - def __init__(self, in_channels, out_channels, activation='relu', resnet=True): - super().__init__() - self.up_layer = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) - self.up = Conv(in_channels, out_channels, activation=activation, resnet=resnet) - - def forward(self, x): - x = self.up_layer(x) - return self.up(x) - - - -class DConv(nn.Module): - """ Double Conv Layer - """ - def __init__(self, in_channels, out_channels, activation='relu', resnet=True): - super().__init__() - - self.conv1 = Conv(in_channels, out_channels, activation=activation, resnet=resnet) - self.conv2 = Conv(out_channels, out_channels, activation=activation, resnet=resnet) - - def forward(self, x): - return self.conv2(self.conv1(x)) - - -class Encoder(nn.Module): - def __init__(self, in_channels=3, mid_act='leaky', resnet=True): - super(Encoder, self).__init__() - self.in_conv = Conv(in_channels, 32-in_channels, stride=1, activation=mid_act, resnet=resnet) - self.down_32_64 = Conv(32, 64, stride=2, activation=mid_act, resnet=resnet) - self.down_64_64_1 = Conv(64, 64, activation=mid_act, resnet=resnet) - self.down_64_128 = Conv(64, 128, stride=2, activation=mid_act, resnet=resnet) - self.down_128_128_1 = Conv(128, 128, activation=mid_act, resnet=resnet) - self.down_128_256 = Conv(128, 256, 
stride=2, activation=mid_act, resnet=resnet) - self.down_256_256_1 = Conv(256, 256, activation=mid_act, resnet=resnet) - self.down_256_512 = Conv(256, 512, stride=2, activation=mid_act, resnet=resnet) - self.down_512_512_1 = Conv(512, 512, activation=mid_act, resnet=resnet) - self.down_512_512_2 = Conv(512, 512, activation=mid_act, resnet=resnet) - self.down_512_512_3 = Conv(512, 512, activation=mid_act, resnet=resnet) - - - def forward(self, x): - x1 = self.in_conv(x) # 32 x 256 x 256 - x1 = torch.cat((x, x1), dim=1) - - x2 = self.down_32_64(x1) - x3 = self.down_64_64_1(x2) - - x4 = self.down_64_128(x3) - x5 = self.down_128_128_1(x4) - - x6 = self.down_128_256(x5) - x7 = self.down_256_256_1(x6) - - x8 = self.down_256_512(x7) - x9 = self.down_512_512_1(x8) - x10 = self.down_512_512_2(x9) - x11 = self.down_512_512_3(x10) - - return x11, x10, x9, x8, x7, x6, x5, x4, x3, x2, x1 - - -class Decoder(nn.Module): - def __init__(self, - out_channels=3, - mid_act='relu', - out_act='sigmoid', - resnet = True): - - super(Decoder, self).__init__() - - input_channel = 512 - fea_dim = 100 - - self.to_style1 = nn.Linear(in_features=fea_dim, out_features=input_channel) - - self.up_16_16_1 = Conv(input_channel, 256, activation=mid_act, resnet=resnet) - self.up_16_16_2 = Conv(768, 512, activation=mid_act, resnet=resnet) - self.up_16_16_3 = Conv(1024, 512, activation=mid_act, resnet=resnet) - - self.up_16_32 = Up(1024, 256, activation=mid_act, resnet=resnet) - self.up_32_32_1 = Conv(512, 256, activation=mid_act, resnet=resnet) - - self.up_32_64 = Up(512, 128, activation=mid_act, resnet=resnet) - self.up_64_64_1 = Conv(256, 128, activation=mid_act, resnet=resnet) - - self.up_64_128 = Up(256, 64, activation=mid_act, resnet=resnet) - self.up_128_128_1 = Conv(128, 64, activation=mid_act, resnet=resnet) - - self.up_128_256 = Up(128, 32, activation=mid_act, resnet=resnet) - self.out_conv = Conv(64, out_channels, activation=mid_act) - - self.out_act = get_activation(out_act) - - - def forward(self, x): - x11, x10, x9, x8, x7, x6, x5, x4, x3, x2, x1 = x - - y = self.up_16_16_1(x11) - - y = torch.cat((x10, y), dim=1) - y = self.up_16_16_2(y) - - y = torch.cat((x9, y), dim=1) - y = self.up_16_16_3(y) - - y = torch.cat((x8, y), dim=1) - y = self.up_16_32(y) - - y = torch.cat((x7, y), dim=1) - y = self.up_32_32_1(y) - - y = torch.cat((x6, y), dim=1) - y = self.up_32_64(y) - - y = torch.cat((x5, y), dim=1) - y = self.up_64_64_1(y) # 128 x 64 x 64 - - y = torch.cat((x4, y), dim=1) - y = self.up_64_128(y) - - y = torch.cat((x3, y), dim=1) - y = self.up_128_128_1(y) # 64 x 128 x 128 - - y = torch.cat((x2, y), dim=1) - y = self.up_128_256(y) # 32 x 256 x 256 - - y = torch.cat((x1, y), dim=1) - y = self.out_conv(y) # 3 x 256 x 256 - y = self.out_act(y) - - return y - - -class SSN_v1(nn.Module): - """ Implementation of Relighting Net """ - - def __init__(self, - in_channels=3, - out_channels=3, - mid_act='leaky', - out_act='sigmoid', - resnet=True): - super(SSN_v1, self).__init__() - self.encoder = Encoder(in_channels, mid_act=mid_act, resnet=resnet) - self.decoder = Decoder(out_channels, mid_act=mid_act, out_act=out_act, resnet=resnet) - - - def forward(self, x, softness): - """ - Input is (source image, target light, source light, ) - Output is: predicted new image, predicted source light, self-supervision image - """ - latent = self.encoder(x) - pred = self.decoder(latent) - - return pred - - -if __name__ == '__main__': - test_input = torch.randn(5, 1, 256, 256) - style = torch.randn(5, 100) - - model = SSN_v1(1, 1, 
mid_act='gelu', out_act='gelu', resnet=True) - test_out = model(test_input, style) - - print('Ouptut shape: ', test_out.shape) diff --git a/spaces/yuan1615/EmpathyTTS/huggingface.py b/spaces/yuan1615/EmpathyTTS/huggingface.py deleted file mode 100644 index b58df7e7673988ed48655862753191ece6f45ad1..0000000000000000000000000000000000000000 --- a/spaces/yuan1615/EmpathyTTS/huggingface.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -from models import SynthesizerTrn -from text.symbols import symbols -import utils -import ttsfrd -import gradio as gr -import tempfile -from scipy.io import wavfile -import numpy as np -from synthesize_fastapi import * -config = "./configs/baker_base.json" - -print("---------- Loading VITS Model ----------") -hps = utils.get_hparams_from_file(config) -net_g = SynthesizerTrn( - len(symbols), - hps.data.filter_length // 2 + 1, - hps.train.segment_size // hps.data.hop_length, - **hps.model).cuda() -_ = net_g.eval() - -_ = utils.load_checkpoint("./ckpt/pretrained_baker.pth", net_g, None) - -lexicon_mandarin = read_lexicon("./lexicon/pinyin-lexicon-r.txt") - -# 解压文件 -import zipfile -f = zipfile.ZipFile("./resource.zip", 'r') # 压缩文件位置 -for file in f.namelist(): - f.extract(file, "./") # 解压位置 -f.close() - -frontend = ttsfrd.TtsFrontendEngine() -model_dir = './resource' -frontend.initialize(model_dir) -frontend.set_lang_type('zhcn') - - -def tts(text): - audio_all = np.zeros(1, dtype=np.int16) # 设置初始音频 - pinyin_list, prosody_list = g2p_mandarin(frontend, text) - for texts, phone_prosody in zip(pinyin_list, prosody_list): - print(texts) - print(phone_prosody) - stn_tst = get_text(texts, hps) - prosody = get_prosody(phone_prosody, hps) - with torch.no_grad(): - x_tst = stn_tst.cuda().unsqueeze(0) - prosody = prosody.cuda().unsqueeze(0) - x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).cuda() - audio = net_g.infer(x_tst, x_tst_lengths, prosody, noise_scale=.667, noise_scale_w=0.0, length_scale=1)[0][ - 0, 0].data.cpu().float().numpy() * 32767.0 - i = np.random.uniform(0.12, 0.35, 1)[0] - space_time = np.zeros(int(i * 22050), dtype=np.int16) - audio_all = np.concatenate((audio_all, audio, space_time)) - with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: - wavfile.write( - fp.name, - 22050, - audio_all.astype(np.int16), - ) - return fp.name - - -if __name__ == '__main__': - inputs = [gr.Textbox(label="Input", value='祝大家中秋节快乐', max_lines=3)] - outputs = gr.Audio(label="Output") - interface = gr.Interface(fn=tts, inputs=inputs, outputs=outputs, - examples=['目前申请好人贷支持身份证原件实时拍摄或上传相册照片两种方式,但复印件及临时身份证是不可以的哟。', - '国务院银行业监督管理机构会按照国家法律法规规定的程序处理。', - '我说点什么好呢?念一个绕口令吧。八百标兵奔北坡,炮兵并排北边跑,炮兵怕把标兵碰,标兵怕碰炮兵炮。八百标兵奔北坡,北坡八百炮兵炮,标兵怕碰炮兵炮,炮兵怕把标兵碰。八了百了标了兵了奔了北了坡,炮了兵了并了排了北了边了跑,炮了兵了怕了把了标了兵了碰,标了兵了怕了碰了炮了兵了炮。'], - title='Empathy-TTS') - interface.launch(server_name='0.0.0.0') diff --git a/spaces/zhan66/vits-simple-api/vits-simple-api-installer-latest.sh b/spaces/zhan66/vits-simple-api/vits-simple-api-installer-latest.sh deleted file mode 100644 index aca5d6acd97aa10157f3f86049feece616ce91aa..0000000000000000000000000000000000000000 --- a/spaces/zhan66/vits-simple-api/vits-simple-api-installer-latest.sh +++ /dev/null @@ -1,258 +0,0 @@ -INSTALL_DIR=/usr/local/vits-simple-api - -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[0;33m' -PLAIN='\033[0m' - -declare -A EN_MESSAGES -declare -A ZH_MESSAGES - -EN_MESSAGES=( - ["ATTEMPT_DOWNLOAD"]="Attempting to download" - ["FROM"]="from" - ["DOWNLOAD_FAIL"]="Failed to download" - ["FROM_ALL_URLS"]="from all provided URLs." 
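    # The same keys are mirrored in ZH_MESSAGES below; after the language prompt, one of the two
    # tables is copied into MESSAGES so the rest of the script can stay language-agnostic, e.g.:
    #   echo -e "${MESSAGES["DOWNLOADING"]}"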
- ["DOWNLOADING"]="Downloading..." - ["VERIFYING"]="Verifying..." - ["UNZIPPING"]="Unzipping..." - ["CHOOSE_VERSION"]="Which version of docker-compose.yaml do you want to download?" - ["DOCKER_CPU"]="docker-compose.yaml (CPU version)" - ["DOCKER_GPU"]="docker-compose-gpu.yaml (GPU version)" - ["ENTER_CHOICE"]="Enter your choice (1 or 2): " - ["INVALID_CHOICE"]="Invalid choice. Please enter 1 or 2." - ["DOWNLOAD_CONFIG"]="Downloading configuration file shortly..." - ["PULL_IMAGE"]="Do you want to start pulling the image? Enter 1 for yes or 2 for no" - ["DOWNLOAD_DICT"]="Do you want to download the pyopenjtalk dictionary file? Enter 1 for yes or 2 for no" - ["MUST_DOWNLOAD_JP"]="Japanese model must be downloaded." - ["DOWNLOAD_VITS_CHINESE"]="Do you want to download the bert model for vits_chinese? Enter 1 for yes, 2 for no." - ["MUST_DOWNLOAD_VITS_CHINESE"]="Using vits_chinese requires downloading these models, which will take up about 410MB." - ["DOWNLOAD_BERT_VITS2"]="Do you want to download chinese-roberta-wwm-ext-large? Enter 1 for yes or 2 for no" - ["MUST_DOWNLOAD_BERT_VITS2"]="To use Bert-VITS2, you must download these models, which will take up about 3.64GB." - ["DOWNLOADED"]="File is downloaded correctly." - ["CORRUPTED"]="File is corrupted or incomplete." - ["INSTALL_COMPLETE"]="The upgrade or installation has been completed." - ["CONFIG_DIR"]="The configuration file directory is" - ["IMPORT_NOTICE"]="If the vits model is not imported, it cannot be used. Import the model in the configuration file directory." - ["RESTART_NOTICE"]="After modifying the configuration file, restart the docker container for the modification to take effect." - ["ISSUE_NOTICE"]="If you have any questions, please put them in the issues." - ["GITHUB_LINK"]="https://github.com/Artrajz/vits-simple-api" -) - -ZH_MESSAGES=( - ["ATTEMPT_DOWNLOAD"]="正在尝试下载" - ["FROM"]="从" - ["DOWNLOAD_FAIL"]="都下载失败" - ["FROM_ALL_URLS"]="从所有提供的URLs" - ["DOWNLOADING"]="正在下载..." - ["VERIFYING"]="正在校验" - ["UNZIPPING"]="正在解压..." - ["CHOOSE_VERSION"]="你想下载哪个版本的docker-compose.yaml?" - ["DOCKER_CPU"]="docker-compose.yaml (CPU版本)" - ["DOCKER_GPU"]="docker-compose-gpu.yaml (GPU版本)" - ["ENTER_CHOICE"]="请输入您的选择 (1 或 2): " - ["INVALID_CHOICE"]="无效选择。 请重新输入 1 或 2。" - ["DOWNLOAD_CONFIG"]="即将下载配置文件..." - ["PULL_IMAGE"]="是否要开始拉取镜像?输入1表示是,2表示否。" - ["DOWNLOAD_DICT"]="是否要下载pyopenjtalk的词典文件?输入1表示是,2表示否。" - ["MUST_DOWNLOAD_JP"]="使用日语模型必须下载该词典文件,将占用大约102MB。" - ["DOWNLOAD_VITS_CHINESE"]="是否要下载vits_chinese的bert模型?输入1表示是,2表示否。" - ["MUST_DOWNLOAD_VITS_CHINESE"]="使用vits_chinese必须下载这些模型,将占用大约410MB。" - ["DOWNLOAD_BERT_VITS2"]="是否要下载chinese-roberta-wwm-ext-large?输入1表示是,2表示否。" - ["MUST_DOWNLOAD_BERT_VITS2"]="使用Bert-VITS2必须下载这些模型,将占用大约3.64GB。" - ["DOWNLOADED"]="文件已正确下载。" - ["CORRUPTED"]="文件已损坏或不完整。" - ["INSTALL_COMPLETE"]="更新或安装已完成。" - ["CONFIG_DIR"]="配置文件目录是" - ["IMPORT_NOTICE"]="如果vits模型没有被导入,它是无法使用的。请在配置文件目录中导入模型。" - ["RESTART_NOTICE"]="修改配置文件后,请重启docker容器以使修改生效。" - ["ISSUE_NOTICE"]="如果你有任何问题,请在issues中提出,或者加入q群提问。" - ["GITHUB_LINK"]="https://github.com/Artrajz/vits-simple-api" -) - -echo -e "${PLAIN}${GREEN}Choose a language/选择语言: ${PLAIN}" -echo "1. English" -echo "2. 
中文" -read -p "Enter your choice (1 or 2): " choice_language - -declare -A MESSAGES -if [ "$choice_language" -eq 1 ]; then - for key in "${!EN_MESSAGES[@]}"; do - MESSAGES["$key"]="${EN_MESSAGES[$key]}" - done -else - for key in "${!ZH_MESSAGES[@]}"; do - MESSAGES["$key"]="${ZH_MESSAGES[$key]}" - done -fi - -mkdir -p $INSTALL_DIR -cd $INSTALL_DIR - -download_with_fallback() { - local filename=$1 - shift # Shift arguments to the left to handle URLs - - local success=0 - local url - for url in "$@"; do - echo -e "${YELLOW}${MESSAGES["ATTEMPT_DOWNLOAD"]} $filename ${MESSAGES["FROM"]} $url\n${PLAIN}" - if wget -O "$INSTALL_DIR/$filename" "$url"; then - success=1 - break - fi - done - - if [ "$success" -ne 1 ]; then - echo -e "${RED} $filename ${MESSAGES["FROM_ALL_URLS"]} ${MESSAGES["DOWNLOAD_FAIL"]}${PLAIN}" - exit 1 - fi -} - -version_gt() { - test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1" -} - -while true; do - echo -e "${GREEN}${MESSAGES["CHOOSE_VERSION"]}${PLAIN}" - echo -e "1. ${MESSAGES["DOCKER_CPU"]}" - echo -e "2. ${MESSAGES["DOCKER_GPU"]}" - read -p "${MESSAGES["ENTER_CHOICE"]}" choice_gpu - case $choice_gpu in - 1) - echo -e "${MESSAGES["DOWNLOADING"]}" - download_with_fallback docker-compose.yaml \ - "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose.yaml" \ - "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose.yaml" - break - ;; - 2) - echo -e "${MESSAGES["DOWNLOADING"]}" - download_with_fallback docker-compose.yaml \ - "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose-gpu.yaml" \ - "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/docker-compose-gpu.yaml" - break - ;; - *) - echo -e "${RED}${MESSAGES["INVALID_CHOICE"]}${PLAIN}" - ;; - esac -done - -if [ "$choice_gpu" -eq 2 ]; then - DOCKER_VERSION=$(docker version --format '{{.Server.Version}}') - MIN_DOCKER_VERSION="19.03" - - if version_gt $MIN_DOCKER_VERSION $DOCKER_VERSION; then - echo -e "${RED}Your Docker version ($DOCKER_VERSION) does not support GPU. You need at least version $MIN_DOCKER_VERSION.${PLAIN}" - exit 1 - fi -fi - -if ! command -v docker-compose &>/dev/null; then - echo -e "${RED}docker-compose could not be found.${PLAIN}" - exit 1 -fi - -echo -e "${GREEN}${MESSAGES["PULL_IMAGE"]}${PLAIN}" -read -p "${MESSAGES["ENTER_CHOICE"]}" choice_pull - -if [ "$choice_pull" -eq 1 ]; then - docker compose pull - docker compose up -d -fi - -echo -e "${YELLOW}${MESSAGES["DOWNLOAD_CONFIG"]}${PLAIN}" - -if [ ! -f config.py ]; then - download_with_fallback config.py \ - "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py" \ - "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py" -fi - -if [ ! 
-f gunicorn_config.py ]; then - download_with_fallback gunicorn_config.py \ - "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py" \ - "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py" -fi - -download_with_fallback config.example.py \ - "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py" \ - "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/config.py" - -download_with_fallback gunicorn_config.example.py \ - "https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py" \ - "https://ghproxy.com/https://raw.githubusercontent.com/Artrajz/vits-simple-api/main/gunicorn_config.py" - -echo -e "${GREEN}${MESSAGES["DOWNLOAD_DICT"]}${PLAIN}" -echo -e "${GREEN}${MESSAGES["MUST_DOWNLOAD_JP"]}${PLAIN}" -read -p "${MESSAGES["ENTER_CHOICE"]}" choice_download_pyopenjtalk - -if [ "$choice_download_pyopenjtalk" -eq 1 ]; then - mkdir -p pyopenjtalk - echo -e "${MESSAGES["DOWNLOADING"]}" - download_with_fallback open_jtalk_dic_utf_8-1.11.tar.gz \ - "https://github.com/r9y9/open_jtalk/releases/download/v1.11.1/open_jtalk_dic_utf_8-1.11.tar.gz" \ - "https://ghproxy.com/https://github.com/r9y9/open_jtalk/releases/download/v1.11.1/open_jtalk_dic_utf_8-1.11.tar.gz" - echo -e "${MESSAGES["UNZIPPING"]}" - tar -xzvf open_jtalk_dic_utf_8-1.11.tar.gz -C pyopenjtalk/ - rm open_jtalk_dic_utf_8-1.11.tar.gz -fi - -echo -e "${GREEN}${MESSAGES["DOWNLOAD_VITS_CHINESE"]}${PLAIN}" -echo -e "${GREEN}${MESSAGES["MUST_DOWNLOAD_VITS_CHINESE"]}${PLAIN}" -read -p "${MESSAGES["ENTER_CHOICE"]}" choice_download_vits_chinese - -if [ "$choice_download_vits_chinese" -eq 1 ]; then - mkdir -p vits/bert - - EXPECTED_MD5="dea78034433141adc8002404aa1b3184" - FILE_PATH="vits/bert/prosody_model.pt" - echo -e "${MESSAGES["VERIFYING"]}$FILE_PATH" - ACTUAL_MD5=$(md5sum $FILE_PATH | awk '{print $1}') - - if [ "$EXPECTED_MD5" == "$ACTUAL_MD5" ]; then - echo "${MESSAGES["DOWNLOADED"]}" - else - echo "${MESSAGES["CORRUPTED"]}" - download_with_fallback vits/bert/prosody_model.pt \ - "https://huggingface.co/spaces/maxmax20160403/vits_chinese/resolve/main/bert/prosody_model.pt" - fi - -fi - -echo -e "${GREEN}${MESSAGES["DOWNLOAD_BERT_VITS2"]}${PLAIN}" -echo -e "${GREEN}${MESSAGES["MUST_DOWNLOAD_BERT_VITS2"]}${PLAIN}" -read -p "${MESSAGES["ENTER_CHOICE"]}" choice_download_bert_vits2 - -if [ "$choice_download_bert_vits2" -eq 1 ]; then - mkdir -p bert_vits2/bert/chinese-roberta-wwm-ext-large - - EXPECTED_MD5="15d7435868fef1bd4222ff7820149a2a" - FILE_PATH="bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin" - echo -e "${MESSAGES["VERIFYING"]}$FILE_PATH" - ACTUAL_MD5=$(md5sum $FILE_PATH | awk '{print $1}') - - if [ "$EXPECTED_MD5" == "$ACTUAL_MD5" ]; then - echo "${MESSAGES["DOWNLOADED"]}" - else - echo ${MESSAGES["CORRUPTED"]} - download_with_fallback bert_vits2/bert/chinese-roberta-wwm-ext-large/pytorch_model.bin \ - "https://huggingface.co/hfl/chinese-roberta-wwm-ext-large/resolve/main/pytorch_model.bin" - fi - -fi - -if [ "$choice_gpu" -eq 2 ]; then - if ! 
docker run --gpus all artrajz/vits-simple-api:latest-gpu nvidia-smi &>/dev/null; then - echo -e "${RED}Your Docker does not seem to support GPU or NVIDIA Docker is not installed properly.${PLAIN}" - exit 1 - fi -fi - -echo -e "\n${MESSAGES["INSTALL_COMPLETE"]}" -echo -e "${MESSAGES["CONFIG_DIR"]} $(realpath $INSTALL_DIR)" -echo -e "${YELLOW}${MESSAGES["IMPORT_NOTICE"]}${PLAIN}" -echo -e "${YELLOW}${MESSAGES["RESTART_NOTICE"]}${PLAIN}" -echo -e "${MESSAGES["ISSUE_NOTICE"]}" -echo -e "${MESSAGES["GITHUB_LINK"]}" diff --git a/spaces/zhaoys/wfms-kuiwenc/src/components/theme-toggle.tsx b/spaces/zhaoys/wfms-kuiwenc/src/components/theme-toggle.tsx deleted file mode 100644 index 67d3f1a2c163ccbeb52c40a7e42f107190237154..0000000000000000000000000000000000000000 --- a/spaces/zhaoys/wfms-kuiwenc/src/components/theme-toggle.tsx +++ /dev/null @@ -1,31 +0,0 @@ -'use client' - -import * as React from 'react' -import { useTheme } from 'next-themes' - -import { Button } from '@/components/ui/button' -import { IconMoon, IconSun } from '@/components/ui/icons' - -export function ThemeToggle() { - const { setTheme, theme } = useTheme() - const [_, startTransition] = React.useTransition() - - return ( - <Button - variant="ghost" - size="icon" - onClick={() => { - startTransition(() => { - setTheme(theme === 'light' ? 'dark' : 'light') - }) - }} - > - {!theme ? null : theme === 'dark' ? ( - <IconMoon className="transition-all" /> - ) : ( - <IconSun className="transition-all" /> - )} - <span className="sr-only">Toggle theme</span> - </Button> - ) -} diff --git a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/alert-dialog.tsx b/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/alert-dialog.tsx deleted file mode 100644 index 17fec4d16510328deacc1416569173c97761ef72..0000000000000000000000000000000000000000 --- a/spaces/zhoujiaxin/zhoujiaxinchatgpt/src/components/ui/alert-dialog.tsx +++ /dev/null @@ -1,150 +0,0 @@ -'use client' - -import * as React from 'react' -import * as AlertDialogPrimitive from '@radix-ui/react-alert-dialog' - -import { cn } from '@/lib/utils' -import { buttonVariants } from '@/components/ui/button' - -const AlertDialog = AlertDialogPrimitive.Root - -const AlertDialogTrigger = AlertDialogPrimitive.Trigger - -const AlertDialogPortal = ({ - className, - children, - ...props -}: AlertDialogPrimitive.AlertDialogPortalProps) => ( - <AlertDialogPrimitive.Portal className={cn(className)} {...props}> - <div className="fixed inset-0 z-50 flex items-end justify-center sm:items-center"> - {children} - </div> - </AlertDialogPrimitive.Portal> -) -AlertDialogPortal.displayName = AlertDialogPrimitive.Portal.displayName - -const AlertDialogOverlay = React.forwardRef< - React.ElementRef<typeof AlertDialogPrimitive.Overlay>, - React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Overlay> ->(({ className, children, ...props }, ref) => ( - <AlertDialogPrimitive.Overlay - className={cn( - 'fixed inset-0 z-50 bg-background/80 backdrop-blur-sm transition-opacity animate-in fade-in', - className - )} - {...props} - ref={ref} - /> -)) -AlertDialogOverlay.displayName = AlertDialogPrimitive.Overlay.displayName - -const AlertDialogContent = React.forwardRef< - React.ElementRef<typeof AlertDialogPrimitive.Content>, - React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Content> ->(({ className, ...props }, ref) => ( - <AlertDialogPortal> - <AlertDialogOverlay /> - <AlertDialogPrimitive.Content - ref={ref} - className={cn( - 'fixed z-50 grid w-full max-w-lg scale-100 gap-4 border bg-background p-6 
opacity-100 shadow-lg animate-in fade-in-90 slide-in-from-bottom-10 sm:rounded-lg sm:zoom-in-90 sm:slide-in-from-bottom-0 md:w-full', - className - )} - {...props} - /> - </AlertDialogPortal> -)) -AlertDialogContent.displayName = AlertDialogPrimitive.Content.displayName - -const AlertDialogHeader = ({ - className, - ...props -}: React.HTMLAttributes<HTMLDivElement>) => ( - <div - className={cn( - 'flex flex-col space-y-2 text-center sm:text-left', - className - )} - {...props} - /> -) -AlertDialogHeader.displayName = 'AlertDialogHeader' - -const AlertDialogFooter = ({ - className, - ...props -}: React.HTMLAttributes<HTMLDivElement>) => ( - <div - className={cn( - 'flex flex-col-reverse sm:flex-row sm:justify-end sm:space-x-2', - className - )} - {...props} - /> -) -AlertDialogFooter.displayName = 'AlertDialogFooter' - -const AlertDialogTitle = React.forwardRef< - React.ElementRef<typeof AlertDialogPrimitive.Title>, - React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Title> ->(({ className, ...props }, ref) => ( - <AlertDialogPrimitive.Title - ref={ref} - className={cn('text-lg font-semibold', className)} - {...props} - /> -)) -AlertDialogTitle.displayName = AlertDialogPrimitive.Title.displayName - -const AlertDialogDescription = React.forwardRef< - React.ElementRef<typeof AlertDialogPrimitive.Description>, - React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Description> ->(({ className, ...props }, ref) => ( - <AlertDialogPrimitive.Description - ref={ref} - className={cn('text-sm text-muted-foreground', className)} - {...props} - /> -)) -AlertDialogDescription.displayName = - AlertDialogPrimitive.Description.displayName - -const AlertDialogAction = React.forwardRef< - React.ElementRef<typeof AlertDialogPrimitive.Action>, - React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Action> ->(({ className, ...props }, ref) => ( - <AlertDialogPrimitive.Action - ref={ref} - className={cn(buttonVariants(), className)} - {...props} - /> -)) -AlertDialogAction.displayName = AlertDialogPrimitive.Action.displayName - -const AlertDialogCancel = React.forwardRef< - React.ElementRef<typeof AlertDialogPrimitive.Cancel>, - React.ComponentPropsWithoutRef<typeof AlertDialogPrimitive.Cancel> ->(({ className, ...props }, ref) => ( - <AlertDialogPrimitive.Cancel - ref={ref} - className={cn( - buttonVariants({ variant: 'outline' }), - 'mt-2 sm:mt-0', - className - )} - {...props} - /> -)) -AlertDialogCancel.displayName = AlertDialogPrimitive.Cancel.displayName - -export { - AlertDialog, - AlertDialogTrigger, - AlertDialogContent, - AlertDialogHeader, - AlertDialogFooter, - AlertDialogTitle, - AlertDialogDescription, - AlertDialogAction, - AlertDialogCancel -} diff --git a/spaces/zhoupin30/zhoupin30/src/components/chat-notification.tsx b/spaces/zhoupin30/zhoupin30/src/components/chat-notification.tsx deleted file mode 100644 index 3474e522992c43a4d1d0eadcf205a9760d5b930b..0000000000000000000000000000000000000000 --- a/spaces/zhoupin30/zhoupin30/src/components/chat-notification.tsx +++ /dev/null @@ -1,91 +0,0 @@ -import { useEffect } from 'react' -import Image from 'next/image' - -import IconWarning from '@/assets/images/warning.svg' -import { ChatError, ErrorCode, ChatMessageModel } from '@/lib/bots/bing/types' -import { ExternalLink } from './external-link' -import { useBing } from '@/lib/hooks/use-bing' - -export interface ChatNotificationProps extends Pick<ReturnType<typeof useBing>, 'bot'> { - message?: ChatMessageModel -} - -function getAction(error: ChatError, reset: () => void) { - if 
(error.code === ErrorCode.THROTTLE_LIMIT) { - reset() - return ( - <div> - 你已达到每日最大发送消息次数,请<a href={`#dialog="settings"`}>更换账号</a>或隔一天后重试 - </div> - ) - } - if (error.code === ErrorCode.BING_IP_FORBIDDEN) { - return ( - <ExternalLink href="https://github.com/weaigc/bingo/issues"> - 你的服务器或代理已被封禁,请更换服务器或使用代理重试 - </ExternalLink> - ) - } - if (error.code === ErrorCode.BING_TRY_LATER) { - return ( - <ExternalLink href="/"> - 创建会话失败,请稍候重试 - </ExternalLink> - ) - } - if (error.code === ErrorCode.BING_FORBIDDEN) { - return ( - <ExternalLink href="https://bing.com/new"> - 你的账号已在黑名单,请尝试更换账号及申请解封 - </ExternalLink> - ) - } - if (error.code === ErrorCode.CONVERSATION_LIMIT) { - return ( - <div> - 当前话题已中止,请点 - <a href={`#dialog="reset"`}>重新开始</a> - 开启新的对话 - </div> - ) - } - if (error.code === ErrorCode.BING_CAPTCHA) { - return ( - <ExternalLink href="https://www.bing.com/turing/captcha/challenge"> - 点击通过人机验证 - </ExternalLink> - ) - } - if (error.code === ErrorCode.BING_UNAUTHORIZED) { - reset() - return ( - <a href={`#dialog="settings"`}>没有获取到身份信息或身份信息失效,点此重新设置</a> - ) - } - return error.message -} - -export function ChatNotification({ message, bot }: ChatNotificationProps) { - useEffect(() => { - window.scrollBy(0, 2000) - }, [message]) - - if (!message?.error) return - - return ( - <div - className="notification-container" - > - <div className="bottom-notifications"> - <div className="inline-type with-decorative-line"> - <div className="text-container mt-1"> - <div className="title inline-flex items-start"> - <Image alt="error" src={IconWarning} width={20} className="mr-1 mt-1" /> - {getAction(message.error, () => bot.resetConversation())} - </div> - </div> - </div> - </div> - </div> - ) -} diff --git a/spaces/zhoupin30/zhoupin30/src/pages/api/proxy.ts b/spaces/zhoupin30/zhoupin30/src/pages/api/proxy.ts deleted file mode 100644 index 240b5fb5561d993c6381649bf4544ce12f3cdab2..0000000000000000000000000000000000000000 --- a/spaces/zhoupin30/zhoupin30/src/pages/api/proxy.ts +++ /dev/null @@ -1,24 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch } from '@/lib/isomorphic' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { url, headers, method = 'GET', body } = req.body - if (!url) { - return res.end('ok') - } - const response = await fetch(url, { headers, method, body, redirect: 'manual' }) - const text = await response.text() - res.writeHead(200, { - 'Content-Type': 'application/text', - 'x-url': response.url, - 'x-status': response.status, - }) - res.end(text) - } catch (e) { - console.log(e) - return res.end(e) - } -} diff --git a/spaces/zomehwh/sovits-goldship/onnx/model_onnx_48k.py b/spaces/zomehwh/sovits-goldship/onnx/model_onnx_48k.py deleted file mode 100644 index d35c92e5d0606d29f40a9ad08a50b60cc93bc48b..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-goldship/onnx/model_onnx_48k.py +++ /dev/null @@ -1,328 +0,0 @@ -import copy -import math -import torch -from torch import nn -from torch.nn import functional as F - -import modules.attentions as attentions -import modules.commons as commons -import modules.modules as modules - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from modules.commons import init_weights, get_padding -from vdecoder.hifigan.models import Generator -from utils import f0_to_coarse - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - 
hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class Encoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - # print(x.shape,x_lengths.shape) - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - filter_channels=None, - n_heads=None, - p_dropout=None): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - self.f0_emb = nn.Embedding(256, hidden_channels) - - self.enc_ = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - - def forward(self, x, x_lengths, f0=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = x + self.f0_emb(f0.long()).transpose(1,2) - x = self.enc_(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - - return z, m, logs, x_mask - - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - 
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SpeakerEncoder(torch.nn.Module): - def __init__(self, mel_n_channels=80, model_num_layers=3, model_hidden_size=256, model_embedding_size=256): - super(SpeakerEncoder, self).__init__() - self.lstm = nn.LSTM(mel_n_channels, model_hidden_size, model_num_layers, batch_first=True) - self.linear = nn.Linear(model_hidden_size, model_embedding_size) - self.relu = nn.ReLU() - - def forward(self, mels): - self.lstm.flatten_parameters() - _, (hidden, _) = self.lstm(mels) - embeds_raw = self.relu(self.linear(hidden[-1])) - return embeds_raw / torch.norm(embeds_raw, dim=1, keepdim=True) - - def compute_partial_slices(self, total_frames, partial_frames, partial_hop): - mel_slices = [] - for i in range(0, total_frames-partial_frames, partial_hop): - mel_range = torch.arange(i, i+partial_frames) - mel_slices.append(mel_range) - - return mel_slices - - def embed_utterance(self, mel, partial_frames=128, partial_hop=64): - mel_len = mel.size(1) - last_mel = mel[:,-partial_frames:] - - if mel_len > partial_frames: - mel_slices = self.compute_partial_slices(mel_len, partial_frames, 
partial_hop) - mels = list(mel[:,s] for s in mel_slices) - mels.append(last_mel) - mels = torch.stack(tuple(mels), 0).squeeze(1) - - with torch.no_grad(): - partial_embeds = self(mels) - embed = torch.mean(partial_embeds, axis=0).unsqueeze(0) - #embed = embed / torch.linalg.norm(embed, 2) - else: - with torch.no_grad(): - embed = self(last_mel) - - return embed - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - ssl_dim, - n_speakers, - **kwargs): - - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - self.ssl_dim = ssl_dim - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - self.enc_p_ = TextEncoder(ssl_dim, inter_channels, hidden_channels, 5, 1, 16,0, filter_channels, n_heads, p_dropout) - hps = { - "sampling_rate": 48000, - "inter_channels": 192, - "resblock": "1", - "resblock_kernel_sizes": [3, 7, 11], - "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], - "upsample_rates": [10, 8, 2, 2], - "upsample_initial_channel": 512, - "upsample_kernel_sizes": [16, 16, 4, 4], - "gin_channels": 256, - } - self.dec = Generator(h=hps) - self.enc_q = Encoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - def forward(self, c, c_lengths, f0, g=None): - g = self.emb_g(g.unsqueeze(0)).transpose(1,2) - z_p, m_p, logs_p, c_mask = self.enc_p_(c.transpose(1,2), c_lengths, f0=f0_to_coarse(f0)) - z = self.flow(z_p, c_mask, g=g, reverse=True) - o = self.dec(z * c_mask, g=g, f0=f0.float()) - return o - diff --git a/spaces/zomehwh/sovits-xiaoke/vdecoder/hifigan/utils.py b/spaces/zomehwh/sovits-xiaoke/vdecoder/hifigan/utils.py deleted file mode 100644 index 84bff024f4d2e2de194b2a88ee7bbe5f0d33f67c..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-xiaoke/vdecoder/hifigan/utils.py +++ /dev/null @@ -1,68 +0,0 @@ -import glob -import os -import matplotlib -import torch -from torch.nn.utils import weight_norm -matplotlib.use("Agg") -import matplotlib.pylab as plt - - -def plot_spectrogram(spectrogram): - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", - interpolation='none') - plt.colorbar(im, ax=ax) - - fig.canvas.draw() - plt.close() - - return fig - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def apply_weight_norm(m): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - weight_norm(m) - - -def get_padding(kernel_size, dilation=1): - 
return int((kernel_size*dilation - dilation)/2) - - -def load_checkpoint(filepath, device): - assert os.path.isfile(filepath) - print("Loading '{}'".format(filepath)) - checkpoint_dict = torch.load(filepath, map_location=device) - print("Complete.") - return checkpoint_dict - - -def save_checkpoint(filepath, obj): - print("Saving checkpoint to {}".format(filepath)) - torch.save(obj, filepath) - print("Complete.") - - -def del_old_checkpoints(cp_dir, prefix, n_models=2): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) # get checkpoint paths - cp_list = sorted(cp_list)# sort by iter - if len(cp_list) > n_models: # if more than n_models models are found - for cp in cp_list[:-n_models]:# delete the oldest models other than latest n_models - open(cp, 'w').close()# empty file contents - os.unlink(cp)# delete file (move to trash when using Colab) - - -def scan_checkpoint(cp_dir, prefix): - pattern = os.path.join(cp_dir, prefix + '????????') - cp_list = glob.glob(pattern) - if len(cp_list) == 0: - return None - return sorted(cp_list)[-1] - diff --git a/spaces/zxc314/vits-uma-genshin-honkai/commons.py b/spaces/zxc314/vits-uma-genshin-honkai/commons.py deleted file mode 100644 index 40fcc05364d4815971f5c6f9dbb8dcef8e3ec1e9..0000000000000000000000000000000000000000 --- a/spaces/zxc314/vits-uma-genshin-honkai/commons.py +++ /dev/null @@ -1,172 +0,0 @@ -import math -import torch -from torch.nn import functional as F -import torch.jit - - -def script_method(fn, _rcb=None): - return fn - - -def script(obj, optimize=True, _frames_up=0, _rcb=None): - return obj - - -torch.jit.script_method = script_method -torch.jit.script = script - - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - - -def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - device = duration.device - - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): 
- parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm