diff --git a/spaces/17TheWord/RealESRGAN/scripts/generate_multiscale_DF2K.py b/spaces/17TheWord/RealESRGAN/scripts/generate_multiscale_DF2K.py
deleted file mode 100644
index d4f5d8324b1624e4cb6163754703b8dac2d188fd..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/scripts/generate_multiscale_DF2K.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import argparse
-import glob
-import os
-from PIL import Image
-
-
-def main(args):
-    # For DF2K, we consider the following three scales,
-    # and the smallest image whose shortest edge is 400
-    scale_list = [0.75, 0.5, 1 / 3]
-    shortest_edge = 400
-
-    path_list = sorted(glob.glob(os.path.join(args.input, '*')))
-    for path in path_list:
-        print(path)
-        basename = os.path.splitext(os.path.basename(path))[0]
-
-        img = Image.open(path)
-        width, height = img.size
-        for idx, scale in enumerate(scale_list):
-            print(f'\t{scale:.2f}')
-            rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
-            rlt.save(os.path.join(args.output, f'{basename}T{idx}.png'))
-
-        # save the smallest image, whose shortest edge is 400
-        if width < height:
-            ratio = height / width
-            width = shortest_edge
-            height = int(width * ratio)
-        else:
-            ratio = width / height
-            height = shortest_edge
-            width = int(height * ratio)
-        rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS)
-        rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png'))
-
-
-if __name__ == '__main__':
-    """Generate multi-scale versions for GT images with LANCZOS resampling.
-    It is now used for the DF2K dataset (DIV2K + Flickr2K).
-    """
-    parser = argparse.ArgumentParser()
-    parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
-    parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder')
-    args = parser.parse_args()
-
-    os.makedirs(args.output, exist_ok=True)
-    main(args)
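As a quick illustration of what the deleted script computes, here is a minimal Python sketch of its resize arithmetic for one hypothetical 2040x1356 DIV2K image (the dimensions are assumptions for illustration, not taken from the dataset):

```python
# Reproduces the size math of generate_multiscale_DF2K.py for one image.
width, height = 2040, 1356  # hypothetical example dimensions

# T0-T2: the three fixed downscales.
for idx, scale in enumerate([0.75, 0.5, 1 / 3]):
    print(f'T{idx}: {int(width * scale)}x{int(height * scale)}')

# T3: the shortest-edge-400 version; height is the shorter edge here,
# so it is pinned to 400 and the width is scaled to keep the aspect ratio.
ratio = width / height
print(f'T3: {int(400 * ratio)}x400')  # prints 'T3: 601x400'
```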
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LibFredo6 54b Learn More About This Plugin Library That Powers Many SketchUp Extensions.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LibFredo6 54b Learn More About This Plugin Library That Powers Many SketchUp Extensions.md
deleted file mode 100644
index d62ef02cd6d5bc73dabbecef3f0cfb9c17e98b91..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LibFredo6 54b Learn More About This Plugin Library That Powers Many SketchUp Extensions.md
+++ /dev/null
@@ -1,144 +0,0 @@
-
-
**Download LibFredo6 54b**
If you are a SketchUp user who loves to create amazing 3D models, you might have heard of LibFredo6. It is a plugin library that contains a set of tools and extensions that enhance the functionality and usability of SketchUp. In this article, we will show you how to download and install LibFredo6 54b, the latest version of this plugin library, and how to use its features to create stunning 3D designs.
-
Introduction
-
What is LibFredo6 and why do you need it?
-
LibFredo6 is a plugin library that was created by Fredo6, a renowned SketchUp developer and community member. It is a collection of scripts that provide various functions and features that are not available in the native SketchUp tools. For example, LibFredo6 allows you to create complex curves, surfaces, shapes, animations, textures, and more.
LibFredo6 is not a standalone plugin, but rather a library that supports other plugins and extensions that depend on it. Some of the most popular plugins that require LibFredo6 are:
-
-
FredoScale: A tool that allows you to scale, stretch, twist, bend, and shear your 3D models in any direction.
-
RoundCorner: A tool that allows you to round the edges and corners of your 3D models with different profiles and options.
-
Curviloft: A tool that allows you to create skins and lofts between contours with smooth transitions.
-
HoverSelect: A tool that allows you to select entities by hovering over them with your mouse cursor.
-
Animator: A tool that allows you to create animations of your 3D models with keyframes, transitions, scenes, and cameras.
-
-
These are just some examples of the plugins that depend on LibFredo6. There are many more plugins that use this library to provide additional functionality and features for SketchUp users.
-
What are the main features and benefits of LibFredo6?
-
LibFredo6 is a plugin library that offers many features and benefits for SketchUp users. Some of the main ones are:
-
-
It is free and open source. You can download and use LibFredo6 without any cost or license restrictions.
-
It is compatible with SketchUp 2017 and later. You can use LibFredo6 with any recent version of SketchUp without compatibility issues.
-
It is easy to install and update. You can download and install LibFredo6 with a few clicks using the SketchUcation Plugin Store or manually by copying the files into your SketchUp plugins folder. You can also update LibFredo6 easily using the built-in updater or by downloading the latest version from the Plugin Store.
-
It is customizable and user-friendly. You can access the LibFredo6 settings and preferences from the Window menu in SketchUp. You can adjust various options such as language, colors, icons, tooltips, shortcuts, menus, dialogs, etc. You can also access the documentation and tutorials for each plugin that uses LibFredo6 from the same menu.
-
It is reliable and stable. LibFredo6 has been tested and improved over many years by Fredo6 and the SketchUp community, so bugs and errors are rare and performance is consistently good.
-
-
How to download and install LibFredo6 54b?
-
Downloading and installing LibFredo6 54b is very easy. There are two methods you can use:
-
Method 1: Using the SketchUcation Plugin Store
-
The SketchUcation Plugin Store is a website that allows you to browse, download, install, update, and manage SketchUp plugins easily. You can access it from https://sketchucation.com/pluginstore.
-
-To download and install LibFredo6 54b using this method, follow these steps:
-
-
Extract the contents of the file libfredo6_v54b.zip on your computer
-
Copy all the files inside the folder libfredo6_v54b into your SketchUp plugins folder (usually located at C:\Users\YourName\AppData\Roaming\SketchUp\SketchUp 2023\SketchUp\Plugins); a scripted version of these steps is sketched after this list
-
Restart SketchUp
-
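For readers comfortable with scripting, the extract-and-copy steps above can be automated. Here is a minimal Python sketch; the archive location and the SketchUp version in the plugins path are assumptions, so adjust them to your setup:

```python
import shutil
import zipfile
from pathlib import Path

# Hypothetical paths: change these to match your download and SketchUp version.
zip_path = Path.home() / 'Downloads' / 'libfredo6_v54b.zip'
plugins = Path.home() / 'AppData/Roaming/SketchUp/SketchUp 2023/SketchUp/Plugins'

# Unpack the archive into a staging folder.
staging = Path('libfredo6_v54b')
with zipfile.ZipFile(zip_path) as archive:
    archive.extractall(staging)

# Copy every extracted file and folder into the SketchUp plugins folder.
for item in staging.iterdir():
    target = plugins / item.name
    if item.is_dir():
        shutil.copytree(item, target, dirs_exist_ok=True)
    else:
        shutil.copy2(item, target)
```

Then restart SketchUp so the library is picked up, exactly as in the manual steps.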
-
How to use LibFredo6 54b
-
Once you have downloaded and installed LibFredo6 54b, you can start using it with any plugin or extension that depends on it.
-
How to access the LibFredo6 settings and preferences?
-
To access the LibFredo6 settings and preferences, go to Window > LibFredo6 Settings... in SketchUp.
You can use this dialog box to customize various aspects of LibFredo6 and its plugins. For example, you can:
-
-
Select your preferred language from the drop-down menu at the top left corner.
-
Change the colors, icons, tooltips, and shortcuts of the tools and extensions from the tabs at the top.
-
Enable or disable the menus, dialogs, and toolbars of the plugins from the tabs at the bottom.
-
Check for updates, view documentation, and access tutorials from the buttons at the bottom right corner.
-
-
You can also click on Help > About LibFredo6... in SketchUp to see more information about LibFredo6 and its plugins.
-
How to use the various tools and extensions included in LibFredo6?
-
To use the various tools and extensions included in LibFredo6, you need to activate them from the menus or toolbars in SketchUp. You can also use keyboard shortcuts or context menus to access them.
-
Here is a table that shows some of the most common tools and extensions that use LibFredo6 and how to access them:
| Tool or extension | Description | How to access |
| --- | --- | --- |
| FredoScale | A tool that allows you to scale, stretch, twist, bend, and shear your 3D models in any direction. | Go to Tools > FredoScale or click on its icon in the toolbar. |
| RoundCorner | A tool that allows you to round the edges and corners of your 3D models with different profiles and options. | Go to Tools > Fredo Collection > RoundCorner or click on its icon in the toolbar. |
| Curviloft | A tool that allows you to create skins and lofts between contours with smooth transitions. | Go to Tools > Fredo Collection > Curviloft or click on its icon in the toolbar. |
| HoverSelect | A tool that allows you to select entities by hovering over them with your mouse cursor. | Go to Tools > Fredo Collection > HoverSelect or press Ctrl+H on your keyboard. |
| Animator | A tool that allows you to create animations of your 3D models with keyframes, transitions, scenes, and cameras. | Go to Tools > Fredo Collection > Animator or click on its icon in the toolbar. |
How to troubleshoot common issues and errors with LibFredo6?
-
If you encounter any issues or errors with LibFredo6 or its plugins, here are some tips that might help you:
-
-
Make sure you have downloaded and installed the latest version of LibFredo6 and its plugins. You can check for updates from Window > LibFredo6 Settings... > Check Plugins for Update.
-
Make sure you have enabled all the required plugins and extensions in SketchUp. You can enable or disable them from Window > Extension Manager.
-
Make sure you have granted all the necessary permissions for LibFredo6 and its plugins. You can grant or revoke permissions from Window > Extension Manager > Settings.
-
If you still have problems, you can contact Fredo6 or post your issue on https://sketchucation.com/forums/viewtopic.php?t=17947. You can also send a trace log file from Window > LibFredo6 Settings... > Trace Logging.
-
-
Conclusion
-
In this article, we have shown you how to download and install LibFredo6 54b, a plugin library that contains a set of tools and extensions that enhance the functionality and usability of SketchUp. We have also shown you how to use its features and benefits to create stunning 3D designs.
-
We hope you have found this article helpful and informative. If you have any questions or feedback, please feel free to share them with us in the comments section below. We would love to hear from you!
-
Happy Sketching!
-
Frequently Asked Questions
-
-
What is LibFredo6?
-
LibFredo6 is a plugin library that contains a set of tools and extensions that enhance the functionality and usability of SketchUp.
-
Why do I need LibFredo6?
-
You need LibFredo6 if you want to use any of the plugins or extensions that depend on it. For example, FredoScale, RoundCorner, Curviloft, HoverSelect, Animator, etc.
-
How do I download and install LibFredo6 54b?
-You can download and install LibFredo6 54b using the SketchUcation Plugin Store or manually by copying the files into your SketchUp plugins folder.
-
How do I use LibFredo6 54b?
-
You can use LibFredo6 54b with any plugin or extension that depends on it. You can access them from the menus or toolbars in SketchUp. You can also customize the settings and preferences of LibFredo6 and its plugins from Window > LibFredo6 Settings...
-
How do I troubleshoot common issues and errors with LibFredo6?
-
If you encounter any issues or errors with LibFredo6 or its plugins, you can check for updates, enable or disable plugins, grant or revoke permissions, contact Fredo6 or post your issue on the forum, or send a trace log file.
-
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Global Mapper Trial A Comprehensive Guide to the Best GIS Software.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Global Mapper Trial A Comprehensive Guide to the Best GIS Software.md
deleted file mode 100644
index 0e5cc7990bf40fce475b589365799e9f7f8b4572..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Global Mapper Trial A Comprehensive Guide to the Best GIS Software.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-
Global Mapper Trial: A Must-Have GIS Software for Spatial Data
-
Global Mapper is a powerful and versatile GIS software that supports more than 300 spatial data formats. It offers a complete suite of data creation and editing tools, as well as cutting-edge 3D visualization and analysis capabilities. Whether you are a professional mapper, a student, or a hobbyist, Global Mapper can help you with your spatial data needs.
If you want to try Global Mapper for yourself, you can download a free trial version from the official website of Blue Marble Geographics. The trial version allows you to evaluate all the features of the software for 14 days, without any limitations. You can also request a demo or contact a sales representative to learn more about the software and its pricing options.
-
Global Mapper is compatible with Windows 10 and 11 (64-bit version), and Windows Server 2012/2016/2019/2022. It requires 8 GB of RAM and 600 MB of hard drive space for the installation. The software is also available in Chinese, French, German, Italian, Japanese, Korean, Polish, Spanish, Portuguese, and Turkish versions.
-
Global Mapper is widely used by geospatial professionals, researchers, educators, and enthusiasts around the world. It has been praised for its intuitive user interface, logical layout, and unmatched technical support. Some of the companies that use Global Mapper include NASA, USGS, NOAA, FEMA, Google, Apple, Microsoft, and many more.
-
Global Mapper is constantly updated with new features and improvements to meet the evolving needs of its users. The latest version of Global Mapper is 24.1, which was released in October 2022. Some of the new features in this version include:
-
-
-
A new Global Mapper Pro edition that includes advanced tools for point cloud processing, raster analysis, terrain modeling, and more.
-
A new option to export vector data to Google Earth KML/KMZ format with embedded attributes and styles.
-
A new tool to create custom contour lines from point cloud or raster data.
-
A new option to import and export GeoPackage files.
-
A new tool to calculate viewshed analysis from multiple observer points.
-
A new tool to create slope maps from elevation data.
-
And many more enhancements and bug fixes.
-
-
If you are looking for a reliable, affordable, and easy-to-use GIS software that can handle any spatial data challenge, look no further than Global Mapper. Download the trial version today and see for yourself why Global Mapper is a must-have GIS software for anyone who works with maps or spatial data.
Global Mapper is not only a GIS software, but also a data converter, a map editor, a projection tool, a 3D viewer, and much more. It can handle any type of spatial data, from vector to raster, from elevation to imagery, from LiDAR to GPS. It can also connect to online data sources, such as WMS, WFS, WCS, and Tile Services. With Global Mapper, you can access, view, edit, analyze, and export any spatial data with ease.
-
One of the most impressive features of Global Mapper is its 3D functionality. You can view your data in 3D mode, create realistic terrain models, drape vector or raster layers over the terrain, perform 3D measurements and calculations, and export 3D models to various formats. You can also create fly-through animations and videos of your 3D scenes. Global Mapper supports various 3D formats, such as COLLADA, STL, OBJ, VRML, and more.
-
Another remarkable feature of Global Mapper is its point cloud processing capability. You can import point cloud data from various sources, such as LiDAR scanners, drones, photogrammetry software, and more. You can then classify, filter, crop, edit, colorize, and extract features from your point cloud data. You can also generate raster or vector layers from your point cloud data, such as elevation grids, contours, buildings, trees, power lines, and more.
-
Global Mapper is not only a powerful GIS software but also an affordable one. You can purchase a single-user license of Global Mapper for $549 USD. If you need more advanced tools for point cloud processing and raster analysis, you can upgrade to Global Mapper Pro for $999 USD. You can also purchase optional modules for specific purposes, such as LiDAR Module ($499 USD), Pixels-to-Points Module ($299 USD), and Georeferencing Module ($199 USD). All licenses include one year of maintenance and support.
-
If you are still not convinced that Global Mapper is the best GIS software for you, you can read some testimonials from satisfied customers. Here are some examples:
-
-
"Global Mapper is an affordable and easy-to-use GIS application that offers access to an unparalleled variety of spatial datasets and provides just the right level of functionality to satisfy both experienced GIS professionals and beginning users." - Equator Graphics
-
"Global Mapper has been an essential tool for our company for over 10 years. It allows us to work with virtually any type of geospatial data in a fast and efficient way. It has saved us countless hours of work and has enabled us to deliver high-quality products to our clients." - GeoSolutions Consulting
-
"Global Mapper is the Swiss Army Knife of GIS software. It can do anything you need it to do with spatial data. It is easy to use, reliable, and constantly updated with new features. It is by far the best value for money in the GIS market." - TerraImage USA
-
-
Don't wait any longer. Download the trial version of Global Mapper today and discover why it is the ultimate GIS software for spatial data.
-
-
\ No newline at end of file
diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Capturix ScanShare V7.06.848 Enterprise Edition-CRD.rar __FULL__.md b/spaces/1gistliPinn/ChatGPT4/Examples/Capturix ScanShare V7.06.848 Enterprise Edition-CRD.rar __FULL__.md
deleted file mode 100644
index ce440e2c5404d74b03731f1a36b1e17bbbdf37ef..0000000000000000000000000000000000000000
--- a/spaces/1gistliPinn/ChatGPT4/Examples/Capturix ScanShare V7.06.848 Enterprise Edition-CRD.rar __FULL__.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-In this version of jag justicia militar audio latino descarga gratis you can capture ... Capturix ScanShare v7.06.848 Enterprise Edition-CRD.rar.
-
-
-
diff --git a/spaces/1phancelerku/anime-remove-background/Bulk Download Messenger Photos A Simple and Effective Method.md b/spaces/1phancelerku/anime-remove-background/Bulk Download Messenger Photos A Simple and Effective Method.md
deleted file mode 100644
index 9746d81bc10c56d2e2a1ac19ba06efa9fdf7e8b3..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Bulk Download Messenger Photos A Simple and Effective Method.md
+++ /dev/null
@@ -1,167 +0,0 @@
-
-
How to Bulk Download Messenger Photos
-
If you frequently use Facebook Messenger, you know what a terrific platform it is for sharing photos. Saving images from your Messenger threads can be a great way to collect memories. But what if you want to download all the photos from a messenger conversation at once? Is there a way to do that without having to save each photo individually?
-
In this article, we will show you how to bulk download messenger photos on your PC, iPhone, or Android device. Whether you want to backup your photos, transfer them to another device, or print them out, we have got you covered. Let's get started!
Why you might want to download all photos from a messenger conversation
-
There are many reasons why you might want to download all photos from a messenger conversation. Here are some of them:
-
-
You want to keep a copy of your photos in case you lose access to your Facebook account or Messenger app.
-
You want to free up some space on your device by deleting the Messenger app or clearing its cache.
-
You want to share your photos with someone who is not on Facebook or Messenger.
-
You want to edit, organize, or print your photos using another app or software.
-
You want to create a photo album, collage, or slideshow of your memories.
-
-
What you need to download all photos from a messenger conversation
-
Before we dive into the methods of downloading all photos from a messenger conversation, here are some things you need:
-
-
A Facebook account and a Messenger app. You can use either the web version or the mobile version of Messenger, depending on your device.
-
A stable internet connection. Downloading multiple photos can take some time and bandwidth, so make sure you have a good connection.
-
A device with enough storage space. Depending on how many photos you have in your conversation, you might need some extra space on your device or an external storage device.
-
An email address. If you choose to download your photos in bulk from Facebook, you will need an email address to receive the download link.
-
-
How to download all photos from a messenger conversation on a PC
-
Method 1: Download photos manually
-
If you only have a few photos in your conversation, you can download them manually one by one. Here are the steps to do that:
-
-
Step 1: Tap on the sender's name
-
Open your Messenger app on your PC and select the conversation that contains the photos you want to download. Then, tap on the sender's name at the top of the chat window. This will open a panel with more options.
-
Step 2: Scroll to the images
-
In the panel, scroll down to the section that says "Shared Photos". Here, you will see all the photos that have been exchanged in the conversation. You can use the arrows to navigate through them.
-
Step 3: Click on the photo and then click download
-
Click on the photo that you want to download and it will open in a larger view. Then, click on the download icon at the bottom right corner of the photo. This will prompt you to choose a location on your PC where you want to save the photo. Repeat this process for each photo you want to download.
-
Method 2: Download photos in bulk
-
If you have a lot of photos in your conversation, downloading them manually can be tedious and time-consuming. Fortunately, there is a way to download all your photos in bulk from Facebook. Here are the steps to do that:
-
Step 1: Go to the Messenger website and open the menu
-
Open your web browser and go to https://www.messenger.com/. Log in with your Facebook account if you haven't already. Then, click on the menu icon at the top left corner of the screen. This will open a sidebar with more options.
-
Step 2: Go to your Facebook information and request a download
-
In the sidebar, click on "Settings". Then, click on "Your Facebook Information". This will take you to a page where you can access and manage your Facebook data. Here, click on "Download Your Information". This will allow you to request a copy of your Facebook data, including your Messenger photos.
-
On the next page, you can select what data you want to download. You can choose the date range, format, and quality of your download. To download only your Messenger photos, uncheck all the boxes except for "Messages". Then, click on "Create File". This will start processing your request.
-
Step 3: Open the email and download the files
-
Once your request is ready, Facebook will send you an email with a link to download your files. Open the email and click on the link. This will take you back to the Download Your Information page. Here, click on "Download" next to your file. You might need to enter your password again for security reasons.
-
This will download a ZIP file containing all your Messenger data, including your photos. To access your photos, extract the ZIP file and open the folder named "messages". Inside this folder, you will find subfolders for each of your conversations. Each subfolder will contain all the photos from that conversation. You can then copy or move these photos to any location on your PC.
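If your conversations contain many photos, pulling them out of those per-conversation subfolders one by one is slow. Here is a minimal Python sketch of one way to collect them into a single folder; the folder names are assumptions based on the layout described above, so adjust them to match your actual export:

```python
import shutil
from pathlib import Path

# Hypothetical locations: point these at your extracted export and a destination.
export_root = Path('facebook-export/messages')
dest = Path('messenger_photos')
dest.mkdir(exist_ok=True)

# Walk every conversation subfolder and copy anything that looks like an image,
# prefixing each file with its conversation folder name to avoid name clashes.
for item in export_root.rglob('*'):
    if item.suffix.lower() in {'.jpg', '.jpeg', '.png', '.gif'}:
        shutil.copy2(item, dest / f'{item.parent.name}_{item.name}')
```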
-
How to download all photos from a messenger conversation on an iPhone
-
Method 1: Download photos manually
-
If you only have a few photos in your conversation, you can download them manually one by one. Here are the steps to do that:
-
Step 1: Open your Messenger app and locate the image
-
Open your Messenger app on your iPhone and select the conversation that contains the photos you want to download. Then, scroll through the chat until you find the image you want to save.
-
Step 2: Long-press on the image and click save
-
Long-press on the image until a menu pops up. Then, tap on "Save". This will save the image to your camera roll. Repeat this process for each image you want to download.
-
Method 2: Download multiple photos simultaneously
-
If you have a lot of photos in your conversation, downloading them manually can be tedious and time-consuming. Fortunately, there is a way to download multiple photos simultaneously from Messenger. Here are the steps to do that:
-
Step 1: Open the conversation thread and tap on the sender's name
-
Open your Messenger app on your iPhone and select the conversation that contains the photos you want to download. Then, tap on the sender's name at the top of the chat window. This will open a panel with more options.
-
Step 2: Scroll down to more actions and tap view photos & videos
-
In the panel, scroll down to the section that says "More Actions". Here, you will see an option to "View Photos & Videos". Tap on it. This will show you all the photos and videos that have been exchanged in the conversation.
-
Step 3: Select each image and tap more then save
-
To select multiple images, tap and hold on one image until a checkmark appears. Then, tap on other images you want to select. You can also tap on "Select All" at the top right corner to select all images at once. Once you have selected the images you want to download, tap on "More" at the bottom right corner. Then, tap on "Save". This will save all the selected images to your camera roll.
-
How to download all photos from a messenger conversation on an Android device
-
Method 1: Download photos manually
-
If you only have a few photos in your conversation, you can download them manually one by one. Here are the steps to do that:
-
Step 1: Open your Messenger app and select the conversation
-
Open your Messenger app on your Android device and select the conversation that contains the photos you want to download. Then, scroll through the chat until you find the image you want to save.
-
Step 2: Tap and hold on the image and click save to device
-
Tap and hold on the image until a menu pops up. Then, tap on "Save to Device". This will save the image to your gallery. Repeat this process for each image you want to download.
-
Method 2: Download photos in bulk
-
If you have a lot of photos in your conversation, downloading them manually can be tedious and time-consuming. Fortunately, there is a way to download multiple photos in bulk from Messenger. Here are the steps to do that:
-
Step 1: Open the Messenger app and select the conversation
-
Open your Messenger app on your Android device and select the conversation that contains the photos you want to download.
-
Step 2: Tap on the i icon at the top right corner
-
Tap on the i icon at the top right corner of the chat window. This will open a panel with more options.
-
Step 3: Tap on shared media and select all images
-
In the panel, tap on "Shared Media". This will show you all the media files that have been exchanged in the conversation. To select all images, tap on "Select All" at the top right corner.
-
Step 4: Tap on the three dots icon at the top right corner and click save to device
-
Once you have selected all images, tap on the three dots icon at the top right corner of the screen. Then, tap on "Save to Device". This will save all the selected images to your gallery.
-
Conclusion
-
Summary of the main points
-
In this article, we have shown you how to bulk download messenger photos on your PC, iPhone, or Android device. You can use either manual or bulk methods depending on how many photos you have in your conversation. Downloading all your photos from a messenger conversation can be a great way to backup your memories, share them with others, or use them for other purposes.
-
Call to action
-
We hope you found this article helpful and informative. If you did, please share it with your friends and family who might also want to download their messenger photos. Also, let us know in the comments below if you have any questions or feedback about this topic. Thank you for reading!
-
Frequently Asked Questions
-
-
Q: How can I delete all photos from a messenger conversation?
-
A: If you want to delete all photos from a messenger conversation, you can follow these steps:
-
Open your Messenger app and select the conversation that contains the photos you want to delete.
-
Tap on the sender's name at the top of the chat window.
-
Scroll down to "Shared Photos" and tap on it.
-
Select each photo you want to delete and tap on "Delete" at the bottom of the screen.
-
-
-
Q: How can I download all photos from multiple messenger conversations at once?
-
A: If you want to download all photos from multiple messenger conversations at once, you can use the bulk method on your PC. Here are the steps to do that:
-
Go to the Messenger website and open the menu.
-
Go to your Facebook information and request a download.
-
Select the date range and format of your download.
-
Check the box next to "Messages" and uncheck all other boxes.
-
Click on "Create File" and wait for your request to be ready.
-
Open the email and download the files.
-
-This will download a ZIP file containing all your Messenger data, including photos from all your conversations.
-
Q: How can I view all photos from a messenger conversation without downloading them?
-
A: If you want to view all photos from a messenger conversation without downloading them, you can use these steps:
-
Open your Messenger app and select the conversation that contains the photos you want to view.
-
Tap on the sender's name at the top of the chat window.
-
Scroll down to "Shared Photos" and tap on it.
-
Swipe left or right to view all the photos in the conversation.
-
-
-
Q: How can I change the quality of the photos I download from Messenger?
-
A: If you want to change the quality of the photos you download from Messenger, you can use these steps:
-
Go to the Messenger website and open the menu.
-
Go to your Facebook information and request a download.
-
Select the date range and format of your download.
-
Check the box next to "Messages" and uncheck all other boxes.
-
Click on "Media Quality" and choose between high, medium, or low quality.
-
Click on "Create File" and wait for your request to be ready.
-
Open the email and download the files.
-
-This will affect the size and resolution of the photos you download from Messenger.
-
Q: How can I stop Messenger from automatically saving photos to my device?
-
A: If you want to stop Messenger from automatically saving photos to your device, you can use these steps:
-
Open your Messenger app and tap on your profile picture at the top left corner of the screen.
-
Scroll down to "Photos & Media" and tap on it.
-
Toggle off the switch next to "Save Photos".
-
-This will prevent Messenger from saving photos to your device unless you manually save them.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Cmo descargar metal slug 1 2 3 4 5 6 apk en tu PC o Android.md b/spaces/1phancelerku/anime-remove-background/Cmo descargar metal slug 1 2 3 4 5 6 apk en tu PC o Android.md
deleted file mode 100644
index d4a27f77d7bb986ac82019902b9621ebc6f4aef9..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Cmo descargar metal slug 1 2 3 4 5 6 apk en tu PC o Android.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-
Download Metal Slug 1 2 3 4 5 6 APK: How to Play the Classic Run and Gun Games on Your Android Device
-
If you are a fan of retro arcade games, you probably have heard of Metal Slug, a series of run and gun video games created by SNK. Metal Slug games are known for their fast-paced action, humorous graphics, and addictive gameplay. They have been released on various platforms such as Neo Geo, PlayStation, Xbox, Nintendo DS, and more.
But did you know that you can also play Metal Slug games on your Android device? Yes, you can enjoy these classic games on your smartphone or tablet with just a few steps. In this article, we will show you how to download and install Metal Slug APK, which is a file that contains the main games in the series: Metal Slug 1, 2, X, 3, 4, 5, and 6. We will also give you an overview of each game and some tips and tricks for playing them.
-
So what are you waiting for? Let's get started!
-
Metal Slug Series Overview
-
Metal Slug is a series of run and gun video games that started in 1996 with Metal Slug: Super Vehicle-001. The games follow the adventures of the Peregrine Falcon Squad, a group of elite soldiers who fight against various enemies such as rebels, aliens, zombies, mummies, and more. The games are famous for their cartoonish graphics, humorous animations, explosive sound effects, and diverse weapons and vehicles.
-
Here is a brief overview of each game in the series:
-
Metal Slug 1
-
Metal Slug was released in 1996 for Neo Geo arcade machines and home consoles. It was also ported to other platforms such as Sega Saturn, PlayStation, and PC. It introduced the main characters of the series: Marco Rossi, Tarma Roving, General Morden, and Allen O'Neil. The game has six stages that take place in various locations such as forests, deserts, snowfields, and military bases. The game features a variety of weapons such as pistols, machine guns, rocket launchers, grenades, and the iconic Metal Slug tank. The game also has hidden items and prisoners of war that can be rescued for extra points and bonuses.
-
-
Metal Slug 2 / X
-
Metal Slug 2 was released in 1998 for Neo Geo arcade machines and home consoles. It was also ported to other platforms such as PlayStation, PC, and iOS. It added two new playable characters: Eri Kasamoto and Fio Germi. The game has six stages that take place in new locations such as ancient ruins, Arabian towns, alien spaceships, and pyramids. The game features new weapons such as lasers, flame shots, iron lizards, and enemy chasers. The game also introduces new enemies such as mummies, aliens, and mutants. The game also has new vehicles such as camels, planes, and submarines.
-
Metal Slug X was released in 1999 for Neo Geo arcade machines and home consoles. It was also ported to other platforms such as PlayStation, PC, iOS, and Android. It is an improved version of Metal Slug 2 that fixes some of the issues of the original game such as slowdowns and glitches. It also changes some of the stage layouts, enemy placements, weapon drops, and boss battles. It also adds some new features such as time attack mode, combat school mode, and secret paths.
-
Metal Slug 3
-
Metal Slug 3 was released in 2000 for Neo Geo arcade machines and home consoles. It was also ported to other platforms such as PlayStation 2, Xbox, PC, iOS, Android, and Nintendo Switch. It is considered by many fans to be the best game in the series due to its variety and replay value. The game has five stages that take place in diverse locations such as jungles, oceans, caves, factories, and outer space. The game features new weapons such as shotguns, homing missiles, dual machine guns, and satellite lasers. The game also introduces new enemies such as zombies, giant crabs, yetis, and martians. The game also has new vehicles such as elephants, ostriches, and helicopters. The game also has branching paths that lead to different endings and bonus stages.
-
Metal Slug 4
-
Metal Slug 4 was released in 2002 for Neo Geo arcade machines and home consoles. It was also ported to other platforms such as PlayStation 2, Xbox, PC, and Nintendo Switch. It replaced Eri and Tarma with two new playable characters: Nadia Cassel and Trevor Spacey. The game has six stages that take place in urban settings such as cities, subways, airports, and military bases. The game features new weapons such as dual pistols, thunder shots, and landmines. The game also introduces new enemies such as cyborgs, robots, and hackers. The game also has new vehicles such as motorcycles, trucks, and tanks.
-
Metal Slug 5
-
Metal Slug 5 was released in 2003 for Neo Geo arcade machines and home consoles. It was also ported to other platforms such as PlayStation 2, Xbox, PC, and Nintendo Switch. It brought back Eri and Tarma as playable characters along with Marco and Fio. The game has six stages that take place in exotic locations such as jungles, waterfalls, ancient ruins, and underground caves. The game features new weapons such as flame whips, grenade launchers, and laser rifles. The game also introduces new enemies such as masked soldiers, ninjas, and giant worms. The game also has new vehicles such as boats, jet skis, and slides.
-
Metal Slug 6
-
Metal Slug 6 was released in 2006 for Atomiswave arcade machines and PlayStation 2. It was also ported to other platforms such as PC and Nintendo Wii. It added two new playable characters: Ralf Jones and Clark Still from the King of Fighters and Ikari Warriors series. The game has seven stages that take place in futuristic settings such as space stations, moon bases, and alien planets. The game features new weapons such as machine guns, flame throwers, and rocket launchers. The game also introduces new enemies such as clones, mutants, and aliens. The game also has new vehicles such as mechs, hovercrafts, and spaceships.
-
How to Download and Install Metal Slug APK on Android
-
Now that you have a brief idea of what each Metal Slug game is about, you might be wondering how to play them on your Android device. Well, it's not that hard if you follow these simple steps:
-
Download a PPSSPP emulator and a file manager app
-
The first thing you need to do is download a PPSSPP emulator and a file manager app on your Android device. A PPSSPP emulator is software that lets you run PlayStation Portable games on your device, and a file manager app lets you browse and manage the files stored on it.
Once you have downloaded both apps, install them on your device by following the instructions on the screen.
-
Download the Metal Slug ISO files from a trusted source
-
The next thing you need to do is to download the Metal Slug ISO files from a trusted source. An ISO file is a file that contains the data of a disc image. In this case, you need the ISO files of the Metal Slug games that were released for PlayStation Portable.
Make sure you check the file size and format of the downloaded files before opening them. The ISO files should be around 200 MB to 500 MB in size and have the .iso extension. If the files are compressed in ZIP or RAR format, you need to extract them using the file manager app.
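Before loading anything into PPSSPP, you can script the size-and-format check described above. A minimal Python sketch follows; the download folder and the size bounds are assumptions, so adjust them to where your files actually live:

```python
from pathlib import Path

# Hypothetical download folder: change this to your own location.
iso_dir = Path('Download/MetalSlug')

# Flag anything that is not a .iso or falls outside a plausible size range.
for f in sorted(iso_dir.glob('*')):
    size_mb = f.stat().st_size / (1024 * 1024)
    looks_ok = f.suffix.lower() == '.iso' and 150 <= size_mb <= 600
    print(f"{f.name}: {size_mb:.0f} MB {'OK' if looks_ok else 'check this file'}")
```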
-
Load the Metal Slug ISO files on PPSSPP and start playing
-
The final thing you need to do is to load the Metal Slug ISO files on PPSSPP and start playing. To do this, you need to open PPSSPP and locate the folder where the ISO files are stored using the file manager app. You can create a separate folder for the Metal Slug games on your device's internal storage or external SD card for easier access.
-
Once you have found the folder, select and load the desired Metal Slug game on PPSSPP. You can adjust the settings of the emulator such as graphics, sound, controls, and performance according to your preference. You can also save and load your progress using the save states feature of PPSSPP.
-
To play the game, you can use the virtual buttons on the screen or connect a controller to your device via Bluetooth or USB. You can also play with your friends using the multiplayer mode of PPSSPP. You can either join an online server or create a local network with your friends using Wi-Fi or hotspot.
-
Conclusion
-
Playing Metal Slug games on your Android device is a great way to relive the nostalgia of these classic run and gun games. You can enjoy the fast-paced action, humorous graphics, and addictive gameplay of these games anytime and anywhere with just a few steps. All you need is a PPSSPP emulator, a file manager app, and the Metal Slug ISO files.
-
Here are some tips and tricks for playing Metal Slug games on your Android device:
-
-
Use different weapons and vehicles to deal with different enemies and situations. Don't be afraid to experiment with different combinations.
-
Rescue as many prisoners of war as possible to get extra points and bonuses. Some of them may also give you special items or weapons.
-
Look for hidden items and secrets in each stage. Some of them may reveal new paths, modes, or characters.
-
Use cheats if you want to have some fun or challenge yourself. Some of the cheats include unlimited ammo, invincibility, level select, and more.
-
Have fun and don't give up. Metal Slug games are known for their difficulty and unpredictability. But they are also rewarding and satisfying once you complete them.
-
-
We hope this article has helped you learn how to download and install Metal Slug APK on your Android device. If you have any feedback or questions, please feel free to leave a comment below or contact us for more information. Thank you for reading!
-
Frequently Asked Questions
-
Here are some of the frequently asked questions about Metal Slug APK:
-
Q: Is Metal Slug APK safe to download?
-
A: Yes, as long as you download it from a trusted source and scan it with an antivirus app before opening it. However, we do not endorse or promote any illegal downloading or piracy of these games. Please support the original developers by buying their games from official sources.
-
Q: Is Metal Slug APK free to download?
-
A: Yes, most of the websites or torrents that offer these files do not charge any fee for downloading them. However, some of them may require you to register an account or complete a survey before accessing them. Please be careful of any scams or malware that may harm your device or data.
-
Q: Can I play Metal Slug APK offline?
-
A: Yes, you can play these games offline once you have downloaded and installed them on your device. You do not need an internet connection to play them unless you want to use the multiplayer mode of PPSSPP.
-
Q: Can I play Metal Slug APK on other devices?
-
A: Yes, you can play these games on other devices that support PPSSPP emulator such as Windows PC, Mac OS, Linux, iOS, PSP, PS Vita, and more. You just need to download and install PPSSPP emulator and the Metal Slug ISO files on those devices.
-
Q: Which Metal Slug game is the best?
-
A: This is a subjective question that depends on your personal preference and taste. However, most fans agree that Metal Slug 3 is the best game in the series due to its variety and replay value. But you can also try other games in the series and see which one suits you best.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Dominoes Gold APK - Play Dominoes with Friends and Earn Money.md b/spaces/1phancelerku/anime-remove-background/Dominoes Gold APK - Play Dominoes with Friends and Earn Money.md
deleted file mode 100644
index e0d7843f51252a2ae16332995fdc223f5b50ce5c..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Dominoes Gold APK - Play Dominoes with Friends and Earn Money.md
+++ /dev/null
@@ -1,150 +0,0 @@
-
-
Dominoes Gold APK Uptodown: How to Play and Win Real Money
-
Do you love playing dominoes and want to earn some cash while having fun? If yes, then you should check out Dominoes Gold, a popular app that lets you play dominoes online with real players and win real money. In this article, we will tell you everything you need to know about Dominoes Gold, including how to download and install it from Uptodown, how to play and win, how to withdraw your winnings, and the pros and cons of the app.
-
What is Dominoes Gold?
-
Dominoes Gold is an app that allows you to play dominoes online with real players from all over the world. You can join tournaments, challenge your friends, or play solo games. The best part is that you can win real money by playing dominoes. You can also chat with other players, customize your avatar, and enjoy various game modes.
How to download and install Dominoes Gold from Uptodown
-
To get the app from Uptodown, follow these steps:
-
Go to the Uptodown website and search for "Dominoes Gold", then open the app's page.
-
Click on the green "Download" button and wait for the APK file to be downloaded.
-
Once the download is complete, open the APK file and tap on "Install".
-
If you see a warning message that says "Install blocked", go to your device settings and enable "Unknown sources".
-
After the installation is done, open the app and sign up with your email or Facebook account.
-
Enjoy playing dominoes and winning real money!
-
-
How to play Dominoes Gold
-
Playing Dominoes Gold is easy and fun. Here are some basic instructions on how to play:
-
Rules of the game
-
The rules of dominoes vary depending on the game mode you choose. However, the general rules are as follows:
-
-
The game is played with a set of 28 tiles, each with two numbers from 0 to 6.
-
The tiles are shuffled and each player draws a certain number of tiles. The remaining tiles are left in the "boneyard".
-
The player with the highest double tile (the tile with the same number on both sides) starts the game by placing it on the board.
-
The next player must place a tile that matches one of the open ends of the board. For example, if the board has a 6-5 tile, the next player can place a 6-6, 6-4, 6-3, 6-2, 6-1, or 6-0 tile.
-
If a player cannot play a tile, they must draw a tile from the boneyard. If the boneyard is empty, they must pass their turn.
-
The game ends when one player runs out of tiles or when both players cannot play any tile.
-
The player who ends the game scores the sum of the numbers on the tiles left in their opponent's hand (see the sketch after this list).
-
The first player to reach a certain number of points (usually 100 or 150) wins the game.
-
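To make the end-of-round scoring rule concrete, here is a minimal Python sketch; the hand below is a made-up example rather than data from the app:

```python
# Tiles are modeled as (a, b) pip pairs; this leftover hand is hypothetical.
opponent_hand = [(6, 4), (3, 3), (5, 0)]

# The player who goes out scores the pip sum of the opponent's remaining tiles.
round_score = sum(a + b for a, b in opponent_hand)
print(round_score)  # prints 21
```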
-
Tips and tricks to win
-
Here are some tips and tricks to help you win more games and money on Dominoes Gold:
-
-
Pay attention to the tiles that have been played and the tiles that are left in your hand. This will help you plan your moves and anticipate your opponent's moves.
-
Try to play tiles that have high numbers or doubles. This will reduce the points you might lose if your opponent ends the game.
-
Try to block your opponent from playing their tiles. This will force them to draw more tiles or pass their turn.
-
Try to play tiles that match both ends of the board. This will give you more options and flexibility.
-
Use the chat feature to communicate with your opponent. You can compliment them, taunt them, or bluff them. This can affect their mood and strategy.
-
-
How to withdraw your winnings from Dominoes Gold
-
If you have won some money on Dominoes Gold, you might want to withdraw it and enjoy your rewards. Here is how you can do that:
-
Payment methods and fees
-
The main payment method that Dominoes Gold supports is PayPal. You can also use other methods such as Skrill, Neteller, or bank transfer, depending on your country and availability. You can check the list of supported payment methods in the app settings.
To withdraw your winnings, you need to have a minimum balance of $10 in your account. You can request a withdrawal by tapping on the "Cash Out" button in the app. You will need to enter your payment details and confirm your request.
-
The withdrawal process usually takes 24 hours to complete. However, it may take longer depending on the payment method, bank, or verification status. You will receive an email confirmation when your withdrawal is completed.
-
Dominoes Gold does not charge any fees for withdrawals. However, you may incur some fees from your payment provider or bank. You should check their terms and conditions before requesting a withdrawal.
-
Verification and security
-
To ensure the safety and fairness of the app, Dominoes Gold may require you to verify your identity and age before you can withdraw your winnings. You may need to provide a copy of your ID card, passport, driver's license, or other documents that prove your identity and age.
-
You may also need to verify your payment method by providing a screenshot of your PayPal account, bank statement, or other documents that show your name and payment details.
-
Dominoes Gold uses SSL encryption and other security measures to protect your personal and financial information. You can rest assured that your data is safe and secure with Dominoes Gold.
-
Pros and cons of Dominoes Gold
-
Dominoes Gold is a fun and rewarding app that lets you play dominoes online with real players and win real money. However, like any app, it has its pros and cons. Here are some of them:
-
Pros
-
-
You can play dominoes for free or for real money.
-
You can win cash prizes by joining tournaments or playing head-to-head matches.
-
You can withdraw your winnings easily and securely using PayPal or other payment methods.
-
You can play with millions of players from different countries and skill levels.
-
You can chat with other players and make new friends.
-
You can customize your avatar, profile, and game settings.
-
You can enjoy different game modes, such as Draw, Block, All Fives, and more.
-
You can earn bonus coins by watching videos, completing offers, or inviting friends.
-
-
Cons
-
-
You may lose money if you play recklessly or unluckily.
-
You may encounter some technical issues or bugs in the app.
-
You may face some delays or errors in the withdrawal process.
-
You may have to verify your identity and age before you can withdraw your winnings.
-
You may have to deal with some rude or cheating players.
-
-
Conclusion
-
Dominoes Gold is a great app for dominoes lovers who want to play online with real players and win real money. It has many features, game modes, and payment options that make it fun and rewarding. However, it also has some drawbacks, such as technical issues, verification requirements, and potential losses. Therefore, you should play responsibly and carefully, and only use money that you can afford to lose. If you are looking for a new way to enjoy dominoes and earn some cash, you should give Dominoes Gold a try. You can download and install it from Uptodown and start playing today!
-
FAQs
-
Here are some frequently asked questions about Dominoes Gold:
-
-
Is Dominoes Gold legal?
-
Yes, Dominoes Gold is legal in most countries where online gaming is allowed. However, you should check the laws and regulations of your country before playing for real money.
-
Is Dominoes Gold safe?
-
Yes, Dominoes Gold is safe and secure. It uses SSL encryption and other security measures to protect your personal and financial information. It also has a fair and random gameplay system that ensures the integrity of the games.
-
How can I contact Dominoes Gold support?
-
You can contact Dominoes Gold support by emailing them at support@loopgames.net or by using the in-app chat feature. They will respond to your queries as soon as possible.
-
How can I get more coins on Dominoes Gold?
-
You can get more coins on Dominoes Gold by winning games, joining tournaments, watching videos, completing offers, or inviting friends. You can also buy coins with real money if you want to.
-
Can I play Dominoes Gold offline?
-
No, you cannot play Dominoes Gold offline. You need an internet connection to play online with real players and win real money.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 for PC Free - SteamUnlocked Edition.md b/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 for PC Free - SteamUnlocked Edition.md
deleted file mode 100644
index d2752c59025de97b71b5ed3cd6cbc289ba9a9773..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 for PC Free - SteamUnlocked Edition.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
NBA 2K20 PC Download Steamunlocked: How to Get the Best Basketball Game for Free
-
If you are a fan of basketball and video games, you might have heard of NBA 2K20, the latest installment in the popular NBA 2K series. This game offers an immersive and realistic basketball experience, with stunning graphics, gameplay, modes, and features. But what if you don't want to pay for it? Is there a way to get NBA 2K20 for free on your PC?
The answer is yes, thanks to a website called Steamunlocked. In this article, we will explain what Steamunlocked is, how it works, and how you can use it to download NBA 2K20 on your PC without spending a dime. We will also cover some of the pros and cons of using Steamunlocked, as well as some tips and tricks to make the most out of your gaming experience. Let's get started!
-
What is NBA 2K20?
-
NBA 2K20 is a basketball simulation game developed by Visual Concepts and published by 2K Sports. It was released on September 6, 2019, for PlayStation 4, Xbox One, Nintendo Switch, Microsoft Windows, iOS, and Android devices. It is the 21st game in the NBA 2K franchise and the successor to NBA 2K19.
-
Features and gameplay of NBA 2K20
-
NBA 2K20 boasts a variety of features and gameplay improvements that make it one of the best basketball games ever made. Some of these include:
-
-
A revamped motion engine that makes the players move more realistically and fluidly on the court.
-
A new sprint system that affects the stamina and speed of the players depending on how they use it.
-
A new dribbling system that gives different players different ball-handling abilities and styles.
-
An updated shooting system that takes into account the skill, timing, and context of each shot.
-
A new dynamic soundtrack that features songs from various genres and artists curated by UnitedMasters.
-
A new MyCareer mode that follows the story of a custom player as he rises from college to the NBA, featuring performances from actors like Idris Elba, Rosario Dawson, and LeBron James.
-
A new MyGM mode that lets you take control of an NBA franchise as a general manager, with more options and challenges than ever before.
-
A new MyTeam mode that lets you create your own fantasy team with cards from different eras and leagues.
-
A new WNBA mode that lets you play with all 12 WNBA teams and over 140 WNBA players for the first time in the series.
-
A new Neighborhood mode that lets you explore an open-world environment with other online players, where you can participate in various activities and events.
-
-
System requirements for NBA 2K20
-
To run NBA 2K20 on your PC, you will need a machine that meets the game's minimum system requirements, including roughly 75 GB of free disk space for the installation.
-
What is Steamunlocked?
-
Steamunlocked is a website that offers free downloads of PC games that are normally sold on Steam or other platforms. Steamunlocked claims to provide the full versions of the games, with no viruses, malware, or DRM protection. Steamunlocked also claims to update its library regularly with new releases and patches.
-
How does Steamunlocked work?
-
Steamunlocked works by hosting the files of the games on its own servers, which can be accessed by anyone who visits the website. The files are usually compressed in zip or rar format, which can be extracted using a software like WinRAR or 7-Zip. The extracted files contain the setup file and the crack file of the game, which can be used to install and run the game without needing Steam or any other launcher.
-
Is Steamunlocked safe and legal?
-
This is a tricky question to answer, as there are different opinions and perspectives on this matter. On one hand, Steamunlocked claims to be safe and legal, as it does not host any illegal content, but only provides links to the files that are already available on the internet. Steamunlocked also states that it does not encourage piracy, but only provides an alternative way for people who cannot afford or access the games legally.
-
On the other hand, Steamunlocked can be considered unsafe and illegal, as it violates the terms and conditions of the game developers and publishers, who have the rights to distribute and sell their products. Steamunlocked also exposes its users to potential risks of downloading corrupted, infected, or outdated files, which can harm their devices or compromise their personal information. Steamunlocked also faces legal actions from the game companies, who can sue them for copyright infringement or request them to remove their games from the website.
-
Pros and cons of Steamunlocked
-
As with any other website or service, Steamunlocked has its own advantages and disadvantages that you should be aware of before using it. Here are some of them:
-
-
Pro: You can download PC games for free without paying anything.
Con: You can get into legal trouble for downloading pirated games.
-
Pro: You can access a large library of games from different genres and categories.
Con: You can encounter broken links, missing files, or slow downloads.
-
Pro: You can play the games offline without needing an internet connection or a Steam account.
Con: You can miss out on the online features, updates, and support of the games.
-
Pro: You can try out the games before buying them legally if you like them.
Con: You can harm the game industry and discourage the developers from making more games.
-
-
How to download NBA 2K20 from Steamunlocked
-
If you have decided to download NBA 2K20 from Steamunlocked, you will need to follow these steps:
-
Step 1: Visit the Steamunlocked website
-
The first thing you need to do is to go to the official website of Steamunlocked, which is https://steamunlocked.net/. You will see a homepage with a search bar and a list of featured games. You can also browse the games by genre, popularity, or alphabetically.
-
Step 2: Search for NBA 2K20
-
The next thing you need to do is to find the game you are looking for, which is NBA 2K20. You can either type the name of the game in the search bar and hit enter, or scroll down the list of games until you see it. You will then be directed to the game page, where you will see some information about the game, such as the release date, genre, developer, publisher, size, and rating.
-
Step 3: Click on the download button
-
The next thing you need to do is to click on the download button, which is located below the game information. You will then see a pop-up window that asks you to verify that you are not a robot. You will need to complete a simple captcha test to prove that you are human. After that, you will see another pop-up window that shows you the download link. You will need to copy and paste the link into your browser's address bar and hit enter.
-
Step 4: Extract the zip file
-
The next thing you need to do is to extract the zip file that you have downloaded. The zip file contains the game files and the crack files that you will need to install and run the game. You will need a software like WinRAR or 7-Zip to extract the zip file. You can download these software for free from their official websites. To extract the zip file, you will need to right-click on it and select "Extract here" or "Extract to NBA 2K20/" depending on your preference. You will then see a folder with the same name as the zip file.
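-
If you prefer to script this step, a .zip archive can also be extracted with Python's standard library (a .rar archive still needs a tool like WinRAR or 7-Zip). The snippet below is only an illustrative sketch, and the archive name is a placeholder for whatever file you downloaded:
-
import zipfile

with zipfile.ZipFile("NBA_2K20.zip") as archive:
    archive.extractall("NBA 2K20")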
-
Step 5: Run the setup file and install the game
-
The next thing you need to do is to run the setup file and install the game. The setup file is usually named "setup.exe" or something similar. You will need to double-click on it and follow the instructions on the screen. You will be asked to choose a destination folder for the game, accept the terms and conditions, and select some options. You will also be asked to copy and paste the crack files into the game folder. The crack files are usually located in a folder named "CODEX", "SKIDROW", "PLAZA", or something similar. You will need to copy all the files in that folder and paste them into the game folder, replacing any existing files. This will allow you to bypass the Steam verification and play the game without any issues.
-
Step 6: Enjoy NBA 2K20 on your PC
-
The final thing you need to do is to enjoy NBA 2K20 on your PC. You can launch the game by double-clicking on the game icon, which is usually named "NBA2K20.exe" or something similar. You can then customize your settings, choose your mode, create your player, and start playing. You can also invite your friends and play online with them if you want.
-
Conclusion
-
NBA 2K20 is one of the best basketball games ever made, with amazing graphics, gameplay, modes, and features. However, if you don't want to pay for it, you can download it for free from Steamunlocked, a website that offers free downloads of PC games. In this article, we have explained what Steamunlocked is, how it works, and how you can use it to download NBA 2K20 on your PC without spending a dime. We have also covered some of the pros and cons of using Steamunlocked, as well as some tips and tricks to make the most out of your gaming experience.
-
We hope that this article has been helpful and informative for you. If you have any questions or comments, feel free to leave them below. Thank you for reading!
-
FAQs
-
-
Q: Is NBA 2K20 worth playing?
-
A: Yes, NBA 2K20 is worth playing if you are a fan of basketball and video games. It offers an immersive and realistic basketball experience, with stunning graphics, gameplay, modes, and features.
-
Q: How long does it take to download NBA 2K20 from Steamunlocked?
-
A: The download time depends on your internet speed and connection quality. The size of NBA 2K20 is about 75 GB, so it might take several hours or even days to download depending on your situation.
-
Q: Can I play NBA 2K20 online with Steamunlocked?
-
A: Yes, you can play NBA 2K20 online with Steamunlocked if you use a VPN service or a proxy server to hide your IP address and location. However, this might affect the performance and stability of the game.
-
Q: What are some alternatives to Steamunlocked?
-
A: Some alternatives to Steamunlocked are Ocean of Games, FitGirl Repacks, Skidrow Reloaded, and IGG Games. These are also websites that offer free downloads of PC games, but they might have different features, quality, and reliability.
-
Q: What are some tips and tricks to improve NBA 2K20 performance on PC?
-
A: Some tips and tricks to improve NBA 2K20 performance on PC are:
-
-
Update your drivers and software to the latest versions.
-
Adjust your graphics settings to match your system capabilities and preferences.
-
Close any unnecessary programs or background processes that might consume your CPU, RAM, or bandwidth.
-
Use a wired connection instead of a wireless one for better stability and speed.
-
Clean your PC from dust, dirt, or overheating issues that might affect your hardware performance.
-
401be4b1e0
-
-
\ No newline at end of file
diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py
deleted file mode 100644
index 2efc32350fb7fe72f244c1f9f7b4cff045fef3ff..0000000000000000000000000000000000000000
--- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/stochastic_karras_ve/pipeline_stochastic_karras_ve.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from typing import List, Optional, Tuple, Union
-
-import paddle
-
-from ...models import UNet2DModel
-from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from ...schedulers import KarrasVeScheduler
-
-
-class KarrasVePipeline(DiffusionPipeline):
- r"""
- Stochastic sampling from Karras et al. [1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and
- the VE column of Table 1 from [1] for reference.
-
- [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
- https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
- differential equations." https://arxiv.org/abs/2011.13456
-
- Parameters:
- unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
- scheduler ([`KarrasVeScheduler`]):
- Scheduler for the diffusion process to be used in combination with `unet` to denoise the encoded image.
- """
-
- # add type hints for linting
- unet: UNet2DModel
- scheduler: KarrasVeScheduler
-
- def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
- super().__init__()
- self.register_modules(unet=unet, scheduler=scheduler)
-
- @paddle.no_grad()
- def __call__(
- self,
- batch_size: int = 1,
- num_inference_steps: int = 50,
- generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- **kwargs,
- ) -> Union[Tuple, ImagePipelineOutput]:
- r"""
- Args:
- batch_size (`int`, *optional*, defaults to 1):
- The number of images to generate.
- generator (`paddle.Generator`, *optional*):
- One or a list of paddle generator(s) to make generation deterministic.
- num_inference_steps (`int`, *optional*, defaults to 50):
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
- expense of slower inference.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generate image. Choose between
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-
- Returns:
- [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
- `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
-            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
- """
-
- img_size = self.unet.config.sample_size
- shape = (batch_size, 3, img_size, img_size)
-
- model = self.unet
-
- # sample x_0 ~ N(0, sigma_0^2 * I)
- sample = paddle.randn(shape, generator=generator) * self.scheduler.init_noise_sigma
-
- self.scheduler.set_timesteps(num_inference_steps)
-
- for t in self.progress_bar(self.scheduler.timesteps):
- # here sigma_t == t_i from the paper
- sigma = self.scheduler.schedule[t]
- sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
-
- # 1. Select temporarily increased noise level sigma_hat
- # 2. Add new noise to move from sample_i to sample_hat
- sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
-
- # 3. Predict the noise residual given the noise magnitude `sigma_hat`
- # The model inputs and output are adjusted by following eq. (213) in [1].
- model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
-
- # 4. Evaluate dx/dt at sigma_hat
- # 5. Take Euler step from sigma to sigma_prev
- step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
-
- if sigma_prev != 0:
- # 6. Apply 2nd order correction
- # The model inputs and output are adjusted by following eq. (213) in [1].
- model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
- step_output = self.scheduler.step_correct(
- model_output,
- sigma_hat,
- sigma_prev,
- sample_hat,
- step_output.prev_sample,
- step_output["derivative"],
- )
- sample = step_output.prev_sample
-
- sample = (sample / 2 + 0.5).clip(0, 1)
- image = sample.transpose([0, 2, 3, 1]).numpy()
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
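-
-
-# Usage sketch (added for illustration, not part of the original pipeline file;
-# the checkpoint name is a placeholder for any UNet2DModel/KarrasVeScheduler pair):
-#
-#   from ppdiffusers import KarrasVePipeline
-#
-#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
-#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
-#   image.save("karras_ve_sample.png")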
diff --git a/spaces/2023Liu2023/bingo/src/pages/api/sydney.ts b/spaces/2023Liu2023/bingo/src/pages/api/sydney.ts
deleted file mode 100644
index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000
--- a/spaces/2023Liu2023/bingo/src/pages/api/sydney.ts
+++ /dev/null
@@ -1,62 +0,0 @@
-import { NextApiRequest, NextApiResponse } from 'next'
-import { WebSocket, debug } from '@/lib/isomorphic'
-import { BingWebBot } from '@/lib/bots/bing'
-import { websocketUtils } from '@/lib/bots/bing/utils'
-import { WatchDog, createHeaders } from '@/lib/utils'
-
-
-export default async function handler(req: NextApiRequest, res: NextApiResponse) {
- const conversationContext = req.body
- const headers = createHeaders(req.cookies)
- debug(headers)
- res.setHeader('Content-Type', 'text/stream; charset=UTF-8')
-
- const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', {
- headers: {
- ...headers,
- 'accept-language': 'zh-CN,zh;q=0.9',
- 'cache-control': 'no-cache',
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
- pragma: 'no-cache',
- }
- })
-
- const closeDog = new WatchDog()
- const timeoutDog = new WatchDog()
- ws.onmessage = (event) => {
- timeoutDog.watch(() => {
- ws.send(websocketUtils.packMessage({ type: 6 }))
- }, 1500)
- closeDog.watch(() => {
- ws.close()
- }, 10000)
- res.write(event.data)
- if (/\{"type":([367])\}/.test(String(event.data))) {
- const type = parseInt(RegExp.$1, 10)
- debug('connection type', type)
- if (type === 3) {
- ws.close()
- } else {
- ws.send(websocketUtils.packMessage({ type }))
- }
- }
- }
-
- ws.onclose = () => {
- timeoutDog.reset()
- closeDog.reset()
- debug('connection close')
- res.end()
- }
-
- await new Promise((resolve) => ws.onopen = resolve)
- ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 }))
- ws.send(websocketUtils.packMessage({ type: 6 }))
- ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!)))
- req.socket.once('close', () => {
- ws.close()
- if (!res.closed) {
- res.end()
- }
- })
-}
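-
-// Added note (not in the original handler): the numeric frames above appear to
-// follow the SignalR JSON hub protocol, in which {"type":6} is a ping and
-// {"type":3} a completion. That is why the handler echoes ping-style frames back
-// to keep the connection alive and closes the socket once a type-3 frame arrives.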
diff --git a/spaces/2ndelement/voicevox/make_docs.py b/spaces/2ndelement/voicevox/make_docs.py
deleted file mode 100644
index d10bd1aa40887783ba8cb90dabda031dce213be0..0000000000000000000000000000000000000000
--- a/spaces/2ndelement/voicevox/make_docs.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import json
-
-from voicevox_engine.dev.core import mock as core
-from voicevox_engine.dev.synthesis_engine.mock import MockSynthesisEngine
-from voicevox_engine.setting import USER_SETTING_PATH, SettingLoader
-
-if __name__ == "__main__":
- import run
-
- app = run.generate_app(
- synthesis_engines={"mock": MockSynthesisEngine(speakers=core.metas())},
- latest_core_version="mock",
- setting_loader=SettingLoader(USER_SETTING_PATH),
- )
- with open("docs/api/index.html", "w") as f:
- f.write(
-            """<html>
-<head>
-    <meta charset="utf-8">
-    <title>voicevox_engine API Document</title>
-</head>
-<body>
-    <div id="redoc-container"></div>
-    <script src="https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js"></script>
-    <script>
-        Redoc.init(%s, {}, document.getElementById("redoc-container"));
-    </script>
-</body>
-</html>
-"""
- % json.dumps(app.openapi())
- )
diff --git a/spaces/3B-Group/ConvRe-Leaderboard/src/utils.py b/spaces/3B-Group/ConvRe-Leaderboard/src/utils.py
deleted file mode 100644
index 27e9f5cab65c1a2f4a8f1909f28127b017089fbc..0000000000000000000000000000000000000000
--- a/spaces/3B-Group/ConvRe-Leaderboard/src/utils.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from dataclasses import dataclass
-import pandas as pd
-
-
-@dataclass
-class ColumnContent:
- name: str
- type: str
- displayed_by_default: bool
- hidden: bool = False
-
-
-def fields(raw_class):
- return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
-
-
-@dataclass(frozen=True)
-class AutoEvalColumn: # Auto evals column
- model = ColumnContent("Models", "markdown", True)
- re2text_easy = ColumnContent("Re2Text-Easy", "number", True)
- text2re_easy = ColumnContent("Text2Re-Easy", "number", True)
- re2text_hard = ColumnContent("Re2Text-Hard", "number", True)
- text2re_hard = ColumnContent("Text2Re-Hard", "number", True)
- avg = ColumnContent("Avg", "number", True)
- model_size = ColumnContent("Model Size", "markdown", True)
-
- link = ColumnContent("Links", "str", False)
-
-
-def model_hyperlink(link, model_name):
-    # render the model name as an HTML link so the leaderboard cell is clickable
-    return f'<a target="_blank" href="{link}">{model_name}</a>'
-
-
-def make_clickable_names(df):
- df["Models"] = df.apply(
- lambda row: model_hyperlink(row["Links"], row["Models"]), axis=1
- )
- return df
-
-
-def make_plot_data(df, task):
- c = []
- x = []
- y = []
-
- for i in df.index:
- c.append(df.loc[i, "pure_name"])
- x.append(f"{task}-Easy")
- y.append(df.loc[i, f"{task}-Easy"])
-
- c.append(df.loc[i, "pure_name"])
- x.append(f"{task}-Hard")
- y.append(df.loc[i, f"{task}-Hard"])
-
- data = pd.DataFrame(
- {
- "Symbol": c,
- "Setting": x,
- "Accuracy": y,
- }
- )
-
- return data
-
-
-
diff --git a/spaces/AIFILMS/generate_human_motion/VQ-Trans/models/smpl.py b/spaces/AIFILMS/generate_human_motion/VQ-Trans/models/smpl.py
deleted file mode 100644
index 587f5419601a74df92c1e37263b28d4aa6a7c0a9..0000000000000000000000000000000000000000
--- a/spaces/AIFILMS/generate_human_motion/VQ-Trans/models/smpl.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# This code is based on https://github.com/Mathux/ACTOR.git
-import numpy as np
-import torch
-
-import contextlib
-
-from smplx import SMPLLayer as _SMPLLayer
-from smplx.lbs import vertices2joints
-
-
-# action2motion_joints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 21, 24, 38]
-# change 0 and 8
-action2motion_joints = [8, 1, 2, 3, 4, 5, 6, 7, 0, 9, 10, 11, 12, 13, 14, 21, 24, 38]
-
-from utils.config import SMPL_MODEL_PATH, JOINT_REGRESSOR_TRAIN_EXTRA
-
-JOINTSTYPE_ROOT = {"a2m": 0, # action2motion
- "smpl": 0,
- "a2mpl": 0, # set(smpl, a2m)
- "vibe": 8} # 0 is the 8 position: OP MidHip below
-
-JOINT_MAP = {
- 'OP Nose': 24, 'OP Neck': 12, 'OP RShoulder': 17,
- 'OP RElbow': 19, 'OP RWrist': 21, 'OP LShoulder': 16,
- 'OP LElbow': 18, 'OP LWrist': 20, 'OP MidHip': 0,
- 'OP RHip': 2, 'OP RKnee': 5, 'OP RAnkle': 8,
- 'OP LHip': 1, 'OP LKnee': 4, 'OP LAnkle': 7,
- 'OP REye': 25, 'OP LEye': 26, 'OP REar': 27,
- 'OP LEar': 28, 'OP LBigToe': 29, 'OP LSmallToe': 30,
- 'OP LHeel': 31, 'OP RBigToe': 32, 'OP RSmallToe': 33, 'OP RHeel': 34,
- 'Right Ankle': 8, 'Right Knee': 5, 'Right Hip': 45,
- 'Left Hip': 46, 'Left Knee': 4, 'Left Ankle': 7,
- 'Right Wrist': 21, 'Right Elbow': 19, 'Right Shoulder': 17,
- 'Left Shoulder': 16, 'Left Elbow': 18, 'Left Wrist': 20,
- 'Neck (LSP)': 47, 'Top of Head (LSP)': 48,
- 'Pelvis (MPII)': 49, 'Thorax (MPII)': 50,
- 'Spine (H36M)': 51, 'Jaw (H36M)': 52,
- 'Head (H36M)': 53, 'Nose': 24, 'Left Eye': 26,
- 'Right Eye': 25, 'Left Ear': 28, 'Right Ear': 27
-}
-
-JOINT_NAMES = [
- 'OP Nose', 'OP Neck', 'OP RShoulder',
- 'OP RElbow', 'OP RWrist', 'OP LShoulder',
- 'OP LElbow', 'OP LWrist', 'OP MidHip',
- 'OP RHip', 'OP RKnee', 'OP RAnkle',
- 'OP LHip', 'OP LKnee', 'OP LAnkle',
- 'OP REye', 'OP LEye', 'OP REar',
- 'OP LEar', 'OP LBigToe', 'OP LSmallToe',
- 'OP LHeel', 'OP RBigToe', 'OP RSmallToe', 'OP RHeel',
- 'Right Ankle', 'Right Knee', 'Right Hip',
- 'Left Hip', 'Left Knee', 'Left Ankle',
- 'Right Wrist', 'Right Elbow', 'Right Shoulder',
- 'Left Shoulder', 'Left Elbow', 'Left Wrist',
- 'Neck (LSP)', 'Top of Head (LSP)',
- 'Pelvis (MPII)', 'Thorax (MPII)',
- 'Spine (H36M)', 'Jaw (H36M)',
- 'Head (H36M)', 'Nose', 'Left Eye',
- 'Right Eye', 'Left Ear', 'Right Ear'
-]
-
-
-# adapted from VIBE/SPIN to output smpl_joints, vibe joints and action2motion joints
-class SMPL(_SMPLLayer):
- """ Extension of the official SMPL implementation to support more joints """
-
- def __init__(self, model_path=SMPL_MODEL_PATH, **kwargs):
- kwargs["model_path"] = model_path
-
- # remove the verbosity for the 10-shapes beta parameters
- with contextlib.redirect_stdout(None):
- super(SMPL, self).__init__(**kwargs)
-
- J_regressor_extra = np.load(JOINT_REGRESSOR_TRAIN_EXTRA)
- self.register_buffer('J_regressor_extra', torch.tensor(J_regressor_extra, dtype=torch.float32))
- vibe_indexes = np.array([JOINT_MAP[i] for i in JOINT_NAMES])
- a2m_indexes = vibe_indexes[action2motion_joints]
- smpl_indexes = np.arange(24)
- a2mpl_indexes = np.unique(np.r_[smpl_indexes, a2m_indexes])
-
- self.maps = {"vibe": vibe_indexes,
- "a2m": a2m_indexes,
- "smpl": smpl_indexes,
- "a2mpl": a2mpl_indexes}
-
- def forward(self, *args, **kwargs):
- smpl_output = super(SMPL, self).forward(*args, **kwargs)
-
- extra_joints = vertices2joints(self.J_regressor_extra, smpl_output.vertices)
- all_joints = torch.cat([smpl_output.joints, extra_joints], dim=1)
-
- output = {"vertices": smpl_output.vertices}
-
-        for jointstype, indexes in self.maps.items():
-            output[jointstype] = all_joints[:, indexes]
-
- return output
\ No newline at end of file
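-
-# Usage sketch (added for illustration; assumes SMPL_MODEL_PATH points to a valid
-# SMPL model and uses the standard smplx SMPLLayer rotation-matrix inputs):
-#
-#   import torch
-#   smpl = SMPL()
-#   out = smpl(body_pose=torch.zeros(1, 23, 3, 3), global_orient=torch.zeros(1, 1, 3, 3))
-#   out["vertices"].shape  # (1, 6890, 3)
-#   out["smpl"].shape      # (1, 24, 3)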
diff --git a/spaces/AJRFan/dreambooth-training/convertosd.py b/spaces/AJRFan/dreambooth-training/convertosd.py
deleted file mode 100644
index b242edb1de11ad551b3c7ad98f5689fef2c3321a..0000000000000000000000000000000000000000
--- a/spaces/AJRFan/dreambooth-training/convertosd.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Script for converting a HF Diffusers saved pipeline to a Stable Diffusion checkpoint.
-# *Only* converts the UNet, VAE, and Text Encoder.
-# Does not convert optimizer state or any other thing.
-# Written by jachiam
-
-import argparse
-import os.path as osp
-
-import torch
-
-
-# =================#
-# UNet Conversion #
-# =================#
-
-unet_conversion_map = [
- # (stable-diffusion, HF Diffusers)
- ("time_embed.0.weight", "time_embedding.linear_1.weight"),
- ("time_embed.0.bias", "time_embedding.linear_1.bias"),
- ("time_embed.2.weight", "time_embedding.linear_2.weight"),
- ("time_embed.2.bias", "time_embedding.linear_2.bias"),
- ("input_blocks.0.0.weight", "conv_in.weight"),
- ("input_blocks.0.0.bias", "conv_in.bias"),
- ("out.0.weight", "conv_norm_out.weight"),
- ("out.0.bias", "conv_norm_out.bias"),
- ("out.2.weight", "conv_out.weight"),
- ("out.2.bias", "conv_out.bias"),
-]
-
-unet_conversion_map_resnet = [
- # (stable-diffusion, HF Diffusers)
- ("in_layers.0", "norm1"),
- ("in_layers.2", "conv1"),
- ("out_layers.0", "norm2"),
- ("out_layers.3", "conv2"),
- ("emb_layers.1", "time_emb_proj"),
- ("skip_connection", "conv_shortcut"),
-]
-
-unet_conversion_map_layer = []
-# hardcoded number of downblocks and resnets/attentions...
-# would need smarter logic for other networks.
-for i in range(4):
- # loop over downblocks/upblocks
-
- for j in range(2):
- # loop over resnets/attentions for downblocks
- hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
- sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
- unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
-
- if i < 3:
- # no attention layers in down_blocks.3
- hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
- sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
- unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
-
- for j in range(3):
- # loop over resnets/attentions for upblocks
- hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
- sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
- unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
-
- if i > 0:
- # no attention layers in up_blocks.0
- hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
- sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
- unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
-
- if i < 3:
- # no downsample in down_blocks.3
- hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
- sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
- unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
-
- # no upsample in up_blocks.3
- hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
- sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
- unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
-
-hf_mid_atn_prefix = "mid_block.attentions.0."
-sd_mid_atn_prefix = "middle_block.1."
-unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
-
-for j in range(2):
- hf_mid_res_prefix = f"mid_block.resnets.{j}."
- sd_mid_res_prefix = f"middle_block.{2*j}."
- unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
-
-
-def convert_unet_state_dict(unet_state_dict):
- # buyer beware: this is a *brittle* function,
- # and correct output requires that all of these pieces interact in
- # the exact order in which I have arranged them.
- mapping = {k: k for k in unet_state_dict.keys()}
- for sd_name, hf_name in unet_conversion_map:
- mapping[hf_name] = sd_name
- for k, v in mapping.items():
- if "resnets" in k:
- for sd_part, hf_part in unet_conversion_map_resnet:
- v = v.replace(hf_part, sd_part)
- mapping[k] = v
- for k, v in mapping.items():
- for sd_part, hf_part in unet_conversion_map_layer:
- v = v.replace(hf_part, sd_part)
- mapping[k] = v
- new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
- return new_state_dict
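-
-
-# Worked example (added for illustration): how one HF Diffusers key is renamed by
-# the maps above.
-#   "down_blocks.0.resnets.1.norm1.weight"
-#     layer map:  "down_blocks.0.resnets.1." -> "input_blocks.2.0."  (3*0 + 1 + 1 = 2)
-#     resnet map: "norm1"                    -> "in_layers.0"
-#   => "input_blocks.2.0.in_layers.0.weight"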
-
-
-# ================#
-# VAE Conversion #
-# ================#
-
-vae_conversion_map = [
- # (stable-diffusion, HF Diffusers)
- ("nin_shortcut", "conv_shortcut"),
- ("norm_out", "conv_norm_out"),
- ("mid.attn_1.", "mid_block.attentions.0."),
-]
-
-for i in range(4):
- # down_blocks have two resnets
- for j in range(2):
- hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
- sd_down_prefix = f"encoder.down.{i}.block.{j}."
- vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
-
- if i < 3:
- hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
- sd_downsample_prefix = f"down.{i}.downsample."
- vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
-
- hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
- sd_upsample_prefix = f"up.{3-i}.upsample."
- vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
-
- # up_blocks have three resnets
- # also, up blocks in hf are numbered in reverse from sd
- for j in range(3):
- hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
- sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
- vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
-
-# this part accounts for mid blocks in both the encoder and the decoder
-for i in range(2):
- hf_mid_res_prefix = f"mid_block.resnets.{i}."
- sd_mid_res_prefix = f"mid.block_{i+1}."
- vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
-
-
-vae_conversion_map_attn = [
- # (stable-diffusion, HF Diffusers)
- ("norm.", "group_norm."),
- ("q.", "query."),
- ("k.", "key."),
- ("v.", "value."),
- ("proj_out.", "proj_attn."),
-]
-
-
-def reshape_weight_for_sd(w):
- # convert HF linear weights to SD conv2d weights
- return w.reshape(*w.shape, 1, 1)
-
-
-def convert_vae_state_dict(vae_state_dict):
- mapping = {k: k for k in vae_state_dict.keys()}
- for k, v in mapping.items():
- for sd_part, hf_part in vae_conversion_map:
- v = v.replace(hf_part, sd_part)
- mapping[k] = v
- for k, v in mapping.items():
- if "attentions" in k:
- for sd_part, hf_part in vae_conversion_map_attn:
- v = v.replace(hf_part, sd_part)
- mapping[k] = v
- new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
- weights_to_convert = ["q", "k", "v", "proj_out"]
-    print("\033[1;32mConverting to CKPT ...\033[0m")
- for k, v in new_state_dict.items():
- for weight_name in weights_to_convert:
- if f"mid.attn_1.{weight_name}.weight" in k:
- new_state_dict[k] = reshape_weight_for_sd(v)
- return new_state_dict
-
-
-# =========================#
-# Text Encoder Conversion #
-# =========================#
-# pretty much a no-op
-
-
-def convert_text_enc_state_dict(text_enc_dict):
- return text_enc_dict
-
-
-def convert(model_path, checkpoint_path):
- unet_path = osp.join(model_path, "unet", "diffusion_pytorch_model.bin")
- vae_path = osp.join(model_path, "vae", "diffusion_pytorch_model.bin")
- text_enc_path = osp.join(model_path, "text_encoder", "pytorch_model.bin")
-
- # Convert the UNet model
- unet_state_dict = torch.load(unet_path, map_location='cpu')
- unet_state_dict = convert_unet_state_dict(unet_state_dict)
- unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
-
- # Convert the VAE model
- vae_state_dict = torch.load(vae_path, map_location='cpu')
- vae_state_dict = convert_vae_state_dict(vae_state_dict)
- vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
-
- # Convert the text encoder model
- text_enc_dict = torch.load(text_enc_path, map_location='cpu')
- text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
- text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
-
- # Put together new checkpoint
- state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
-
- state_dict = {k:v.half() for k,v in state_dict.items()}
- state_dict = {"state_dict": state_dict}
- torch.save(state_dict, checkpoint_path)
diff --git a/spaces/Ababababababbababa/Arabic_poetry_Sha3bor_mid/app.py b/spaces/Ababababababbababa/Arabic_poetry_Sha3bor_mid/app.py
deleted file mode 100644
index c67e7f2dcc7d78e889486cf8fb6e23615a97929d..0000000000000000000000000000000000000000
--- a/spaces/Ababababababbababa/Arabic_poetry_Sha3bor_mid/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/aliosm/sha3bor-generator-aragpt2-medium").launch()
\ No newline at end of file
diff --git a/spaces/Abhilashvj/planogram-compliance/utils/segment/metrics.py b/spaces/Abhilashvj/planogram-compliance/utils/segment/metrics.py
deleted file mode 100644
index 35385c2eab9eadef8a070777c1e9e6005417bdcd..0000000000000000000000000000000000000000
--- a/spaces/Abhilashvj/planogram-compliance/utils/segment/metrics.py
+++ /dev/null
@@ -1,220 +0,0 @@
-# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
-"""
-Model validation metrics
-"""
-
-import numpy as np
-
-from ..metrics import ap_per_class
-
-
-def fitness(x):
- # Model fitness as a weighted combination of metrics
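-    # columns of x: [P, R, mAP@0.5, mAP@0.5:0.95] for boxes, then the same four
-    # for masks, so fitness is dominated by mAP@0.5:0.95 (weight 0.9) with a
-    # small contribution from mAP@0.5 (weight 0.1) for each task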
- w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9]
- return (x[:, :8] * w).sum(1)
-
-
-def ap_per_class_box_and_mask(
- tp_m,
- tp_b,
- conf,
- pred_cls,
- target_cls,
- plot=False,
- save_dir=".",
- names=(),
-):
- """
- Args:
- tp_b: tp of boxes.
- tp_m: tp of masks.
- other arguments see `func: ap_per_class`.
- """
- results_boxes = ap_per_class(
- tp_b,
- conf,
- pred_cls,
- target_cls,
- plot=plot,
- save_dir=save_dir,
- names=names,
- prefix="Box",
- )[2:]
- results_masks = ap_per_class(
- tp_m,
- conf,
- pred_cls,
- target_cls,
- plot=plot,
- save_dir=save_dir,
- names=names,
- prefix="Mask",
- )[2:]
-
- results = {
- "boxes": {
- "p": results_boxes[0],
- "r": results_boxes[1],
- "ap": results_boxes[3],
- "f1": results_boxes[2],
- "ap_class": results_boxes[4],
- },
- "masks": {
- "p": results_masks[0],
- "r": results_masks[1],
- "ap": results_masks[3],
- "f1": results_masks[2],
- "ap_class": results_masks[4],
- },
- }
- return results
-
-
-class Metric:
- def __init__(self) -> None:
- self.p = [] # (nc, )
- self.r = [] # (nc, )
- self.f1 = [] # (nc, )
- self.all_ap = [] # (nc, 10)
- self.ap_class_index = [] # (nc, )
-
- @property
- def ap50(self):
- """AP@0.5 of all classes.
- Return:
- (nc, ) or [].
- """
- return self.all_ap[:, 0] if len(self.all_ap) else []
-
- @property
- def ap(self):
- """AP@0.5:0.95
- Return:
- (nc, ) or [].
- """
- return self.all_ap.mean(1) if len(self.all_ap) else []
-
- @property
- def mp(self):
- """mean precision of all classes.
- Return:
- float.
- """
- return self.p.mean() if len(self.p) else 0.0
-
- @property
- def mr(self):
- """mean recall of all classes.
- Return:
- float.
- """
- return self.r.mean() if len(self.r) else 0.0
-
- @property
- def map50(self):
- """Mean AP@0.5 of all classes.
- Return:
- float.
- """
- return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0
-
- @property
- def map(self):
- """Mean AP@0.5:0.95 of all classes.
- Return:
- float.
- """
- return self.all_ap.mean() if len(self.all_ap) else 0.0
-
- def mean_results(self):
- """Mean of results, return mp, mr, map50, map"""
- return (self.mp, self.mr, self.map50, self.map)
-
- def class_result(self, i):
- """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
- return (self.p[i], self.r[i], self.ap50[i], self.ap[i])
-
- def get_maps(self, nc):
- maps = np.zeros(nc) + self.map
- for i, c in enumerate(self.ap_class_index):
- maps[c] = self.ap[i]
- return maps
-
- def update(self, results):
- """
- Args:
- results: tuple(p, r, ap, f1, ap_class)
- """
- p, r, all_ap, f1, ap_class_index = results
- self.p = p
- self.r = r
- self.all_ap = all_ap
- self.f1 = f1
- self.ap_class_index = ap_class_index
-
-
-class Metrics:
- """Metric for boxes and masks."""
-
- def __init__(self) -> None:
- self.metric_box = Metric()
- self.metric_mask = Metric()
-
- def update(self, results):
- """
- Args:
- results: Dict{'boxes': Dict{}, 'masks': Dict{}}
- """
- self.metric_box.update(list(results["boxes"].values()))
- self.metric_mask.update(list(results["masks"].values()))
-
- def mean_results(self):
- return self.metric_box.mean_results() + self.metric_mask.mean_results()
-
- def class_result(self, i):
- return self.metric_box.class_result(i) + self.metric_mask.class_result(
- i
- )
-
- def get_maps(self, nc):
- return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
-
- @property
- def ap_class_index(self):
- # boxes and masks have the same ap_class_index
- return self.metric_box.ap_class_index
-
-
-KEYS = [
- "train/box_loss",
- "train/seg_loss", # train loss
- "train/obj_loss",
- "train/cls_loss",
- "metrics/precision(B)",
- "metrics/recall(B)",
- "metrics/mAP_0.5(B)",
- "metrics/mAP_0.5:0.95(B)", # metrics
- "metrics/precision(M)",
- "metrics/recall(M)",
- "metrics/mAP_0.5(M)",
- "metrics/mAP_0.5:0.95(M)", # metrics
- "val/box_loss",
- "val/seg_loss", # val loss
- "val/obj_loss",
- "val/cls_loss",
- "x/lr0",
- "x/lr1",
- "x/lr2",
-]
-
-BEST_KEYS = [
- "best/epoch",
- "best/precision(B)",
- "best/recall(B)",
- "best/mAP_0.5(B)",
- "best/mAP_0.5:0.95(B)",
- "best/precision(M)",
- "best/recall(M)",
- "best/mAP_0.5(M)",
- "best/mAP_0.5:0.95(M)",
-]
diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Wewordle.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Wewordle.py
deleted file mode 100644
index a7bdc722795274270750f2609121c79a311df92e..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Wewordle.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from __future__ import annotations
-
-import random, string, time
-from aiohttp import ClientSession
-
-from .base_provider import AsyncProvider
-
-
-class Wewordle(AsyncProvider):
- url = "https://wewordle.org"
- working = True
- supports_gpt_35_turbo = True
-
- @classmethod
- async def create_async(
- cls,
- model: str,
- messages: list[dict[str, str]],
- proxy: str = None,
- **kwargs
- ) -> str:
-
- headers = {
- "accept" : "*/*",
- "pragma" : "no-cache",
- "Content-Type" : "application/json",
- "Connection" : "keep-alive"
- }
-
- _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))
- _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))
- _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
- data = {
- "user" : _user_id,
- "messages" : messages,
- "subscriber": {
- "originalPurchaseDate" : None,
- "originalApplicationVersion" : None,
- "allPurchaseDatesMillis" : {},
- "entitlements" : {"active": {}, "all": {}},
- "allPurchaseDates" : {},
- "allExpirationDatesMillis" : {},
- "allExpirationDates" : {},
- "originalAppUserId" : f"$RCAnonymousID:{_app_id}",
- "latestExpirationDate" : None,
- "requestDate" : _request_date,
- "latestExpirationDateMillis" : None,
- "nonSubscriptionTransactions" : [],
- "originalPurchaseDateMillis" : None,
- "managementURL" : None,
- "allPurchasedProductIdentifiers": [],
- "firstSeen" : _request_date,
- "activeSubscriptions" : [],
- }
- }
-
-
- async with ClientSession(
- headers=headers
- ) as session:
- async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response:
- response.raise_for_status()
- content = (await response.json())["message"]["content"]
- if content:
- return content
\ No newline at end of file
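-
-# Usage sketch (added for illustration, not part of the original provider):
-#
-#   import asyncio
-#
-#   messages = [{"role": "user", "content": "Say hello"}]
-#   print(asyncio.run(Wewordle.create_async("gpt-3.5-turbo", messages)))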
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateHolyGrail.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateHolyGrail.js
deleted file mode 100644
index cb612ced36ffcf8742f3238d0a2ae0174dc16228..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateHolyGrail.js
+++ /dev/null
@@ -1,21 +0,0 @@
-import MergeStyle from './utils/MergeStyle.js';
-import HolyGrail from '../../holygrail/HolyGrail.js';
-import CreateChild from './utils/CreateChild.js';
-
-var CreateHolyGrail = function (scene, data, view, styles, customBuilders) {
- data = MergeStyle(data, styles);
-
- // Replace data by child game object
- CreateChild(scene, data, 'background', view, styles, customBuilders);
- CreateChild(scene, data, 'content', view, styles, customBuilders);
- CreateChild(scene, data, 'leftSide', view, styles, customBuilders);
- CreateChild(scene, data, 'rightSide', view, styles, customBuilders);
- CreateChild(scene, data, 'header', view, styles, customBuilders);
- CreateChild(scene, data, 'footer', view, styles, customBuilders);
-
- var gameObject = new HolyGrail(scene, data);
- scene.add.existing(gameObject);
- return gameObject;
-};
-
-export default CreateHolyGrail;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/OverlapSizer.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/OverlapSizer.d.ts
deleted file mode 100644
index a191a2c9b765502566542b3e43a5d34e2ae2fe0a..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/overlapsizer/OverlapSizer.d.ts
+++ /dev/null
@@ -1,109 +0,0 @@
-// import * as Phaser from 'phaser';
-import BaseSizer from '../basesizer/BaseSizer.js';
-
-export default OverlapSizer;
-
-declare namespace OverlapSizer {
- type AlignTypes = number | 'center' | 'left' | 'right' | 'top' | 'bottom' |
- 'left-top' | 'left-center' | 'left-bottom' |
- 'center-top' | 'center-center' | 'center-bottom' |
- 'right-top' | 'right-center' | 'right-bottom';
-
- type PaddingTypes = number |
- {
- left?: number,
- right?: number,
- top?: number,
- bottom?: number
- };
-
- interface IConfig extends BaseSizer.IConfig {
- x?: number,
- y?: number,
- width?: number,
- height?: number,
- }
-}
-
-declare class OverlapSizer extends BaseSizer {
- sizerChildren: { [name: string]: Phaser.GameObjects.GameObject };
-
- constructor(
- scene: Phaser.Scene,
- config?: OverlapSizer.IConfig
- );
-
- constructor(
- scene: Phaser.Scene,
- x: number, y: number,
- config?: OverlapSizer.IConfig
- );
-
- constructor(
- scene: Phaser.Scene,
- x: number, y: number,
- width: number, height: number,
- config?: OverlapSizer.IConfig
- );
-
- add(
- gameObject: Phaser.GameObjects.GameObject,
- config?: {
- key?: string,
-
- align?: OverlapSizer.AlignTypes,
- offsetX?: number,
- offsetY?: number,
-
- padding?: OverlapSizer.PaddingTypes,
-
- expand?: boolean |
- {
- width?: boolean,
- height?: boolean,
- },
-
- minWidth?: number,
-
- minHeight?: number,
- }
- ): this;
-
- add(
- gameObject: Phaser.GameObjects.GameObject,
- key?: string,
- align?: OverlapSizer.AlignTypes,
- padding?: OverlapSizer.PaddingTypes,
- expand?: boolean |
- {
- width?: boolean,
- height?: boolean,
- },
- minWidth?: number,
- minHeight?: number,
- offsetX?: number,
- offsetY?: number,
- ): this;
-
- remove(
- gameObject: Phaser.GameObjects.GameObject,
- destroyChild?: boolean
- ): this;
-
- remove(
- key: string,
- destroyChild?: boolean
- ): this;
-
- removeAll(
- destroyChild?: boolean
- ): this;
-
- clear(
- destroyChild?: boolean
- ): this;
-
- childToKey(
- gameObject: Phaser.GameObjects.GameObject
- ): string;
-}
\ No newline at end of file
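-
-// Usage sketch (added for illustration; assumes the runtime OverlapSizer class
-// that this declaration file describes, with layout() inherited from BaseSizer):
-//
-//   const sizer = new OverlapSizer(scene, 400, 300, 200, 200, {});
-//   sizer.add(scene.add.image(0, 0, 'icon'), {
-//       key: 'icon',
-//       align: 'center',
-//       expand: false
-//   });
-//   sizer.layout();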
diff --git a/spaces/Alican/pixera/util/get_data.py b/spaces/Alican/pixera/util/get_data.py
deleted file mode 100644
index 97edc3ce3c3ab6d6080dca34e73a5fb77bb715fb..0000000000000000000000000000000000000000
--- a/spaces/Alican/pixera/util/get_data.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from __future__ import print_function
-import os
-import tarfile
-import requests
-from warnings import warn
-from zipfile import ZipFile
-from bs4 import BeautifulSoup
-from os.path import abspath, isdir, join, basename
-
-
-class GetData(object):
- """A Python script for downloading CycleGAN or pix2pix datasets.
-
- Parameters:
- technique (str) -- One of: 'cyclegan' or 'pix2pix'.
- verbose (bool) -- If True, print additional information.
-
- Examples:
- >>> from util.get_data import GetData
- >>> gd = GetData(technique='cyclegan')
- >>> new_data_path = gd.get(save_path='./datasets') # options will be displayed.
-
-    Alternatively, you can use the bash scripts: 'scripts/download_pix2pix_model.sh'
- and 'scripts/download_cyclegan_model.sh'.
- """
-
- def __init__(self, technique='cyclegan', verbose=True):
- url_dict = {
- 'pix2pix': 'http://efrosgans.eecs.berkeley.edu/pix2pix/datasets/',
- 'cyclegan': 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets'
- }
- self.url = url_dict.get(technique.lower())
- self._verbose = verbose
-
- def _print(self, text):
- if self._verbose:
- print(text)
-
- @staticmethod
- def _get_options(r):
- soup = BeautifulSoup(r.text, 'lxml')
- options = [h.text for h in soup.find_all('a', href=True)
- if h.text.endswith(('.zip', 'tar.gz'))]
- return options
-
- def _present_options(self):
- r = requests.get(self.url)
- options = self._get_options(r)
- print('Options:\n')
- for i, o in enumerate(options):
- print("{0}: {1}".format(i, o))
- choice = input("\nPlease enter the number of the "
- "dataset above you wish to download:")
- return options[int(choice)]
-
- def _download_data(self, dataset_url, save_path):
- if not isdir(save_path):
- os.makedirs(save_path)
-
- base = basename(dataset_url)
- temp_save_path = join(save_path, base)
-
- with open(temp_save_path, "wb") as f:
- r = requests.get(dataset_url)
- f.write(r.content)
-
- if base.endswith('.tar.gz'):
- obj = tarfile.open(temp_save_path)
- elif base.endswith('.zip'):
- obj = ZipFile(temp_save_path, 'r')
- else:
- raise ValueError("Unknown File Type: {0}.".format(base))
-
- self._print("Unpacking Data...")
- obj.extractall(save_path)
- obj.close()
- os.remove(temp_save_path)
-
- def get(self, save_path, dataset=None):
- """
-
- Download a dataset.
-
- Parameters:
- save_path (str) -- A directory to save the data to.
- dataset (str) -- (optional). A specific dataset to download.
- Note: this must include the file extension.
- If None, options will be presented for you
- to choose from.
-
- Returns:
- save_path_full (str) -- the absolute path to the downloaded data.
-
- """
- if dataset is None:
- selected_dataset = self._present_options()
- else:
- selected_dataset = dataset
-
- save_path_full = join(save_path, selected_dataset.split('.')[0])
-
- if isdir(save_path_full):
- warn("\n'{0}' already exists. Voiding Download.".format(
- save_path_full))
- else:
- self._print('Downloading Data...')
- url = "{0}/{1}".format(self.url, selected_dataset)
- self._download_data(url, save_path=save_path)
-
- return abspath(save_path_full)
diff --git a/spaces/Alpaca233/SadTalker/src/face3d/util/skin_mask.py b/spaces/Alpaca233/SadTalker/src/face3d/util/skin_mask.py
deleted file mode 100644
index a8a74e4c3b40d13b0258b83a12f56321a85bb179..0000000000000000000000000000000000000000
--- a/spaces/Alpaca233/SadTalker/src/face3d/util/skin_mask.py
+++ /dev/null
@@ -1,125 +0,0 @@
-"""This script is to generate skin attention mask for Deep3DFaceRecon_pytorch
-"""
-
-import math
-import numpy as np
-import os
-import cv2
-
-class GMM:
- def __init__(self, dim, num, w, mu, cov, cov_det, cov_inv):
- self.dim = dim # feature dimension
- self.num = num # number of Gaussian components
- self.w = w # weights of Gaussian components (a list of scalars)
- self.mu= mu # mean of Gaussian components (a list of 1xdim vectors)
- self.cov = cov # covariance matrix of Gaussian components (a list of dimxdim matrices)
- self.cov_det = cov_det # pre-computed determinet of covariance matrices (a list of scalars)
- self.cov_inv = cov_inv # pre-computed inverse covariance matrices (a list of dimxdim matrices)
-
- self.factor = [0]*num
- for i in range(self.num):
- self.factor[i] = (2*math.pi)**(self.dim/2) * self.cov_det[i]**0.5
-
- def likelihood(self, data):
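-        # Added note: this evaluates the mixture density
-        #   lh(x) = sum_i w[i] * exp(-0.5 * (x - mu[i])^T cov_inv[i] (x - mu[i])) / factor[i]
-        # where factor[i] = (2*pi)**(dim/2) * cov_det[i]**0.5 is pre-computed in __init__.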
- assert(data.shape[1] == self.dim)
- N = data.shape[0]
- lh = np.zeros(N)
-
- for i in range(self.num):
- data_ = data - self.mu[i]
-
- tmp = np.matmul(data_,self.cov_inv[i]) * data_
- tmp = np.sum(tmp,axis=1)
- power = -0.5 * tmp
-
- p = np.array([math.exp(power[j]) for j in range(N)])
- p = p/self.factor[i]
- lh += p*self.w[i]
-
- return lh
-
-
-def _rgb2ycbcr(rgb):
- m = np.array([[65.481, 128.553, 24.966],
- [-37.797, -74.203, 112],
- [112, -93.786, -18.214]])
- shape = rgb.shape
- rgb = rgb.reshape((shape[0] * shape[1], 3))
- ycbcr = np.dot(rgb, m.transpose() / 255.)
- ycbcr[:, 0] += 16.
- ycbcr[:, 1:] += 128.
- return ycbcr.reshape(shape)
-
-
-def _bgr2ycbcr(bgr):
- rgb = bgr[..., ::-1]
- return _rgb2ycbcr(rgb)
-
-
-gmm_skin_w = [0.24063933, 0.16365987, 0.26034665, 0.33535415]
-gmm_skin_mu = [np.array([113.71862, 103.39613, 164.08226]),
- np.array([150.19858, 105.18467, 155.51428]),
- np.array([183.92976, 107.62468, 152.71820]),
- np.array([114.90524, 113.59782, 151.38217])]
-gmm_skin_cov_det = [5692842.5, 5851930.5, 2329131., 1585971.]
-gmm_skin_cov_inv = [np.array([[0.0019472069, 0.0020450759, -0.00060243998],[0.0020450759, 0.017700525, 0.0051420014],[-0.00060243998, 0.0051420014, 0.0081308950]]),
- np.array([[0.0027110141, 0.0011036990, 0.0023122299],[0.0011036990, 0.010707724, 0.010742856],[0.0023122299, 0.010742856, 0.017481629]]),
- np.array([[0.0048026871, 0.00022935172, 0.0077668377],[0.00022935172, 0.011729696, 0.0081661865],[0.0077668377, 0.0081661865, 0.025374353]]),
- np.array([[0.0011989699, 0.0022453172, -0.0010748957],[0.0022453172, 0.047758564, 0.020332102],[-0.0010748957, 0.020332102, 0.024502251]])]
-
-gmm_skin = GMM(3, 4, gmm_skin_w, gmm_skin_mu, [], gmm_skin_cov_det, gmm_skin_cov_inv)
-
-gmm_nonskin_w = [0.12791070, 0.31130761, 0.34245777, 0.21832393]
-gmm_nonskin_mu = [np.array([99.200851, 112.07533, 140.20602]),
- np.array([110.91392, 125.52969, 130.19237]),
- np.array([129.75864, 129.96107, 126.96808]),
- np.array([112.29587, 128.85121, 129.05431])]
-gmm_nonskin_cov_det = [458703648., 6466488., 90611376., 133097.63]
-gmm_nonskin_cov_inv = [np.array([[0.00085371657, 0.00071197288, 0.00023958916],[0.00071197288, 0.0025935620, 0.00076557708],[0.00023958916, 0.00076557708, 0.0015042332]]),
- np.array([[0.00024650150, 0.00045542428, 0.00015019422],[0.00045542428, 0.026412144, 0.018419769],[0.00015019422, 0.018419769, 0.037497383]]),
- np.array([[0.00037054974, 0.00038146760, 0.00040408765],[0.00038146760, 0.0085505722, 0.0079136286],[0.00040408765, 0.0079136286, 0.010982352]]),
- np.array([[0.00013709733, 0.00051228428, 0.00012777430],[0.00051228428, 0.28237113, 0.10528370],[0.00012777430, 0.10528370, 0.23468947]])]
-
-gmm_nonskin = GMM(3, 4, gmm_nonskin_w, gmm_nonskin_mu, [], gmm_nonskin_cov_det, gmm_nonskin_cov_inv)
-
-prior_skin = 0.8
-prior_nonskin = 1 - prior_skin
-
-
-# calculate skin attention mask
-def skinmask(imbgr):
- im = _bgr2ycbcr(imbgr)
-
- data = im.reshape((-1,3))
-
- lh_skin = gmm_skin.likelihood(data)
- lh_nonskin = gmm_nonskin.likelihood(data)
-
- tmp1 = prior_skin * lh_skin
- tmp2 = prior_nonskin * lh_nonskin
- post_skin = tmp1 / (tmp1+tmp2) # posterior probability
-
- post_skin = post_skin.reshape((im.shape[0],im.shape[1]))
-
- post_skin = np.round(post_skin*255)
- post_skin = post_skin.astype(np.uint8)
- post_skin = np.tile(np.expand_dims(post_skin,2),[1,1,3]) # reshape to H*W*3
-
- return post_skin
-
-
-def get_skin_mask(img_path):
- print('generating skin masks......')
- names = [i for i in sorted(os.listdir(
- img_path)) if 'jpg' in i or 'png' in i or 'jpeg' in i or 'PNG' in i]
- save_path = os.path.join(img_path, 'mask')
- if not os.path.isdir(save_path):
- os.makedirs(save_path)
-
- for i in range(0, len(names)):
- name = names[i]
- print('%05d' % (i), ' ', name)
- full_image_name = os.path.join(img_path, name)
- img = cv2.imread(full_image_name).astype(np.float32)
- skin_img = skinmask(img)
- cv2.imwrite(os.path.join(save_path, name), skin_img.astype(np.uint8))
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/optimization/xformers.md b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/optimization/xformers.md
deleted file mode 100644
index a8b9408fbe50b07e9cb1e566a0678e2e8ca52ea2..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/optimization/xformers.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-# Installing xFormers
-
-We recommend using [xFormers](https://github.com/facebookresearch/xformers) for both inference and training.
-In our own tests, the optimizations performed in the attention blocks give both faster speeds and lower memory consumption.
-
-Starting with xFormers version '0.0.16', released in January 2023, it can be installed easily from a pre-built pip wheel:
-
-```bash
-pip install xformers
-```
-
-
-
-The xFormers pip package requires a recent version of PyTorch (1.13.1 for xFormers 0.0.16). If you need to use an older version of PyTorch, we recommend installing xFormers from source, following the [project instructions](https://github.com/facebookresearch/xformers#installing-xformers).
-
-
-
-Once xFormers is installed, you can use 'enable_xformers_memory_efficient_attention()' to speed up inference and reduce memory consumption, as described [here](fp16#memory-efficient-attention).
-
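-A minimal sketch of that call (illustrative only; the checkpoint name is a placeholder for any diffusers pipeline):
-
-```python
-import torch
-from diffusers import DiffusionPipeline
-
-pipe = DiffusionPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
-).to("cuda")
-pipe.enable_xformers_memory_efficient_attention()
-```
-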
-
-
-According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training (fine-tuning or Dreambooth) on some GPUs. If you run into that problem, install the development version as described in the linked comment.
-
-
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_check_copies.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_check_copies.py
deleted file mode 100644
index bd0a22da2c3af2bed6f3029e84face108e3cbda3..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/others/test_check_copies.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright 2023 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import re
-import shutil
-import sys
-import tempfile
-import unittest
-
-import black
-
-
-git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
-sys.path.append(os.path.join(git_repo_path, "utils"))
-
-import check_copies # noqa: E402
-
-
-# This is the reference code that will be used in the tests.
-# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
-REFERENCE_CODE = """ \"""
- Output class for the scheduler's step function output.
-
- Args:
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
- denoising loop.
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
- `pred_original_sample` can be used to preview progress or for guidance.
- \"""
-
- prev_sample: torch.FloatTensor
- pred_original_sample: Optional[torch.FloatTensor] = None
-"""
-
-
-class CopyCheckTester(unittest.TestCase):
- def setUp(self):
- self.diffusers_dir = tempfile.mkdtemp()
- os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
- check_copies.DIFFUSERS_PATH = self.diffusers_dir
- shutil.copy(
- os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
- os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
- )
-
- def tearDown(self):
- check_copies.DIFFUSERS_PATH = "src/diffusers"
- shutil.rmtree(self.diffusers_dir)
-
- def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
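- # Writes `class_code` under `comment` to a temp file; without an expected
- # `overwrite_result` we assert the checker reports no inconsistency, otherwise
- # we let it overwrite the file and compare against the expected result.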
- code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
- if overwrite_result is not None:
- expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
- mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
- code = black.format_str(code, mode=mode)
- fname = os.path.join(self.diffusers_dir, "new_code.py")
- with open(fname, "w", newline="\n") as f:
- f.write(code)
- if overwrite_result is None:
- self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
- else:
- check_copies.is_copy_consistent(fname, overwrite=True)
- with open(fname, "r") as f:
- self.assertEqual(f.read(), expected)
-
- def test_find_code_in_diffusers(self):
- code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
- self.assertEqual(code, REFERENCE_CODE)
-
- def test_is_copy_consistent(self):
- # Base copy consistency
- self.check_copy_consistency(
- "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
- "DDPMSchedulerOutput",
- REFERENCE_CODE + "\n",
- )
-
- # With no empty line at the end
- self.check_copy_consistency(
- "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
- "DDPMSchedulerOutput",
- REFERENCE_CODE,
- )
-
- # Copy consistency with rename
- self.check_copy_consistency(
- "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
- "TestSchedulerOutput",
- re.sub("DDPM", "Test", REFERENCE_CODE),
- )
-
- # Copy consistency with a really long name
- long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
- self.check_copy_consistency(
- f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
- f"{long_class_name}SchedulerOutput",
- re.sub("Bert", long_class_name, REFERENCE_CODE),
- )
-
- # Copy consistency with overwrite
- self.check_copy_consistency(
- "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
- "TestSchedulerOutput",
- REFERENCE_CODE,
- overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
- )
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_score_sde_ve.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_score_sde_ve.py
deleted file mode 100644
index 08c30f9b1e0c2ce1f7baab82f5076efabe465a69..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/schedulers/test_scheduler_score_sde_ve.py
+++ /dev/null
@@ -1,189 +0,0 @@
-import tempfile
-import unittest
-
-import numpy as np
-import torch
-
-from diffusers import ScoreSdeVeScheduler
-
-
-class ScoreSdeVeSchedulerTest(unittest.TestCase):
- # TODO adapt with class SchedulerCommonTest (scheduler needs Numpy Integration)
- scheduler_classes = (ScoreSdeVeScheduler,)
- forward_default_kwargs = ()
-
- @property
- def dummy_sample(self):
- batch_size = 4
- num_channels = 3
- height = 8
- width = 8
-
- sample = torch.rand((batch_size, num_channels, height, width))
-
- return sample
-
- @property
- def dummy_sample_deter(self):
- batch_size = 4
- num_channels = 3
- height = 8
- width = 8
-
- num_elems = batch_size * num_channels * height * width
- sample = torch.arange(num_elems)
- sample = sample.reshape(num_channels, height, width, batch_size)
- sample = sample / num_elems
- sample = sample.permute(3, 0, 1, 2)
-
- return sample
-
- def dummy_model(self):
- def model(sample, t, *args):
- return sample * t / (t + 1)
-
- return model
-
- def get_scheduler_config(self, **kwargs):
- config = {
- "num_train_timesteps": 2000,
- "snr": 0.15,
- "sigma_min": 0.01,
- "sigma_max": 1348,
- "sampling_eps": 1e-5,
- }
-
- config.update(**kwargs)
- return config
-
- def check_over_configs(self, time_step=0, **config):
- kwargs = dict(self.forward_default_kwargs)
-
- for scheduler_class in self.scheduler_classes:
- sample = self.dummy_sample
- residual = 0.1 * sample
-
- scheduler_config = self.get_scheduler_config(**config)
- scheduler = scheduler_class(**scheduler_config)
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- scheduler.save_config(tmpdirname)
- new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-
- output = scheduler.step_pred(
- residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
- ).prev_sample
- new_output = new_scheduler.step_pred(
- residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
- ).prev_sample
-
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
- output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
- new_output = new_scheduler.step_correct(
- residual, sample, generator=torch.manual_seed(0), **kwargs
- ).prev_sample
-
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler corrections are not identical"
-
- def check_over_forward(self, time_step=0, **forward_kwargs):
- kwargs = dict(self.forward_default_kwargs)
- kwargs.update(forward_kwargs)
-
- for scheduler_class in self.scheduler_classes:
- sample = self.dummy_sample
- residual = 0.1 * sample
-
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- with tempfile.TemporaryDirectory() as tmpdirname:
- scheduler.save_config(tmpdirname)
- new_scheduler = scheduler_class.from_pretrained(tmpdirname)
-
- output = scheduler.step_pred(
- residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
- ).prev_sample
- new_output = new_scheduler.step_pred(
- residual, time_step, sample, generator=torch.manual_seed(0), **kwargs
- ).prev_sample
-
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
-
- output = scheduler.step_correct(residual, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
- new_output = new_scheduler.step_correct(
- residual, sample, generator=torch.manual_seed(0), **kwargs
- ).prev_sample
-
- assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler corrections are not identical"
-
- def test_timesteps(self):
- for timesteps in [10, 100, 1000]:
- self.check_over_configs(num_train_timesteps=timesteps)
-
- def test_sigmas(self):
- for sigma_min, sigma_max in zip([0.0001, 0.001, 0.01], [1, 100, 1000]):
- self.check_over_configs(sigma_min=sigma_min, sigma_max=sigma_max)
-
- def test_time_indices(self):
- for t in [0.1, 0.5, 0.75]:
- self.check_over_forward(time_step=t)
-
- def test_full_loop_no_noise(self):
- kwargs = dict(self.forward_default_kwargs)
-
- scheduler_class = self.scheduler_classes[0]
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- num_inference_steps = 3
-
- model = self.dummy_model()
- sample = self.dummy_sample_deter
-
- scheduler.set_sigmas(num_inference_steps)
- scheduler.set_timesteps(num_inference_steps)
- generator = torch.manual_seed(0)
-
- for i, t in enumerate(scheduler.timesteps):
- sigma_t = scheduler.sigmas[i]
-
- for _ in range(scheduler.config.correct_steps):
- with torch.no_grad():
- model_output = model(sample, sigma_t)
- sample = scheduler.step_correct(model_output, sample, generator=generator, **kwargs).prev_sample
-
- with torch.no_grad():
- model_output = model(sample, sigma_t)
-
- output = scheduler.step_pred(model_output, t, sample, generator=generator, **kwargs)
- sample, _ = output.prev_sample, output.prev_sample_mean
-
- result_sum = torch.sum(torch.abs(sample))
- result_mean = torch.mean(torch.abs(sample))
-
- assert np.isclose(result_sum.item(), 14372758528.0)
- assert np.isclose(result_mean.item(), 18714530.0)
-
- def test_step_shape(self):
- kwargs = dict(self.forward_default_kwargs)
-
- num_inference_steps = kwargs.pop("num_inference_steps", None)
-
- for scheduler_class in self.scheduler_classes:
- scheduler_config = self.get_scheduler_config()
- scheduler = scheduler_class(**scheduler_config)
-
- sample = self.dummy_sample
- residual = 0.1 * sample
-
- if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
- scheduler.set_timesteps(num_inference_steps)
- elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
- kwargs["num_inference_steps"] = num_inference_steps
-
- output_0 = scheduler.step_pred(residual, 0, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
- output_1 = scheduler.step_pred(residual, 1, sample, generator=torch.manual_seed(0), **kwargs).prev_sample
-
- self.assertEqual(output_0.shape, sample.shape)
- self.assertEqual(output_0.shape, output_1.shape)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py
deleted file mode 100644
index 403747f127e0f7a301771e53e75bf0e83a1736c9..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py
+++ /dev/null
@@ -1,4 +0,0 @@
-_base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py'
-# learning policy
-lr_config = dict(step=[28, 34])
-runner = dict(type='EpochBasedRunner', max_epochs=36)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py b/spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py
deleted file mode 100644
index 6198b9717957374ce734ca74de5f54dda44123b9..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/retinanet_r50_fpn_1x_coco_v1.py
+++ /dev/null
@@ -1,17 +0,0 @@
-_base_ = [
- '../_base_/models/retinanet_r50_fpn.py',
- '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-model = dict(
- bbox_head=dict(
- type='RetinaHead',
- anchor_generator=dict(
- type='LegacyAnchorGenerator',
- center_offset=0.5,
- octave_base_scale=4,
- scales_per_octave=3,
- ratios=[0.5, 1.0, 2.0],
- strides=[8, 16, 32, 64, 128]),
- bbox_coder=dict(type='LegacyDeltaXYWHBBoxCoder'),
- loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/ssd300_voc0712.py b/spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/ssd300_voc0712.py
deleted file mode 100644
index 271ebe32ea354c0748d7745fad2c55960ac305d1..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/pascal_voc/ssd300_voc0712.py
+++ /dev/null
@@ -1,69 +0,0 @@
-_base_ = [
- '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
- '../_base_/default_runtime.py'
-]
-model = dict(
- bbox_head=dict(
- num_classes=20,
- anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
-# dataset settings
-dataset_type = 'VOCDataset'
-data_root = 'data/VOCdevkit/'
-img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
-train_pipeline = [
- dict(type='LoadImageFromFile', to_float32=True),
- dict(type='LoadAnnotations', with_bbox=True),
- dict(
- type='PhotoMetricDistortion',
- brightness_delta=32,
- contrast_range=(0.5, 1.5),
- saturation_range=(0.5, 1.5),
- hue_delta=18),
- dict(
- type='Expand',
- mean=img_norm_cfg['mean'],
- to_rgb=img_norm_cfg['to_rgb'],
- ratio_range=(1, 4)),
- dict(
- type='MinIoURandomCrop',
- min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
- min_crop_size=0.3),
- dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(300, 300),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=False),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- samples_per_gpu=8,
- workers_per_gpu=3,
- train=dict(
- type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
-# optimizer
-optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
-optimizer_config = dict()
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=500,
- warmup_ratio=0.001,
- step=[16, 20])
-checkpoint_config = dict(interval=1)
-# runtime settings
-runner = dict(type='EpochBasedRunner', max_epochs=24)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
deleted file mode 100644
index e4107e7f8985deaaf0287d6b7347521970babf1e..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
+++ /dev/null
@@ -1,65 +0,0 @@
-_base_ = [
- '../_base_/models/mask_rcnn_r50_fpn.py',
- '../_base_/datasets/coco_instance.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-model = dict(
- pretrained='open-mmlab://regnetx_3.2gf',
- backbone=dict(
- _delete_=True,
- type='RegNet',
- arch='regnetx_3.2gf',
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=True),
- norm_eval=True,
- style='pytorch'),
- neck=dict(
- type='FPN',
- in_channels=[96, 192, 432, 1008],
- out_channels=256,
- num_outs=5))
-img_norm_cfg = dict(
- # The mean and std are used in PyCls when training RegNets
- mean=[103.53, 116.28, 123.675],
- std=[57.375, 57.12, 58.395],
- to_rgb=False)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
- dict(
- type='Resize',
- img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
- (1333, 768), (1333, 800)],
- multiscale_mode='value',
- keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
-optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
-lr_config = dict(step=[28, 34])
-runner = dict(type='EpochBasedRunner', max_epochs=36)
-optimizer_config = dict(
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py
deleted file mode 100644
index 0322006464e158a238525e91449cc81a6143375c..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/sabl/sabl_cascade_rcnn_r101_fpn_1x_coco.py
+++ /dev/null
@@ -1,88 +0,0 @@
-_base_ = [
- '../_base_/models/cascade_rcnn_r50_fpn.py',
- '../_base_/datasets/coco_detection.py',
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
-]
-# model settings
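-# The three cascade stages below share identical SABL heads; only the bucketing
-# coder's scale_factor is tightened per stage (1.7 -> 1.5 -> 1.3).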
-model = dict(
- pretrained='torchvision://resnet101',
- backbone=dict(depth=101),
- roi_head=dict(bbox_head=[
- dict(
- type='SABLHead',
- num_classes=80,
- cls_in_channels=256,
- reg_in_channels=256,
- roi_feat_size=7,
- reg_feat_up_ratio=2,
- reg_pre_kernel=3,
- reg_post_kernel=3,
- reg_pre_num=2,
- reg_post_num=1,
- cls_out_channels=1024,
- reg_offset_out_channels=256,
- reg_cls_out_channels=256,
- num_cls_fcs=1,
- num_reg_fcs=0,
- reg_class_agnostic=True,
- norm_cfg=None,
- bbox_coder=dict(
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.7),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
- loss_weight=1.0)),
- dict(
- type='SABLHead',
- num_classes=80,
- cls_in_channels=256,
- reg_in_channels=256,
- roi_feat_size=7,
- reg_feat_up_ratio=2,
- reg_pre_kernel=3,
- reg_post_kernel=3,
- reg_pre_num=2,
- reg_post_num=1,
- cls_out_channels=1024,
- reg_offset_out_channels=256,
- reg_cls_out_channels=256,
- num_cls_fcs=1,
- num_reg_fcs=0,
- reg_class_agnostic=True,
- norm_cfg=None,
- bbox_coder=dict(
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.5),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1,
- loss_weight=1.0)),
- dict(
- type='SABLHead',
- num_classes=80,
- cls_in_channels=256,
- reg_in_channels=256,
- roi_feat_size=7,
- reg_feat_up_ratio=2,
- reg_pre_kernel=3,
- reg_post_kernel=3,
- reg_pre_num=2,
- reg_post_num=1,
- cls_out_channels=1024,
- reg_offset_out_channels=256,
- reg_cls_out_channels=256,
- num_cls_fcs=1,
- num_reg_fcs=0,
- reg_class_agnostic=True,
- norm_cfg=None,
- bbox_coder=dict(
- type='BucketingBBoxCoder', num_buckets=14, scale_factor=1.3),
- loss_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
- loss_bbox_cls=dict(
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
- loss_bbox_reg=dict(type='SmoothL1Loss', beta=0.1, loss_weight=1.0))
- ]))
diff --git a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/pseudo_bbox_coder.py b/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/pseudo_bbox_coder.py
deleted file mode 100644
index 1c8346f4ae2c7db9719a70c7dc0244e088a9965b..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/coder/pseudo_bbox_coder.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from ..builder import BBOX_CODERS
-from .base_bbox_coder import BaseBBoxCoder
-
-
-@BBOX_CODERS.register_module()
-class PseudoBBoxCoder(BaseBBoxCoder):
- """Pseudo bounding box coder."""
-
- def __init__(self, **kwargs):
- super(PseudoBBoxCoder, self).__init__(**kwargs)
-
- def encode(self, bboxes, gt_bboxes):
- """torch.Tensor: return the given ``bboxes``"""
- return gt_bboxes
-
- def decode(self, bboxes, pred_bboxes):
- """torch.Tensor: return the given ``pred_bboxes``"""
- return pred_bboxes
diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py b/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py
deleted file mode 100644
index 1b70ca8e46a0409379f5ae9809ce03de203426ad..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r101-d8_512x512_20k_voc12aug.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './gcnet_r50-d8_512x512_20k_voc12aug.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
diff --git a/spaces/Andyrasika/distilbert-base-uncased-finetuned-emotion/app.py b/spaces/Andyrasika/distilbert-base-uncased-finetuned-emotion/app.py
deleted file mode 100644
index 0fd5db94abbfed37870c5163294cdc1c45ddcb00..0000000000000000000000000000000000000000
--- a/spaces/Andyrasika/distilbert-base-uncased-finetuned-emotion/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/Andyrasika/distilbert-base-uncased-finetuned-emotion").launch()
\ No newline at end of file
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
deleted file mode 100644
index a3941e27874993418b3b5708d5a7485f175ff9c8..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .registry import CONV_LAYERS
-
-
-def conv_ws_2d(input,
- weight,
- bias=None,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- eps=1e-5):
- c_in = weight.size(0)
- weight_flat = weight.view(c_in, -1)
- mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
- std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
- weight = (weight - mean) / (std + eps)
- return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
-
-
-@CONV_LAYERS.register_module('ConvWS')
-class ConvWS2d(nn.Conv2d):
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True,
- eps=1e-5):
- super(ConvWS2d, self).__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- bias=bias)
- self.eps = eps
-
- def forward(self, x):
- return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,
- self.dilation, self.groups, self.eps)
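-
-# A minimal usage sketch (the shapes are arbitrary examples): ConvWS2d is a
-# drop-in replacement for nn.Conv2d whose kernel is re-standardized on every
-# forward pass:
-#   conv = ConvWS2d(16, 32, kernel_size=3, padding=1)
-#   out = conv(torch.randn(2, 16, 8, 8))  # -> shape (2, 32, 8, 8)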
-
-
-@CONV_LAYERS.register_module(name='ConvAWS')
-class ConvAWS2d(nn.Conv2d):
- """AWS (Adaptive Weight Standardization)
-
- This is a variant of Weight Standardization
- (https://arxiv.org/pdf/1903.10520.pdf)
- It is used in DetectoRS to avoid NaN
- (https://arxiv.org/pdf/2006.02334.pdf)
-
- Args:
- in_channels (int): Number of channels in the input image
- out_channels (int): Number of channels produced by the convolution
- kernel_size (int or tuple): Size of the conv kernel
- stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- dilation (int or tuple, optional): Spacing between kernel elements.
- Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If set True, adds a learnable bias to the
- output. Default: True
- """
-
- def __init__(self,
- in_channels,
- out_channels,
- kernel_size,
- stride=1,
- padding=0,
- dilation=1,
- groups=1,
- bias=True):
- super().__init__(
- in_channels,
- out_channels,
- kernel_size,
- stride=stride,
- padding=padding,
- dilation=dilation,
- groups=groups,
- bias=bias)
- self.register_buffer('weight_gamma',
- torch.ones(self.out_channels, 1, 1, 1))
- self.register_buffer('weight_beta',
- torch.zeros(self.out_channels, 1, 1, 1))
-
- def _get_weight(self, weight):
- weight_flat = weight.view(weight.size(0), -1)
- mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
- std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
- weight = (weight - mean) / std
- weight = self.weight_gamma * weight + self.weight_beta
- return weight
-
- def forward(self, x):
- weight = self._get_weight(self.weight)
- return F.conv2d(x, weight, self.bias, self.stride, self.padding,
- self.dilation, self.groups)
-
- def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
- missing_keys, unexpected_keys, error_msgs):
- """Override default load function.
-
- AWS overrides the function _load_from_state_dict to recover
- weight_gamma and weight_beta if they are missing. If weight_gamma and
- weight_beta are found in the checkpoint, this function will return
- after super()._load_from_state_dict. Otherwise, it will compute the
- mean and std of the pretrained weights and store them in weight_beta
- and weight_gamma.
- """
-
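- # Fill weight_gamma with a negative sentinel; if the checkpoint contains
- # weight_gamma, the buffer turns non-negative after loading and we can
- # return early below.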
- self.weight_gamma.data.fill_(-1)
- local_missing_keys = []
- super()._load_from_state_dict(state_dict, prefix, local_metadata,
- strict, local_missing_keys,
- unexpected_keys, error_msgs)
- if self.weight_gamma.data.mean() > 0:
- for k in local_missing_keys:
- missing_keys.append(k)
- return
- weight = self.weight.data
- weight_flat = weight.view(weight.size(0), -1)
- mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
- std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
- self.weight_beta.data.copy_(mean)
- self.weight_gamma.data.copy_(std)
- missing_gamma_beta = [
- k for k in local_missing_keys
- if k.endswith('weight_gamma') or k.endswith('weight_beta')
- ]
- for k in missing_gamma_beta:
- local_missing_keys.remove(k)
- for k in local_missing_keys:
- missing_keys.append(k)
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/furthest_point_sample.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/furthest_point_sample.py
deleted file mode 100644
index 374b7a878f1972c183941af28ba1df216ac1a60f..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/furthest_point_sample.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import torch
-from torch.autograd import Function
-
-from ..utils import ext_loader
-
-ext_module = ext_loader.load_ext('_ext', [
- 'furthest_point_sampling_forward',
- 'furthest_point_sampling_with_dist_forward'
-])
-
-
-class FurthestPointSampling(Function):
- """Uses iterative furthest point sampling to select a set of features whose
- corresponding points have the furthest distance."""
-
- @staticmethod
- def forward(ctx, points_xyz: torch.Tensor,
- num_points: int) -> torch.Tensor:
- """
- Args:
- points_xyz (Tensor): (B, N, 3) where N > num_points.
- num_points (int): Number of points in the sampled set.
-
- Returns:
- Tensor: (B, num_points) indices of the sampled points.
- """
- assert points_xyz.is_contiguous()
-
- B, N = points_xyz.size()[:2]
- output = torch.cuda.IntTensor(B, num_points)
- temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
-
- ext_module.furthest_point_sampling_forward(
- points_xyz,
- temp,
- output,
- b=B,
- n=N,
- m=num_points,
- )
- if torch.__version__ != 'parrots':
- ctx.mark_non_differentiable(output)
- return output
-
- @staticmethod
- def backward(xyz, a=None):
- return None, None
-
-
-class FurthestPointSamplingWithDist(Function):
- """Uses iterative furthest point sampling to select a set of features whose
- corresponding points have the furthest distance."""
-
- @staticmethod
- def forward(ctx, points_dist: torch.Tensor,
- num_points: int) -> torch.Tensor:
- """
- Args:
- points_dist (Tensor): (B, N, N) Distance between each point pair.
- num_points (int): Number of points in the sampled set.
-
- Returns:
- Tensor: (B, num_points) indices of the sampled points.
- """
- assert points_dist.is_contiguous()
-
- B, N, _ = points_dist.size()
- output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
- temp = points_dist.new_zeros([B, N]).fill_(1e10)
-
- ext_module.furthest_point_sampling_with_dist_forward(
- points_dist, temp, output, b=B, n=N, m=num_points)
- if torch.__version__ != 'parrots':
- ctx.mark_non_differentiable(output)
- return output
-
- @staticmethod
- def backward(xyz, a=None):
- return None, None
-
-
-furthest_point_sample = FurthestPointSampling.apply
-furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply
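-
-# A minimal usage sketch (requires a CUDA build of the mmcv extension; the
-# random tensor is only an example):
-#   xyz = torch.rand(2, 1024, 3).cuda()
-#   idx = furthest_point_sample(xyz, 128)  # -> (2, 128) int32 indices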
diff --git a/spaces/Apex-X/Tm/roop/capturer.py b/spaces/Apex-X/Tm/roop/capturer.py
deleted file mode 100644
index fd49d468dd4cd45832ab9612205968207a6f45cf..0000000000000000000000000000000000000000
--- a/spaces/Apex-X/Tm/roop/capturer.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from typing import Any
-import cv2
-
-
-def get_video_frame(video_path: str, frame_number: int = 0) -> Any:
- capture = cv2.VideoCapture(video_path)
- frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT)
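- # seek to the requested frame, clamping so we never seek past the stream end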
- capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1))
- has_frame, frame = capture.read()
- capture.release()
- if has_frame:
- return frame
- return None
-
-
-def get_video_frame_total(video_path: str) -> int:
- capture = cv2.VideoCapture(video_path)
- video_frame_total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
- capture.release()
- return video_frame_total
diff --git a/spaces/Astroomx/Mine/Dockerfile b/spaces/Astroomx/Mine/Dockerfile
deleted file mode 100644
index 6c01c09373883afcb4ea34ae2d316cd596e1737b..0000000000000000000000000000000000000000
--- a/spaces/Astroomx/Mine/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-FROM node:18-bullseye-slim
-
-RUN apt-get update && \
-    apt-get install -y git
-
-RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
-
-WORKDIR /app
-
-RUN npm install
-
-COPY Dockerfile greeting.md* .env* ./
-
-RUN npm run build
-
-EXPOSE 7860
-
-ENV NODE_ENV=production
-
-CMD [ "npm", "start" ]
\ No newline at end of file
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py
deleted file mode 100644
index 2e3c80869d9c1a70ee003d054a53f49c3f53a556..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py
+++ /dev/null
@@ -1,153 +0,0 @@
-"""
- pygments.unistring
- ~~~~~~~~~~~~~~~~~~
-
- Strings of all Unicode characters of a certain category.
- Used for matching in Unicode-aware languages. Run to regenerate.
-
- Inspired by chartypes_create.py from the MoinMoin project.
-
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
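-# The category strings below are meant to be interpolated into regular
-# expression character classes. A minimal sketch (using names from this module):
-#   import re
-#   capitalized_word = re.compile('[%s][%s]*' % (Lu, Ll))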
-Cc = '\x00-\x1f\x7f-\x9f'
-
-Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
-
-Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U000118 9f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
-
-Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd'
-
-Cs = '\ud800-\udbff\\\udc00\udc01-\udfff'
-
-Ll = 'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943'
-
-Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1'
-
-Lo = '\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
-
-Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
-
-Lu = 'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921'
-
-Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
-
-Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
-
-Mn = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef'
-
-Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959'
-
-Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e'
-
-No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c'
-
-Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
-
-Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
-
-Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
-
-Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
-
-Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
-
-Po = "!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f"
-
-Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
-
-Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0'
-
-Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff'
-
-Sm = '+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
-
-So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d'
-
-Zl = '\u2028'
-
-Zp = '\u2029'
-
-Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
-
-xid_continue = '0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
-
-xid_start = 'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
-
-cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
-
-# Generated from unidata 11.0.0
-
-def combine(*args):
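- # Concatenate the character-class fragments of the named categories so the
- # result can be dropped into a single regex character class.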
- return ''.join(globals()[cat] for cat in args)
-
-
-def allexcept(*args):
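- # Build the union of every category except the ones named, e.g. for
- # "anything but separators"-style negated classes.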
- newcats = cats[:]
- for arg in args:
- newcats.remove(arg)
- return ''.join(globals()[cat] for cat in newcats)
-
-
-def _handle_runs(char_list): # pragma: no cover
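- # Compress a sorted list of single characters into 'a-b' regex ranges;
- # pre-escaped multi-character entries are emitted as one-element runs.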
- buf = []
- for c in char_list:
- if len(c) == 1 and buf and buf[-1][1] == chr(ord(c) - 1):
- buf[-1] = (buf[-1][0], c)
- else:
- buf.append((c, c))
- for a, b in buf:
- if a == b:
- yield a
- else:
- yield '%s-%s' % (a, b)
-
-
-if __name__ == '__main__': # pragma: no cover
- import unicodedata
-
- categories = {'xid_start': [], 'xid_continue': []}
-
- with open(__file__) as fp:
- content = fp.read()
-
- header = content[:content.find('Cc =')]
- footer = content[content.find("def combine("):]
-
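- # Scan the entire code-point space once and bucket each character by its
- # Unicode general category, plus the derived xid_start/xid_continue sets.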
- for code in range(0x110000):
- c = chr(code)
- cat = unicodedata.category(c)
- if ord(c) == 0xdc00:
- # Hack: escape this low surrogate so that repr() does not merge it
- # with the preceding high surrogate, 0xdbff, into a single code point.
- c = '\\' + c
- elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
- # Escape regex metachars.
- c = '\\' + c
- categories.setdefault(cat, []).append(c)
- # XID_START and XID_CONTINUE are special categories used for matching
- # identifiers in Python 3.
- if c.isidentifier():
- categories['xid_start'].append(c)
- if ('a' + c).isidentifier():
- categories['xid_continue'].append(c)
-
- with open(__file__, 'w') as fp:
- fp.write(header)
-
- for cat in sorted(categories):
- val = ''.join(_handle_runs(categories[cat]))
- fp.write('%s = %a\n\n' % (cat, val))
-
- cats = sorted(categories)
- cats.remove('xid_start')
- cats.remove('xid_continue')
- fp.write('cats = %r\n\n' % cats)
-
- fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
-
- fp.write(footer)
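The helpers above are the module's working surface: each category constant is a pre-escaped regex character-class fragment, so consumers build patterns by plain string concatenation. A minimal sketch of that usage, assuming the deleted file were importable under the hypothetical name `unistring`:

import re

import unistring  # hypothetical module name for the file deleted above

# Category fragments drop straight into a regex character class.
letter_re = re.compile('[%s]+' % unistring.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo'))

# xid_start/xid_continue give Python-3-style identifier matching.
ident_re = re.compile('[%s][%s]*' % (unistring.xid_start, unistring.xid_continue))

print(letter_re.findall('x1 \u0394y _z'))     # ['x', 'Δy', 'z']
print(bool(ident_re.fullmatch('\u0394y_2')))  # True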
diff --git a/spaces/AutoLLM/AutoAgents/README.md b/spaces/AutoLLM/AutoAgents/README.md
deleted file mode 100644
index 8cc80bd9f875dec186ef7439782b7c7b5d0b7653..0000000000000000000000000000000000000000
--- a/spaces/AutoLLM/AutoAgents/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: AutoAgents
-emoji: 🐢
-colorFrom: green
-colorTo: purple
-sdk: streamlit
-sdk_version: 1.21.0
-python_version: 3.10.11
-app_file: autoagents/spaces/app.py
-pinned: true
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/predictor.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/predictor.py
deleted file mode 100644
index 8a036bde3f0fffd770f9ec6fd04a3505b88b09df..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/predictor.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import atexit
-import bisect
-import multiprocessing as mp
-from collections import deque
-import cv2
-import torch
-
-from detectron2.data import MetadataCatalog
-from detectron2.engine.defaults import DefaultPredictor
-from detectron2.utils.video_visualizer import VideoVisualizer
-from detectron2.utils.visualizer import ColorMode, Visualizer
-
-
-class VisualizationDemo(object):
- def __init__(self, cfg, instance_mode=ColorMode.IMAGE, parallel=False):
- """
- Args:
- cfg (CfgNode):
- instance_mode (ColorMode):
- parallel (bool): whether to run the model in separate processes from the
- visualization. Useful since the visualization logic can be slow.
- """
- self.metadata = MetadataCatalog.get(
- cfg.DATASETS.TRAIN[0] if len(cfg.DATASETS.TRAIN) else "__unused"
- )
- self.cpu_device = torch.device("cpu")
- self.instance_mode = instance_mode
-
- self.parallel = parallel
- if parallel:
- num_gpu = torch.cuda.device_count()
- self.predictor = AsyncPredictor(cfg, num_gpus=num_gpu)
- else:
- self.predictor = DefaultPredictor(cfg)
-
- def run_on_image(self, image, visualizer=None):
- """
- Args:
- image (np.ndarray): an image of shape (H, W, C) (in BGR order).
- This is the format used by OpenCV.
- visualizer (VideoVisualizer, optional): if provided, predictions are drawn
- with it (keeping per-frame state); otherwise a fresh Visualizer is created.
-
- Returns:
- predictions (dict): the output of the model.
- vis_output (VisImage): the visualized image output.
- """
- vis_output = None
- predictions = self.predictor(image)
- # Convert image from OpenCV BGR format to Matplotlib RGB format.
- image = image[:, :, ::-1]
- use_video_vis = True
- if visualizer is None:
- use_video_vis = False
- visualizer = Visualizer(image, self.metadata, instance_mode=self.instance_mode)
- if "panoptic_seg" in predictions:
- panoptic_seg, segments_info = predictions["panoptic_seg"]
- vis_output = visualizer.draw_panoptic_seg_predictions(
- panoptic_seg.to(self.cpu_device), segments_info
- )
- else:
- if "sem_seg" in predictions:
- vis_output = visualizer.draw_sem_seg(
- predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
- )
- if "instances" in predictions:
- instances = predictions["instances"].to(self.cpu_device)
- if use_video_vis:
- vis_output = visualizer.draw_instance_predictions(
- image, predictions=instances)
- else:
- vis_output = visualizer.draw_instance_predictions(predictions=instances)
- elif "proposals" in predictions:
- instances = predictions["proposals"].to(self.cpu_device)
- instances.pred_boxes = instances.proposal_boxes
- instances.scores = instances.objectness_logits
- instances.pred_classes[:] = -1
- if use_video_vis:
- vis_output = visualizer.draw_instance_predictions(
- image, predictions=instances)
- else:
- vis_output = visualizer.draw_instance_predictions(predictions=instances)
-
- return predictions, vis_output
-
- def _frame_from_video(self, video):
- while video.isOpened():
- success, frame = video.read()
- if success:
- yield frame
- else:
- break
-
- def run_on_video(self, video):
- """
- Visualizes predictions on frames of the input video.
-
- Args:
- video (cv2.VideoCapture): a :class:`VideoCapture` object, whose source can be
- either a webcam or a video file.
-
- Yields:
- ndarray: BGR visualizations of each video frame.
- """
- video_visualizer = VideoVisualizer(self.metadata, self.instance_mode)
-
- def process_predictions(frame, predictions):
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
- if "panoptic_seg" in predictions:
- panoptic_seg, segments_info = predictions["panoptic_seg"]
- vis_frame = video_visualizer.draw_panoptic_seg_predictions(
- frame, panoptic_seg.to(self.cpu_device), segments_info
- )
- elif "instances" in predictions:
- predictions = predictions["instances"].to(self.cpu_device)
- vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
- elif "sem_seg" in predictions:
- vis_frame = video_visualizer.draw_sem_seg(
- frame, predictions["sem_seg"].argmax(dim=0).to(self.cpu_device)
- )
- elif "proposals" in predictions:
- predictions = predictions["proposals"].to(self.cpu_device)
- predictions.pred_boxes = predictions.proposal_boxes
- predictions.scores = predictions.objectness_logits
- predictions.pred_classes[:] = -1
- vis_frame = video_visualizer.draw_instance_predictions(frame, predictions)
-
- # Converts Matplotlib RGB format to OpenCV BGR format
- vis_frame = cv2.cvtColor(vis_frame.get_image(), cv2.COLOR_RGB2BGR)
- return vis_frame
-
- frame_gen = self._frame_from_video(video)
- if self.parallel:
- buffer_size = self.predictor.default_buffer_size
-
- frame_data = deque()
-
- for cnt, frame in enumerate(frame_gen):
- frame_data.append(frame)
- self.predictor.put(frame)
-
- if cnt >= buffer_size:
- frame = frame_data.popleft()
- predictions = self.predictor.get()
- yield process_predictions(frame, predictions)
-
- while len(frame_data):
- frame = frame_data.popleft()
- predictions = self.predictor.get()
- yield process_predictions(frame, predictions)
- else:
- for frame in frame_gen:
- yield process_predictions(frame, self.predictor(frame))
-
-
-class AsyncPredictor:
- """
- A predictor that runs the model asynchronously, possibly on more than one GPU.
- Because rendering the visualization takes a considerable amount of time,
- this helps improve throughput when rendering videos.
- """
-
- class _StopToken:
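- # Sentinel placed on the task queue to tell a worker process to shut down.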
- pass
-
- class _PredictWorker(mp.Process):
- def __init__(self, cfg, task_queue, result_queue):
- self.cfg = cfg
- self.task_queue = task_queue
- self.result_queue = result_queue
- super().__init__()
-
- def run(self):
- predictor = DefaultPredictor(self.cfg)
-
- while True:
- task = self.task_queue.get()
- if isinstance(task, AsyncPredictor._StopToken):
- break
- idx, data = task
- result = predictor(data)
- self.result_queue.put((idx, result))
-
- def __init__(self, cfg, num_gpus: int = 1):
- """
- Args:
- cfg (CfgNode):
- num_gpus (int): if 0, will run on CPU
- """
- num_workers = max(num_gpus, 1)
- self.task_queue = mp.Queue(maxsize=num_workers * 3)
- self.result_queue = mp.Queue(maxsize=num_workers * 3)
- self.procs = []
- for gpuid in range(num_workers):
- cfg = cfg.clone()
- cfg.defrost()
- cfg.MODEL.DEVICE = "cuda:{}".format(gpuid) if num_gpus > 0 else "cpu"
- self.procs.append(
- AsyncPredictor._PredictWorker(cfg, self.task_queue, self.result_queue)
- )
-
- self.put_idx = 0
- self.get_idx = 0
- self.result_rank = []
- self.result_data = []
-
- for p in self.procs:
- p.start()
- atexit.register(self.shutdown)
-
- def put(self, image):
- self.put_idx += 1
- self.task_queue.put((self.put_idx, image))
-
- def get(self):
- self.get_idx += 1 # the index needed for this request
- if len(self.result_rank) and self.result_rank[0] == self.get_idx:
- res = self.result_data[0]
- del self.result_data[0], self.result_rank[0]
- return res
-
- while True:
- # make sure the results are returned in the correct order
- idx, res = self.result_queue.get()
- if idx == self.get_idx:
- return res
- insert = bisect.bisect(self.result_rank, idx)
- self.result_rank.insert(insert, idx)
- self.result_data.insert(insert, res)
-
- def __len__(self):
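- # Number of frames submitted but not yet retrieved.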
- return self.put_idx - self.get_idx
-
- def __call__(self, image):
- self.put(image)
- return self.get()
-
- def shutdown(self):
- for _ in self.procs:
- self.task_queue.put(AsyncPredictor._StopToken())
-
- @property
- def default_buffer_size(self):
- return len(self.procs) * 5
diff --git a/spaces/BAAI/vid2vid-zero/vid2vid_zero/models/unet_2d_blocks.py b/spaces/BAAI/vid2vid-zero/vid2vid_zero/models/unet_2d_blocks.py
deleted file mode 100644
index a4e708eaeb355d95cad6d2dffa1a17e1ba77e462..0000000000000000000000000000000000000000
--- a/spaces/BAAI/vid2vid-zero/vid2vid_zero/models/unet_2d_blocks.py
+++ /dev/null
@@ -1,609 +0,0 @@
-# Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_blocks.py
-
-import torch
-from torch import nn
-
-from .attention_2d import Transformer2DModel
-from .resnet_2d import Downsample2D, ResnetBlock2D, Upsample2D
-
-
-def get_down_block(
- down_block_type,
- num_layers,
- in_channels,
- out_channels,
- temb_channels,
- add_downsample,
- resnet_eps,
- resnet_act_fn,
- attn_num_head_channels,
- resnet_groups=None,
- cross_attention_dim=None,
- downsample_padding=None,
- dual_cross_attention=False,
- use_linear_projection=False,
- only_cross_attention=False,
- upcast_attention=False,
- resnet_time_scale_shift="default",
- use_sc_attn=False,
- use_st_attn=False,
-):
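- # Factory: map the block-type string from a UNet config to the matching
- # module, forwarding only the arguments that block type actually accepts.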
- down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
- if down_block_type == "DownBlock2D":
- return DownBlock2D(
- num_layers=num_layers,
- in_channels=in_channels,
- out_channels=out_channels,
- temb_channels=temb_channels,
- add_downsample=add_downsample,
- resnet_eps=resnet_eps,
- resnet_act_fn=resnet_act_fn,
- resnet_groups=resnet_groups,
- downsample_padding=downsample_padding,
- resnet_time_scale_shift=resnet_time_scale_shift,
- )
- elif down_block_type == "CrossAttnDownBlock2D":
- if cross_attention_dim is None:
- raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
- return CrossAttnDownBlock2D(
- num_layers=num_layers,
- in_channels=in_channels,
- out_channels=out_channels,
- temb_channels=temb_channels,
- add_downsample=add_downsample,
- resnet_eps=resnet_eps,
- resnet_act_fn=resnet_act_fn,
- resnet_groups=resnet_groups,
- downsample_padding=downsample_padding,
- cross_attention_dim=cross_attention_dim,
- attn_num_head_channels=attn_num_head_channels,
- dual_cross_attention=dual_cross_attention,
- use_linear_projection=use_linear_projection,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- resnet_time_scale_shift=resnet_time_scale_shift,
- use_sc_attn=use_sc_attn,
- use_st_attn=use_st_attn,
- )
- raise ValueError(f"{down_block_type} does not exist.")
-
-
-def get_up_block(
- up_block_type,
- num_layers,
- in_channels,
- out_channels,
- prev_output_channel,
- temb_channels,
- add_upsample,
- resnet_eps,
- resnet_act_fn,
- attn_num_head_channels,
- resnet_groups=None,
- cross_attention_dim=None,
- dual_cross_attention=False,
- use_linear_projection=False,
- only_cross_attention=False,
- upcast_attention=False,
- resnet_time_scale_shift="default",
- use_sc_attn=False,
- use_st_attn=False,
-):
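- # Same factory pattern as get_down_block, for the decoder side of the UNet.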
- up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
- if up_block_type == "UpBlock2D":
- return UpBlock2D(
- num_layers=num_layers,
- in_channels=in_channels,
- out_channels=out_channels,
- prev_output_channel=prev_output_channel,
- temb_channels=temb_channels,
- add_upsample=add_upsample,
- resnet_eps=resnet_eps,
- resnet_act_fn=resnet_act_fn,
- resnet_groups=resnet_groups,
- resnet_time_scale_shift=resnet_time_scale_shift,
- )
- elif up_block_type == "CrossAttnUpBlock2D":
- if cross_attention_dim is None:
- raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
- return CrossAttnUpBlock2D(
- num_layers=num_layers,
- in_channels=in_channels,
- out_channels=out_channels,
- prev_output_channel=prev_output_channel,
- temb_channels=temb_channels,
- add_upsample=add_upsample,
- resnet_eps=resnet_eps,
- resnet_act_fn=resnet_act_fn,
- resnet_groups=resnet_groups,
- cross_attention_dim=cross_attention_dim,
- attn_num_head_channels=attn_num_head_channels,
- dual_cross_attention=dual_cross_attention,
- use_linear_projection=use_linear_projection,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- resnet_time_scale_shift=resnet_time_scale_shift,
- use_sc_attn=use_sc_attn,
- use_st_attn=use_st_attn,
- )
- raise ValueError(f"{up_block_type} does not exist.")
-
-
-class UNetMidBlock2DCrossAttn(nn.Module):
- def __init__(
- self,
- in_channels: int,
- temb_channels: int,
- dropout: float = 0.0,
- num_layers: int = 1,
- resnet_eps: float = 1e-6,
- resnet_time_scale_shift: str = "default",
- resnet_act_fn: str = "swish",
- resnet_groups: int = 32,
- resnet_pre_norm: bool = True,
- attn_num_head_channels=1,
- output_scale_factor=1.0,
- cross_attention_dim=1280,
- dual_cross_attention=False,
- use_linear_projection=False,
- upcast_attention=False,
- use_sc_attn=False,
- use_st_attn=False,
- ):
- super().__init__()
-
- self.has_cross_attention = True
- self.attn_num_head_channels = attn_num_head_channels
- resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
-
- # there is always at least one resnet
- resnets = [
- ResnetBlock2D(
- in_channels=in_channels,
- out_channels=in_channels,
- temb_channels=temb_channels,
- eps=resnet_eps,
- groups=resnet_groups,
- dropout=dropout,
- time_embedding_norm=resnet_time_scale_shift,
- non_linearity=resnet_act_fn,
- output_scale_factor=output_scale_factor,
- pre_norm=resnet_pre_norm,
- )
- ]
- attentions = []
-
- for i in range(num_layers):
- if dual_cross_attention:
- raise NotImplementedError
- attentions.append(
- Transformer2DModel(
- attn_num_head_channels,
- in_channels // attn_num_head_channels,
- in_channels=in_channels,
- num_layers=1,
- cross_attention_dim=cross_attention_dim,
- norm_num_groups=resnet_groups,
- use_linear_projection=use_linear_projection,
- upcast_attention=upcast_attention,
- use_sc_attn=use_sc_attn,
- use_st_attn=(use_st_attn and i == 0),
- )
- )
- resnets.append(
- ResnetBlock2D(
- in_channels=in_channels,
- out_channels=in_channels,
- temb_channels=temb_channels,
- eps=resnet_eps,
- groups=resnet_groups,
- dropout=dropout,
- time_embedding_norm=resnet_time_scale_shift,
- non_linearity=resnet_act_fn,
- output_scale_factor=output_scale_factor,
- pre_norm=resnet_pre_norm,
- )
- )
-
- self.attentions = nn.ModuleList(attentions)
- self.resnets = nn.ModuleList(resnets)
-
- def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, normal_infer=False):
- hidden_states = self.resnets[0](hidden_states, temb)
- for attn, resnet in zip(self.attentions, self.resnets[1:]):
- hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, normal_infer=normal_infer).sample
- hidden_states = resnet(hidden_states, temb)
-
- return hidden_states
-
-
-class CrossAttnDownBlock2D(nn.Module):
- def __init__(
- self,
- in_channels: int,
- out_channels: int,
- temb_channels: int,
- dropout: float = 0.0,
- num_layers: int = 1,
- resnet_eps: float = 1e-6,
- resnet_time_scale_shift: str = "default",
- resnet_act_fn: str = "swish",
- resnet_groups: int = 32,
- resnet_pre_norm: bool = True,
- attn_num_head_channels=1,
- cross_attention_dim=1280,
- output_scale_factor=1.0,
- downsample_padding=1,
- add_downsample=True,
- dual_cross_attention=False,
- use_linear_projection=False,
- only_cross_attention=False,
- upcast_attention=False,
- use_sc_attn=False,
- use_st_attn=False,
- ):
- super().__init__()
- resnets = []
- attentions = []
-
- self.has_cross_attention = True
- self.attn_num_head_channels = attn_num_head_channels
-
- for i in range(num_layers):
- in_channels = in_channels if i == 0 else out_channels
- resnets.append(
- ResnetBlock2D(
- in_channels=in_channels,
- out_channels=out_channels,
- temb_channels=temb_channels,
- eps=resnet_eps,
- groups=resnet_groups,
- dropout=dropout,
- time_embedding_norm=resnet_time_scale_shift,
- non_linearity=resnet_act_fn,
- output_scale_factor=output_scale_factor,
- pre_norm=resnet_pre_norm,
- )
- )
- if dual_cross_attention:
- raise NotImplementedError
- attentions.append(
- Transformer2DModel(
- attn_num_head_channels,
- out_channels // attn_num_head_channels,
- in_channels=out_channels,
- num_layers=1,
- cross_attention_dim=cross_attention_dim,
- norm_num_groups=resnet_groups,
- use_linear_projection=use_linear_projection,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- use_sc_attn=use_sc_attn,
- use_st_attn=(use_st_attn and i == 0),
- )
- )
- self.attentions = nn.ModuleList(attentions)
- self.resnets = nn.ModuleList(resnets)
-
- if add_downsample:
- self.downsamplers = nn.ModuleList(
- [
- Downsample2D(
- out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
- )
- ]
- )
- else:
- self.downsamplers = None
-
- self.gradient_checkpointing = False
-
- def forward(self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, normal_infer=False):
- output_states = ()
-
- for resnet, attn in zip(self.resnets, self.attentions):
- if self.training and self.gradient_checkpointing:
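- # Gradient checkpointing: recompute activations in backward to save memory.
- # The wrappers bake keyword arguments (return_dict, normal_infer) into a
- # closure, since torch.utils.checkpoint.checkpoint forwards only positional inputs.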
-
- def create_custom_forward(module, return_dict=None, normal_infer=False):
- def custom_forward(*inputs):
- if return_dict is not None:
- return module(*inputs, return_dict=return_dict, normal_infer=normal_infer)
- else:
- return module(*inputs)
-
- return custom_forward
-
- hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
- hidden_states = torch.utils.checkpoint.checkpoint(
- create_custom_forward(attn, return_dict=False, normal_infer=normal_infer),
- hidden_states,
- encoder_hidden_states,
- )[0]
- else:
- hidden_states = resnet(hidden_states, temb)
- hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, normal_infer=normal_infer).sample
-
- output_states += (hidden_states,)
-
- if self.downsamplers is not None:
- for downsampler in self.downsamplers:
- hidden_states = downsampler(hidden_states)
-
- output_states += (hidden_states,)
-
- return hidden_states, output_states
-
-
-class DownBlock2D(nn.Module):
- def __init__(
- self,
- in_channels: int,
- out_channels: int,
- temb_channels: int,
- dropout: float = 0.0,
- num_layers: int = 1,
- resnet_eps: float = 1e-6,
- resnet_time_scale_shift: str = "default",
- resnet_act_fn: str = "swish",
- resnet_groups: int = 32,
- resnet_pre_norm: bool = True,
- output_scale_factor=1.0,
- add_downsample=True,
- downsample_padding=1,
- ):
- super().__init__()
- resnets = []
-
- for i in range(num_layers):
- in_channels = in_channels if i == 0 else out_channels
- resnets.append(
- ResnetBlock2D(
- in_channels=in_channels,
- out_channels=out_channels,
- temb_channels=temb_channels,
- eps=resnet_eps,
- groups=resnet_groups,
- dropout=dropout,
- time_embedding_norm=resnet_time_scale_shift,
- non_linearity=resnet_act_fn,
- output_scale_factor=output_scale_factor,
- pre_norm=resnet_pre_norm,
- )
- )
-
- self.resnets = nn.ModuleList(resnets)
-
- if add_downsample:
- self.downsamplers = nn.ModuleList(
- [
- Downsample2D(
- out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
- )
- ]
- )
- else:
- self.downsamplers = None
-
- self.gradient_checkpointing = False
-
- def forward(self, hidden_states, temb=None):
- output_states = ()
-
- for resnet in self.resnets:
- if self.training and self.gradient_checkpointing:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- return module(*inputs)
-
- return custom_forward
-
- hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
- else:
- hidden_states = resnet(hidden_states, temb)
-
- output_states += (hidden_states,)
-
- if self.downsamplers is not None:
- for downsampler in self.downsamplers:
- hidden_states = downsampler(hidden_states)
-
- output_states += (hidden_states,)
-
- return hidden_states, output_states
-
-
-class CrossAttnUpBlock2D(nn.Module):
- def __init__(
- self,
- in_channels: int,
- out_channels: int,
- prev_output_channel: int,
- temb_channels: int,
- dropout: float = 0.0,
- num_layers: int = 1,
- resnet_eps: float = 1e-6,
- resnet_time_scale_shift: str = "default",
- resnet_act_fn: str = "swish",
- resnet_groups: int = 32,
- resnet_pre_norm: bool = True,
- attn_num_head_channels=1,
- cross_attention_dim=1280,
- output_scale_factor=1.0,
- add_upsample=True,
- dual_cross_attention=False,
- use_linear_projection=False,
- only_cross_attention=False,
- upcast_attention=False,
- use_sc_attn=False,
- use_st_attn=False,
- ):
- super().__init__()
- resnets = []
- attentions = []
-
- self.has_cross_attention = True
- self.attn_num_head_channels = attn_num_head_channels
-
- for i in range(num_layers):
- res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
- resnet_in_channels = prev_output_channel if i == 0 else out_channels
-
- resnets.append(
- ResnetBlock2D(
- in_channels=resnet_in_channels + res_skip_channels,
- out_channels=out_channels,
- temb_channels=temb_channels,
- eps=resnet_eps,
- groups=resnet_groups,
- dropout=dropout,
- time_embedding_norm=resnet_time_scale_shift,
- non_linearity=resnet_act_fn,
- output_scale_factor=output_scale_factor,
- pre_norm=resnet_pre_norm,
- )
- )
- if dual_cross_attention:
- raise NotImplementedError
- attentions.append(
- Transformer2DModel(
- attn_num_head_channels,
- out_channels // attn_num_head_channels,
- in_channels=out_channels,
- num_layers=1,
- cross_attention_dim=cross_attention_dim,
- norm_num_groups=resnet_groups,
- use_linear_projection=use_linear_projection,
- only_cross_attention=only_cross_attention,
- upcast_attention=upcast_attention,
- use_sc_attn=use_sc_attn,
- use_st_attn=True if (use_st_attn and i == 0) else False,
- )
- )
-
- self.attentions = nn.ModuleList(attentions)
- self.resnets = nn.ModuleList(resnets)
-
- if add_upsample:
- self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
- else:
- self.upsamplers = None
-
- self.gradient_checkpointing = False
-
- def forward(
- self,
- hidden_states,
- res_hidden_states_tuple,
- temb=None,
- encoder_hidden_states=None,
- upsample_size=None,
- attention_mask=None,
- normal_infer=False,
- ):
- for resnet, attn in zip(self.resnets, self.attentions):
- # pop res hidden states
- res_hidden_states = res_hidden_states_tuple[-1]
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
- hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
-
- if self.training and self.gradient_checkpointing:
-
- def create_custom_forward(module, return_dict=None, normal_infer=False):
- def custom_forward(*inputs):
- if return_dict is not None:
- return module(*inputs, return_dict=return_dict, normal_infer=normal_infer)
- else:
- return module(*inputs)
-
- return custom_forward
-
- hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
- hidden_states = torch.utils.checkpoint.checkpoint(
- create_custom_forward(attn, return_dict=False, normal_infer=normal_infer),
- hidden_states,
- encoder_hidden_states,
- )[0]
- else:
- hidden_states = resnet(hidden_states, temb)
- hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states, normal_infer=normal_infer).sample
-
- if self.upsamplers is not None:
- for upsampler in self.upsamplers:
- hidden_states = upsampler(hidden_states, upsample_size)
-
- return hidden_states
-
-
-class UpBlock2D(nn.Module):
- def __init__(
- self,
- in_channels: int,
- prev_output_channel: int,
- out_channels: int,
- temb_channels: int,
- dropout: float = 0.0,
- num_layers: int = 1,
- resnet_eps: float = 1e-6,
- resnet_time_scale_shift: str = "default",
- resnet_act_fn: str = "swish",
- resnet_groups: int = 32,
- resnet_pre_norm: bool = True,
- output_scale_factor=1.0,
- add_upsample=True,
- ):
- super().__init__()
- resnets = []
-
- for i in range(num_layers):
- res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
- resnet_in_channels = prev_output_channel if i == 0 else out_channels
-
- resnets.append(
- ResnetBlock2D(
- in_channels=resnet_in_channels + res_skip_channels,
- out_channels=out_channels,
- temb_channels=temb_channels,
- eps=resnet_eps,
- groups=resnet_groups,
- dropout=dropout,
- time_embedding_norm=resnet_time_scale_shift,
- non_linearity=resnet_act_fn,
- output_scale_factor=output_scale_factor,
- pre_norm=resnet_pre_norm,
- )
- )
-
- self.resnets = nn.ModuleList(resnets)
-
- if add_upsample:
- self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
- else:
- self.upsamplers = None
-
- self.gradient_checkpointing = False
-
- def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
- for resnet in self.resnets:
- # pop res hidden states
- res_hidden_states = res_hidden_states_tuple[-1]
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
- hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
-
- if self.training and self.gradient_checkpointing:
-
- def create_custom_forward(module):
- def custom_forward(*inputs):
- return module(*inputs)
-
- return custom_forward
-
- hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
- else:
- hidden_states = resnet(hidden_states, temb)
-
- if self.upsamplers is not None:
- for upsampler in self.upsamplers:
- hidden_states = upsampler(hidden_states, upsample_size)
-
- return hidden_states
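
All of the blocks above share one gradient-checkpointing idiom: `create_custom_forward` wraps a submodule in a plain closure so that `torch.utils.checkpoint.checkpoint` can re-run its forward pass during backprop instead of caching every intermediate activation. A minimal, self-contained sketch of that idiom (the toy module and tensor shapes are invented for illustration):

```python
# Sketch of the checkpointing pattern used by the blocks above; the module
# and shapes are illustrative only.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint


def create_custom_forward(module):
    # checkpoint() expects a plain callable taking tensors, so the module is
    # adapted with a closure, exactly as the blocks above do.
    def custom_forward(*inputs):
        return module(*inputs)
    return custom_forward


block = nn.Sequential(nn.Linear(16, 16), nn.GELU(), nn.Linear(16, 16))
x = torch.randn(4, 16, requires_grad=True)

# Activations inside `block` are not stored during the forward pass; they are
# recomputed on the backward pass, trading extra compute for less memory.
y = checkpoint(create_custom_forward(block), x)
y.sum().backward()
```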
diff --git a/spaces/Banbri/zcvzcv/src/lib/dirtyLLMResponseCleaner.ts b/spaces/Banbri/zcvzcv/src/lib/dirtyLLMResponseCleaner.ts
deleted file mode 100644
index f3052c217445760d102949a11c64384f488865ae..0000000000000000000000000000000000000000
--- a/spaces/Banbri/zcvzcv/src/lib/dirtyLLMResponseCleaner.ts
+++ /dev/null
@@ -1,46 +0,0 @@
-export function dirtyLLMResponseCleaner(input: string) {
- let str = (
- `${input || ""}`
- // a summary of all the weird hallucinations I saw it make..
- .replaceAll(`"]`, `"}]`)
- .replaceAll(`" ]`, `"}]`)
-    .replaceAll(`"  ]`, `"}]`)
- .replaceAll(`"\n]`, `"}]`)
- .replaceAll(`"\n ]`, `"}]`)
-    .replaceAll(`"\n  ]`, `"}]`)
- .replaceAll("}}", "}")
- .replaceAll("]]", "]")
- .replaceAll("[[", "[")
- .replaceAll("{{", "{")
- .replaceAll(",,", ",")
- .replaceAll("[0]", "")
- .replaceAll("[1]", "")
- .replaceAll("[2]", "")
- .replaceAll("[3]", "")
- .replaceAll("[4]", "")
- .replaceAll("[panel 0]", "")
- .replaceAll("[panel 1]", "")
- .replaceAll("[panel 2]", "")
- .replaceAll("[panel 3]", "")
- .replaceAll("[panel 4]", "")
- )
-
- // repair missing end of JSON array
- if (str.at(-1) === '}') {
- str = str + "]"
- }
-
- if (str.at(-1) === '"') {
- str = str + "}]"
- }
-
- if (str[0] === '{') {
- str = "[" + str
- }
-
- if (str[0] === '"') {
- str = "[{" + str
- }
-
- return str
-}
\ No newline at end of file
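
`dirtyLLMResponseCleaner` is a last-resort patcher for model output that is almost, but not quite, a JSON array: it normalizes doubled brackets and stray panel markers, then closes or reopens the array based on the first and last characters. A Python re-expression of the leading/trailing repair step, with an invented sample input (the TypeScript above is the original; this sketch only illustrates the heuristic):

```python
# Illustrative re-expression of the bracket-repair heuristic from the
# TypeScript helper above; the sample input is invented.
import json


def repair_llm_json_array(text: str) -> list:
    s = (text or "").strip()
    if s.endswith("}"):    # repair a missing end of the JSON array
        s += "]"
    if s.endswith('"'):    # repair a missing end of the last object
        s += "}]"
    if s.startswith("{"):  # repair a missing start of the array
        s = "[" + s
    if s.startswith('"'):  # repair a missing start of the first object
        s = "[{" + s
    return json.loads(s)


# A response truncated before the closing bracket becomes parseable again:
print(repair_llm_json_array('[{"panel": 1, "caption": "a quiet street"}'))
```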
diff --git a/spaces/Benson/text-generation/Examples/Descargar Efecto De Sonido De Bocina.md b/spaces/Benson/text-generation/Examples/Descargar Efecto De Sonido De Bocina.md
deleted file mode 100644
index e32b985c15e01cb5d6227542357183f0157e510a..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Efecto De Sonido De Bocina.md
+++ /dev/null
@@ -1,86 +0,0 @@
-
-
-Download Airhorn Sound Effect: How to Find and Use the Best Royalty-Free Sounds
-
-If you are looking for a loud, attention-grabbing, and fun sound effect for your projects, you may want to consider downloading an airhorn sound effect. An airhorn sound effect is a short, high-pitched burst of noise that can be used for various purposes, such as announcing something important, creating excitement, or adding humor. In this article, we will explain what an airhorn sound effect is, why you might need it, and how to find and use the best royalty-free airhorn sound effects for your projects.
-
-What is an airhorn sound effect and why do you need it?
-
-The history and origin of the airhorn sound
-
-An airhorn is a device that produces a loud noise by forcing compressed air through a metal or plastic horn. It was originally invented in the 19th century as a signaling device for ships, trains, and vehicles. Later, it was adopted by sports fans, DJs, and musicians as a way to express enthusiasm, cheer people on, or celebrate. The airhorn sound became popular in genres such as reggae, hip hop, dancehall, and electronic music, where it was used as a sample or remix element. Today, the airhorn sound is widely recognized as a symbol of excitement, energy, and fun.
-The different types and uses of the airhorn sound
-
-There are many different types and variations of the airhorn sound effect, depending on the source, duration, pitch, and intensity of the noise. Some common types of airhorn sound effects are:
-
-
-DJ horn: A short, high-pitched blast that is often used by DJs to hype up the crowd or introduce a new song.
-
-Siren: A long, high-pitched wail that is often used to indicate danger, an emergency, or an alarm.
-
-Honk: A medium-pitched beep that is often used to signal something or get someone's attention.
-
-
-
-The airhorn sound effect can be used for various purposes in different projects, such as:
-
-
-Podcasts: To announce a new episode, segment, or guest.
-
-Videos: To create suspense, drama, or comedy.
-
-Games: To reward players, signal success, or add some challenge.
-
-Apps: To notify users, provide feedback, or enhance the user experience.
-
-
-The benefits of using royalty-free sound effects
-
-One of the main advantages of using royalty-free airhorn sound effects is that you do not have to worry about paying fees or royalties to use them in your projects. This means you can save money, time, and hassle while still getting high-quality sounds. Another benefit is that you can easily find and download them from a variety of websites and online sources, so you can browse a wide range of sounds and pick the ones that suit your needs and preferences. Finally, royalty-free airhorn sound effects can help make your projects more engaging, entertaining, and memorable for your audience, which in turn can boost your popularity, reputation, and success.
-
-How to find and download the best royalty-free airhorn sound effects
-
-The criteria for choosing a good airhorn sound effect
-
-Before downloading any airhorn sound effect, you should consider a few criteria that can help you choose a good one. Some of these criteria are:
-
-
-Quality: The sound should be clear, crisp, and loud enough to be heard well.
-
-Format: The sound should be compatible with your project's format, such as MP3, WAV, or OGG.
-
-License: The sound should be royalty-free, meaning you can use it for any purpose without paying fees or royalties.
-
-Relevance: The sound should match the theme, tone, and purpose of your project.
-
-
-
-The best websites and sources for downloading free airhorn sound effects
-
-There are many websites and online sources that offer free airhorn sound effects to download. However, not all of them are reliable, safe, and high quality. Here are some of the best websites and sources we recommend:
-
-Pixabay
-
-Pixabay is a popular website that offers free images, videos, music, and sound effects to download. You can find more than 200 airhorn sound effects on Pixabay, ranging from DJ horns to sirens and honks. All of the sounds are royalty-free and can be used for any purpose. You can also filter the sounds by duration, type, category, and tags. To download an airhorn sound effect from Pixabay, just click the download button and choose the format and size you want.
-
-ZapSplat
-
-ZapSplat is another website that offers free sound effects and music to download. You can find more than 100 airhorn sound effects on ZapSplat, including horn blasts, stadium sounds, and party sounds. All of the sounds are royalty-free and can be used for any purpose. You can also browse the sounds by category, genre, mood, and keyword. To download an airhorn sound effect from ZapSplat, you need to create a free account and then click the download button.
-
-
-Other websites
-
-Some other websites that offer free airhorn sound effects are:
-
-
-Freesound: A website that hosts a large collection of user-uploaded sounds licensed under Creative Commons.
-
-SoundBible: A website that provides free sound clips and effects that are in the public domain or royalty-free.
-
-SoundJay: A website that offers free sound effects and music clips that are royalty-free and can be used for personal or commercial projects.
-
-
-How to download and use airhorn sound effects in your projects
-
-
-The steps to download airhorn sound effects from any of the websites mentioned above are:
-
-
-Go to the website and search for the airhorn sound effect you want.
-
-Preview the sound and check its quality, format, license, and relevance.
-
-Click the download button and choose the format and size you want.
-
-Save the file to your device or cloud storage.
-
-
-Tips for using airhorn sound effects effectively
-
-Some tips for using airhorn sound effects effectively in your projects are:
-
-
-Use the airhorn sound effect sparingly and strategically. Do not overuse it, or it will lose its impact and annoy your audience.
-
-Use the type and pitch of airhorn sound appropriate to the theme, tone, and purpose of your project. For example, use a siren for a serious or dramatic project, or a horn blast for a festive or celebratory one.
-
-Use the right volume and timing for the context and flow of your project. For example, use a loud, sudden horn for a surprise or shock effect, or a soft, gradual horn for a build-up or transition.
-
-Use the airhorn sound effect creatively and experimentally. Try mixing it with other sounds or music, or modifying it with filters or effects.
-
-
-Conclusion
-
-Summary of the main points
-
-
-Call to action
-
-Now that you have learned how to download an airhorn sound effect and use it in your projects, why not give it a try? You can find and download hundreds of free airhorn sound effects from the websites mentioned above and use them to liven up your podcasts, videos, games, apps, and more. You will be amazed at how much fun and excitement you can create with a simple blast of noise. So go ahead, download an airhorn sound effect today, and see the difference it can make in your projects.
-
-Frequently asked questions
-
-Here are some frequently asked questions about downloading airhorn sound effects:
-
-
-Q: Is it legal to use airhorn sound effects in my projects?
-
-A: Yes, as long as you use royalty-free airhorn sound effects that are licensed for any purpose. You should always check a sound's license and terms of use before downloading and using it.
-
-Q: How can I edit or customize airhorn sound effects?
-
-A: You can use any audio editing software or app to edit or customize airhorn sound effects. You can change the volume, pitch, speed, or duration, or add filters or effects to the sound.
-
-Q: How can I avoid annoying or offending my audience with airhorn sound effects?
-
-A: Use airhorn sound effects sparingly and strategically. Do not overuse them or use them in inappropriate or irrelevant situations, and consider your audience's preferences, expectations, and sensitivities when choosing and using them.
-
-Q: How can I make my own airhorn sound effects?
-
-A: You can make your own airhorn sound effects by recording a real airhorn or by synthesizing a similar sound with a digital instrument or software. You can also mix and match different sounds or samples to create your own unique airhorn sound effects.
-
-Q: Where can I find more information and resources about airhorn sound effects?
-
-A: You can find more information and resources about airhorn sound effects by searching online or visiting some of the websites mentioned above. You can also watch tutorials or videos on how to use airhorn sound effects in different projects.
-
-
\ No newline at end of file
diff --git a/spaces/Benson/text-generation/Examples/Descargar Escuchar Msica Apk.md b/spaces/Benson/text-generation/Examples/Descargar Escuchar Msica Apk.md
deleted file mode 100644
index d3245d5d9eec01d88fc68e4649bd961f76551204..0000000000000000000000000000000000000000
--- a/spaces/Benson/text-generation/Examples/Descargar Escuchar Msica Apk.md
+++ /dev/null
@@ -1,135 +0,0 @@
-
-
-How to Download the Play Music APK for Android
-
-If you are looking for a way to enjoy your favorite music and podcasts on your Android device, you might want to try the Play Music APK. This is a modified version of the official Google Play Music app that offers some extra features and benefits that are not available in the original app. In this article, we will show you how to download the Play Music APK for Android, along with some tips and tricks for using it.
-
-What is the Play Music APK?
-
-The Play Music APK is an unofficial app that lets you access the Google Play Music service on your Android device. Google Play Music is a streaming service that lets you listen to millions of songs and podcasts across various genres, artists, and categories. You can also upload your own music collection to the cloud and stream it from any device.
-
-The Play Music APK differs from the official Google Play Music app in several ways. Some of its features and benefits are:
-
-
-It does not require a Google account.
-
-It has no ads or interruptions.
-
-It has a dark mode option that is easier on the eyes.
-
-It has a built-in equalizer that lets you adjust the sound quality.
-
-It has a download manager that lets you manage your downloads more efficiently.
-
-It has a sleep timer that lets you set a time limit for music playback.
-
-
-Why download the Play Music APK?
-
-There are many reasons why you might want to download the Play Music APK for Android. Some of them are:
-
-
-You want to enjoy the Google Play Music service without signing in with a Google account.
-
-You want to avoid ads and interruptions while listening to music and podcasts.
-
-You want more control over the app's appearance and sound quality.
-
-You want more options for downloading and managing your offline music.
-
-
-
-How to download the Play Music APK?
-
-Downloading the Play Music APK for Android is not difficult, but it involves a few steps that differ from downloading apps from the Google Play Store. Here is a step-by-step guide:
-
-Step 1: Enable unknown sources
-
-The first thing you need to do is enable unknown sources on your device. This will allow you to install apps from sources other than the Google Play Store. To do this, follow these steps:
-
-
-Go to Settings > Security > Unknown sources.
-
-Toggle the switch on to enable unknown sources.
-
-Tap OK to confirm your choice.
-
-
-Note: The exact steps may vary depending on your device model and Android version. You can also search for "unknown sources" in the Settings app to find the option.
-
-Step 2: Find a reliable source
-
-The next thing you need to do is find a reliable source from which to download the Play Music APK. There are many websites that offer APK files, but not all of them are safe and trustworthy. Some may contain malware, viruses, or fake apps that can harm your device or steal your data. To avoid this, look for a reputable website with positive reviews, ratings, and comments from other users. You can also use a virus scanner or an antivirus app to check the APK file before downloading it.
-
-One of the websites we recommend for downloading the Play Music APK is APKPure.com. This is a popular and trusted website that provides original, unmodified APK files for various apps and games. You can also find the latest version of the Play Music APK on this website, along with other information such as the app's size, developer, and description.
-
-Step 3: Download the APK file
-
-Once you have found a reliable source, you can proceed to download the APK file to your device. To do this, follow these steps:
-
-
-
-
-Tap the Download APK button and wait for the download to start.
-
-You may see a warning message that says "This type of file can harm your device." Tap OK to continue.
-
-The download will be saved to your Downloads folder, or to whatever location you have set as your default download location.
-
-
-Step 4: Install the APK file
-
-After downloading the APK file, you need to install it on your device. To do this, follow these steps:
-
-
-Go to your Downloads folder, or to wherever you saved the APK file.
-
-Tap the Play Music APK file and tap Install.
-
-You may see a message that says "Do you want to install this app? It does not require any special access." Tap Install again.
-
-Wait for the installation to finish and tap Done.
-
-
-Step 5: Launch the app and enjoy
-
-The final step is to launch the app and enjoy its features and benefits. To do this, follow these steps:
-
-
-Go to your app drawer and look for the Play Music icon. It may have a different name or appearance than the official Google Play Music app.
-
-Tap the icon to open the app.
-
-You may see a message that says "Allow Play Music to access photos, media, and files on your device?" Tap Allow to grant permission.
-
-You may also see a message that says "Allow Play Music to make and manage phone calls?" Tap Allow to grant permission.
-
-You will see the app's main screen with options such as Library, Recents, Top Charts, New Releases, Podcasts, and Settings.
-
-You can now enjoy listening to music and podcasts with the Play Music APK.
-
-
-Tips and tricks for using the Play Music APK
-
-Now that you have downloaded and installed the Play Music APK on your device, you may want to know some tips and tricks for using it more effectively. Here are some of them:
-
-Tip 1: Customize your library
-
-
-
-Tap the Library option at the bottom of the screen.
-
-Tap the menu icon in the top right corner of the screen.
-
-Tap Edit Library.
-
-Select or deselect the items you want to add to or remove from your library.
-
-Tap Done when you are finished.
-
-
-Tip 2: Create and share playlists
-
-Another thing you can do with the Play Music APK is create and share playlists. You can build playlists around your mood, an activity, a genre, an artist, or any other criteria you like, and share them with friends, family, or anyone else who has the Play Music APK. To do this, follow these steps:
-
-
-Tap the Library option at the bottom of the screen.
-
-Tap the plus icon in the top right corner of the screen.
-
-Tap New Playlist.
-
-Enter a name and description for your playlist.
-
-Tap Add Music.
-
-Select the songs you want to add to your playlist.
-
-Tap Done when you are finished.
-
-Tap the menu icon next to your playlist's name.
-
-Tap Share.
-
-Select the app or method you want to use to share your playlist.
-
-
-Tip 3: Download music for offline listening
-
-One of the best features of the Play Music APK is that it lets you download music for offline listening. This means you can listen to your favorite songs and podcasts without an internet connection, which is especially useful when you are traveling, commuting, or in areas with poor network coverage. To do this, follow these steps:
-
-
-Tap the Library option at the bottom of the screen.
-
-Select the songs, albums, artists, genres, playlists, stations, or podcasts you want to download.
-
-Tap the download icon in the top right corner of the screen.
-
-Wait for the download to complete and tap Done.
-
-
-
-Tip 4: Adjust the sound quality
-
-If you want more control over the sound quality of the Play Music APK, you can use the built-in equalizer, which lets you adjust the bass, treble, and other sound effects. You can also choose from presets such as Normal, Pop, Rock, Jazz, Classical, and more. To do this, follow these steps:
-
-
-Tap the Settings option at the bottom of the screen.
-
-Tap Equalizer.
-
-Use the sliders or buttons to adjust the sound quality to your preference.
-
-Tap Done when you are finished.
-
-
-Tip 5: Explore new music and podcasts
-
-If you want to discover new music and podcasts in the Play Music APK, you can use the various options available in the app. You can browse categories such as Top Charts, New Releases, Podcasts, Genres, Moods, Activities, and more, or search for specific songs, artists, albums, podcasts, or keywords. To do this, follow these steps:
-
-
-Tap the Recents option at the bottom of the screen.
-
-Select one of the options you want to explore.
-
-Swipe left or right to see more options, or tap See All to see more results.
-
-Tap any item you want to listen to or add to your library.
-
-
-Conclusion
-
-
-Frequently asked questions
-
-Here are some frequently asked questions about the Play Music APK:
-
-Q: Is the Play Music APK safe and legal?
-
-A: The Play Music APK is safe and legal as long as you download it from a reputable website such as APKPure.com and scan it with a virus scanner or an antivirus app before installing it. However, you should be aware that the Play Music APK is not an official Google app and may violate some of Google's terms and conditions. Use it at your own risk and discretion.
-
-Q: Is the Play Music APK free?
-
-A: The Play Music APK is free to download and use. You do not need to pay any subscription fees or charges to access the Google Play Music service. However, you may have to pay for some songs or albums that are not available for streaming or download in the app.
-
-Q: How do I update the Play Music APK?
-
-A: The Play Music APK does not update automatically like the official Google Play Music app. You need to check for updates manually and download the latest version of the app from the same source you originally downloaded it from. You can also enable notifications on that website so you are alerted when a new version is available.
-
-Q: How do I uninstall the Play Music APK?
-
-A: If you want to uninstall the Play Music APK from your device, you can do so by following these steps:
-
-
-Go to Settings > Apps > Play Music.
-
-Tap Uninstall and confirm your choice.
-
-Wait for the uninstallation to finish and tap OK.
-
-
-Q: How can I contact the developer of the Play Music APK?
-
-A: The Play Music APK is made by an unknown developer who has no official website or contact information, so it is not possible to contact the developer directly. However, you can leave your comments, suggestions, or questions on the website you downloaded it from, such as APKPure.com, and hope that the developer sees and responds to them.
-
-
\ No newline at end of file
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py
deleted file mode 100644
index d39dc1adba0f00d2f7bdf6fa2cd1abcd82475e2e..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/importlib_resources/abc.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import abc
-from typing import BinaryIO, Iterable, Text
-
-from ._compat import runtime_checkable, Protocol
-
-
-class ResourceReader(metaclass=abc.ABCMeta):
- """Abstract base class for loaders to provide resource reading support."""
-
- @abc.abstractmethod
- def open_resource(self, resource: Text) -> BinaryIO:
- """Return an opened, file-like object for binary reading.
-
- The 'resource' argument is expected to represent only a file name.
- If the resource cannot be found, FileNotFoundError is raised.
- """
- # This deliberately raises FileNotFoundError instead of
- # NotImplementedError so that if this method is accidentally called,
- # it'll still do the right thing.
- raise FileNotFoundError
-
- @abc.abstractmethod
- def resource_path(self, resource: Text) -> Text:
- """Return the file system path to the specified resource.
-
- The 'resource' argument is expected to represent only a file name.
- If the resource does not exist on the file system, raise
- FileNotFoundError.
- """
- # This deliberately raises FileNotFoundError instead of
- # NotImplementedError so that if this method is accidentally called,
- # it'll still do the right thing.
- raise FileNotFoundError
-
- @abc.abstractmethod
- def is_resource(self, path: Text) -> bool:
- """Return True if the named 'path' is a resource.
-
- Files are resources, directories are not.
- """
- raise FileNotFoundError
-
- @abc.abstractmethod
- def contents(self) -> Iterable[str]:
- """Return an iterable of entries in `package`."""
- raise FileNotFoundError
-
-
-@runtime_checkable
-class Traversable(Protocol):
- """
- An object with a subset of pathlib.Path methods suitable for
- traversing directories and opening files.
- """
-
- @abc.abstractmethod
- def iterdir(self):
- """
- Yield Traversable objects in self
- """
-
- def read_bytes(self):
- """
- Read contents of self as bytes
- """
- with self.open('rb') as strm:
- return strm.read()
-
- def read_text(self, encoding=None):
- """
- Read contents of self as text
- """
- with self.open(encoding=encoding) as strm:
- return strm.read()
-
- @abc.abstractmethod
- def is_dir(self) -> bool:
- """
- Return True if self is a directory
- """
-
- @abc.abstractmethod
- def is_file(self) -> bool:
- """
- Return True if self is a file
- """
-
- @abc.abstractmethod
- def joinpath(self, child):
- """
- Return Traversable child in self
- """
-
- def __truediv__(self, child):
- """
- Return Traversable child in self
- """
- return self.joinpath(child)
-
- @abc.abstractmethod
- def open(self, mode='r', *args, **kwargs):
- """
- mode may be 'r' or 'rb' to open as text or binary. Return a handle
- suitable for reading (same as pathlib.Path.open).
-
- When opening as text, accepts encoding parameters such as those
- accepted by io.TextIOWrapper.
- """
-
- @abc.abstractproperty
- def name(self) -> str:
- """
- The base name of this object without any parent references.
- """
-
-
-class TraversableResources(ResourceReader):
- """
- The required interface for providing traversable
- resources.
- """
-
- @abc.abstractmethod
- def files(self):
- """Return a Traversable object for the loaded package."""
-
- def open_resource(self, resource):
- return self.files().joinpath(resource).open('rb')
-
- def resource_path(self, resource):
- raise FileNotFoundError(resource)
-
- def is_resource(self, path):
- return self.files().joinpath(path).is_file()
-
- def contents(self):
- return (item.name for item in self.files().iterdir())
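
The two ABCs above divide the work cleanly: `Traversable` is a runtime-checkable protocol with a `pathlib.Path`-shaped surface, and `TraversableResources` derives all four `ResourceReader` methods from a single abstract `files()` accessor. Since `pathlib.Path` already provides `iterdir`, `is_dir`, `is_file`, `joinpath`, `open`, and `name`, a directory-backed reader needs almost no code. A minimal sketch of that layering; the import path and class name are assumptions for illustration, not part of the vendored package:

```python
# Minimal sketch: a concrete reader that serves resources from a directory.
# The import path below is an assumption; in this vendored copy the module
# lives under pkg_resources._vendor.importlib_resources.
import pathlib

from importlib_resources.abc import TraversableResources


class DirectoryResources(TraversableResources):
    """Serve package resources straight from a filesystem directory."""

    def __init__(self, root: str):
        self._root = pathlib.Path(root)

    def files(self):
        # pathlib.Path structurally satisfies the Traversable protocol, so
        # the inherited open_resource/is_resource/contents all just work.
        return self._root


reader = DirectoryResources(".")
print(list(reader.contents()))          # entry names in the directory
print(reader.is_resource("setup.py"))   # True only if that file exists here
```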
diff --git a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/alias.py b/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/alias.py
deleted file mode 100644
index 452a9244ea6766d8cf94425fb583583ef740baee..0000000000000000000000000000000000000000
--- a/spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/command/alias.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from distutils.errors import DistutilsOptionError
-
-from setuptools.command.setopt import edit_config, option_base, config_file
-
-
-def shquote(arg):
- """Quote an argument for later parsing by shlex.split()"""
- for c in '"', "'", "\\", "#":
- if c in arg:
- return repr(arg)
- if arg.split() != [arg]:
- return repr(arg)
- return arg
-
-
-class alias(option_base):
- """Define a shortcut that invokes one or more commands"""
-
- description = "define a shortcut to invoke one or more commands"
- command_consumes_arguments = True
-
- user_options = [
- ('remove', 'r', 'remove (unset) the alias'),
- ] + option_base.user_options
-
- boolean_options = option_base.boolean_options + ['remove']
-
- def initialize_options(self):
- option_base.initialize_options(self)
- self.args = None
- self.remove = None
-
- def finalize_options(self):
- option_base.finalize_options(self)
- if self.remove and len(self.args) != 1:
- raise DistutilsOptionError(
- "Must specify exactly one argument (the alias name) when "
- "using --remove"
- )
-
- def run(self):
- aliases = self.distribution.get_option_dict('aliases')
-
- if not self.args:
- print("Command Aliases")
- print("---------------")
- for alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
-
- elif len(self.args) == 1:
- alias, = self.args
- if self.remove:
- command = None
- elif alias in aliases:
- print("setup.py alias", format_alias(alias, aliases))
- return
- else:
- print("No alias definition found for %r" % alias)
- return
- else:
- alias = self.args[0]
- command = ' '.join(map(shquote, self.args[1:]))
-
- edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
-
-
-def format_alias(name, aliases):
- source, command = aliases[name]
- if source == config_file('global'):
- source = '--global-config '
- elif source == config_file('user'):
- source = '--user-config '
- elif source == config_file('local'):
- source = ''
- else:
- source = '--filename=%r' % source
- return source + name + ' ' + command
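
`shquote` quotes only when it has to: arguments containing quotes, backslashes, `#`, or whitespace are wrapped via `repr()`, and everything else passes through unchanged so the stored alias stays readable. A small round-trip check of that behavior (the sample arguments are invented for illustration):

```python
# Round-trip check for the shquote/shlex pairing described above;
# sample arguments are invented.
import shlex


def shquote(arg):
    """Quote an argument for later parsing by shlex.split()"""
    for c in '"', "'", "\\", "#":
        if c in arg:
            return repr(arg)
    if arg.split() != [arg]:
        return repr(arg)
    return arg


args = ["test", "-q", "my file.txt", 'say "hi"']
command = " ".join(map(shquote, args))
print(command)                       # test -q 'my file.txt' 'say "hi"'
assert shlex.split(command) == args  # quoting survives the round trip
```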
diff --git a/spaces/Blackroot/Fancy-Audiogen/app.py b/spaces/Blackroot/Fancy-Audiogen/app.py
deleted file mode 100644
index 18c5576ae25db0cacf2c596ebb241c4442602fc8..0000000000000000000000000000000000000000
--- a/spaces/Blackroot/Fancy-Audiogen/app.py
+++ /dev/null
@@ -1,114 +0,0 @@
-import gradio as gr
-import os, json
-from generator import HijackedMusicGen
-from audiocraft.data.audio import audio_write
-from audio import predict
-from itertools import zip_longest
-
-def split_prompt(bigly_prompt, num_segments):
- prompts = bigly_prompt.split(',,')
- num_segments = int(num_segments) # Assuming 'segment' comes as a string from Gradio slider
- # repeat last prompt to fill in the rest
- if len(prompts) < num_segments:
- prompts += [prompts[-1]] * (num_segments - len(prompts))
- elif len(prompts) > num_segments:
- prompts = prompts[:num_segments]
- return prompts
-
-loaded_model = None
-audio_files = []
-def model_interface(model_name, top_k, top_p, temperature, cfg_coef, segments, overlap, duration, optional_audio, prompt):
- global loaded_model
-
- if loaded_model is None or loaded_model.name != model_name:
- loaded_model = HijackedMusicGen.get_pretrained(None, name=model_name)
-
- print(optional_audio)
-
- loaded_model.set_generation_params(
- use_sampling=True,
- duration=duration,
- top_p=top_p,
- top_k=top_k,
- temperature=temperature,
- cfg_coef=cfg_coef,
- )
-
- extension_parameters = {"segments":segments, "overlap":overlap}
- optional_audio_parameters = {"optional_audio":optional_audio, "sample_rate":loaded_model.sample_rate}
-
- prompts = split_prompt(prompt, segments)
- first_prompt = prompts[0]
-
- sample_rate, audio = predict(loaded_model, prompts, duration, optional_audio_parameters, extension_parameters)
-
- counter = 1
- audio_path = "static/"
- audio_name = first_prompt
- while os.path.exists(audio_path + audio_name + ".wav"):
- audio_name = f"{first_prompt}({counter})"
- counter += 1
-
- file = audio_write(audio_path + audio_name, audio.squeeze(), sample_rate, strategy="loudness")
- audio_files.append(file)
-
- audio_list_html = " ".join([
- f'''
-
-
{os.path.splitext(os.path.basename(file))[0]}
-
-
- '''
- for file in reversed(audio_files)
- ])
-
- return audio_list_html
-
-slider_param = {
- "top_k": {"minimum": 0, "maximum": 1000, "value": 0, "label": "Top K"},
- "top_p": {"minimum": 0.0, "maximum": 1.0, "value": 0.0, "label": "Top P"},
- "temperature": {"minimum": 0.1, "maximum": 10.0, "value": 1.0, "label": "Temperature"},
- "cfg_coef": {"minimum": 0.0, "maximum": 10.0, "value": 4.0, "label": "CFG Coefficient"},
- "segments": {"minimum": 1, "maximum": 10, "value": 1, "step": 1, "label": "Number of Segments"},
- "overlap": {"minimum": 0.0, "maximum": 10.0, "value": 1.0, "label": "Segment Overlap"},
- "duration": {"minimum": 1, "maximum": 300, "value": 10, "label": "Duration"},
-}
-
-slider_params = {
- key: gr.components.Slider(**params)
- for key, params in slider_param.items()
-}
-
-with gr.Blocks() as interface:
- with gr.Row():
-
- with gr.Column():
- with gr.Row():
- model_dropdown = gr.components.Dropdown(choices=["small", "medium", "large", "melody"], label="Model Size", value="large")
- optional_audio = gr.components.Audio(source="upload", type="numpy", label="Optional Audio", interactive=True)
-
- slider_keys = list(slider_param.keys())
- slider_pairs = list(zip_longest(slider_keys[::2], slider_keys[1::2]))
-
- for key1, key2 in slider_pairs:
- with gr.Row():
- with gr.Column():
- slider_params[key1] = gr.components.Slider(**slider_param[key1])
- if key2 is not None:
- with gr.Column():
- slider_params[key2] = gr.components.Slider(**slider_param[key2])
-
- prompt_box = gr.components.Textbox(lines=5, placeholder="""Insert a double comma ,, to indicate this should prompt a new segment. For example:
- Rock Opera,,Dueling Banjos
- This allows you to prompt each segment individually. If you only provide one prompt, every segment will use that one prompt. If you provide multiple prompts but less than the number of segments, then the last prompt will be used to fill in the rest.
- """)
- submit = gr.Button("Submit")
-
- with gr.Column():
- output = gr.outputs.HTML()
-
- inputs_list = [model_dropdown] + list(slider_params.values()) + [optional_audio] + [prompt_box]
- submit.click(model_interface, inputs=inputs_list, outputs=[output])
-
-interface.queue()
-interface.launch()
\ No newline at end of file
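
The `,,` convention handled by `split_prompt` is what makes per-segment prompting work: the prompt string is split on double commas, truncated to the segment count, and padded by repeating the last prompt. A quick illustration of that behavior, reusing the function body from the file above:

```python
# split_prompt behavior, copied from the app above and exercised directly.
def split_prompt(bigly_prompt, num_segments):
    prompts = bigly_prompt.split(',,')
    num_segments = int(num_segments)
    if len(prompts) < num_segments:
        # repeat the last prompt to fill the remaining segments
        prompts += [prompts[-1]] * (num_segments - len(prompts))
    elif len(prompts) > num_segments:
        prompts = prompts[:num_segments]
    return prompts


print(split_prompt("Rock Opera,,Dueling Banjos", 4))
# ['Rock Opera', 'Dueling Banjos', 'Dueling Banjos', 'Dueling Banjos']
print(split_prompt("Rock Opera,,Dueling Banjos,,Lo-fi", 2))
# ['Rock Opera', 'Dueling Banjos']
```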
diff --git a/spaces/Bong15/Rewrite/README.md b/spaces/Bong15/Rewrite/README.md
deleted file mode 100644
index 451dd86fea4f5ec23a328a09f627f49d9281037c..0000000000000000000000000000000000000000
--- a/spaces/Bong15/Rewrite/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Rewrite
-emoji: 🚀
-colorFrom: green
-colorTo: green
-sdk: streamlit
-sdk_version: 1.15.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/CVPR/LIVE/cmake/FindTensorFlow.cmake b/spaces/CVPR/LIVE/cmake/FindTensorFlow.cmake
deleted file mode 100644
index b251b10538f69f3dce42370e840f167ea24fc4fc..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/cmake/FindTensorFlow.cmake
+++ /dev/null
@@ -1,34 +0,0 @@
-# https://github.com/PatWie/tensorflow-cmake/blob/master/cmake/modules/FindTensorFlow.cmake
-
-execute_process(
- COMMAND python -c "exec(\"try:\\n import tensorflow as tf; print(tf.__version__); print(tf.__cxx11_abi_flag__);print(tf.sysconfig.get_include()); print(tf.sysconfig.get_lib())\\nexcept ImportError:\\n exit(1)\")"
- OUTPUT_VARIABLE TF_INFORMATION_STRING
- OUTPUT_STRIP_TRAILING_WHITESPACE
- RESULT_VARIABLE retcode)
-
-if("${retcode}" STREQUAL "0")
- string(REPLACE "\n" ";" TF_INFORMATION_LIST ${TF_INFORMATION_STRING})
- list(GET TF_INFORMATION_LIST 0 TF_DETECTED_VERSION)
- list(GET TF_INFORMATION_LIST 1 TF_DETECTED_ABI)
- list(GET TF_INFORMATION_LIST 2 TF_DETECTED_INCLUDE_DIR)
- list(GET TF_INFORMATION_LIST 3 TF_DETECTED_LIBRARY_DIR)
- if(WIN32)
- find_library(TF_DETECTED_LIBRARY NAMES _pywrap_tensorflow_internal PATHS
- ${TF_DETECTED_LIBRARY_DIR}/python)
- else()
- # For some reason my tensorflow doesn't have a .so file
- list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.1)
- list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.2)
- find_library(TF_DETECTED_LIBRARY NAMES tensorflow_framework PATHS
- ${TF_DETECTED_LIBRARY_DIR})
- endif()
- set(TensorFlow_VERSION ${TF_DETECTED_VERSION})
- set(TensorFlow_ABI ${TF_DETECTED_ABI})
- set(TensorFlow_INCLUDE_DIR ${TF_DETECTED_INCLUDE_DIR})
- set(TensorFlow_LIBRARY ${TF_DETECTED_LIBRARY})
- if(TensorFlow_LIBRARY AND TensorFlow_INCLUDE_DIR)
- set(TensorFlow_FOUND TRUE)
- else()
- set(TensorFlow_FOUND FALSE)
- endif()
-endif()
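
The module's entire detection step is the embedded `python -c` one-liner: it prints four lines (TensorFlow version, C++11 ABI flag, include directory, library directory), and CMake splits that output on newlines to populate the `TF_DETECTED_*` variables. The same probe unrolled into a readable script for clarity:

```python
# The detection probe from the CMake module above, unrolled for readability.
# It prints the four lines the module parses: version, ABI flag, include
# directory, and library directory; a missing TensorFlow exits non-zero.
try:
    import tensorflow as tf
    print(tf.__version__)
    print(tf.__cxx11_abi_flag__)
    print(tf.sysconfig.get_include())
    print(tf.sysconfig.get_lib())
except ImportError:
    raise SystemExit(1)
```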
diff --git a/spaces/CVPR/LIVE/thrust/thrust/for_each.h b/spaces/CVPR/LIVE/thrust/thrust/for_each.h
deleted file mode 100644
index dcc87f399445ce776aabee256f97cd0d4570bc99..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/for_each.h
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright 2008-2013 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-/*! \file thrust/for_each.h
- * \brief Applies a function to each element in a range
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-
-namespace thrust
-{
-
-
-/*! \addtogroup modifying
- * \ingroup transformations
- * \{
- */
-
-
-/*! \p for_each applies the function object \p f to each element
- * in the range [first, last); \p f's return value, if any,
- * is ignored. Unlike the C++ Standard Template Library function
- * std::for_each, this version offers no guarantee on
- * order of execution. For this reason, this version of \p for_each
- * does not return a copy of the function object.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param f The function object to apply to the range [first, last).
- * \return last
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam InputIterator is a model of Input Iterator,
- * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
- * \tparam UnaryFunction is a model of Unary Function,
- * and \p UnaryFunction does not apply any non-constant operation through its argument.
- *
- * The following code snippet demonstrates how to use \p for_each to print the elements
- * of a \p thrust::device_vector using the \p thrust::device parallelization policy:
- *
- * \code
- * #include <thrust/for_each.h>
- * #include <thrust/device_vector.h>
- * #include <thrust/execution_policy.h>
- * #include <cstdio>
- * ...
- *
- * struct printf_functor
- * {
- * __host__ __device__
- * void operator()(int x)
- * {
- * // note that using printf in a __device__ function requires
- * // code compiled for a GPU with compute capability 2.0 or
- * // higher (nvcc --arch=sm_20)
- * printf("%d\n", x);
- * }
- * };
- * ...
- * thrust::device_vector<int> d_vec(3);
- * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
- *
- * thrust::for_each(thrust::device, d_vec.begin(), d_vec.end(), printf_functor());
- *
- * // 0 1 2 is printed to standard output in some unspecified order
- * \endcode
- *
- * \see for_each_n
- * \see http://www.sgi.com/tech/stl/for_each.html
- */
-template<typename DerivedPolicy, typename InputIterator, typename UnaryFunction>
-__host__ __device__
-InputIterator for_each(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- InputIterator first,
- InputIterator last,
- UnaryFunction f);
-
-
-/*! \p for_each_n applies the function object \p f to each element
- * in the range [first, first + n); \p f's return value, if any,
- * is ignored. Unlike the C++ Standard Template Library function
- * std::for_each, this version offers no guarantee on
- * order of execution.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first The beginning of the sequence.
- * \param n The size of the input sequence.
- * \param f The function object to apply to the range [first, first + n).
- * \return first + n
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam InputIterator is a model of Input Iterator,
- * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
- * \tparam Size is an integral type.
- * \tparam UnaryFunction is a model of Unary Function,
- * and \p UnaryFunction does not apply any non-constant operation through its argument.
- *
- * The following code snippet demonstrates how to use \p for_each_n to print the elements
- * of a \p device_vector using the \p thrust::device parallelization policy.
- *
- * \code
- * #include <thrust/for_each.h>
- * #include <thrust/device_vector.h>
- * #include <thrust/execution_policy.h>
- * #include <cstdio>
- *
- * struct printf_functor
- * {
- * __host__ __device__
- * void operator()(int x)
- * {
- * // note that using printf in a __device__ function requires
- * // code compiled for a GPU with compute capability 2.0 or
- * // higher (nvcc --arch=sm_20)
- * printf("%d\n", x);
- * }
- * };
- * ...
- * thrust::device_vector<int> d_vec(3);
- * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
- *
- * thrust::for_each_n(thrust::device, d_vec.begin(), d_vec.size(), printf_functor());
- *
- * // 0 1 2 is printed to standard output in some unspecified order
- * \endcode
- *
- * \see for_each
- * \see http://www.sgi.com/tech/stl/for_each.html
- */
-template<typename DerivedPolicy, typename InputIterator, typename Size, typename UnaryFunction>
-__host__ __device__
-InputIterator for_each_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
- InputIterator first,
- Size n,
- UnaryFunction f);
-
-/*! \p for_each applies the function object \p f to each element
- * in the range [first, last); \p f's return value, if any,
- * is ignored. Unlike the C++ Standard Template Library function
- * std::for_each, this version offers no guarantee on
- * order of execution. For this reason, this version of \p for_each
- * does not return a copy of the function object.
- *
- * \param first The beginning of the sequence.
- * \param last The end of the sequence.
- * \param f The function object to apply to the range [first, last).
- * \return last
- *
- * \tparam InputIterator is a model of Input Iterator,
- * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
- * \tparam UnaryFunction is a model of Unary Function,
- * and \p UnaryFunction does not apply any non-constant operation through its argument.
- *
- * The following code snippet demonstrates how to use \p for_each to print the elements
- * of a \p device_vector.
- *
- * \code
- * #include <thrust/for_each.h>
- * #include <thrust/device_vector.h>
- * #include <cstdio>
- *
- * struct printf_functor
- * {
- * __host__ __device__
- * void operator()(int x)
- * {
- * // note that using printf in a __device__ function requires
- * // code compiled for a GPU with compute capability 2.0 or
- * // higher (nvcc --arch=sm_20)
- * printf("%d\n", x);
- * }
- * };
- * ...
- * thrust::device_vector<int> d_vec(3);
- * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
- *
- * thrust::for_each(d_vec.begin(), d_vec.end(), printf_functor());
- *
- * // 0 1 2 is printed to standard output in some unspecified order
- * \endcode
- *
- * \see for_each_n
- * \see http://www.sgi.com/tech/stl/for_each.html
- */
-template<typename InputIterator, typename UnaryFunction>
-InputIterator for_each(InputIterator first,
- InputIterator last,
- UnaryFunction f);
-
-
-/*! \p for_each_n applies the function object \p f to each element
- * in the range [first, first + n); \p f's return value, if any,
- * is ignored. Unlike the C++ Standard Template Library function
- * std::for_each, this version offers no guarantee on
- * order of execution.
- *
- * \param first The beginning of the sequence.
- * \param n The size of the input sequence.
- * \param f The function object to apply to the range [first, first + n).
- * \return first + n
- *
- * \tparam InputIterator is a model of Input Iterator,
- * and \p InputIterator's \c value_type is convertible to \p UnaryFunction's \c argument_type.
- * \tparam Size is an integral type.
- * \tparam UnaryFunction is a model of Unary Function,
- * and \p UnaryFunction does not apply any non-constant operation through its argument.
- *
- * The following code snippet demonstrates how to use \p for_each_n to print the elements
- * of a \p device_vector.
- *
- * \code
- * #include <thrust/for_each.h>
- * #include <thrust/device_vector.h>
- * #include <cstdio>
- *
- * struct printf_functor
- * {
- * __host__ __device__
- * void operator()(int x)
- * {
- * // note that using printf in a __device__ function requires
- * // code compiled for a GPU with compute capability 2.0 or
- * // higher (nvcc --arch=sm_20)
- * printf("%d\n", x);
- * }
- * };
- * ...
- * thrust::device_vector d_vec(3);
- * d_vec[0] = 0; d_vec[1] = 1; d_vec[2] = 2;
- *
- * thrust::for_each_n(d_vec.begin(), d_vec.size(), printf_functor());
- *
- * // 0 1 2 is printed to standard output in some unspecified order
- * \endcode
- *
- * \see for_each
- * \see http://www.sgi.com/tech/stl/for_each.html
- */
-template<typename InputIterator, typename Size, typename UnaryFunction>
-InputIterator for_each_n(InputIterator first,
- Size n,
- UnaryFunction f);
-
-/*! \} // end modifying
- */
-
-} // end namespace thrust
-
-#include <thrust/detail/for_each.inl>
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/temporary_buffer.h b/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/temporary_buffer.h
deleted file mode 100644
index 6b5276141625d61567d3adb06a363682b4df968b..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/temporary_buffer.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2008-2016 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special temporary buffer functions
-
diff --git a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/per_device_resource.h b/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/per_device_resource.h
deleted file mode 100644
index 1b8d61f92169e0e09c3821e59218f0dcbb70cbe5..0000000000000000000000000000000000000000
--- a/spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/per_device_resource.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2018 NVIDIA Corporation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-
-// this system has no special per device resource functions
-
diff --git a/spaces/CVPR/MonoScene/fusion.py b/spaces/CVPR/MonoScene/fusion.py
deleted file mode 100644
index aecd5cba3b1e3dd1e0534cda347eca8956657926..0000000000000000000000000000000000000000
--- a/spaces/CVPR/MonoScene/fusion.py
+++ /dev/null
@@ -1,507 +0,0 @@
-"""
-Most of the code is taken from https://github.com/andyzeng/tsdf-fusion-python/blob/master/fusion.py
-
-@inproceedings{zeng20163dmatch,
- title={3DMatch: Learning Local Geometric Descriptors from RGB-D Reconstructions},
- author={Zeng, Andy and Song, Shuran and Nie{\ss}ner, Matthias and Fisher, Matthew and Xiao, Jianxiong and Funkhouser, Thomas},
- booktitle={CVPR},
- year={2017}
-}
-"""
-
-import numpy as np
-
-from numba import njit, prange
-from skimage import measure
-
-FUSION_GPU_MODE = 0
-
-
-class TSDFVolume:
- """Volumetric TSDF Fusion of RGB-D Images."""
-
- def __init__(self, vol_bnds, voxel_size, use_gpu=True):
- """Constructor.
-
- Args:
- vol_bnds (ndarray): An ndarray of shape (3, 2). Specifies the
- xyz bounds (min/max) in meters.
- voxel_size (float): The volume discretization in meters.
- """
- vol_bnds = np.asarray(vol_bnds)
- assert vol_bnds.shape == (3, 2), "[!] `vol_bnds` should be of shape (3, 2)."
-
- # Define voxel volume parameters
- self._vol_bnds = vol_bnds
- self._voxel_size = float(voxel_size)
- self._trunc_margin = 5 * self._voxel_size # truncation on SDF
- # self._trunc_margin = 10 # truncation on SDF
- self._color_const = 256 * 256
-
- # Adjust volume bounds and ensure C-order contiguous
- self._vol_dim = (
- np.ceil((self._vol_bnds[:, 1] - self._vol_bnds[:, 0]) / self._voxel_size)
- .copy(order="C")
- .astype(int)
- )
- self._vol_bnds[:, 1] = self._vol_bnds[:, 0] + self._vol_dim * self._voxel_size
- self._vol_origin = self._vol_bnds[:, 0].copy(order="C").astype(np.float32)
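-        # Illustrative sizing: bounds [[0, 4.8], [0, 4.8], [0, 2.4]] with a
-        # 0.02 m voxel yield a 240 x 240 x 120 grid; the upper bounds are then
-        # snapped outward so each extent is an exact multiple of the voxel
-        # size, and _vol_origin is the world-space corner of voxel (0, 0, 0).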
-
- print(
- "Voxel volume size: {} x {} x {} - # points: {:,}".format(
- self._vol_dim[0],
- self._vol_dim[1],
- self._vol_dim[2],
- self._vol_dim[0] * self._vol_dim[1] * self._vol_dim[2],
- )
- )
-
- # Initialize pointers to voxel volume in CPU memory
- self._tsdf_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
- # for computing the cumulative moving average of observations per voxel
- self._weight_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
- self._color_vol_cpu = np.zeros(self._vol_dim).astype(np.float32)
-
- self.gpu_mode = use_gpu and FUSION_GPU_MODE
-
- # Copy voxel volumes to GPU
- if self.gpu_mode:
- self._tsdf_vol_gpu = cuda.mem_alloc(self._tsdf_vol_cpu.nbytes)
- cuda.memcpy_htod(self._tsdf_vol_gpu, self._tsdf_vol_cpu)
- self._weight_vol_gpu = cuda.mem_alloc(self._weight_vol_cpu.nbytes)
- cuda.memcpy_htod(self._weight_vol_gpu, self._weight_vol_cpu)
- self._color_vol_gpu = cuda.mem_alloc(self._color_vol_cpu.nbytes)
- cuda.memcpy_htod(self._color_vol_gpu, self._color_vol_cpu)
-
- # Cuda kernel function (C++)
- self._cuda_src_mod = SourceModule(
- """
- __global__ void integrate(float * tsdf_vol,
- float * weight_vol,
- float * color_vol,
- float * vol_dim,
- float * vol_origin,
- float * cam_intr,
- float * cam_pose,
- float * other_params,
- float * color_im,
- float * depth_im) {
- // Get voxel index
- int gpu_loop_idx = (int) other_params[0];
- int max_threads_per_block = blockDim.x;
- int block_idx = blockIdx.z*gridDim.y*gridDim.x+blockIdx.y*gridDim.x+blockIdx.x;
- int voxel_idx = gpu_loop_idx*gridDim.x*gridDim.y*gridDim.z*max_threads_per_block+block_idx*max_threads_per_block+threadIdx.x;
- int vol_dim_x = (int) vol_dim[0];
- int vol_dim_y = (int) vol_dim[1];
- int vol_dim_z = (int) vol_dim[2];
- if (voxel_idx > vol_dim_x*vol_dim_y*vol_dim_z)
- return;
- // Get voxel grid coordinates (note: be careful when casting)
- float voxel_x = floorf(((float)voxel_idx)/((float)(vol_dim_y*vol_dim_z)));
- float voxel_y = floorf(((float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z))/((float)vol_dim_z));
- float voxel_z = (float)(voxel_idx-((int)voxel_x)*vol_dim_y*vol_dim_z-((int)voxel_y)*vol_dim_z);
- // Voxel grid coordinates to world coordinates
- float voxel_size = other_params[1];
- float pt_x = vol_origin[0]+voxel_x*voxel_size;
- float pt_y = vol_origin[1]+voxel_y*voxel_size;
- float pt_z = vol_origin[2]+voxel_z*voxel_size;
- // World coordinates to camera coordinates
- float tmp_pt_x = pt_x-cam_pose[0*4+3];
- float tmp_pt_y = pt_y-cam_pose[1*4+3];
- float tmp_pt_z = pt_z-cam_pose[2*4+3];
- float cam_pt_x = cam_pose[0*4+0]*tmp_pt_x+cam_pose[1*4+0]*tmp_pt_y+cam_pose[2*4+0]*tmp_pt_z;
- float cam_pt_y = cam_pose[0*4+1]*tmp_pt_x+cam_pose[1*4+1]*tmp_pt_y+cam_pose[2*4+1]*tmp_pt_z;
- float cam_pt_z = cam_pose[0*4+2]*tmp_pt_x+cam_pose[1*4+2]*tmp_pt_y+cam_pose[2*4+2]*tmp_pt_z;
- // Camera coordinates to image pixels
- int pixel_x = (int) roundf(cam_intr[0*3+0]*(cam_pt_x/cam_pt_z)+cam_intr[0*3+2]);
- int pixel_y = (int) roundf(cam_intr[1*3+1]*(cam_pt_y/cam_pt_z)+cam_intr[1*3+2]);
- // Skip if outside view frustum
- int im_h = (int) other_params[2];
- int im_w = (int) other_params[3];
- if (pixel_x < 0 || pixel_x >= im_w || pixel_y < 0 || pixel_y >= im_h || cam_pt_z<0)
- return;
- // Skip invalid depth
- float depth_value = depth_im[pixel_y*im_w+pixel_x];
- if (depth_value == 0)
- return;
- // Integrate TSDF
- float trunc_margin = other_params[4];
- float depth_diff = depth_value-cam_pt_z;
- if (depth_diff < -trunc_margin)
- return;
- float dist = fmin(1.0f,depth_diff/trunc_margin);
- float w_old = weight_vol[voxel_idx];
- float obs_weight = other_params[5];
- float w_new = w_old + obs_weight;
- weight_vol[voxel_idx] = w_new;
- tsdf_vol[voxel_idx] = (tsdf_vol[voxel_idx]*w_old+obs_weight*dist)/w_new;
- // Integrate color
- float old_color = color_vol[voxel_idx];
- float old_b = floorf(old_color/(256*256));
- float old_g = floorf((old_color-old_b*256*256)/256);
- float old_r = old_color-old_b*256*256-old_g*256;
- float new_color = color_im[pixel_y*im_w+pixel_x];
- float new_b = floorf(new_color/(256*256));
- float new_g = floorf((new_color-new_b*256*256)/256);
- float new_r = new_color-new_b*256*256-new_g*256;
- new_b = fmin(roundf((old_b*w_old+obs_weight*new_b)/w_new),255.0f);
- new_g = fmin(roundf((old_g*w_old+obs_weight*new_g)/w_new),255.0f);
- new_r = fmin(roundf((old_r*w_old+obs_weight*new_r)/w_new),255.0f);
- color_vol[voxel_idx] = new_b*256*256+new_g*256+new_r;
- }"""
- )
-
- self._cuda_integrate = self._cuda_src_mod.get_function("integrate")
-
- # Determine block/grid size on GPU
- gpu_dev = cuda.Device(0)
- self._max_gpu_threads_per_block = gpu_dev.MAX_THREADS_PER_BLOCK
- n_blocks = int(
- np.ceil(
- float(np.prod(self._vol_dim))
- / float(self._max_gpu_threads_per_block)
- )
- )
- grid_dim_x = min(gpu_dev.MAX_GRID_DIM_X, int(np.floor(np.cbrt(n_blocks))))
- grid_dim_y = min(
- gpu_dev.MAX_GRID_DIM_Y, int(np.floor(np.sqrt(n_blocks / grid_dim_x)))
- )
- grid_dim_z = min(
- gpu_dev.MAX_GRID_DIM_Z,
- int(np.ceil(float(n_blocks) / float(grid_dim_x * grid_dim_y))),
- )
- self._max_gpu_grid_dim = np.array(
- [grid_dim_x, grid_dim_y, grid_dim_z]
- ).astype(int)
- self._n_gpu_loops = int(
- np.ceil(
- float(np.prod(self._vol_dim))
- / float(
- np.prod(self._max_gpu_grid_dim)
- * self._max_gpu_threads_per_block
- )
- )
- )
-
- else:
- # Get voxel grid coordinates
- xv, yv, zv = np.meshgrid(
- range(self._vol_dim[0]),
- range(self._vol_dim[1]),
- range(self._vol_dim[2]),
- indexing="ij",
- )
- self.vox_coords = (
- np.concatenate(
- [xv.reshape(1, -1), yv.reshape(1, -1), zv.reshape(1, -1)], axis=0
- )
- .astype(int)
- .T
- )
-
- @staticmethod
- @njit(parallel=True)
- def vox2world(vol_origin, vox_coords, vox_size, offsets=(0.5, 0.5, 0.5)):
- """Convert voxel grid coordinates to world coordinates."""
- vol_origin = vol_origin.astype(np.float32)
- vox_coords = vox_coords.astype(np.float32)
- # print(np.min(vox_coords))
- cam_pts = np.empty_like(vox_coords, dtype=np.float32)
-
- for i in prange(vox_coords.shape[0]):
- for j in range(3):
- cam_pts[i, j] = (
- vol_origin[j]
- + (vox_size * vox_coords[i, j])
- + vox_size * offsets[j]
- )
- return cam_pts
-
- @staticmethod
- @njit(parallel=True)
- def cam2pix(cam_pts, intr):
- """Convert camera coordinates to pixel coordinates."""
- intr = intr.astype(np.float32)
- fx, fy = intr[0, 0], intr[1, 1]
- cx, cy = intr[0, 2], intr[1, 2]
- pix = np.empty((cam_pts.shape[0], 2), dtype=np.int64)
- for i in prange(cam_pts.shape[0]):
- pix[i, 0] = int(np.round((cam_pts[i, 0] * fx / cam_pts[i, 2]) + cx))
- pix[i, 1] = int(np.round((cam_pts[i, 1] * fy / cam_pts[i, 2]) + cy))
- return pix
-
- @staticmethod
- @njit(parallel=True)
- def integrate_tsdf(tsdf_vol, dist, w_old, obs_weight):
- """Integrate the TSDF volume."""
- tsdf_vol_int = np.empty_like(tsdf_vol, dtype=np.float32)
- # print(tsdf_vol.shape)
- w_new = np.empty_like(w_old, dtype=np.float32)
- for i in prange(len(tsdf_vol)):
- w_new[i] = w_old[i] + obs_weight
- tsdf_vol_int[i] = (w_old[i] * tsdf_vol[i] + obs_weight * dist[i]) / w_new[i]
- return tsdf_vol_int, w_new
-
- def integrate(self, color_im, depth_im, cam_intr, cam_pose, obs_weight=1.0):
- """Integrate an RGB-D frame into the TSDF volume.
-
- Args:
- color_im (ndarray): An RGB image of shape (H, W, 3).
- depth_im (ndarray): A depth image of shape (H, W).
- cam_intr (ndarray): The camera intrinsics matrix of shape (3, 3).
- cam_pose (ndarray): The camera pose (i.e. extrinsics) of shape (4, 4).
- obs_weight (float): The weight to assign to the current observation. A higher
- value gives this observation more influence on the running per-voxel averages.
- """
- im_h, im_w = depth_im.shape
-
- # Fold RGB color image into a single channel image
- color_im = color_im.astype(np.float32)
- color_im = np.floor(
- color_im[..., 2] * self._color_const
- + color_im[..., 1] * 256
- + color_im[..., 0]
- )
-
- if self.gpu_mode: # GPU mode: integrate voxel volume (calls CUDA kernel)
- for gpu_loop_idx in range(self._n_gpu_loops):
- self._cuda_integrate(
- self._tsdf_vol_gpu,
- self._weight_vol_gpu,
- self._color_vol_gpu,
- cuda.InOut(self._vol_dim.astype(np.float32)),
- cuda.InOut(self._vol_origin.astype(np.float32)),
- cuda.InOut(cam_intr.reshape(-1).astype(np.float32)),
- cuda.InOut(cam_pose.reshape(-1).astype(np.float32)),
- cuda.InOut(
- np.asarray(
- [
- gpu_loop_idx,
- self._voxel_size,
- im_h,
- im_w,
- self._trunc_margin,
- obs_weight,
- ],
- np.float32,
- )
- ),
- cuda.InOut(color_im.reshape(-1).astype(np.float32)),
- cuda.InOut(depth_im.reshape(-1).astype(np.float32)),
- block=(self._max_gpu_threads_per_block, 1, 1),
- grid=(
- int(self._max_gpu_grid_dim[0]),
- int(self._max_gpu_grid_dim[1]),
- int(self._max_gpu_grid_dim[2]),
- ),
- )
- else: # CPU mode: integrate voxel volume (vectorized implementation)
- # Convert voxel grid coordinates to pixel coordinates
- cam_pts = self.vox2world(
- self._vol_origin, self.vox_coords, self._voxel_size
- )
- cam_pts = rigid_transform(cam_pts, np.linalg.inv(cam_pose))
- pix_z = cam_pts[:, 2]
- pix = self.cam2pix(cam_pts, cam_intr)
- pix_x, pix_y = pix[:, 0], pix[:, 1]
-
- # Eliminate pixels outside view frustum
- valid_pix = np.logical_and(
- pix_x >= 0,
- np.logical_and(
- pix_x < im_w,
- np.logical_and(pix_y >= 0, np.logical_and(pix_y < im_h, pix_z > 0)),
- ),
- )
- depth_val = np.zeros(pix_x.shape)
- depth_val[valid_pix] = depth_im[pix_y[valid_pix], pix_x[valid_pix]]
-
- # Integrate TSDF
- depth_diff = depth_val - pix_z
-
- # NOTE: unlike the GPU kernel, which truncates by self._trunc_margin,
- # this CPU path keeps the raw signed distance and a fixed 10 m margin
- valid_pts = np.logical_and(depth_val > 0, depth_diff >= -10)
- dist = depth_diff
-
- valid_vox_x = self.vox_coords[valid_pts, 0]
- valid_vox_y = self.vox_coords[valid_pts, 1]
- valid_vox_z = self.vox_coords[valid_pts, 2]
- w_old = self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
- tsdf_vals = self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
- valid_dist = dist[valid_pts]
- tsdf_vol_new, w_new = self.integrate_tsdf(
- tsdf_vals, valid_dist, w_old, obs_weight
- )
- self._weight_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = w_new
- self._tsdf_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = tsdf_vol_new
-
- # Integrate color
- old_color = self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z]
- old_b = np.floor(old_color / self._color_const)
- old_g = np.floor((old_color - old_b * self._color_const) / 256)
- old_r = old_color - old_b * self._color_const - old_g * 256
- new_color = color_im[pix_y[valid_pts], pix_x[valid_pts]]
- new_b = np.floor(new_color / self._color_const)
- new_g = np.floor((new_color - new_b * self._color_const) / 256)
- new_r = new_color - new_b * self._color_const - new_g * 256
- new_b = np.minimum(
- 255.0, np.round((w_old * old_b + obs_weight * new_b) / w_new)
- )
- new_g = np.minimum(
- 255.0, np.round((w_old * old_g + obs_weight * new_g) / w_new)
- )
- new_r = np.minimum(
- 255.0, np.round((w_old * old_r + obs_weight * new_r) / w_new)
- )
- self._color_vol_cpu[valid_vox_x, valid_vox_y, valid_vox_z] = (
- new_b * self._color_const + new_g * 256 + new_r
- )
-
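Editorial aside: every color operation in this class relies on packing an RGB triple into one float channel as B*256*256 + G*256 + R. A tiny self-contained round-trip check of that packing (not part of the original file):

```python
import numpy as np

# pack: B*65536 + G*256 + R, stored in a single float
r, g, b = 10.0, 200.0, 37.0
packed = b * 256 * 256 + g * 256 + r
# unpack, mirroring the code above
b2 = np.floor(packed / (256 * 256))
g2 = np.floor((packed - b2 * 256 * 256) / 256)
r2 = packed - b2 * 256 * 256 - g2 * 256
assert (r2, g2, b2) == (r, g, b)
# every packed value is below 2**24, so the scheme is exact even in float32
assert 255 * 256 * 256 + 255 * 256 + 255 < 2 ** 24
```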
- def get_volume(self):
- if self.gpu_mode:
- cuda.memcpy_dtoh(self._tsdf_vol_cpu, self._tsdf_vol_gpu)
- cuda.memcpy_dtoh(self._color_vol_cpu, self._color_vol_gpu)
- return self._tsdf_vol_cpu, self._color_vol_cpu
-
- def get_point_cloud(self):
- """Extract a point cloud from the voxel volume."""
- tsdf_vol, color_vol = self.get_volume()
-
- # Marching cubes (marching_cubes_lewiner needs scikit-image < 0.19,
- # where it was removed in favour of measure.marching_cubes)
- verts = measure.marching_cubes_lewiner(tsdf_vol, level=0)[0]
- verts_ind = np.round(verts).astype(int)
- verts = verts * self._voxel_size + self._vol_origin
-
- # Get vertex colors
- rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
- colors_b = np.floor(rgb_vals / self._color_const)
- colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
- colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
- colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
- colors = colors.astype(np.uint8)
-
- pc = np.hstack([verts, colors])
- return pc
-
- def get_mesh(self):
- """Compute a mesh from the voxel volume using marching cubes."""
- tsdf_vol, color_vol = self.get_volume()
-
- # Marching cubes
- verts, faces, norms, vals = measure.marching_cubes_lewiner(tsdf_vol, level=0)
- verts_ind = np.round(verts).astype(int)
- verts = (
- verts * self._voxel_size + self._vol_origin
- ) # voxel grid coordinates to world coordinates
-
- # Get vertex colors
- rgb_vals = color_vol[verts_ind[:, 0], verts_ind[:, 1], verts_ind[:, 2]]
- colors_b = np.floor(rgb_vals / self._color_const)
- colors_g = np.floor((rgb_vals - colors_b * self._color_const) / 256)
- colors_r = rgb_vals - colors_b * self._color_const - colors_g * 256
- colors = np.floor(np.asarray([colors_r, colors_g, colors_b])).T
- colors = colors.astype(np.uint8)
- return verts, faces, norms, colors
-
-
-def rigid_transform(xyz, transform):
- """Applies a rigid transform to an (N, 3) pointcloud."""
- xyz_h = np.hstack([xyz, np.ones((len(xyz), 1), dtype=np.float32)])
- xyz_t_h = np.dot(transform, xyz_h.T).T
- return xyz_t_h[:, :3]
-
-
-def get_view_frustum(depth_im, cam_intr, cam_pose):
- """Get corners of 3D camera view frustum of depth image"""
- im_h = depth_im.shape[0]
- im_w = depth_im.shape[1]
- max_depth = np.max(depth_im)
- view_frust_pts = np.array(
- [
- (np.array([0, 0, 0, im_w, im_w]) - cam_intr[0, 2])
- * np.array([0, max_depth, max_depth, max_depth, max_depth])
- / cam_intr[0, 0],
- (np.array([0, 0, im_h, 0, im_h]) - cam_intr[1, 2])
- * np.array([0, max_depth, max_depth, max_depth, max_depth])
- / cam_intr[1, 1],
- np.array([0, max_depth, max_depth, max_depth, max_depth]),
- ]
- )
- view_frust_pts = rigid_transform(view_frust_pts.T, cam_pose).T
- return view_frust_pts
-
-
-def meshwrite(filename, verts, faces, norms, colors):
- """Save a 3D mesh to a polygon .ply file."""
- # Write header
- ply_file = open(filename, "w")
- ply_file.write("ply\n")
- ply_file.write("format ascii 1.0\n")
- ply_file.write("element vertex %d\n" % (verts.shape[0]))
- ply_file.write("property float x\n")
- ply_file.write("property float y\n")
- ply_file.write("property float z\n")
- ply_file.write("property float nx\n")
- ply_file.write("property float ny\n")
- ply_file.write("property float nz\n")
- ply_file.write("property uchar red\n")
- ply_file.write("property uchar green\n")
- ply_file.write("property uchar blue\n")
- ply_file.write("element face %d\n" % (faces.shape[0]))
- ply_file.write("property list uchar int vertex_index\n")
- ply_file.write("end_header\n")
-
- # Write vertex list
- for i in range(verts.shape[0]):
- ply_file.write(
- "%f %f %f %f %f %f %d %d %d\n"
- % (
- verts[i, 0],
- verts[i, 1],
- verts[i, 2],
- norms[i, 0],
- norms[i, 1],
- norms[i, 2],
- colors[i, 0],
- colors[i, 1],
- colors[i, 2],
- )
- )
-
- # Write face list
- for i in range(faces.shape[0]):
- ply_file.write("3 %d %d %d\n" % (faces[i, 0], faces[i, 1], faces[i, 2]))
-
- ply_file.close()
-
-
-def pcwrite(filename, xyzrgb):
- """Save a point cloud to a polygon .ply file."""
- xyz = xyzrgb[:, :3]
- rgb = xyzrgb[:, 3:].astype(np.uint8)
-
- # Write header
- ply_file = open(filename, "w")
- ply_file.write("ply\n")
- ply_file.write("format ascii 1.0\n")
- ply_file.write("element vertex %d\n" % (xyz.shape[0]))
- ply_file.write("property float x\n")
- ply_file.write("property float y\n")
- ply_file.write("property float z\n")
- ply_file.write("property uchar red\n")
- ply_file.write("property uchar green\n")
- ply_file.write("property uchar blue\n")
- ply_file.write("end_header\n")
-
- # Write vertex list
- for i in range(xyz.shape[0]):
- ply_file.write(
- "%f %f %f %d %d %d\n"
- % (
- xyz[i, 0],
- xyz[i, 1],
- xyz[i, 2],
- rgb[i, 0],
- rgb[i, 1],
- rgb[i, 2],
- )
- )
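For context, a minimal CPU-mode usage sketch of the module deleted above (an editorial addition, not part of the Space): it assumes the file is importable as `fusion`, uses a synthetic flat-wall depth frame, and needs a scikit-image version that still ships `marching_cubes_lewiner`.

```python
import numpy as np
import fusion  # the module deleted above

# Synthetic stand-in frame: a flat wall 2 m from a 640x480 pinhole camera.
depth = np.full((480, 640), 2.0, dtype=np.float32)
color = np.zeros((480, 640, 3), dtype=np.uint8)
cam_intr = np.array([[525.0, 0.0, 320.0], [0.0, 525.0, 240.0], [0.0, 0.0, 1.0]])
cam_pose = np.eye(4)

# Derive volume bounds from the first frame's view frustum.
frust_pts = fusion.get_view_frustum(depth, cam_intr, cam_pose)  # shape (3, 5)
vol_bnds = np.stack([frust_pts.min(axis=1), frust_pts.max(axis=1)], axis=1)

tsdf = fusion.TSDFVolume(vol_bnds, voxel_size=0.05, use_gpu=False)
tsdf.integrate(color, depth, cam_intr, cam_pose, obs_weight=1.0)

verts, faces, norms, colors = tsdf.get_mesh()
fusion.meshwrite("mesh.ply", verts, faces, norms, colors)
```

With more frames, repeated `integrate` calls accumulate a weighted running average per voxel, which is why the reconstruction improves with camera coverage.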
diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/coder/yolo_bbox_coder.py b/spaces/CVPR/WALT/mmdet/core/bbox/coder/yolo_bbox_coder.py
deleted file mode 100644
index d6d0e82ac780820952938d8751ac9776ea31588a..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/core/bbox/coder/yolo_bbox_coder.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import mmcv
-import torch
-
-from ..builder import BBOX_CODERS
-from .base_bbox_coder import BaseBBoxCoder
-
-
-@BBOX_CODERS.register_module()
-class YOLOBBoxCoder(BaseBBoxCoder):
- """YOLO BBox coder.
-
- Following YOLO, this coder divides the image into grids and encodes the
- bbox (x1, y1, x2, y2) into (cx, cy, dw, dh). cx, cy in [0., 1.] denote the
- relative center position w.r.t. the center of bboxes. dw, dh are the same
- as :obj:`DeltaXYWHBBoxCoder`.
-
- Args:
- eps (float): Min value of cx, cy when encoding.
- """
-
- def __init__(self, eps=1e-6):
- super(BaseBBoxCoder, self).__init__()
- self.eps = eps
-
- @mmcv.jit(coderize=True)
- def encode(self, bboxes, gt_bboxes, stride):
- """Get box regression transformation deltas that can be used to
- transform the ``bboxes`` into the ``gt_bboxes``.
-
- Args:
- bboxes (torch.Tensor): Source boxes, e.g., anchors.
- gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
- ground-truth boxes.
- stride (torch.Tensor | int): Stride of bboxes.
-
- Returns:
- torch.Tensor: Box transformation deltas
- """
-
- assert bboxes.size(0) == gt_bboxes.size(0)
- assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
- x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
- y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
- w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
- h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
- x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
- y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
- w = bboxes[..., 2] - bboxes[..., 0]
- h = bboxes[..., 3] - bboxes[..., 1]
- w_target = torch.log((w_gt / w).clamp(min=self.eps))
- h_target = torch.log((h_gt / h).clamp(min=self.eps))
- x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
- self.eps, 1 - self.eps)
- y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
- self.eps, 1 - self.eps)
- encoded_bboxes = torch.stack(
- [x_center_target, y_center_target, w_target, h_target], dim=-1)
- return encoded_bboxes
-
- @mmcv.jit(coderize=True)
- def decode(self, bboxes, pred_bboxes, stride):
- """Apply transformation `pred_bboxes` to `boxes`.
-
- Args:
- boxes (torch.Tensor): Basic boxes, e.g. anchors.
- pred_bboxes (torch.Tensor): Encoded boxes with shape
- stride (torch.Tensor | int): Strides of bboxes.
-
- Returns:
- torch.Tensor: Decoded boxes.
- """
- assert pred_bboxes.size(0) == bboxes.size(0)
- assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
- x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
- y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
- w = bboxes[..., 2] - bboxes[..., 0]
- h = bboxes[..., 3] - bboxes[..., 1]
- # Get outputs x, y
- x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center
- y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center
- w_pred = torch.exp(pred_bboxes[..., 2]) * w
- h_pred = torch.exp(pred_bboxes[..., 3]) * h
-
- decoded_bboxes = torch.stack(
- (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2,
- x_center_pred + w_pred / 2, y_center_pred + h_pred / 2),
- dim=-1)
-
- return decoded_bboxes
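For reference, the encode/decode pair above inverts exactly (up to the cx/cy clamping). Below is a standalone torch sketch of the same math with a round-trip check; it is an editorial illustration with made-up helper names, not the mmdet API.

```python
import torch

def yolo_encode(bboxes, gt_bboxes, stride, eps=1e-6):
    # anchor centers and sizes
    x_c = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
    y_c = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
    w = bboxes[..., 2] - bboxes[..., 0]
    h = bboxes[..., 3] - bboxes[..., 1]
    # ground-truth centers and sizes
    x_g = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
    y_g = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
    w_g = gt_bboxes[..., 2] - gt_bboxes[..., 0]
    h_g = gt_bboxes[..., 3] - gt_bboxes[..., 1]
    return torch.stack([
        ((x_g - x_c) / stride + 0.5).clamp(eps, 1 - eps),  # cx
        ((y_g - y_c) / stride + 0.5).clamp(eps, 1 - eps),  # cy
        torch.log((w_g / w).clamp(min=eps)),               # dw
        torch.log((h_g / h).clamp(min=eps)),               # dh
    ], dim=-1)

def yolo_decode(bboxes, deltas, stride):
    x_c = (bboxes[..., 0] + bboxes[..., 2]) * 0.5 + (deltas[..., 0] - 0.5) * stride
    y_c = (bboxes[..., 1] + bboxes[..., 3]) * 0.5 + (deltas[..., 1] - 0.5) * stride
    w = (bboxes[..., 2] - bboxes[..., 0]) * torch.exp(deltas[..., 2])
    h = (bboxes[..., 3] - bboxes[..., 1]) * torch.exp(deltas[..., 3])
    return torch.stack([x_c - w / 2, y_c - h / 2,
                        x_c + w / 2, y_c + h / 2], dim=-1)

anchors = torch.tensor([[8.0, 8.0, 40.0, 40.0]])
gts = torch.tensor([[10.0, 6.0, 42.0, 44.0]])
deltas = yolo_encode(anchors, gts, stride=32)
assert torch.allclose(yolo_decode(anchors, deltas, stride=32), gts, atol=1e-4)
```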
diff --git a/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/oscar_tsv.py b/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/oscar_tsv.py
deleted file mode 100644
index c2dfce34f399ef2efc44e13d45f13dc9fba66970..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/detectron2/data/clip_datasets/oscar_tsv.py
+++ /dev/null
@@ -1,218 +0,0 @@
-import logging
-import random
-
-class InputExample(object):
- """A single training/test example for the language model."""
-
- def __init__(self, guid, tokens_a, tokens_b=None, is_next=None,
- lm_labels=None, img_id=None, is_img_match=None,
- img_label=None):
- """Constructs a InputExample.
-
- Args:
- guid: Unique id for the example.
- tokens_a: string. The untokenized text of the first sequence. For single
- sequence tasks, only this sequence must be specified.
- tokens_b: (Optional) string. The untokenized text of the second sequence.
- Must only be specified for sequence pair tasks.
- """
- self.guid = guid
- self.tokens_a = tokens_a
- self.tokens_b = tokens_b
- self.is_next = is_next # nextSentence
- self.lm_labels = lm_labels # masked words for language model
-
- self.img_id = img_id
- self.is_img_match = is_img_match
- self.img_label = img_label
-
-class InputFeatures(object):
- """A single set of features of data."""
-
- def __init__(self, input_ids, input_mask, segment_ids, is_next,
- lm_label_ids, img_feat_len, is_img_match):
- self.input_ids = input_ids
- self.input_mask = input_mask
- self.segment_ids = segment_ids
- self.is_next = is_next
- self.lm_label_ids = lm_label_ids
-
- self.img_feat_len = img_feat_len
- self.is_img_match = is_img_match
-
-
-def random_word(tokens, tokenizer):
- """
- Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
- :param tokens: list of str, tokenized sentence.
- :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
- :return: (list of str, list of int), masked tokens and related labels for LM prediction
- """
- output_label = []
-
- for i, token in enumerate(tokens):
- prob = random.random()
- # mask token with 15% probability
- if prob < 0.15:
- prob /= 0.15
-
- # 80% randomly change token to mask token
- if prob < 0.8:
- tokens[i] = "[MASK]"
-
- # 10% randomly change token to random token
- elif prob < 0.9:
- tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
-
- # -> rest 10% randomly keep current token
-
- # append current token to output (we will predict these later)
- try:
- output_label.append(tokenizer.vocab[token])
- except KeyError:
- # For unknown words (should not occur with BPE vocab)
- output_label.append(tokenizer.vocab["[UNK]"])
- logging.warning(
- "Cannot find token '{}' in vocab. Using [UNK] insetad".format(
- token))
- else:
- # no masking token (will be ignored by loss function later)
- output_label.append(-1)
-
- return tokens, output_label
-
-
-def convert_example_to_features(args, example, max_seq_length, tokenizer,
- img_feat_len):
- """
- Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
- IDs, LM labels, input_mask, CLS and SEP tokens etc.
- :param args: parameter settings
- :param img_feat_len: lens of actual img features
- :param example: InputExample, containing sentence input as strings and is_next label
- :param max_seq_length: int, maximum length of sequence.
- :param tokenizer: Tokenizer
- :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
- """
-
- tokens_a = example.tokens_a
- tokens_b = None
- if example.tokens_b:
- tokens_b = example.tokens_b
- # Modifies `tokens_a` and `tokens_b` in place so that the total
- # length is less than the specified length.
- # Account for [CLS], [SEP], [SEP] with "- 3"
- _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
- else:
- if len(tokens_a) > max_seq_length - 2:
- tokens_a = tokens_a[:(max_seq_length - 2)]
-
- tokens_a, t1_label = random_word(tokens_a, tokenizer)
- if tokens_b:
- tokens_b, t2_label = random_word(tokens_b, tokenizer)
-
- # concatenate lm labels and account for CLS, SEP, SEP
- if tokens_b:
- lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
- else:
- lm_label_ids = ([-1] + t1_label + [-1])
-
- # The convention in BERT is:
- # (a) For sequence pairs:
- # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
- # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
- # (b) For single sequences:
- # tokens: [CLS] the dog is hairy . [SEP]
- # type_ids: 0 0 0 0 0 0 0
- #
- # Where "type_ids" are used to indicate whether this is the first
- # sequence or the second sequence. The embedding vectors for `type=0` and
- # `type=1` were learned during pre-training and are added to the wordpiece
- # embedding vector (and position vector). This is not *strictly* necessary
- # since the [SEP] token unambiguously separates the sequences, but it makes
- # it easier for the model to learn the concept of sequences.
- #
- # For classification tasks, the first vector (corresponding to [CLS]) is
- # used as the "sentence vector". Note that this only makes sense because
- # the entire model is fine-tuned.
- tokens = []
- segment_ids = []
- tokens.append("[CLS]")
- segment_ids.append(0)
- for token in tokens_a:
- tokens.append(token)
- segment_ids.append(0)
- tokens.append("[SEP]")
- segment_ids.append(0)
-
- if tokens_b:
- assert len(tokens_b) > 0
- for token in tokens_b:
- tokens.append(token)
- segment_ids.append(1)
- tokens.append("[SEP]")
- segment_ids.append(1)
-
- input_ids = tokenizer.convert_tokens_to_ids(tokens)
-
- # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.
- input_mask = [1] * len(input_ids)
-
- # Zero-pad up to the sequence length.
- while len(input_ids) < max_seq_length:
- input_ids.append(0)
- input_mask.append(0)
- segment_ids.append(0)
- lm_label_ids.append(-1)
-
- assert len(input_ids) == max_seq_length
- assert len(input_mask) == max_seq_length
- assert len(segment_ids) == max_seq_length
- assert len(lm_label_ids) == max_seq_length
-
- # image features: extend the attention mask to cover the image region
- if args.max_img_seq_length > 0:
- # NOTE: callers are expected to have truncated the image features to
- # max_img_seq_length beforehand; otherwise input_mask outgrows the
- # expected total length
- if img_feat_len > args.max_img_seq_length:
- input_mask = input_mask + [1] * img_feat_len
- else:
- input_mask = input_mask + [1] * img_feat_len
- pad_img_feat_len = args.max_img_seq_length - img_feat_len
- input_mask = input_mask + ([0] * pad_img_feat_len)
-
- lm_label_ids = lm_label_ids + [-1] * args.max_img_seq_length
-
- if example.guid < 1:
- logging.info("*** Example ***")
- logging.info("guid: %s" % example.guid)
- logging.info("tokens: %s" % " ".join([str(x) for x in tokens]))
- logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
- logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
- logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
- logging.info("LM label: %s " % lm_label_ids)
- logging.info("Is next sentence label: %s " % example.is_next)
-
- features = InputFeatures(input_ids=input_ids,
- input_mask=input_mask,
- segment_ids=segment_ids,
- lm_label_ids=lm_label_ids,
- is_next=example.is_next,
- img_feat_len=img_feat_len,
- is_img_match=example.is_img_match)
- return features
-
-
-def _truncate_seq_pair(tokens_a, tokens_b, max_length):
- """Truncates a sequence pair in place to the maximum length."""
-
- # This is a simple heuristic which will always truncate the longer sequence
- # one token at a time. This makes more sense than truncating an equal percent
- # of tokens from each, since if one sequence is very short then each token
- # that's truncated likely contains more information than a longer sequence.
- while True:
- total_length = len(tokens_a) + len(tokens_b)
- if total_length <= max_length:
- break
- if len(tokens_a) > len(tokens_b):
- tokens_a.pop()
- else:
- tokens_b.pop()
\ No newline at end of file
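As an editorial illustration of the 15% / 80-10-10 masking scheme implemented by `random_word` above, here is a toy run; `ToyTokenizer` is a hypothetical stand-in that exposes only the `.vocab` dict the function touches.

```python
import random

class ToyTokenizer:
    """Hypothetical stand-in exposing only the .vocab dict random_word needs."""
    def __init__(self, words):
        self.vocab = {w: i for i, w in enumerate(["[UNK]", "[MASK]"] + words)}

random.seed(0)
tok = ToyTokenizer(["the", "cat", "sat", "on", "mat"])
tokens = ["the", "cat", "sat", "on", "the", "mat"]
masked, labels = random_word(list(tokens), tok)
# labels[i] is the original vocab id where position i was selected for
# prediction (the LM loss is computed there) and -1 everywhere else
print(masked, labels)
```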
diff --git a/spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/roi_heads.py b/spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/roi_heads.py
deleted file mode 100644
index 6ef2cab98374ab279d49640b5644c9f7ecb0ee45..0000000000000000000000000000000000000000
--- a/spaces/CVPR/regionclip-demo/detectron2/modeling/roi_heads/roi_heads.py
+++ /dev/null
@@ -1,887 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import inspect
-import logging
-import numpy as np
-from typing import Dict, List, Optional, Tuple
-import torch
-from torch import nn
-
-from detectron2.config import configurable
-from detectron2.layers import ShapeSpec, nonzero_tuple
-from detectron2.structures import Boxes, ImageList, Instances, pairwise_iou
-from detectron2.utils.events import get_event_storage
-from detectron2.utils.registry import Registry
-
-from ..backbone.resnet import BottleneckBlock, ResNet
-from ..matcher import Matcher
-from ..poolers import ROIPooler
-from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
-from ..sampling import subsample_labels
-from .box_head import build_box_head
-from .fast_rcnn import FastRCNNOutputLayers
-from .keypoint_head import build_keypoint_head
-from .mask_head import build_mask_head
-
-ROI_HEADS_REGISTRY = Registry("ROI_HEADS")
-ROI_HEADS_REGISTRY.__doc__ = """
-Registry for ROI heads in a generalized R-CNN model.
-ROIHeads take feature maps and region proposals, and
-perform per-region computation.
-
-The registered object will be called with `obj(cfg, input_shape)`.
-The call is expected to return an :class:`ROIHeads`.
-"""
-
-logger = logging.getLogger(__name__)
-
-
-def build_roi_heads(cfg, input_shape):
- """
- Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.
- """
- name = cfg.MODEL.ROI_HEADS.NAME
- return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)
-
-
-def select_foreground_proposals(
- proposals: List[Instances], bg_label: int
-) -> Tuple[List[Instances], List[torch.Tensor]]:
- """
- Given a list of N Instances (for N images), each containing a `gt_classes` field,
- return a list of Instances that contain only instances with `gt_classes != -1 &&
- gt_classes != bg_label`.
-
- Args:
- proposals (list[Instances]): A list of N Instances, where N is the number of
- images in the batch.
- bg_label: label index of background class.
-
- Returns:
- list[Instances]: N Instances, each contains only the selected foreground instances.
- list[Tensor]: N boolean vector, correspond to the selection mask of
- each Instances object. True for selected instances.
- """
- assert isinstance(proposals, (list, tuple))
- assert isinstance(proposals[0], Instances)
- assert proposals[0].has("gt_classes")
- fg_proposals = []
- fg_selection_masks = []
- for proposals_per_image in proposals:
- gt_classes = proposals_per_image.gt_classes
- fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)
- fg_idxs = fg_selection_mask.nonzero().squeeze(1)
- fg_proposals.append(proposals_per_image[fg_idxs])
- fg_selection_masks.append(fg_selection_mask)
- return fg_proposals, fg_selection_masks
-
-
-def select_proposals_with_visible_keypoints(proposals: List[Instances]) -> List[Instances]:
- """
- Args:
- proposals (list[Instances]): a list of N Instances, where N is the
- number of images.
-
- Returns:
- proposals: only contains proposals with at least one visible keypoint.
-
- Note that this is still slightly different from Detectron.
- In Detectron, proposals for training keypoint head are re-sampled from
- all the proposals with IOU>threshold & >=1 visible keypoint.
-
- Here, the proposals are first sampled from all proposals with
- IOU>threshold, then proposals with no visible keypoint are filtered out.
- This strategy seems to make no difference on Detectron and is easier to implement.
- """
- ret = []
- all_num_fg = []
- for proposals_per_image in proposals:
- # If empty/unannotated image (hard negatives), skip filtering for train
- if len(proposals_per_image) == 0:
- ret.append(proposals_per_image)
- continue
- gt_keypoints = proposals_per_image.gt_keypoints.tensor
- # #fg x K x 3
- vis_mask = gt_keypoints[:, :, 2] >= 1
- xs, ys = gt_keypoints[:, :, 0], gt_keypoints[:, :, 1]
- proposal_boxes = proposals_per_image.proposal_boxes.tensor.unsqueeze(dim=1) # #fg x 1 x 4
- kp_in_box = (
- (xs >= proposal_boxes[:, :, 0])
- & (xs <= proposal_boxes[:, :, 2])
- & (ys >= proposal_boxes[:, :, 1])
- & (ys <= proposal_boxes[:, :, 3])
- )
- selection = (kp_in_box & vis_mask).any(dim=1)
- selection_idxs = nonzero_tuple(selection)[0]
- all_num_fg.append(selection_idxs.numel())
- ret.append(proposals_per_image[selection_idxs])
-
- storage = get_event_storage()
- storage.put_scalar("keypoint_head/num_fg_samples", np.mean(all_num_fg))
- return ret
-
-
-class ROIHeads(torch.nn.Module):
- """
- ROIHeads perform all per-region computation in an R-CNN.
-
- It typically contains logic to
-
- 1. (in training only) match proposals with ground truth and sample them
- 2. crop the regions and extract per-region features using proposals
- 3. make per-region predictions with different heads
-
- It can have many variants, implemented as subclasses of this class.
- This base class contains the logic to match/sample proposals.
- But it is not necessary to inherit this class if the sampling logic is not needed.
- """
-
- @configurable
- def __init__(
- self,
- *,
- num_classes,
- batch_size_per_image,
- positive_fraction,
- proposal_matcher,
- proposal_append_gt=True,
- only_sample_fg_proposals=False,
- ):
- """
- NOTE: this interface is experimental.
-
- Args:
- num_classes (int): number of foreground classes (i.e. background is not included)
- batch_size_per_image (int): number of proposals to sample for training
- positive_fraction (float): fraction of positive (foreground) proposals
- to sample for training.
- proposal_matcher (Matcher): matcher that matches proposals and ground truth
- proposal_append_gt (bool): whether to include ground truth as proposals as well
- """
- super().__init__()
- self.batch_size_per_image = batch_size_per_image
- self.positive_fraction = positive_fraction
- self.num_classes = num_classes
- self.proposal_matcher = proposal_matcher
- self.proposal_append_gt = proposal_append_gt
- self.only_sample_fg_proposals = only_sample_fg_proposals
-
- @classmethod
- def from_config(cls, cfg):
- return {
- "batch_size_per_image": cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,
- "positive_fraction": cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION,
- "num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES,
- "proposal_append_gt": cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT,
- # Matcher to assign box proposals to gt boxes
- "proposal_matcher": Matcher(
- cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,
- cfg.MODEL.ROI_HEADS.IOU_LABELS,
- allow_low_quality_matches=False,
- ),
- "only_sample_fg_proposals": cfg.MODEL.CLIP.ONLY_SAMPLE_FG_PROPOSALS,
- }
-
- def _sample_proposals(
- self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- """
- Based on the matching between N proposals and M groundtruth,
- sample the proposals and set their classification labels.
-
- Args:
- matched_idxs (Tensor): a vector of length N, each is the best-matched
- gt index in [0, M) for each proposal.
- matched_labels (Tensor): a vector of length N, the matcher's label
- (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
- gt_classes (Tensor): a vector of length M.
-
- Returns:
- Tensor: a vector of indices of sampled proposals. Each is in [0, N).
- Tensor: a vector of the same length, the classification label for
- each sampled proposal. Each sample is labeled as either a category in
- [0, num_classes) or the background (num_classes).
- """
- has_gt = gt_classes.numel() > 0
- # Get the corresponding GT for each proposal
- if has_gt:
- gt_classes = gt_classes[matched_idxs]
- # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
- gt_classes[matched_labels == 0] = self.num_classes
- # Label ignore proposals (-1 label)
- gt_classes[matched_labels == -1] = -1
- else:
- gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
-
- # only sample fg proposals to train recognition branch (ref to subsample_labels)
- if self.only_sample_fg_proposals:
- if has_gt:
- positive = nonzero_tuple((gt_classes != -1) & (gt_classes != self.num_classes))[0]
- num_pos = int(self.batch_size_per_image * self.positive_fraction)
- # protect against not enough positive examples
- num_pos = min(positive.numel(), num_pos)
- # randomly select positive and negative examples
- perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
- sampled_idxs = positive[perm1]
- else: # no gt, only keep 1 bg proposal to fill the slot
- sampled_idxs = torch.zeros_like(matched_idxs[0:1])
- return sampled_idxs, gt_classes[sampled_idxs]
-
- sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
- gt_classes, self.batch_size_per_image, self.positive_fraction, self.num_classes
- )
-
- sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
- return sampled_idxs, gt_classes[sampled_idxs]
-
- @torch.no_grad()
- def label_and_sample_proposals(
- self, proposals: List[Instances], targets: List[Instances]
- ) -> List[Instances]:
- """
- Prepare some proposals to be used to train the ROI heads.
- It performs box matching between `proposals` and `targets`, and assigns
- training labels to the proposals.
- It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
- boxes, with a fraction of positives that is no larger than
- ``self.positive_fraction``.
-
- Args:
- See :meth:`ROIHeads.forward`
-
- Returns:
- list[Instances]:
- length `N` list of `Instances`s containing the proposals
- sampled for training. Each `Instances` has the following fields:
-
- - proposal_boxes: the proposal boxes
- - gt_boxes: the ground-truth box that the proposal is assigned to
- (this is only meaningful if the proposal has a label > 0; if label = 0
- then the ground-truth box is random)
-
- Other fields such as "gt_classes" and "gt_masks" that are included in `targets`.
- """
- # Augment proposals with ground-truth boxes.
- # In the case of learned proposals (e.g., RPN), when training starts
- # the proposals will be low quality due to random initialization.
- # It's possible that none of these initial
- # proposals have high enough overlap with the gt objects to be used
- # as positive examples for the second stage components (box head,
- # cls head, mask head). Adding the gt boxes to the set of proposals
- # ensures that the second stage components will have some positive
- # examples from the start of training. For RPN, this augmentation improves
- # convergence and empirically improves box AP on COCO by about 0.5
- # points (under one tested configuration).
- if self.proposal_append_gt:
- proposals = add_ground_truth_to_proposals(targets, proposals)
-
- proposals_with_gt = []
-
- num_fg_samples = []
- num_bg_samples = []
- for proposals_per_image, targets_per_image in zip(proposals, targets):
- has_gt = len(targets_per_image) > 0
- match_quality_matrix = pairwise_iou(
- targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
- )
- matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
- sampled_idxs, gt_classes = self._sample_proposals(
- matched_idxs, matched_labels, targets_per_image.gt_classes
- )
-
- # Set target attributes of the sampled proposals:
- proposals_per_image = proposals_per_image[sampled_idxs]
- proposals_per_image.gt_classes = gt_classes
-
- if has_gt:
- sampled_targets = matched_idxs[sampled_idxs]
- # We index all the attributes of targets that start with "gt_"
- # and have not been added to proposals yet (="gt_classes").
- # NOTE: here the indexing waste some compute, because heads
- # like masks, keypoints, etc, will filter the proposals again,
- # (by foreground/background, or number of keypoints in the image, etc)
- # so we essentially index the data twice.
- for (trg_name, trg_value) in targets_per_image.get_fields().items():
- if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
- proposals_per_image.set(trg_name, trg_value[sampled_targets])
- # If no GT is given in the image, we don't know what a dummy gt value can be.
- # Therefore the returned proposals won't have any gt_* fields, except for a
- # gt_classes full of background label.
-
- num_bg_samples.append((gt_classes == self.num_classes).sum().item())
- num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
- proposals_with_gt.append(proposals_per_image)
-
- # Log the number of fg/bg samples that are selected for training ROI heads
- storage = get_event_storage()
- storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
- storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
- #print("num_fg: {}; num_bg: {}".format(num_fg_samples, num_bg_samples))
-
- return proposals_with_gt
-
- def forward(
- self,
- images: ImageList,
- features: Dict[str, torch.Tensor],
- proposals: List[Instances],
- targets: Optional[List[Instances]] = None,
- ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
- """
- Args:
- images (ImageList):
- features (dict[str,Tensor]): input data as a mapping from feature
- map name to tensor. Axis 0 represents the number of images `N` in
- the input data; axes 1-3 are channels, height, and width, which may
- vary between feature maps (e.g., if a feature pyramid is used).
- proposals (list[Instances]): length `N` list of `Instances`. The i-th
- `Instances` contains object proposals for the i-th input image,
- with fields "proposal_boxes" and "objectness_logits".
- targets (list[Instances], optional): length `N` list of `Instances`. The i-th
- `Instances` contains the ground-truth per-instance annotations
- for the i-th input image. Specify `targets` during training only.
- It may have the following fields:
-
- - gt_boxes: the bounding box of each instance.
- - gt_classes: the label for each instance with a category ranging in [0, #class].
- - gt_masks: PolygonMasks or BitMasks, the ground-truth masks of each instance.
- - gt_keypoints: NxKx3, the ground-truth keypoints for each instance.
-
- Returns:
- list[Instances]: length `N` list of `Instances` containing the
- detected instances. Returned during inference only; may be [] during training.
-
- dict[str->Tensor]:
- mapping from a named loss to a tensor storing the loss. Used during training only.
- """
- raise NotImplementedError()
-
-
-@ROI_HEADS_REGISTRY.register()
-class Res5ROIHeads(ROIHeads):
- """
- The ROIHeads in a typical "C4" R-CNN model, where
- the box and mask head share the cropping and
- the per-region feature computation by a Res5 block.
- See :paper:`ResNet` Appendix A.
- """
-
- @configurable
- def __init__(
- self,
- *,
- in_features: List[str],
- pooler: ROIPooler,
- res5: nn.Module,
- box_predictor: nn.Module,
- mask_head: Optional[nn.Module] = None,
- **kwargs,
- ):
- """
- NOTE: this interface is experimental.
-
- Args:
- in_features (list[str]): list of backbone feature map names to use for
- feature extraction
- pooler (ROIPooler): pooler to extract region features from the backbone
- res5 (nn.Sequential): a CNN to compute per-region features, to be used by
- ``box_predictor`` and ``mask_head``. Typically this is a "res5"
- block from a ResNet.
- box_predictor (nn.Module): make box predictions from the feature.
- Should have the same interface as :class:`FastRCNNOutputLayers`.
- mask_head (nn.Module): transform features to make mask predictions
- """
- super().__init__(**kwargs)
- self.in_features = in_features
- self.pooler = pooler
- if isinstance(res5, (list, tuple)):
- res5 = nn.Sequential(*res5)
- self.res5 = res5
- self.box_predictor = box_predictor
- self.mask_on = mask_head is not None
- if self.mask_on:
- self.mask_head = mask_head
-
- @classmethod
- def from_config(cls, cfg, input_shape):
- # fmt: off
- ret = super().from_config(cfg)
- in_features = ret["in_features"] = cfg.MODEL.ROI_HEADS.IN_FEATURES
- pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
- pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
- pooler_scales = (1.0 / input_shape[in_features[0]].stride, )
- sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
- mask_on = cfg.MODEL.MASK_ON
- # fmt: on
- assert not cfg.MODEL.KEYPOINT_ON
- assert len(in_features) == 1
-
- ret["pooler"] = ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type=pooler_type,
- )
-
- # Compatibility with old moco code. Might be useful.
- # See notes in StandardROIHeads.from_config
- if not inspect.ismethod(cls._build_res5_block):
- logger.warning(
- "The behavior of _build_res5_block may change. "
- "Please do not depend on private methods."
- )
- cls._build_res5_block = classmethod(cls._build_res5_block)
-
- ret["res5"], out_channels = cls._build_res5_block(cfg)
- ret["box_predictor"] = FastRCNNOutputLayers(
- cfg, ShapeSpec(channels=out_channels, height=1, width=1)
- )
-
- if mask_on:
- ret["mask_head"] = build_mask_head(
- cfg,
- ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
- )
- return ret
-
- @classmethod
- def _build_res5_block(cls, cfg):
- # fmt: off
- stage_channel_factor = 2 ** 3 # res5 is 8x res2
- num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
- width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
- bottleneck_channels = num_groups * width_per_group * stage_channel_factor
- out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
- stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
- norm = cfg.MODEL.RESNETS.NORM
- assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
- "Deformable conv is not yet supported in res5 head."
- # fmt: on
-
- blocks = ResNet.make_stage(
- BottleneckBlock,
- 3,
- stride_per_block=[2, 1, 1],
- in_channels=out_channels // 2,
- bottleneck_channels=bottleneck_channels,
- out_channels=out_channels,
- num_groups=num_groups,
- norm=norm,
- stride_in_1x1=stride_in_1x1,
- )
- return nn.Sequential(*blocks), out_channels
-
- def _shared_roi_transform(self, features, boxes):
- x = self.pooler(features, boxes)
- return self.res5(x)
-
- def forward(self, images, features, proposals, targets=None):
- """
- See :meth:`ROIHeads.forward`.
- """
- del images
-
- if self.training:
- assert targets
- proposals = self.label_and_sample_proposals(proposals, targets)
- del targets
-
- proposal_boxes = [x.proposal_boxes for x in proposals]
- box_features = self._shared_roi_transform(
- [features[f] for f in self.in_features], proposal_boxes
- )
- predictions = self.box_predictor(box_features.mean(dim=[2, 3]))
-
- if self.training:
- del features
- losses = self.box_predictor.losses(predictions, proposals)
- if self.mask_on:
- proposals, fg_selection_masks = select_foreground_proposals(
- proposals, self.num_classes
- )
- # Since the ROI feature transform is shared between boxes and masks,
- # we don't need to recompute features. The mask loss is only defined
- # on foreground proposals, so we need to select out the foreground
- # features.
- mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
- del box_features
- losses.update(self.mask_head(mask_features, proposals))
- return [], losses
- else:
- pred_instances, _ = self.box_predictor.inference(predictions, proposals)
- pred_instances = self.forward_with_given_boxes(features, pred_instances)
- return pred_instances, {}
-
- def forward_with_given_boxes(self, features, instances):
- """
- Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
-
- Args:
- features: same as in `forward()`
- instances (list[Instances]): instances to predict other outputs. Expect the keys
- "pred_boxes" and "pred_classes" to exist.
-
- Returns:
- instances (Instances):
- the same `Instances` object, with extra
- fields such as `pred_masks` or `pred_keypoints`.
- """
- assert not self.training
- assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
-
- if self.mask_on:
- features = [features[f] for f in self.in_features]
- x = self._shared_roi_transform(features, [x.pred_boxes for x in instances])
- return self.mask_head(x, instances)
- else:
- return instances
-
-
-@ROI_HEADS_REGISTRY.register()
-class StandardROIHeads(ROIHeads):
- """
- It's "standard" in a sense that there is no ROI transform sharing
- or feature sharing between tasks.
- Each head independently processes the input features by each head's
- own pooler and head.
-
- This class is used by most models, such as FPN and C5.
- To implement more models, you can subclass it and implement a different
- :meth:`forward()` or a head.
- """
-
- @configurable
- def __init__(
- self,
- *,
- box_in_features: List[str],
- box_pooler: ROIPooler,
- box_head: nn.Module,
- box_predictor: nn.Module,
- mask_in_features: Optional[List[str]] = None,
- mask_pooler: Optional[ROIPooler] = None,
- mask_head: Optional[nn.Module] = None,
- keypoint_in_features: Optional[List[str]] = None,
- keypoint_pooler: Optional[ROIPooler] = None,
- keypoint_head: Optional[nn.Module] = None,
- train_on_pred_boxes: bool = False,
- **kwargs,
- ):
- """
- NOTE: this interface is experimental.
-
- Args:
- box_in_features (list[str]): list of feature names to use for the box head.
- box_pooler (ROIPooler): pooler to extract region features for the box head
- box_head (nn.Module): transform features to make box predictions
- box_predictor (nn.Module): make box predictions from the feature.
- Should have the same interface as :class:`FastRCNNOutputLayers`.
- mask_in_features (list[str]): list of feature names to use for the mask
- pooler or mask head. None if not using mask head.
- mask_pooler (ROIPooler): pooler to extract region features from image features.
- The mask head will then take region features to make predictions.
- If None, the mask head will directly take the dict of image features
- defined by `mask_in_features`
- mask_head (nn.Module): transform features to make mask predictions
- keypoint_in_features, keypoint_pooler, keypoint_head: similar to ``mask_*``.
- train_on_pred_boxes (bool): whether to use proposal boxes or
- predicted boxes from the box head to train other heads.
- """
- super().__init__(**kwargs)
- # keep self.in_features for backward compatibility
- self.in_features = self.box_in_features = box_in_features
- self.box_pooler = box_pooler
- self.box_head = box_head
- self.box_predictor = box_predictor
-
- self.mask_on = mask_in_features is not None
- if self.mask_on:
- self.mask_in_features = mask_in_features
- self.mask_pooler = mask_pooler
- self.mask_head = mask_head
-
- self.keypoint_on = keypoint_in_features is not None
- if self.keypoint_on:
- self.keypoint_in_features = keypoint_in_features
- self.keypoint_pooler = keypoint_pooler
- self.keypoint_head = keypoint_head
-
- self.train_on_pred_boxes = train_on_pred_boxes
-
- @classmethod
- def from_config(cls, cfg, input_shape):
- ret = super().from_config(cfg)
- ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
- # Subclasses that have not been updated to use from_config style construction
- # may have overridden _init_*_head methods. In this case, those overridden methods
- # will not be classmethods and we need to avoid trying to call them here.
- # We test for this with ismethod which only returns True for bound methods of cls.
- # Such subclasses will need to handle calling their overridden _init_*_head methods.
- if inspect.ismethod(cls._init_box_head):
- ret.update(cls._init_box_head(cfg, input_shape))
- if inspect.ismethod(cls._init_mask_head):
- ret.update(cls._init_mask_head(cfg, input_shape))
- if inspect.ismethod(cls._init_keypoint_head):
- ret.update(cls._init_keypoint_head(cfg, input_shape))
- return ret
-
- @classmethod
- def _init_box_head(cls, cfg, input_shape):
- # fmt: off
- in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
- pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
- pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
- sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
- pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
- # fmt: on
-
- # If StandardROIHeads is applied on multiple feature maps (as in FPN),
- # then we share the same predictors and therefore the channel counts must be the same
- in_channels = [input_shape[f].channels for f in in_features]
- # Check all channel counts are equal
- assert len(set(in_channels)) == 1, in_channels
- in_channels = in_channels[0]
-
- box_pooler = ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type=pooler_type,
- )
- # Here we split "box head" and "box predictor", which is mainly due to historical reasons.
- # They are used together so the "box predictor" layers should be part of the "box head".
- # New subclasses of ROIHeads do not need "box predictor"s.
- box_head = build_box_head(
- cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
- )
- box_predictor = FastRCNNOutputLayers(cfg, box_head.output_shape)
- return {
- "box_in_features": in_features,
- "box_pooler": box_pooler,
- "box_head": box_head,
- "box_predictor": box_predictor,
- }
-
- @classmethod
- def _init_mask_head(cls, cfg, input_shape):
- if not cfg.MODEL.MASK_ON:
- return {}
- # fmt: off
- in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
- pooler_resolution = cfg.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION
- pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
- sampling_ratio = cfg.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO
- pooler_type = cfg.MODEL.ROI_MASK_HEAD.POOLER_TYPE
- # fmt: on
-
- in_channels = [input_shape[f].channels for f in in_features][0]
-
- ret = {"mask_in_features": in_features}
- ret["mask_pooler"] = (
- ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type=pooler_type,
- )
- if pooler_type
- else None
- )
- if pooler_type:
- shape = ShapeSpec(
- channels=in_channels, width=pooler_resolution, height=pooler_resolution
- )
- else:
- shape = {f: input_shape[f] for f in in_features}
- ret["mask_head"] = build_mask_head(cfg, shape)
- return ret
-
- @classmethod
- def _init_keypoint_head(cls, cfg, input_shape):
- if not cfg.MODEL.KEYPOINT_ON:
- return {}
- # fmt: off
- in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
- pooler_resolution = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION
- pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features) # noqa
- sampling_ratio = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO
- pooler_type = cfg.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE
- # fmt: on
-
- in_channels = [input_shape[f].channels for f in in_features][0]
-
- ret = {"keypoint_in_features": in_features}
- ret["keypoint_pooler"] = (
- ROIPooler(
- output_size=pooler_resolution,
- scales=pooler_scales,
- sampling_ratio=sampling_ratio,
- pooler_type=pooler_type,
- )
- if pooler_type
- else None
- )
- if pooler_type:
- shape = ShapeSpec(
- channels=in_channels, width=pooler_resolution, height=pooler_resolution
- )
- else:
- shape = {f: input_shape[f] for f in in_features}
- ret["keypoint_head"] = build_keypoint_head(cfg, shape)
- return ret
-
- def forward(
- self,
- images: ImageList,
- features: Dict[str, torch.Tensor],
- proposals: List[Instances],
- targets: Optional[List[Instances]] = None,
- ) -> Tuple[List[Instances], Dict[str, torch.Tensor]]:
- """
- See :class:`ROIHeads.forward`.
- """
- del images
- if self.training:
- assert targets, "'targets' argument is required during training"
- proposals = self.label_and_sample_proposals(proposals, targets)
- del targets
-
- if self.training:
- losses = self._forward_box(features, proposals)
- # Usually the original proposals used by the box head are used by the mask, keypoint
- # heads. But when `self.train_on_pred_boxes is True`, proposals will contain boxes
- # predicted by the box head.
- losses.update(self._forward_mask(features, proposals))
- losses.update(self._forward_keypoint(features, proposals))
- return proposals, losses
- else:
- pred_instances = self._forward_box(features, proposals)
- # During inference cascaded prediction is used: the mask and keypoints heads are only
- # applied to the top scoring box detections.
- pred_instances = self.forward_with_given_boxes(features, pred_instances)
- return pred_instances, {}
-
- def forward_with_given_boxes(
- self, features: Dict[str, torch.Tensor], instances: List[Instances]
- ) -> List[Instances]:
- """
- Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
-
- This is useful for downstream tasks where a box is known, but other
- attributes (outputs of other heads) still need to be obtained.
- Test-time augmentation also uses this.
-
- Args:
- features: same as in `forward()`
- instances (list[Instances]): instances to predict other outputs. Expect the keys
- "pred_boxes" and "pred_classes" to exist.
-
- Returns:
- list[Instances]:
- the same `Instances` objects, with extra
- fields such as `pred_masks` or `pred_keypoints`.
- """
- assert not self.training
- assert instances[0].has("pred_boxes") and instances[0].has("pred_classes")
-
- instances = self._forward_mask(features, instances)
- instances = self._forward_keypoint(features, instances)
- return instances
-
- def _forward_box(self, features: Dict[str, torch.Tensor], proposals: List[Instances]):
- """
- Forward logic of the box prediction branch. If `self.train_on_pred_boxes is True`,
- the function puts predicted boxes in the `proposal_boxes` field of `proposals` argument.
-
- Args:
- features (dict[str, Tensor]): mapping from feature map names to tensor.
- Same as in :meth:`ROIHeads.forward`.
- proposals (list[Instances]): the per-image object proposals with
- their matching ground truth.
- Each has fields "proposal_boxes", and "objectness_logits",
- "gt_classes", "gt_boxes".
-
- Returns:
- In training, a dict of losses.
- In inference, a list of `Instances`, the predicted instances.
- """
- features = [features[f] for f in self.box_in_features]
- box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
- box_features = self.box_head(box_features)
- predictions = self.box_predictor(box_features)
- del box_features
-
- if self.training:
- losses = self.box_predictor.losses(predictions, proposals)
- # proposals is modified in-place below, so losses must be computed first.
- if self.train_on_pred_boxes:
- with torch.no_grad():
- pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
- predictions, proposals
- )
- for proposals_per_image, pred_boxes_per_image in zip(proposals, pred_boxes):
- proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
- return losses
- else:
- pred_instances, _ = self.box_predictor.inference(predictions, proposals)
- return pred_instances
-
- def _forward_mask(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
- """
- Forward logic of the mask prediction branch.
-
- Args:
- features (dict[str, Tensor]): mapping from feature map names to tensor.
- Same as in :meth:`ROIHeads.forward`.
- instances (list[Instances]): the per-image instances to train/predict masks.
- In training, they can be the proposals.
- In inference, they can be the boxes predicted by R-CNN box head.
-
- Returns:
- In training, a dict of losses.
- In inference, update `instances` with new fields "pred_masks" and return it.
- """
- if not self.mask_on:
- return {} if self.training else instances
-
- if self.training:
- # head is only trained on positive proposals.
- instances, _ = select_foreground_proposals(instances, self.num_classes)
-
- if self.mask_pooler is not None:
- features = [features[f] for f in self.mask_in_features]
- boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
- features = self.mask_pooler(features, boxes)
- else:
- features = {f: features[f] for f in self.mask_in_features}
- return self.mask_head(features, instances)
-
- def _forward_keypoint(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
- """
- Forward logic of the keypoint prediction branch.
-
- Args:
- features (dict[str, Tensor]): mapping from feature map names to tensor.
- Same as in :meth:`ROIHeads.forward`.
- instances (list[Instances]): the per-image instances to train/predict keypoints.
- In training, they can be the proposals.
- In inference, they can be the boxes predicted by R-CNN box head.
-
- Returns:
- In training, a dict of losses.
- In inference, update `instances` with new fields "pred_keypoints" and return it.
- """
- if not self.keypoint_on:
- return {} if self.training else instances
-
- if self.training:
- # head is only trained on positive proposals with >=1 visible keypoints.
- instances, _ = select_foreground_proposals(instances, self.num_classes)
- instances = select_proposals_with_visible_keypoints(instances)
-
- if self.keypoint_pooler is not None:
- features = [features[f] for f in self.keypoint_in_features]
- boxes = [x.proposal_boxes if self.training else x.pred_boxes for x in instances]
- features = self.keypoint_pooler(features, boxes)
- else:
- features = {f: features[f] for f in self.keypoint_in_features}
- return self.keypoint_head(features, instances)
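The class above keeps branch dispatch separate from branch computation: `forward()` always runs the box branch first, and at inference the mask and keypoint branches consume the predicted boxes instead of the raw proposals, which is what lets `forward_with_given_boxes` be reused for test-time augmentation. A minimal framework-free sketch of that dispatch pattern (every name below is illustrative, none of it is detectron2 API):

    from typing import Callable, Dict, List, Tuple

    class MiniROIHeads:
        # Toy dispatcher mirroring the deleted class's control flow.
        def __init__(self, box_branch: Callable, mask_branch: Callable,
                     training: bool = False):
            self.box_branch = box_branch
            self.mask_branch = mask_branch
            self.training = training

        def forward(self, features: Dict, proposals: List) -> Tuple[List, Dict]:
            if self.training:
                # The real code merges the box/mask/keypoint loss dicts here.
                losses = {}
                losses.update(self.box_branch(features, proposals))
                losses.update(self.mask_branch(features, proposals))
                return proposals, losses
            # Inference: boxes first, then per-box attributes on those boxes.
            instances = self.box_branch(features, proposals)
            instances = self.mask_branch(features, instances)
            return instances, {}

    # Trivial stand-in branches show the data flow:
    heads = MiniROIHeads(
        box_branch=lambda f, p: [{'pred_boxes': [0, 0, 1, 1]}],
        mask_branch=lambda f, i: [dict(x, pred_masks='...') for x in i],
    )
    print(heads.forward({}, []))  # ([{'pred_boxes': [...], 'pred_masks': '...'}], {})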
diff --git a/spaces/CarlDennis/HYTTS/text/german.py b/spaces/CarlDennis/HYTTS/text/german.py
deleted file mode 100644
index 32b886dff8029db7146c0184554df4620d4560b3..0000000000000000000000000000000000000000
--- a/spaces/CarlDennis/HYTTS/text/german.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import text.ger_to_ipa as ipa
-from text.ger_to_ipa import normalize_numbers
-import re
-from unidecode import unidecode
-
-# List of (ipa, lazy ipa) pairs:
-_lazy_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('r', 'ɹ'),
- ('æ', 'e'),
- ('ɑ', 'a'),
- ('ɔ', 'o'),
- ('ð', 'z'),
- ('θ', 's'),
- ('ɛ', 'e'),
- ('ɪ', 'i'),
- ('ʊ', 'u'),
- ('ʒ', 'ʥ'),
- ('ʤ', 'ʥ'),
- ('ˈ', '↓'),
-]]
-
-# List of (ipa, lazy ipa2) pairs:
-_lazy_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('r', 'ɹ'),
- ('ʤ', 'dʒ'),
- ('ʧ', 'tʃ'),
- ('r', 'ɹ'),
- ('æ', 'e'),
- ('ɑ', 'a'),
- ('ɔ', 'o'),
- ('ð', 'z'),
- ('θ', 's'),
- ('ɛ', 'e'),
- ('ɪ', 'i'),
- ('ʊ', 'u'),
- ('ʒ', 'ʥ'),
- ('ʤ', 'ʥ'),
- ('ˈ', '↓'),
- ('ɡ', 'g'),
- ('ɔ', 'o'),
- ('ɪ','i')
-]]
-
-# List of (ipa, ipa2) pairs
-_ipa_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('æ', 'e'),
- ('ɑ', 'a'),
- ('ɔ', 'o'),
- ('β', 'ss'), # likely meant German 'ß'; unidecode upstream already yields 'ss'
- ('ɛ', 'e'),
- ('ɪ', 'i'),
- ('ʊ', 'u'),
- ('ɡ', 'g'),
- ('ɔ', 'o'),
- ('ɪ','i'),
-
-]]
-
-def german_to_ipa(text):
-    text = unidecode(text).lower()
-    text = normalize_numbers(text)
-    phonemes = ipa.convert(text)
-    phonemes = ipa.collapse_whitespace(phonemes)
-    # The lazy-IPA mapping happens in german_to_lazy_ipa2 below; applying
-    # _lazy_ipa2 here as well would run the table twice ('ʤ' -> 'dʒ' -> 'dʥ').
-    return phonemes
-
-def german_to_lazy_ipa2(text):
- text = german_to_ipa(text)
- for regex, replacement in _lazy_ipa2:
- text = re.sub(regex, replacement, text)
- return text
-
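All three tables above use the same technique: an ordered list of precompiled (regex, replacement) pairs applied in sequence, so later rules operate on the output of earlier ones. A self-contained sketch of that pattern (the three-rule table is a toy, not the file's):

    import re

    # Order matters: each rule runs on the output of the previous one.
    _toy_rules = [(re.compile(pat), rep) for pat, rep in [
        ('θ', 's'),   # same style of IPA simplification as the deleted tables
        ('ɪ', 'i'),
        ('ˈ', '↓'),
    ]]

    def simplify_ipa(text: str) -> str:
        for regex, replacement in _toy_rules:
            text = regex.sub(replacement, text)
        return text

    print(simplify_ipa('ˈθɪŋ'))  # -> '↓siŋ'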
diff --git a/spaces/CarlDennis/Lovelive-VITS-JPZH/text/mandarin.py b/spaces/CarlDennis/Lovelive-VITS-JPZH/text/mandarin.py
deleted file mode 100644
index ceae9fedf8e710607d598fe91f70f503926059bb..0000000000000000000000000000000000000000
--- a/spaces/CarlDennis/Lovelive-VITS-JPZH/text/mandarin.py
+++ /dev/null
@@ -1,170 +0,0 @@
-import os
-import re
-import sys
-
-import jieba
-import cn2an
-import logging
-from pypinyin import lazy_pinyin, BOPOMOFO
-
-logging.getLogger('jieba').setLevel(logging.WARNING)
-
-
-# List of (Latin alphabet, bopomofo) pairs:
-_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('a', 'ㄟˉ'),
- ('b', 'ㄅㄧˋ'),
- ('c', 'ㄙㄧˉ'),
- ('d', 'ㄉㄧˋ'),
- ('e', 'ㄧˋ'),
- ('f', 'ㄝˊㄈㄨˋ'),
- ('g', 'ㄐㄧˋ'),
- ('h', 'ㄝˇㄑㄩˋ'),
- ('i', 'ㄞˋ'),
- ('j', 'ㄐㄟˋ'),
- ('k', 'ㄎㄟˋ'),
- ('l', 'ㄝˊㄛˋ'),
- ('m', 'ㄝˊㄇㄨˋ'),
- ('n', 'ㄣˉ'),
- ('o', 'ㄡˉ'),
- ('p', 'ㄆㄧˉ'),
- ('q', 'ㄎㄧㄡˉ'),
- ('r', 'ㄚˋ'),
- ('s', 'ㄝˊㄙˋ'),
- ('t', 'ㄊㄧˋ'),
- ('u', 'ㄧㄡˉ'),
- ('v', 'ㄨㄧˉ'),
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
- ('y', 'ㄨㄞˋ'),
- ('z', 'ㄗㄟˋ')
-]]
-
-# List of (bopomofo, romaji) pairs:
-_bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('ㄅㄛ', 'p⁼wo'),
- ('ㄆㄛ', 'pʰwo'),
- ('ㄇㄛ', 'mwo'),
- ('ㄈㄛ', 'fwo'),
- ('ㄅ', 'p⁼'),
- ('ㄆ', 'pʰ'),
- ('ㄇ', 'm'),
- ('ㄈ', 'f'),
- ('ㄉ', 't⁼'),
- ('ㄊ', 'tʰ'),
- ('ㄋ', 'n'),
- ('ㄌ', 'l'),
- ('ㄍ', 'k⁼'),
- ('ㄎ', 'kʰ'),
- ('ㄏ', 'h'),
- ('ㄐ', 'ʧ⁼'),
- ('ㄑ', 'ʧʰ'),
- ('ㄒ', 'ʃ'),
- ('ㄓ', 'ʦ`⁼'),
- ('ㄔ', 'ʦ`ʰ'),
- ('ㄕ', 's`'),
- ('ㄖ', 'ɹ`'),
- ('ㄗ', 'ʦ⁼'),
- ('ㄘ', 'ʦʰ'),
- ('ㄙ', 's'),
- ('ㄚ', 'a'),
- ('ㄛ', 'o'),
- ('ㄜ', 'ə'),
- ('ㄝ', 'e'),
- ('ㄞ', 'ai'),
- ('ㄟ', 'ei'),
- ('ㄠ', 'au'),
- ('ㄡ', 'ou'),
- ('ㄧㄢ', 'yeNN'),
- ('ㄢ', 'aNN'),
- ('ㄧㄣ', 'iNN'),
- ('ㄣ', 'əNN'),
- ('ㄤ', 'aNg'),
- ('ㄧㄥ', 'iNg'),
- ('ㄨㄥ', 'uNg'),
- ('ㄩㄥ', 'yuNg'),
- ('ㄥ', 'əNg'),
- ('ㄦ', 'əɻ'),
- ('ㄧ', 'i'),
- ('ㄨ', 'u'),
- ('ㄩ', 'ɥ'),
- ('ˉ', '→'),
- ('ˊ', '↑'),
- ('ˇ', '↓↑'),
- ('ˋ', '↓'),
- ('˙', ''),
- (',', ','),
- ('。', '.'),
- ('!', '!'),
- ('?', '?'),
- ('—', '-')
-]]
-
-# List of (romaji, ipa) pairs:
-_romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
- ('ʃy', 'ʃ'),
- ('ʧʰy', 'ʧʰ'),
- ('ʧ⁼y', 'ʧ⁼'),
- ('NN', 'n'),
- ('Ng', 'ŋ'),
- ('y', 'j'),
- ('h', 'x')
-]]
-
-
-def number_to_chinese(text):
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
- for number in numbers:
- text = text.replace(number, cn2an.an2cn(number), 1)
- return text
-
-
-def chinese_to_bopomofo(text):
-    text = text.replace('、', ',').replace(';', ',').replace(':', ',')
-    words = jieba.lcut(text, cut_all=False)
-    text = ''
-    for word in words:
-        # Pass non-Chinese tokens through unchanged (and skip the conversion).
-        if not re.search('[\u4e00-\u9fff]', word):
-            text += word
-            continue
-        bopomofos = lazy_pinyin(word, BOPOMOFO)
-        for i in range(len(bopomofos)):
-            # Tag unmarked first-tone syllables with an explicit tone mark.
-            if re.match('[\u3105-\u3129]', bopomofos[i][-1]):
-                bopomofos[i] += 'ˉ'
-        if text != '':
-            text += ' '
-        text += ''.join(bopomofos)
-    return text
-
-
-def latin_to_bopomofo(text):
- for regex, replacement in _latin_to_bopomofo:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def bopomofo_to_romaji(text):
- for regex, replacement in _bopomofo_to_romaji:
- text = re.sub(regex, replacement, text)
- return text
-
-
-def chinese_to_romaji(text):
- text = number_to_chinese(text)
- text = chinese_to_bopomofo(text)
- text = latin_to_bopomofo(text)
- text = bopomofo_to_romaji(text)
- text = re.sub('i[aoe]', lambda x: 'y' + x.group(0)[1:], text)
- text = re.sub('u[aoəe]', lambda x: 'w' + x.group(0)[1:], text)
- text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)', lambda x: x.group(1) +
- 'ɹ`' + x.group(2), text).replace('ɻ', 'ɹ`')
- text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)',
- lambda x: x.group(1) + 'ɹ' + x.group(2), text)
- return text
-
-
-def chinese_to_lazy_ipa(text):
- text = chinese_to_romaji(text)
- for regex, replacement in _romaji_to_ipa:
- text = re.sub(regex, replacement, text)
- return text
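`chinese_to_romaji` is a fixed five-stage pipeline: digits to Chinese numerals via cn2an, characters to bopomofo via jieba segmentation plus pypinyin, stray Latin letters to bopomofo, bopomofo to romaji, then regex clean-up for glides and the retroflex endings. A hedged usage sketch, assuming the file is importable as `text.mandarin` as the Space layout suggests (the sample sentence is illustrative):

    # Requires jieba, cn2an and pypinyin to be installed.
    from text.mandarin import chinese_to_romaji, chinese_to_lazy_ipa

    sentence = '我有2只猫。'  # the digit is verbalized before conversion
    print(chinese_to_romaji(sentence))
    print(chinese_to_lazy_ipa(sentence))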
diff --git a/spaces/Catmeow/Count_objects_in_picture/app.py b/spaces/Catmeow/Count_objects_in_picture/app.py
deleted file mode 100644
index f6b3e25ac6d2e0273d51c7ea0bab4fd585a9c127..0000000000000000000000000000000000000000
--- a/spaces/Catmeow/Count_objects_in_picture/app.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import gradio as gr
-import torch
-from PIL import Image
-import pandas as pd
-
-model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
-
-def inference(im):
-    results = model(im)
-    results.render()  # public API; draws the detection boxes onto results.ims
-    detections = results.pandas().xyxy[0].round(2)
-    counts = detections.groupby('name')['name'].count()
-    # Format the per-class counts directly instead of slicing the Series repr.
-    counts_text = '\n'.join(f'{name}: {n}' for name, n in counts.items())
-    return Image.fromarray(results.ims[0]), counts_text
-
-title = "Count Objects in the picture"
-description = "Count objects in picture by Yolov5s model"
-examples = [['test.jpg']]
-demo = gr.Interface(inference,
- inputs = [gr.Image(label="Original Image")],
- outputs = [gr.Image(label="Output Image"),gr.Textbox(label="Count Objects")],
- title=title,
- examples=examples,
- description=description)
-demo.launch()
\ No newline at end of file
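The per-class count in the app above is just a groupby over the detections dataframe that `results.pandas()` exposes. The same idea without the Gradio wrapper, as a short sketch (the image path is illustrative, and `torch.hub.load` downloads yolov5s on first use):

    import torch

    model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)

    def count_objects(image_path: str) -> dict:
        results = model(image_path)
        df = results.pandas().xyxy[0]       # one row per detection
        return df['name'].value_counts().to_dict()

    print(count_objects('test.jpg'))        # e.g. {'person': 2, 'car': 1}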
diff --git a/spaces/ChrisCaviar/ControlNet-v1-1/app_shuffle.py b/spaces/ChrisCaviar/ControlNet-v1-1/app_shuffle.py
deleted file mode 100644
index f863c95a6c263eeb55fee7c8502d79b404771a3a..0000000000000000000000000000000000000000
--- a/spaces/ChrisCaviar/ControlNet-v1-1/app_shuffle.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-
-import gradio as gr
-
-from utils import randomize_seed_fn
-
-
-def create_demo(process, max_images=12, default_num_images=3):
- with gr.Blocks() as demo:
- with gr.Row():
- with gr.Column():
- image = gr.Image()
- prompt = gr.Textbox(label='Prompt')
- run_button = gr.Button('Run')
- with gr.Accordion('Advanced options', open=False):
- preprocessor_name = gr.Radio(
- label='Preprocessor',
- choices=['ContentShuffle', 'None'],
- type='value',
- value='ContentShuffle')
- num_samples = gr.Slider(label='Number of images',
- minimum=1,
- maximum=max_images,
- value=default_num_images,
- step=1)
- image_resolution = gr.Slider(label='Image resolution',
- minimum=256,
- maximum=512,
- value=512,
- step=256)
- num_steps = gr.Slider(label='Number of steps',
- minimum=1,
- maximum=100,
- value=20,
- step=1)
- guidance_scale = gr.Slider(label='Guidance scale',
- minimum=0.1,
- maximum=30.0,
- value=9.0,
- step=0.1)
- seed = gr.Slider(label='Seed',
- minimum=0,
- maximum=1000000,
- step=1,
- value=0,
- randomize=True)
- randomize_seed = gr.Checkbox(label='Randomize seed',
- value=True)
- a_prompt = gr.Textbox(
- label='Additional prompt',
- value='best quality, extremely detailed')
- n_prompt = gr.Textbox(
- label='Negative prompt',
- value=
- 'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
- )
- with gr.Column():
- result = gr.Gallery(label='Output', show_label=False).style(
- columns=2, object_fit='scale-down')
- inputs = [
- image,
- prompt,
- a_prompt,
- n_prompt,
- num_samples,
- image_resolution,
- num_steps,
- guidance_scale,
- seed,
- preprocessor_name,
- ]
- prompt.submit(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- ).then(
- fn=process,
- inputs=inputs,
- outputs=result,
- )
- run_button.click(
- fn=randomize_seed_fn,
- inputs=[seed, randomize_seed],
- outputs=seed,
- ).then(
- fn=process,
- inputs=inputs,
- outputs=result,
- api_name='content-shuffle',
- )
- return demo
-
-
-if __name__ == '__main__':
- from model import Model
- model = Model(task_name='shuffle')
- demo = create_demo(model.process_shuffle)
- demo.queue().launch()
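Both triggers in the demo above use the same two-step chain: the first event rewrites the seed component, and `.then()` runs the generator only after that value is committed, so "Randomize seed" takes effect before generation starts. A minimal runnable sketch of the pattern with toy functions (no ControlNet model involved):

    import random
    import gradio as gr

    def maybe_randomize(seed: int, randomize: bool) -> int:
        return random.randint(0, 1_000_000) if randomize else seed

    def generate(seed: int) -> str:
        return f'generated with seed {seed}'

    with gr.Blocks() as demo:
        seed = gr.Slider(0, 1_000_000, step=1, label='Seed')
        randomize = gr.Checkbox(value=True, label='Randomize seed')
        out = gr.Textbox(label='Result')
        btn = gr.Button('Run')
        # Step 1 updates the seed component; step 2 reads the committed value.
        btn.click(maybe_randomize, [seed, randomize], seed).then(generate, seed, out)

    if __name__ == '__main__':
        demo.launch()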
diff --git a/spaces/CohereForAI/pokemon-cards-explorer/src/app.py b/spaces/CohereForAI/pokemon-cards-explorer/src/app.py
deleted file mode 100644
index 6803bcef52b794f8b4288ec2342f18739064f414..0000000000000000000000000000000000000000
--- a/spaces/CohereForAI/pokemon-cards-explorer/src/app.py
+++ /dev/null
@@ -1,73 +0,0 @@
-import os
-
-import streamlit as st
-import pinecone
-import cohere
-import openai
-
-st.set_page_config(
- page_title="Pokemon Card Explorer",
- page_icon="🔍",
- layout="centered",
- menu_items={
- 'Get Help': 'mailto:bhavnicksm@gmail.com',
- 'Report a bug': "https://github.com/bhavnicksm/pokemon-card-explorer/issues",
- 'About': "Pokemon Card Explorer lets you do super power semantic search over 13K Pokemon cards, to find the one you are looking for!"
- }
-)
-
-OPENAI_API_KEY = st.secrets['OPENAI_API_KEY']
-COHERE_API_KEY = st.secrets['COHERE_API_KEY']
-PINECONE_API_KEY = st.secrets['PINECONE_API_KEY']
-
-def init_pinecone():
- # find API key at app.pinecone.io
- pinecone.init(api_key=PINECONE_API_KEY, environment="us-west1-gcp")
- return pinecone.Index('pokemon-cards-v2')
-
-def init_reranker():
- return cohere.Client(COHERE_API_KEY)
-
-
-index = init_pinecone()
-reranker = init_reranker()
-retriever = openai.Embedding
-
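    # The three clients above implement retrieve-then-rerank search: OpenAI
    # embeds the query, Pinecone returns nearest-neighbor cards, and Cohere
    # reranks the hits. A hedged sketch of that flow with the old-style SDKs
    # the imports suggest (model names and metadata fields are assumptions):
    #
    #   emb = retriever.create(input=[query],
    #                          model='text-embedding-ada-002')['data'][0]['embedding']
    #   hits = index.query(vector=emb, top_k=25, include_metadata=True)['matches']
    #   docs = [h['metadata']['text'] for h in hits]
    #   ranked = reranker.rerank(query=query, documents=docs, top_n=10)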
-def card(urls):
- figures = [f"""
-
-
-
- """ for url in urls]
- return st.markdown(f"""
-