diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/archs/srvgg_arch.py b/spaces/17TheWord/RealESRGAN/realesrgan/archs/srvgg_arch.py
deleted file mode 100644
index 39460965c9c5ee9cd6eb41c50d33574cb8ba6e50..0000000000000000000000000000000000000000
--- a/spaces/17TheWord/RealESRGAN/realesrgan/archs/srvgg_arch.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from basicsr.utils.registry import ARCH_REGISTRY
-from torch import nn as nn
-from torch.nn import functional as F
-
-
-@ARCH_REGISTRY.register()
-class SRVGGNetCompact(nn.Module):
- """A compact VGG-style network structure for super-resolution.
-
- It is a compact network structure, which performs upsampling in the last layer and no convolution is
- conducted on the HR feature space.
-
- Args:
- num_in_ch (int): Channel number of inputs. Default: 3.
- num_out_ch (int): Channel number of outputs. Default: 3.
- num_feat (int): Channel number of intermediate features. Default: 64.
- num_conv (int): Number of convolution layers in the body network. Default: 16.
- upscale (int): Upsampling factor. Default: 4.
- act_type (str): Activation type, options: 'relu', 'prelu', 'leakyrelu'. Default: prelu.
- """
-
- def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu'):
- super(SRVGGNetCompact, self).__init__()
- self.num_in_ch = num_in_ch
- self.num_out_ch = num_out_ch
- self.num_feat = num_feat
- self.num_conv = num_conv
- self.upscale = upscale
- self.act_type = act_type
-
- self.body = nn.ModuleList()
- # the first conv
- self.body.append(nn.Conv2d(num_in_ch, num_feat, 3, 1, 1))
- # the first activation
- if act_type == 'relu':
- activation = nn.ReLU(inplace=True)
- elif act_type == 'prelu':
- activation = nn.PReLU(num_parameters=num_feat)
- elif act_type == 'leakyrelu':
- activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
- self.body.append(activation)
-
- # the body structure
- for _ in range(num_conv):
- self.body.append(nn.Conv2d(num_feat, num_feat, 3, 1, 1))
- # activation
- if act_type == 'relu':
- activation = nn.ReLU(inplace=True)
- elif act_type == 'prelu':
- activation = nn.PReLU(num_parameters=num_feat)
- elif act_type == 'leakyrelu':
- activation = nn.LeakyReLU(negative_slope=0.1, inplace=True)
- self.body.append(activation)
-
- # the last conv
- self.body.append(nn.Conv2d(num_feat, num_out_ch * upscale * upscale, 3, 1, 1))
- # upsample
- self.upsampler = nn.PixelShuffle(upscale)
-
- def forward(self, x):
- out = x
- for i in range(0, len(self.body)):
- out = self.body[i](out)
-
- out = self.upsampler(out)
- # add the nearest upsampled image, so that the network learns the residual
- base = F.interpolate(x, scale_factor=self.upscale, mode='nearest')
- out += base
- return out
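For context on what is being removed: the class above is self-contained apart from the `basicsr` registry import, so it can be exercised with a short smoke test like the sketch below. The import path mirrors the deleted file's location in this repository, and the 64x64 input size is an illustrative assumption rather than anything specified by the diff.

```python
# Minimal smoke test for SRVGGNetCompact (assumes torch and basicsr are installed;
# the input size is an arbitrary choice for illustration).
import torch

from realesrgan.archs.srvgg_arch import SRVGGNetCompact  # path as it existed before this deletion

model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16,
                        upscale=4, act_type='prelu')
model.eval()

lr = torch.rand(1, 3, 64, 64)   # dummy low-resolution RGB batch
with torch.no_grad():
    sr = model(lr)              # conv body -> PixelShuffle, plus nearest-neighbor residual

print(sr.shape)                 # expected: torch.Size([1, 3, 256, 256]), i.e. 4x upscale
```

The last convolution emits `num_out_ch * upscale * upscale` channels precisely so that `PixelShuffle(upscale)` can rearrange them into the upscaled image, and the nearest-upsampled input added at the end means the body only has to learn a residual.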
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 The Ultimate Guide for Navigation and Entertainment.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 The Ultimate Guide for Navigation and Entertainment.md
deleted file mode 100644
index 9be5a51742471ae9e9b9d4dc1c095d4b738fa0d2..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 The Ultimate Guide for Navigation and Entertainment.md
+++ /dev/null
@@ -1,139 +0,0 @@
-
-
DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46
-
If you are looking for a high-quality navigation system for your car, you might be interested in the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46. This is a DVD-based map update for the KENWOOD DV3200 navigation system, which is manufactured by DENSO, a leading supplier of automotive technology. In this article, we will review this product and tell you everything you need to know about it.
-
What is DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46?
-
DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 is a map update disc that contains the latest data for the KENWOOD DV3200 navigation system. This system is a DVD-based navigation system that uses a touch screen display and a remote control to provide you with turn-by-turn directions, voice guidance, and various other features. The system can also play DVDs, CDs, MP3s, and other media formats.
-
Why do you need it?
-
You need this map update disc if you want to enjoy the best performance and accuracy of your KENWOOD DV3200 navigation system. The disc contains the most recent map data for Europe, including countries such as France, Germany, Italy, Spain, UK, and more. It also includes thousands of points of interest, such as gas stations, restaurants, hotels, attractions, and more. By updating your map data, you can avoid getting lost, save time and fuel, and discover new places.
-
How to install it?
-
Installing the map update disc is very easy and straightforward. All you need to do is insert the disc into your DVD player in your car and follow the on-screen instructions. The update process will take about 15 minutes to complete. You can then eject the disc and enjoy your updated navigation system.
-
Features and Benefits
-
High-quality navigation system
-
The KENWOOD DV3200 navigation system is a high-quality navigation system that offers you many features and benefits. Some of them are:
-
-
It has a 6.5-inch touch screen display that shows you clear and detailed maps and directions.
-
It has a remote control that allows you to operate the system without taking your hands off the steering wheel.
-
It has a built-in GPS receiver that tracks your location and speed accurately.
-
It has a voice guidance feature that speaks to you in various languages and dialects.
-
It has a text-to-speech feature that reads out street names and other information for you.
-
It has a lane guidance feature that shows you which lane to take at complex junctions.
-
It has a speed limit warning feature that alerts you when you exceed the speed limit.
-
It has a traffic information feature that shows you real-time traffic conditions and suggests alternative routes.
-
-
Compatible with various models of cars
-
The KENWOOD DV3200 navigation system is compatible with various models of cars that have a DVD player and a screen in their dashboard. Some of these models are:
-
-
-
| Brand | Model | Year |
| --- | --- | --- |
| Audi | A4 | 2008-2011 |
| BMW | X5 | 2007-2010 |
| Citroen | C5 | 2008-2012 |
| Ford | Focus | 2009-2011 |
| Honda | Civic | 2006-2011 |
| Mazda | Mazda6 | 2008-2012 |
| Nissan | Qashqai | 2007-2012 |
| Volkswagen | Golf | 2009-2012 |
-
-
Updated maps and points of interest
-
The DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 contains the updated maps and points of interest for Europe. The maps cover more than 40 countries and regions, including:
-
-
Austria
Belgium
Bulgaria
Croatia
Czech Republic
Denmark
Estonia
Finland
France
Germany
Greece
Hungary
Ireland
Italy
Latvia
Lithuania
Luxembourg
Netherlands
Norway
Poland
Portugal
Romania
Slovakia
Slovenia
Spain
Sweden
Switzerland
Turkey
UK and more. The points of interest include more than 10 million locations, such as:
-
Airports
Banks
Cinemas
Hospitals
Museums
Parks
Schools and more. You can search for points of interest by name, category, distance, or vicinity.
-
Easy to use interface and functions
-
The KENWOOD DV3200 navigation system has an easy to use interface and functions that make it user-friendly and convenient. Some of them are:
-
You can choose from different map views, such as 2D, 3D, split screen, or bird's eye view.
You can zoom in or out of the map using the touch screen or the remote control.
You can customize the map display by changing the color scheme, brightness, contrast, or day/night mode.
You can set your preferences for route calculation, such as fastest, shortest, or eco-friendly.
You can save your favorite destinations or routes for quick access.
You can access various settings and options by pressing the menu button on the screen or the remote control.
-
Pros and Cons
-
Pros
-
The DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 has many pros that make it a great product for your car. Some of them are:
-
Accurate and reliable navigation: The map update disc provides you with accurate and reliable navigation data that helps you reach your destination safely and efficiently.
Affordable price: The map update disc is available at an affordable price compared to other similar products on the market.
Customer support and warranty: The map update disc comes with customer support and warranty from the manufacturer in case you encounter any issues or defects with the product.
-
Cons
-The DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 also has some cons that you should consider before buying it. Some of them are:
-
-
Limited availability and stock: The map update disc is not widely available and may be out of stock in some online or offline stores.
Requires DVD player and screen in the car: The map update disc requires you to have a DVD player and a screen in your car dashboard to use the KENWOOD DV3200 navigation system. If you don't have them, you will need to buy them separately.
May not work with some newer models of cars: The map update disc may not work with some newer models of cars that have different software or hardware specifications. You should check the compatibility list before buying the product.
-
Conclusion
-
Summary of the main points
-
In conclusion, the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46 is a map update disc that provides you with the latest map data and points of interest for Europe for your KENWOOD DV3200 navigation system. The product has many features and benefits that make it a high-quality navigation system for your car. It also has some pros and cons that you should consider before buying it.
-
Recommendation and call to action
-
If you are looking for a map update disc for your KENWOOD DV3200 navigation system, we recommend you to buy the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46. It is a reliable and affordable product that will enhance your driving experience and help you discover new places. You can buy it online from various websites or offline from authorized dealers. Don't miss this opportunity and order yours today!
-
FAQs
-
Here are some frequently asked questions about the DVD MAP 2011-2012 KENWOOD DV3200 DENSO Part No. 46:
-
-
Q: How often should I update my map data? A: You should update your map data at least once a year to ensure that you have the most accurate and up-to-date information.
-
Q: How can I check if my car is compatible with the product? A: You can check the compatibility list on the official website of the manufacturer or contact them directly for more information.
-
Q: How can I contact the customer support or claim the warranty? A: You can contact the customer support or claim the warranty by calling the toll-free number or sending an email to the address provided on the product package or manual.
-
Q: What are some other products that are similar to this one? A: Some other products that are similar to this one are DVD MAP 2011-2012 KENWOOD DNX520VBT DENSO Part No. 46, DVD MAP 2011-2012 KENWOOD DNX7200 DENSO Part No. 46, and DVD MAP 2011-2012 KENWOOD DNX8220BT DENSO Part No. 46.
-
Q: What are some reviews from other customers who bought this product? A: Some reviews from other customers who bought this product are:
-
-
"I bought this product for my Audi A4 and it works perfectly. The installation was easy and the navigation is accurate and fast. The maps are clear and detailed and the points of interest are useful. I am very satisfied with this product."
-
"This product is a waste of money. It does not work with my BMW X5 and I had to return it. The customer service was rude and unhelpful. The product is also outdated and expensive. I do not recommend this product."
-
"This product is okay but not great. It works with my Ford Focus but sometimes it freezes or glitches. The maps are not very updated and some roads are missing or wrong. The traffic information is also not very reliable. I think there are better products out there."
-
-
-
-
\ No newline at end of file
diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fine Cut Para Corel X7 Serial Number.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fine Cut Para Corel X7 Serial Number.md
deleted file mode 100644
index 7d3dac91b19f45a997ffbe49bc9093c510e0694b..0000000000000000000000000000000000000000
--- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Fine Cut Para Corel X7 Serial Number.md
+++ /dev/null
@@ -1,137 +0,0 @@
-
-
What is FineCut/Coat9 for CorelDRAW and why do you need it?
-
If you are looking for a cutting plotter software that can handle sophisticated design needs, you might want to try FineCut/Coat9 for CorelDRAW. This is a plug-in software that allows you to do design, creation of cutting data and its output to a cutting plotter from inside CorelDRAW, one of the most popular graphic design software in the world.
-
FineCut/Coat9 for CorelDRAW is developed by Mimaki, a leading manufacturer of wide-format inkjet printers and cutting machines. It is compatible with various Mimaki cutting plotters, such as CJV, UCJV, CG, CF and CFL series. It also supports output to laser engraving machines of other manufacturers.
With FineCut/Coat9 for CorelDRAW, you can create stunning graphics and cut them out with precision and ease. Whether you are making signs, stickers, labels, decals, logos, banners, posters, packaging, apparel, or any other creative projects, FineCut/Coat9 for CorelDRAW can help you achieve professional results.
-
FineCut/Coat9 for CorelDRAW is a plug-in software that enhances the potentiality of cutting plotter
-
FineCut/Coat9 for CorelDRAW is not just a simple cutting tool. It is a comprehensive software that offers many features and benefits that can improve your workflow and productivity.
-
Features and benefits of FineCut/Coat9 for CorelDRAW
-
-
It integrates seamlessly with CorelDRAW Graphics Suite 2017/2018/2019/2020/2021/2022 and Technical Suite 2017/2018/2019/2020/2021/2022. You can access all the functions of FineCut/Coat9 from the menu bar or toolbar of CorelDRAW.
-
It supports various cutting methods, such as half cut, perforation cut, creasing, die-cutting, kiss-cutting, contour cutting, etc. You can also apply different cutting conditions to each object or layer.
-
It allows you to create registration marks automatically or manually. You can also adjust the size, position and shape of the registration marks according to your preferences.
-
It enables you to preview the cutting data on the screen before outputting it to the cutting plotter. You can also simulate the cutting process and check the accuracy of the alignment.
-
It provides you with various editing tools, such as offset, corner rounder, overcut, weeding line, etc. You can also use the powerful vectorization function to convert bitmap images into vector data.
-
It supports multiple languages, such as English, Japanese, Chinese (Simplified), Chinese (Traditional), Korean, German, French, Italian, Spanish and Portuguese.
-
-
How to install and use FineCut/Coat9 for CorelDRAW
-
To install FineCut/Coat9 for CorelDRAW, you need to download it from Mimaki's website and enter the serial key. You can get the serial key by authenticating your previous FineCut CD or by purchasing a new product.
-
To use FineCut/Coat9 for CorelDRAW, you need to follow these steps:
-
-
-
Open CorelDRAW and create or open your design file.
-
Select the objects that you want to cut out and go to FineCut > Create Cut Data.
-
In the Create Cut Data dialog box, choose your cutting plotter model and set your cutting conditions.
-
Click Create to generate the cutting data.
-
If you want to edit the cutting data further, go to FineCut > Edit Cut Data.
-
If you want to preview the cutting data or simulate the cutting process, go to FineCut > Cut Preview.
-
When you are ready to output the cutting data to the cutting plotter, go to FineCut > Cut Out.
-
In the Cut Out dialog box, choose your connection method and click Cut Out.
-
The cutting plotter will start cutting your design according to your settings.
-
-
How to get a serial number for FineCut/Coat9 for CorelDRAW
-
To use FineCut/Coat9 for CorelDRAW, you need a serial number that matches your product version. There are two ways to get a serial number:
-
Serial authentication procedure for FineCut/Coat9 for CorelDRAW
-
If you have a previous version of FineCut CD (FineCut8 or below), you can upgrade to FineCut/Coat9 with free of charge by following these steps:
-
-
Download FineCut/Coat9 from Mimaki's website and install it on your computer.
-
Insert your previous version of FineCut CD into your CD-ROM drive.
-
Run FinecutAuthTool.exe, which is located in the folder where you installed FineCut/Coat9.
-
Select your product name from the drop-down list and click Next.
-
The tool will read your previous version of FineCut CD and generate a serial key for FineCut/Coat9.
-
Copy the serial key and paste it into the registration window of FineCut/Coat9.
-
You can now use FineCut/Coat9 with full functionality.
-
-
List of serial numbers and activation codes for FineCut/Coat9 for CorelDRAW
-
If you don't have a previous version of FineCut CD or if you want to use a different product name than your previous one, you need to purchase a new product from Mimaki's website or authorized dealers. Here are some examples of serial numbers and activation codes for different products:
-
-
| Product name | Serial number | Activation code |
| --- | --- | --- |
| Finecut 8 Upgrade Kit (for CJV30) | F8U-CJV30-XXXX-XXXX-… | F8U-CJV30-ACTI-VATI-ONCO-DEHE-RE00-0000-0000-0000-0000-0000-0000-0000-0000-0000 |
| Finecut 8 Upgrade Kit (for CG-FXII) | F8U-CGFXII-XXXX-XXXX-… | F8U-CGFXII-ACTI-VATI-ONCO-DEHE-RE00-0000-0000-0000-0000-0000-0000-0000-0000-0000 |
| Finecut 8 Upgrade Kit (for CF2) | F8U-CF2XXX-XXXX-XXXX-… | F8U-CF2XXX-ACTI-VATI-ONCO-DEHE-RE00-0000 |
| Finecut 8 Upgrade Kit (for CF3) | F8U-CF3XXX-… | F8U-CF3XXX-ACTI-VATI |
| Finecut 9 Standard Kit (for CorelDRAW) | F9S-CORELX-… | F9S-CORELX-ACTI-VATI |
How to troubleshoot common issues with FineCut/Coat9 for CorelDRAW
-
Although FineCut/Coat9 for CorelDRAW is a reliable and user-friendly software, you may encounter some problems or errors while using it. Here are some tips on how to troubleshoot common issues with FineCut/Coat9 for CorelDRAW:
-
How to fix slow operation or freezing when using FineCut/Coat9 for CorelDRAW Ver2.4
-
If you are using FineCut/Coat9 for CorelDRAW Ver2.4, you may experience extremely slow operation or freezing when you create or edit cut data. This is because of a compatibility issue between FineCut/Coat9 and CorelDRAW Graphics Suite 2021/2022.
-
To fix this issue, you need to update FineCut/Coat9 to Ver2.4.1 or later. You can download the latest version of FineCut/Coat9 from Mimaki's website and install it on your computer. After updating FineCut/Coat9, you should be able to use it normally with CorelDRAW Graphics Suite 2021/2022.
-
How to contact Mimaki support for FineCut/Coat9 for CorelDRAW
-
If you have any questions or problems with FineCut/Coat9 for CorelDRAW that are not covered in this article, you can contact Mimaki support for assistance. You can find the contact information of Mimaki support in your region on Mimaki's website.
-
Before contacting Mimaki support, please prepare the following information:
-
-
Your product name and serial number
-
Your CorelDRAW version and operating system
-
A detailed description of your issue and the steps to reproduce it
-
Any error messages or screenshots that can help diagnose the problem
-
-
Mimaki support will try to help you resolve your issue as soon as possible.
-
Conclusion
-
Summary of the main points
-
In this article, we have learned about FineCut/Coat9 for CorelDRAW, a plug-in software that enhances the potentiality of cutting plotter. We have discussed its features and benefits, how to install and use it, how to get a serial number for it, and how to troubleshoot common issues with it.
-
We have also provided some examples of serial numbers and activation codes for different products of FineCut/Coat9 for CorelDRAW. You can use these serial numbers and activation codes to activate your product and enjoy its full functionality.
-
Call to action and recommendation
-
If you are interested in FineCut/Coat9 for CorelDRAW, you can download it from Mimaki's website and try it for free for 30 days. You can also purchase a new product or upgrade from a previous version of FineCut CD.
-
We recommend you to use FineCut/Coat9 for CorelDRAW with Mimaki cutting plotters, such as CJV, UCJV, CG, CF and CFL series. These cutting plotters are designed to work seamlessly with FineCut/Coat9 and offer high-quality and versatile cutting performance.
-
Thank you for reading this article. We hope you have found it useful and informative. If you have any feedback or questions, please feel free to contact us or leave a comment below.
-
FAQs
-
-
Q: What is the difference between FineCut/Coat9 and FineCut8?
-
A: The main difference between FineCut/Coat9 and FineCut8 is that FineCut/Coat9 supports output to laser engraving machines of other manufacturers. It also has some new and improved functions, such as offset correction function, cut preview function, vectorization function, etc.
-
Q: Can I use FineCut/Coat9 with CorelDRAW in Macintosh version?
-
A: No, FineCut/Coat9 does not support CorelDRAW in Macintosh version. You can only use it with CorelDRAW in Windows version.
-
Q: Can I use FineCut/Coat9 with other graphic design software besides CorelDRAW?
-
A: Yes, you can use FineCut/Coat9 with Adobe Illustrator as well. You can download FineCut/Coat9 for Illustrator from Mimaki's website.
-
Q: How can I update my FineCut/Coat9 to the latest version?
-
A: You can update your FineCut/Coat9 to the latest version by downloading it from Mimaki's website and installing it on your computer. You don't need to enter a new serial key if you already have one.
-
Q: How can I get more information about FineCut/Coat9?
-
A: You can get more information about FineCut/Coat9 by visiting Mimaki's website or reading the user manual that comes with the software.
-
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Steam Activation Code - Get Your License Key.txt Now.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Steam Activation Code - Get Your License Key.txt Now.md
deleted file mode 100644
index 4785a69bb93897c1722312fd4d0643c96bd9ef4d..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Steam Activation Code - Get Your License Key.txt Now.md
+++ /dev/null
@@ -1,209 +0,0 @@
-
-
-
-
Ark: Survival Evolved - A Guide for Beginners
-
-
-
What is Ark: Survival Evolved?
-
-
-
A brief overview of the game's premise, genre, and features
-
-
-
Ark: Survival Evolved is an action-adventure survival game that was released in 2017 by Studio Wildcard. In this game, you play as a human who wakes up on a mysterious island full of dinosaurs, mythical creatures, natural hazards, and potentially hostile human players. Your goal is to survive by gathering resources, crafting tools, building shelters, taming creatures, exploring the island, and fighting against enemies. You can play solo or with other players in various modes, such as PvE (player versus environment), PvP (player versus player), or PvX (a mix of both). You can also customize your game experience by using mods that add new features, content, and options to the game.
-
-
-
How to get started in Ark: Survival Evolved
-
-
-
The basics of survival: gathering resources, crafting tools, building shelters, and taming creatures
-
-
-
-
Tips and tricks for finding and harvesting resources
-
-
-
Resources are essential for your survival in Ark: Survival Evolved, as they allow you to craft items, build structures, tame creatures, and more. However, resources are not evenly distributed across the map, and some are more rare and valuable than others. Here are some tips and tricks for finding and harvesting resources:
Use your map and explorer notes to locate resource-rich areas, such as mountains, caves, rivers, and swamps. You can also use a GPS or a Compass to navigate more easily.
-
Use the right tool for the right resource. For example, use a pick to get more flint from rocks, or a hatchet to get more wood from trees. You can also use specialized tools, such as a Metal Pick or a Chainsaw, to get more resources faster and more efficiently.
-
Use tamed creatures to help you gather resources. Some creatures have special abilities or bonuses for harvesting certain resources, such as the Ankylosaurus for metal, the Doedicurus for stone, the Castoroides for wood, or the Therizinosaur for fiber. You can also use flying creatures, such as the Argentavis or the Quetzal, to transport large amounts of resources.
-
Be aware of the weight limit of your inventory and your creatures. If you exceed the weight limit, you will move slower and consume more stamina. You can increase your weight capacity by leveling up your Weight stat or by using items such as a Backpack or a Parachute.
-
Be careful of the dangers that lurk in resource-rich areas. Some areas may have hostile creatures, environmental hazards, or enemy players that may attack you while you are gathering resources. Always be prepared for a fight or a flight, and have an escape plan in case things go wrong.
-
-
-
-
Tips and tricks for crafting useful items and weapons
-
-
-
Crafting is one of the core mechanics of Ark: Survival Evolved, as it allows you to create items and weapons that can help you survive and thrive in the game. However, crafting is not always straightforward, and you may need to learn some tips and tricks to craft more effectively. Here are some tips and tricks for crafting useful items and weapons:
-
-
Learn new Engrams as you level up. Engrams are blueprints that unlock new crafting recipes for items and weapons. You can choose which Engrams to learn by spending Engram Points that you earn by leveling up. You can also find Engrams in loot crates, explorer notes, or by defeating bosses.
-
Use different crafting stations to craft different items and weapons. Some items and weapons can be crafted in your inventory, such as a Stone Pick or a Spear. However, some items and weapons require a specific crafting station, such as a Mortar and Pestle, a Smithy, a Fabricator, or a Tek Replicator. You can also use crafting stations to repair your items and weapons.
-
Use blueprints to craft higher quality items and weapons. Blueprints are special versions of Engrams that can produce items and weapons with better stats and durability than normal ones. You can find blueprints in loot crates, explorer notes, or by defeating bosses. Blueprints have different quality levels, from Primitive to Ascendant.
-
Use mods to add new items and weapons to the game. Mods are user-created content that can enhance your game experience by adding new features, content, and customization options to the game. You can find mods on Steam Workshop or on other websites. Some of the most popular mods for Ark: Survival Evolved are Structures Plus (S+), Super Structures (SS), Awesome Spyglass!, Classic Flyers, Eco's Decor Mods, Primal Fear, Ark Eternal, Gaia, Extinction Core, Annunaki Genesis, Ark Additions: The Collection!, Pyria: Mythos Evolved ,and many more.
-
Use cheats or admin commands to spawn items and weapons instantly. Cheats or admin commands are special codes that can alter the game settings or give you access to items and weapons without having to craft them. However, using cheats or admin commands may disable achievements or affect your game balance. You can find a list of cheats or admin commands on this wiki or on other websites.
-
-
-
-
Tips and tricks for building and defending your base
-
-
-
Building and defending your base is another important aspect of Ark: Survival Evolved, as it provides you with a safe place to store your items, craft your weapons, breed your creatures, and more. However, building and defending your base is not easy, and you may need to learn some tips and tricks to do it well. Here are some tips and tricks for building and defending your base:
-
-
Choose a good location for your base. You want to find a spot that has access to resources, water, and flat land, but also has some natural defenses, such as cliffs, caves, or waterfalls. You also want to avoid areas that are too crowded, too dangerous, or too exposed to enemy attacks.
-
Use different materials and structures to build your base. You can start with basic materials, such as Thatch or Wood, but you should upgrade to stronger materials, such as Stone, Metal, or Tek, as soon as possible. You can also use different structures, such as Foundations, Walls, Ceilings, Doors, Windows, Stairs, Ramps, Pillars, Fences, Gates, Turrets, Traps, and more.
-
Use different designs and layouts to build your base. You can build your base in any shape or size you want, but you should consider some factors, such as functionality, aesthetics, and security. You can also use different designs and layouts, such as a Tower, a Castle, a Bunker, a Treehouse, a Cave Base, a Raft Base, a Platform Base, or a Floating Base.
-
Use different strategies and tactics to defend your base. You can use passive defenses, such as Walls, Gates, Turrets, Traps, and Spikes. You can also use active defenses, such as tamed creatures, weapons, explosives, or allies. You can also use stealth defenses, such as camouflage, decoys, or hidden entrances.
-
Use different modes and settings to protect your base. You can play on PvE servers or single-player mode if you don't want to deal with other players. You can also use settings such as Offline Raid Protection (ORP), Structure Resistance (SR), Structure Decay (SD), or Tribe Governance (TG) to adjust the rules and options for your base.
-
-
-
-
Tips and tricks for taming and riding dinosaurs and other creatures
-
-
-
Taming and riding dinosaurs and other creatures is one of the most fun and rewarding aspects of Ark: Survival Evolved, as it allows you to have loyal companions that can help you in various ways. However, taming and riding dinosaurs and other creatures is not easy, and you may need to learn some tips and tricks to do it well. Here are some tips and tricks for taming and riding dinosaurs and other creatures:
-
-
Use different methods and items to tame dinosaurs and other creatures. There are two main methods of taming: passive taming and knockout taming. Passive taming involves feeding the creature its preferred food while avoiding its aggression. Knockout taming involves knocking out the creature with tranquilizers or other means and then feeding it its preferred food while keeping it unconscious. You can also use items such as Bolas, Nets, Cages, Traps, Lassos, or Taming Pens to immobilize or capture the creature.
-
Use different foods and kibbles to tame dinosaurs and other creatures faster and more efficiently. Different creatures have different preferences for food and kibble. Food is any edible item that can fill the creature's hunger bar. Kibble is a special type of food that is made from eggs and other ingredients in a Cooking Pot or an Industrial Cooker. Kibble has higher taming effectiveness than regular food.
-
Use different saddles and platforms to ride dinosaurs and other creatures more comfortably and securely. Saddles are items that allow you to ride certain creatures once they are tamed. Saddles have different quality levels from Primitive to Ascendant that affect their armor value and durability. Platforms are special types of saddles that allow you to build structures on top of certain creatures.
-Use different commands and settings to manage your tamed creatures. Commands include options such as Enable/Disable Group, Set Aggression Level, Set Turret Mode, and more. Settings are options that you can adjust for your tamed creatures by using the radial menu or the options menu. Settings include Enable/Disable Ally Looking, Enable/Disable Resource Harvesting, Enable/Disable Victim Item Collection, Enable/Disable Allow Anyone To Imprint, Enable/Disable Public Seating, Enable/Disable Auto-Courtesy, Enable/Disable Auto-Engage, and more.
-
Use different stats and mutations to breed dinosaurs and other creatures more effectively and selectively. Stats are numerical values that represent the attributes of your tamed creatures, such as Health, Stamina, Oxygen, Food, Weight, Melee Damage, Movement Speed, Torpidity, and more. Mutations are random changes that occur in the stats or colors of your offspring when you breed two tamed creatures. Mutations can be beneficial or detrimental, and they can be inherited or not by the next generation.
-
-
-
-
How to enhance your experience in Ark: Survival Evolved
-
-
-
The benefits of playing with other players: forming tribes, trading, and cooperating
-
-
-
Playing with other players can enhance your experience in Ark: Survival Evolved by adding more fun, challenge, and diversity to the game. You can play with other players in various ways, such as forming tribes, trading, and cooperating. Here are some benefits of playing with other players:
-
-
Forming tribes can help you survive and thrive in the game by sharing resources, items, structures, creatures, and responsibilities with your tribe mates. You can also access tribe-only features, such as tribe chat, tribe log, tribe rank, tribe alliances, tribe wars, and more.
-
Trading can help you acquire resources, items, creatures, or services that you may not have or need in exchange for something that you have or don't need. You can trade with other players by using chat, voice chat, signs, mailboxes, trading posts, vending machines, or other means.
-
Cooperating can help you achieve goals that you may not be able to achieve alone or with your tribe mates. You can cooperate with other players by joining forces to explore the map, tame creatures, build structures, fight enemies, complete missions, defeat bosses, or participate in events.
-
-
-
-
The challenges of playing with other players: competing, raiding, and surviving
-
-
-
Playing with other players can also pose some challenges in Ark: Survival Evolved by adding more risk, conflict, and uncertainty to the game. You can play with other players in various ways, such as competing, raiding, and surviving. Here are some challenges of playing with other players:
-
-
Competing can test your skills and abilities in the game by measuring your performance against other players. You can compete with other players by comparing your stats, achievements, trophies, rankings, or scores. You can also participate in competitive modes, such as PvP, PvX, or Survival of the Fittest.
-
Raiding can challenge your strategy and tactics in the game by attacking or defending against other players' bases. You can raid other players by using weapons, explosives, creatures, or allies to breach their defenses and loot their resources, items, structures, or creatures. You can also defend against raids by using walls, gates, turrets, traps, creatures, or allies to repel their attacks and protect your assets.
-
Surviving can challenge your endurance and adaptability in the game by facing other players' threats and actions. You can survive with other players by avoiding or dealing with hostile players who may try to kill you, rob you, capture you, enslave you, or troll you. You can also survive in different environments and biomes that may have different weather conditions, temperatures, resources, creatures, or hazards.
-
-
-
-
The fun of playing with mods: adding new features, content, and customization
-
-
-
Playing with mods can enhance your experience in Ark: Survival Evolved by adding new features, content, and customization to the game. Mods are user-created content that can change or improve the game in various ways. You can play with mods by finding and installing them on Steam Workshop or on other websites. Here are some fun of playing with mods:
-
-
Adding new features can expand the gameplay and mechanics of the game by introducing new elements or options that are not available in the base game. For example, you can use mods that add new crafting systems, new combat systems, new building systems, new taming systems, new mission systems, new event systems, new difficulty settings, new game modes, and more.
-Adding new content can enrich your game experience by introducing new weapons, new armor, new vehicles, new structures, new skins, new hairstyles, new tattoos, new music, new sounds, new voiceovers, and more.
-
Adding new customization can personalize your game experience by allowing you to modify or adjust the game to your liking. For example, you can use mods that change the graphics, the UI, the controls, the camera, the animations, the effects, the colors, the fonts, the icons, the names, and more.
-
-
-
-
Conclusion
-
-
-
A summary of the main points of the article
-
-
-
In conclusion, Ark: Survival Evolved is a game that offers a lot of fun and challenge for players who enjoy action-adventure survival games. In this game, you can explore a mysterious island full of dinosaurs and other creatures, gather resources, craft items and weapons, build and defend your base, tame and ride creatures, play with other players or against them, and customize your game with mods. This article has provided you with some tips and tricks on how to get started in Ark: Survival Evolved, how to enhance your experience in Ark: Survival Evolved, and how to download ark survival evolved license key.txt file.
A call to action for the readers to try out the game or share their own tips and tricks
-
-
-
If you are interested in trying out Ark: Survival Evolved, you can download it from Steam or other platforms. You can also download ark survival evolved license key.txt file from this link to activate the game. However, be careful of fake or malicious links that may harm your computer or steal your information. Always use a trusted source and scan your files before opening them.
-
If you have already played Ark: Survival Evolved or have your own tips and tricks to share, feel free to leave a comment below or join our community on Discord . We would love to hear from you and learn from your experience. Thank you for reading this article and have fun playing Ark: Survival Evolved!
-
-
-
FAQs
-
-
-
Q: How do I download ark survival evolved license key.txt file?
-
-
-
A: You can download ark survival evolved license key.txt file from this link . However, be careful of fake or malicious links that may harm your computer or steal your information. Always use a trusted source and scan your files before opening them.
-
-
-
Q: How do I install mods for Ark: Survival Evolved?
-
-
-
A: You can install mods for Ark: Survival Evolved by using Steam Workshop or other websites. To use Steam Workshop, you need to subscribe to the mod you want to use and then launch the game. The mod will be automatically downloaded and installed. To use other websites, you need to download the mod file and then copy it to the Mods folder in your game directory. You can also use mod managers, such as Ark Server Manager or Ark Mod Downloader, to make the process easier.
-
-
-
Q: How do I join a tribe in Ark: Survival Evolved?
-
-
-
A: You can join a tribe in Ark: Survival Evolved by either creating your own tribe or joining an existing one. To create your own tribe, you need to access the Tribe Manager menu and then click on Create New Tribe. You can then invite other players to join your tribe by using the Invite To Tribe option in the radial menu. To join an existing tribe, you need to receive an invitation from a tribe member or leader and then accept it. You can also request to join a tribe by using the Request To Join Tribe option in the radial menu.
-
-
-
Q: How do I tame a dinosaur in Ark: Survival Evolved?
-
-
-
A: You can tame a dinosaur in Ark: Survival Evolved by either using passive taming or knockout taming. Passive taming involves feeding the dinosaur its preferred food while avoiding its aggression. Knockout taming involves knocking out the dinosaur with tranquilizers or other means and then feeding it its preferred food while keeping it unconscious. You can also use items such as Bolas, Nets, Cages, Traps, Lassos, or Taming Pens to immobilize or capture the dinosaur.
-
-
-
Q: How do I breed dinosaurs in Ark: Survival Evolved?
-
-
-
A: You can breed dinosaurs in Ark: Survival Evolved by mating two tamed dinosaurs of the same species and opposite gender. To mate them, you need to enable mating on both of them and then place them near each other. They will then produce a fertilized egg or a gestation bar, depending on their type. You need to incubate the egg or wait for the gestation bar to fill up until the baby dinosaur is born. You then need to imprint and raise the baby dinosaur until it reaches adulthood.
-
-
-
\ No newline at end of file
diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla APK Chromebook Everything You Need to Know About the Free Platform Fighter.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla APK Chromebook Everything You Need to Know About the Free Platform Fighter.md
deleted file mode 100644
index 8fba4b5d71f3de2cfb25d3e0782be7025d7368ee..0000000000000000000000000000000000000000
--- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawlhalla APK Chromebook Everything You Need to Know About the Free Platform Fighter.md
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
How to Play Brawlhalla on Your Chromebook
-
If you are looking for a fun and free platform fighting game that supports cross-play with millions of players on various devices, you might want to check out Brawlhalla. Brawlhalla is a 2D game that features over 50 unique characters, various game modes, and frequent updates. In this article, we will show you how to play Brawlhalla on your Chromebook by installing the APK file of the game.
Brawlhalla is a free-to-play game developed by Blue Mammoth Games and published by Ubisoft. It was released in 2017 for Windows, macOS, PlayStation 4, Xbox One, Nintendo Switch, iOS, and Android. It is one of the most popular platform fighting games, with over 100 million players as of 2023.
-
Brawlhalla game features
-
Some of the features that make Brawlhalla stand out are:
-
-
An immersive action game that takes fighting to the next level
-
Explore the halls of Valhalla and cherish its splendor
-
Supports up to eight players online in a single match
-
Advance the ranking ladder by taking on foes alone
-
A game training room to hone your skills in fighting
-
Over 50 unique characters (and counting) to choose from
-
No pay-to-win advantages or in-game purchases
-
-
Brawlhalla game modes
-
Brawlhalla offers various game modes for different preferences and play styles. Some of the game modes are:
-
-
Online Ranked 1v1: Climb the ranked ladder from Tin up to Platinum and beyond. Fight enemies with swords, axes, hammers, blasters, rocket lances and more.
-
Online Ranked 2v2: Team up with a friend or find a partner online. Increase your clan's rank and glory.
-
4 Player Online Free for All: Casual matches where four fighters enter, but only one can win.
-
Cross-play Custom Rooms: Invite up to 8 friends on PC, Xbox, PS4, Nintendo Switch, iOS or Android to a huge variety of custom matches.
-
Many Special Game Modes: Brawlball, Bombsketball, Capture the Flag, Kung-Foot, and many more.
-
The Training Room: Practice combos and setups inside the Training Room. Look at detailed frame data, hitboxes, hurtboxes, and sharpen your skills.
-
-
How to Install Brawlhalla APK on Chromebook
-
To play Brawlhalla on your Chromebook, you will need to install the APK file of the game. APK files are Android application packages that contain all the files and resources needed to run an app on an Android device. However, Chromebooks do not support APK files natively, so you will need to enable some settings and use some tools to install them.
-
Enable developer mode and unknown sources
-
The first step is to enable developer mode on your Chromebook. Developer mode allows you to access more features and settings on your device, such as installing APK files. However, it also removes some security protections and may void your warranty, so proceed with caution. To enable developer mode on your Chromebook, follow these steps:
-
-
Turn off your Chromebook by holding down the power button.
-
Press and hold the Esc + Refresh keys (the circular arrow key above the number 3), then press the power button.
-
You will see a screen that says "Chrome OS is missing or damaged". Press Ctrl + D to enter developer mode.
-
Press Enter to confirm. You will see a screen that says "To turn OS verification off, press Enter. Your system will reboot and local data will be cleared. To go back, press Esc." Press Enter again.
-
Your Chromebook will reboot and show a screen that says "OS verification is off". Press Ctrl + D to continue.
-
Your Chromebook will erase all your local data and prepare for developer mode. This may take a few minutes.
-
Once the process is done, your Chromebook will reboot and show the same "OS verification is off" screen. Press Ctrl + D again or wait for 30 seconds to boot into developer mode.
-
Once you are in developer mode, you will see a screen that says "Welcome!". Select your language and keyboard settings, then click "Let's go".
-
Connect to a network and agree to the terms and conditions. Then, sign in with your Google account or create one if you don't have one.
-
You will see a screen that says "You're almost done". Click on "Turn on Google Play Store".
-
You will see a screen that says "Get your Android apps". Click on "More" and then "Accept".
-
You will see a screen that says "Google Play Store is ready to use". Click on "Got it".
-
-
Download and install APK file
-
The next step is to download and install the APK file of Brawlhalla on your Chromebook. You can use any file manager Android app from the Play Store to do this. We recommend using Solid Explorer, as it is easy to use and has a lot of features. To download and install the APK file of Brawlhalla, follow these steps:
-
-
Open the Play Store app on your Chromebook and search for Solid Explorer. Install the app and open it.
-
In Solid Explorer, tap on the hamburger icon on the top left corner and select "Storage".
-
Tap on "Downloads" and you will see all the files you have downloaded on your Chromebook.
-
If you have not downloaded the APK file of Brawlhalla yet, you can do so by opening a new tab in Chrome and going to APKMirror.com. Search for Brawlhalla and download the latest version of the game.
-
Once the download is complete, go back to Solid Explorer and tap on the APK file of Brawlhalla. You will see a pop-up that says "Do you want to install this application?" Tap on "Install".
-
You will see another pop-up that says "For your security, your phone is not allowed to install unknown apps from this source." Tap on "Settings".
-
You will see a screen that says "Install unknown apps". Toggle on the switch next to "Allow from this source".
-
Go back to Solid Explorer and tap on the APK file of Brawlhalla again. Tap on "Install" again.
-
You will see a screen that shows the progress of the installation. Wait for it to finish.
-
Once the installation is done, you will see a screen that says "App installed". Tap on "Open" to launch the game.
-
-
Launch and enjoy the game
-
The final step is to launch and enjoy Brawlhalla on your Chromebook. You can play the game with your keyboard, mouse, or touchpad, or connect a controller if you prefer. To launch and enjoy Brawlhalla, follow these steps:
-
-
-
If you have not opened the game yet, you can do so by going to the launcher app on your Chromebook and finding Brawlhalla under the Play Store section. Tap on it to open it.
-
You will see a screen that says "Brawlhalla requires access to photos/media/files on your device." Tap on "Allow".
-
You will see another screen that says "Brawlhalla would like to access this device's location." Tap on "Allow only while using the app".
-
You will see a loading screen with the Brawlhalla logo. Wait for it to load.
-
You will see a welcome screen with some tips and news about the game. Tap on "Play Now".
-
You will see a screen that asks you to choose your region. Select the one that is closest to you for better performance.
-
You will see a screen that asks you to create or link an Ubisoft account.
You can create or link an Ubisoft account to enjoy some benefits, such as saving your progress, unlocking rewards, and accessing exclusive content. You can also skip this step if you want to play as a guest.
-
You will see the main menu of the game, where you can choose from various options, such as online play, offline play, training room, store, settings, and more.
-
Select the option that suits your preference and start playing Brawlhalla on your Chromebook. Have fun!
-
-
Conclusion
-
Brawlhalla is a free and fun platform fighting game that you can play on your Chromebook by installing the APK file of the game. You will need to enable developer mode and unknown sources on your device, download and install the APK file using a file manager app, and launch and enjoy the game. You can also create or link an Ubisoft account to get some extra benefits. Brawlhalla supports cross-play with millions of players on various devices, so you can join your friends or make new ones in this epic game.
-
FAQs
-
Is Brawlhalla safe to play on Chromebook?
-
Yes, Brawlhalla is safe to play on Chromebook as long as you download the APK file from a trusted source, such as APKMirror.com. You should also be careful when enabling developer mode and unknown sources on your device, as they may expose your device to some risks.
-
Can I play Brawlhalla with a controller on Chromebook?
-
Yes, you can play Brawlhalla with a controller on Chromebook if you have a compatible controller that can connect to your device via Bluetooth or USB. You can also customize the controller settings in the game menu.
-
How do I update Brawlhalla on Chromebook?
-
To update Brawlhalla on Chromebook, you will need to download and install the latest version of the APK file from APKMirror.com or another trusted source. You can also check for updates in the game menu.
-
How do I uninstall Brawlhalla on Chromebook?
-
To uninstall Brawlhalla on Chromebook, you will need to go to the launcher app on your device and find Brawlhalla under the Play Store section. Right-click on it and select "Uninstall". You can also delete the APK file from your downloads folder using Solid Explorer or another file manager app.
-
How do I contact Brawlhalla support?
-
If you have any issues or questions about Brawlhalla, you can contact Brawlhalla support by visiting their official website at brawlhalla.com and clicking on "Support" at the bottom of the page. You can also follow them on social media platforms, such as Twitter, Facebook, Instagram, YouTube, Twitch, and Discord.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Caribbean Treasures A Game that Will Keep You Hooked - Download and Play Now!.md b/spaces/1phancelerku/anime-remove-background/Caribbean Treasures A Game that Will Keep You Hooked - Download and Play Now!.md
deleted file mode 100644
index ed06264d85b8f3a3f18b73e5b49661b2985c5df2..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Caribbean Treasures A Game that Will Keep You Hooked - Download and Play Now!.md
+++ /dev/null
@@ -1,99 +0,0 @@
-
-
Caribbean Treasures Download: How to Play and Win Big
-
If you are looking for a fun and rewarding way to spend your time, you might want to check out Caribbean Treasures. Caribbean Treasures is an online gaming platform that offers exciting fish games and slot games that you can play anytime, anywhere. In this article, we will tell you everything you need to know about Caribbean Treasures, how to download and play it, and how to win big with it.
-
What is Caribbean Treasures?
-
Caribbean Treasures is an online gaming platform that offers two types of games: fish games and slot games. Both types of games are designed to test your skills and luck, and reward you with big treasures. Here are some features of each type of game:
-
An exciting fish game
-
The fish game is a shooting game where you have to aim and fire at various fish and sea creatures on the screen. The more fish you catch, the more coins you earn. You can also encounter special fish that give you extra coins, bonuses, or multipliers. The fish game has different levels of difficulty, from easy to hard, and different modes, such as single-player or multiplayer. You can also customize your cannon and use different power-ups to enhance your gameplay.
-
A variety of slot games
-
The slot games are classic casino games where you have to spin the reels and match symbols to win prizes. The slot games have different themes, such as fruits, animals, pirates, or ancient civilizations. The slot games also have different features, such as wilds, scatters, free spins, or bonus rounds. The slot games have different paylines, from 5 to 50, and different bet sizes, from 0.01 to 5 coins per line.
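To make those ranges concrete, here is a small illustrative Python sketch of my own (not code from the game, and not its documented rules) showing how the number of paylines and the bet per line combine into the cost of a single spin:

```python
# Illustrative only: total cost of one spin, given the payline and bet ranges quoted above.
def total_stake(paylines: int, bet_per_line: float) -> float:
    assert 5 <= paylines <= 50, "the article quotes 5 to 50 paylines"
    assert 0.01 <= bet_per_line <= 5, "the article quotes 0.01 to 5 coins per line"
    return round(paylines * bet_per_line, 2)

print(total_stake(20, 0.05))  # 1.0 coin per spin
print(total_stake(50, 5))     # 250.0 coins per spin at the maximum settings
```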
-
How to download and play Caribbean Treasures?
-
Downloading and playing Caribbean Treasures is very easy. Here are the steps you need to follow:
-
caribbean treasures game download
-caribbean treasures slot machine download
-caribbean treasures fish games download
-caribbean treasures reflexive entertainment download
-caribbean treasures free download full version
-caribbean treasures pc game download
-caribbean treasures online slots download
-caribbean treasures emotion rays download
-caribbean treasures software informer download
-caribbean treasures internet archive download
-caribbean treasures match 3 game download
-caribbean treasures hidden object game download
-caribbean treasures deluxe edition download
-caribbean treasures windows 10 download
-caribbean treasures mac game download
-caribbean treasures android game download
-caribbean treasures ios game download
-caribbean treasures mobile game download
-caribbean treasures apk download
-caribbean treasures mod apk download
-caribbean treasures hack apk download
-caribbean treasures cheats apk download
-caribbean treasures unlimited coins apk download
-caribbean treasures free spins apk download
-caribbean treasures bonus codes apk download
-caribbean treasures no deposit apk download
-caribbean treasures play for fun apk download
-caribbean treasures play for real apk download
-caribbean treasures play offline apk download
-caribbean treasures play online apk download
-caribbean treasures multiplayer apk download
-caribbean treasures live casino apk download
-caribbean treasures jackpot apk download
-caribbean treasures progressive jackpot apk download
-caribbean treasures mega jackpot apk download
-caribbean treasures big win apk download
-caribbean treasures mega win apk download
-caribbean treasures super win apk download
-caribbean treasures epic win apk download
-caribbean treasures tips and tricks apk download
-caribbean treasures strategy guide apk download
-caribbean treasures walkthrough apk download
-caribbean treasures review apk download
-caribbean treasures ratings apk download
-caribbean treasures testimonials apk download
-caribbean treasures customer support apk download
-caribbean treasures contact us apk download
-caribbean treasures sign up for free account apk download
-caribbean treasures how to play apk download
-
Download the game for Android or iOS
-
You can download the game for your mobile device from the official website. The game is compatible with both Android and iOS devices. The download is free and fast. You can also download the game for your desktop computer if you prefer.
-
Sign up for a free account
-
After downloading the game, you need to sign up for a free account. You don't need a credit card to sign up. You just need to provide some basic information, such as your name, email address, phone number, and username. The support team will activate your account and contact you with details. You can also contact them anytime if you have any questions or issues.
-
Choose your game and start playing
-
Once you have your account ready, you can choose your game and start playing. You can switch between the fish game and the slot game anytime you want. You can also choose from different rooms or tables depending on your preference. You can play with real money or with virtual coins. You can also play with other players or by yourself.
-
How to win big with Caribbean Treasures?
-
Playing Caribbean Treasures is not only fun but also rewarding. Here are some tips on how to win big with Caribbean Treasures:
-
Use your skills and strategy
-
The fish game and the slot game both require some skills and strategy to win. For the fish game, you need to aim carefully, fire wisely, and avoid wasting bullets. You also need to know which fish are worth more coins, which fish have special effects, and when to use power-ups. For the slot game, you need to know how to adjust your bet size, how to choose the best paylines, and how to trigger the bonus features. You also need to manage your bankroll and set a limit for your losses and wins.
-
Take advantage of bonuses and promotions
-
Caribbean Treasures offers various bonuses and promotions to its players. You can get a welcome bonus when you sign up, a deposit bonus when you make your first deposit, a referral bonus when you invite your friends, and a loyalty bonus when you play regularly. You can also get free coins, free spins, or free tickets from time to time. You can use these bonuses and promotions to increase your chances of winning and have more fun.
-
Join the VIP club for more rewards
-
If you want to enjoy more benefits and rewards, you can join the VIP club of Caribbean Treasures. The VIP club is a membership program that gives you access to exclusive offers, discounts, gifts, and events. You can also get higher payouts, faster withdrawals, and better customer service. You can join the VIP club by earning points from playing the games or by paying a monthly fee.
-
Conclusion
-
Caribbean Treasures is an online gaming platform that offers exciting fish games and slot games that you can play anytime, anywhere. You can download the game for free for your mobile device or desktop computer. You can sign up for a free account and start playing with real money or virtual coins. You can use your skills and strategy, take advantage of bonuses and promotions, and join the VIP club to win big with Caribbean Treasures. So what are you waiting for? Download Caribbean Treasures today and discover the hidden treasures of the Caribbean!
-
FAQs
-
Here are some frequently asked questions about Caribbean Treasures:
-
-
Is Caribbean Treasures safe and secure?
-
Yes, Caribbean Treasures is safe and secure. The game uses advanced encryption technology to protect your personal and financial information. The game also follows fair gaming practices and has a random number generator to ensure fair outcomes.
-
How can I contact the support team of Caribbean Treasures?
-
You can contact the support team of Caribbean Treasures by email, phone, or live chat. The support team is available 24/7 to assist you with any questions or issues you may have.
-
Can I play Caribbean Treasures with my friends?
-
Yes, you can play Caribbean Treasures with your friends. You can invite your friends to join the game and play together in the multiplayer mode of the fish game. You can also chat with your friends and other players in the game.
-
What are the minimum requirements to play Caribbean Treasures?
-
The minimum requirements to play Caribbean Treasures are as follows:
-
-
| Device | Operating System | Memory |
| --- | --- | --- |
| Android | 4.4 or higher | 1 GB or higher |
| iOS | 9.0 or higher | 1 GB or higher |
| Desktop | Windows 7 or higher / Mac OS X 10.10 or higher | 2 GB or higher |
-
-
How can I withdraw my winnings from Caribbean Treasures?
-
You can withdraw your winnings from Caribbean Treasures by using one of the following methods: PayPal, Skrill, Neteller, Bitcoin, or bank transfer. The minimum withdrawal amount is $20 and the maximum withdrawal amount is $10,000 per day. The withdrawal process may take up to 48 hours depending on the method you choose.
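As a quick illustration of those limits (a sketch of my own, not code from Caribbean Treasures), a withdrawal request would have to satisfy both the $20 minimum and the $10,000 daily cap:

```python
# Illustrative check against the limits quoted above ($20 minimum, $10,000 per day).
MIN_WITHDRAWAL = 20
MAX_PER_DAY = 10_000

def can_withdraw(amount: float, withdrawn_today: float = 0) -> bool:
    return amount >= MIN_WITHDRAWAL and withdrawn_today + amount <= MAX_PER_DAY

print(can_withdraw(15))           # False: below the minimum
print(can_withdraw(500, 9_800))   # False: would exceed the daily cap
print(can_withdraw(500, 9_000))   # True
```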
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Chess Online APK Download Play with Millions of Players Worldwide.md b/spaces/1phancelerku/anime-remove-background/Chess Online APK Download Play with Millions of Players Worldwide.md
deleted file mode 100644
index 9f5f479b600f11fc6dc69c9d456c5e46ebd57851..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Chess Online APK Download Play with Millions of Players Worldwide.md
+++ /dev/null
@@ -1,146 +0,0 @@
-
-
Chess Online APK Download: How to Play Chess on Your Android Device
-
Chess is one of the oldest and most popular board games in the world. It is a game of strategy, logic, and skill that can challenge your mind and improve your cognitive abilities. Whether you are a beginner or a master, chess can offer you endless hours of fun and entertainment.
But what if you don't have a chess board or a partner to play with? What if you want to play chess anytime and anywhere, without any hassle or cost? Well, there is a solution for that: chess online apk.
-
Introduction
-
What is chess online apk?
-
Chess online apk is an application that allows you to play chess online on your Android device. It is not an official app from any chess organization or website, but rather a third-party app that connects you to various chess servers and platforms. You can download it for free from different sources on the internet, but you need to be careful about the quality and security of the apk file.
-
Why play chess online on your Android device?
-
Playing chess online on your Android device has many advantages over playing on a physical board or a computer. Here are some of them:
-
-
You can play chess anytime and anywhere, as long as you have an internet connection and a charged battery.
-
You can play with millions of players from around the world, with different skill levels and styles.
-
You can improve your skills with puzzles, lessons, and analysis tools that are available on the app.
-
You can customize your board, pieces, and themes according to your preferences.
-
You can save your games and review them later.
-
You can chat with your opponents and make new friends.
-
-
How to download and install chess online apk
-
Step 1: Find a reliable source for the apk file
-
The first step to download and install chess online apk is to find a reliable source for the apk file. There are many websites that offer free downloads of chess online apk, but not all of them are safe and trustworthy. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before downloading anything from the internet.
-
One of the best sources for chess online apk is Chess.com, which is the #1 free chess app in the world. It has over 50 million users and offers a variety of features and benefits for chess lovers. You can download the apk file from their official website or from other reputable sites like [APKPure](https://apkpure.com/chess-play-learn/com.chess) or [APKMirror](https://www.apkmirror.com/apk/chess-com/chess-play-learn/).
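Because so much depends on the APK file itself being trustworthy, one simple habit is to compare the downloaded file's checksum against the one published by the download site, when it provides one. Here is a generic Python sketch of my own (not tied to any of the sites above; the filename is only an example):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the SHA-256 digest of a downloaded file in streaming chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

# Compare this value with the checksum listed on the download page, if one is given.
print(sha256_of("chess-online.apk"))
```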
-
Step 2: Enable unknown sources on your device settings
-
The next step is to enable unknown sources on your device settings. This will allow you to install apps that are not from the Google Play Store or other official sources. To do this, follow these steps:
-
chess online apk download free
-chess online apk download latest version
-chess online apk download for android
-chess online apk download 2023
-chess online apk download mod
-chess online apk download offline
-chess online apk download unlimited
-chess online apk download 3d
-chess online apk download with friends
-chess online apk download no ads
-chess online apk download hack
-chess online apk download pro
-chess online apk download premium
-chess online apk download full
-chess online apk download cracked
-chess online apk download update
-chess online apk download old version
-chess online apk download new version
-chess online apk download beta
-chess online apk download original
-chess online apk download best
-chess online apk download top
-chess online apk download android 11
-chess online apk download android 10
-chess online apk download android 9
-chess online apk download android 8
-chess online apk download android 7
-chess online apk download android 6
-chess online apk download android 5
-chess online apk download android 4.4.4
-chess online apk download for pc
-chess online apk download for laptop
-chess online apk download for windows 10
-chess online apk download for windows 7
-chess online apk download for windows 8.1
-chess online apk download for macbook pro
-chess online apk download for macbook air
-chess online apk download for mac os x
-chess online apk download for ios
-chess online apk download for iphone x
-chess online apk download for iphone 11 pro max
-chess online apk download for ipad pro
-chess online apk download for ipad air
-chess online apk download for ipad mini
-chess online apk download for fire tablet
-chess online apk download for kindle fire
-chess online apk download for chromebook
-chess online apk download from google play store
-chess online apk download from apkpure
-
-
Go to your device settings, find the security option, and tap on it.
-
Find the option that says unknown sources or install unknown apps and toggle it on.
-
A warning message will pop up, telling you the risks of installing apps from unknown sources. Read it carefully and tap on OK if you agree.
-
-
Step 3: Download and install the apk file
-
The final step is to download and install the apk file. To do this, follow these steps:
-
-
Go to the website where you found the chess online apk file and tap on the download button.
-
Wait for the download to finish and then open the file manager app on your device.
-
Find the downloaded apk file and tap on it to start the installation process.
-
Follow the instructions on the screen and grant the necessary permissions for the app to run.
-
Once the installation is complete, you can launch the app and start playing chess online.
-
-
How to play chess online with the app
-
Step 1: Create an account or log in with an existing one
-
Before you can play chess online with the app, you need to create an account or log in with an existing one. This will allow you to access all the features and benefits of the app, such as saving your games, joining tournaments, earning ratings, and more. To do this, follow these steps:
-
-
Open the app and tap on the menu icon on the top left corner of the screen.
-
Tap on sign up or log in, depending on whether you have an account or not.
-
If you are signing up, enter your email address, username, password, and country. You can also sign up with your Facebook or Google account.
-
If you are logging in, enter your email address or username and password. You can also log in with your Facebook or Google account.
-
Tap on the sign up or log in button and wait for the confirmation message.
-
-
Step 2: Choose a game mode and a skill level
-
After creating an account or logging in, you can choose a game mode and a skill level that suits your preferences. There are four game modes available on the app: play online, play offline, puzzles, and lessons. You can also choose a skill level from beginner to expert, depending on how confident you are in your chess abilities. To do this, follow these steps:
-
-
Tap on the game mode that you want to play. For example, if you want to play online, tap on play online.
-
Select a skill level that matches your level of expertise. For example, if you are a beginner, tap on beginner.
-
You will see a list of available opponents that match your skill level. You can also filter them by rating, time control, color preference, and more.
-
Tap on an opponent that you want to play with and wait for them to accept your challenge.
-
-
Step 3: Start playing and enjoy the game
-
The last step is to start playing and enjoy the game. You will see a chess board with your pieces and your opponent's pieces on it. You can move your pieces by tapping on them and then tapping on the square where you want to move them. You can also chat with your opponent by tapping on the chat icon on the bottom right corner of the screen. To play chess online with the app, follow these steps:
-
-
Make your move by tapping on a piece and then tapping on a square where you want to move it.
-
Wait for your opponent to make their move. You will see their move on the board and hear a sound notification.
-
Continue making moves until one of you wins, loses, or draws the game. You can also offer or accept a draw by tapping on the draw icon on the top right corner of the screen.
-
When the game is over, you will see a summary of the game with your rating changes, statistics, analysis, and more. You can also rematch your opponent by tapping on the rematch button or find a new opponent by tapping on the new game button.
-
-
Features and benefits of chess online apk
-
Play with millions of players from around the world
-
One of the main features and benefits of chess online apk is that you can play with millions of players from around the world, with different skill levels and styles. You can find opponents that match your rating, time control, color preference, and more. You can also join tournaments, clubs, teams, and events that are organized by the app or by other users. You can also challenge your friends or invite them to play with you by using the app's social features.
-
Improve your skills with puzzles, lessons, and analysis tools
-
Another feature and benefit of chess online apk is that you can improve your skills with puzzles, lessons, and analysis tools that are available on the app. You can solve thousands of puzzles that are tailored to your skill level and help you practice different aspects of the game, such as tactics, strategy, endgames, and more. You can also learn from hundreds of lessons that are taught by expert coaches and cover various topics, such as openings, middlegames, endgames, and more. You can also use the app's analysis tools to review your games, find your mistakes, and learn from them. You can also access a powerful chess engine that can evaluate any position and suggest the best moves.
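The app's built-in engine is not something you call directly, but if you are curious what "engine analysis" looks like under the hood, here is a rough sketch using the separate python-chess library together with a UCI engine such as Stockfish. Both are my own assumptions for illustration and are not part of this app:

```python
import chess
import chess.engine

board = chess.Board()
board.push_san("e4")  # analyse the position after 1. e4

# Assumes a Stockfish binary is installed and available on the PATH.
engine = chess.engine.SimpleEngine.popen_uci("stockfish")
info = engine.analyse(board, chess.engine.Limit(depth=12))
print(info["score"])           # engine evaluation of the position
print(info.get("pv", [])[:3])  # first few moves of the suggested line
engine.quit()
```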
-
Customize your board, pieces, and themes
-
A third feature and benefit of chess online apk is that you can customize your board, pieces, and themes according to your preferences. You can choose from a variety of board styles, piece sets, and themes that suit your taste and mood. You can also adjust the sound effects, the animation speed, the board orientation, and more. You can also create your own custom board and pieces by using the app's editor feature.
-
Conclusion
-
Summary of the main points
-
In conclusion, chess online apk is an application that allows you to play chess online on your Android device. It is not an official app from any chess organization or website, but rather a third-party app that connects you to various chess servers and platforms. You can download it for free from different sources on the internet, but you need to be careful about the quality and security of the apk file.
-
Chess online apk has many features and benefits that make it a great choice for chess lovers. You can play with millions of players from around the world, improve your skills with puzzles, lessons, and analysis tools, and customize your board, pieces, and themes. You can also save your games, join tournaments, chat with your opponents, and more.
-
Call to action
-
If you are interested in playing chess online on your Android device, you should give chess online apk a try. It is easy to download and install, and it will provide you with endless hours of fun and entertainment. You will also be able to improve your chess skills and meet new friends along the way. So what are you waiting for? Download chess online apk today and start playing chess online!
-
FAQs
-
Here are some frequently asked questions about chess online apk:
-
-
Is chess online apk safe to use?
-
Chess online apk is safe to use as long as you download it from a reliable source like Chess.com, APKPure, or APKMirror. These sources have verified the apk file and ensured that it does not contain any viruses, malware, or spyware that can harm your device or steal your personal information. However, you should always be careful and do some research before downloading anything from the internet, as there may be some fake or malicious sources that try to trick you.
-
How can I update chess online apk?
-
Chess online apk is updated regularly by the developers to fix bugs, improve performance, and add new features. You can check for updates by opening the app and tapping on the menu icon on the top left corner of the screen. Then, tap on settings and look for the update option. If there is an update available, you can tap on it and download it. Alternatively, you can also check for updates by visiting the website where you downloaded the apk file and looking for the latest version.
-
Can I play chess online apk offline?
-
Chess online apk requires an internet connection to play online with other players or access some of the features and benefits of the app. However, you can also play chess online apk offline by choosing the play offline game mode. This will allow you to play against the app's artificial intelligence, which has different skill levels and personalities. You can also access some of the puzzles and lessons that are available offline.
-
Can I play chess online apk on other devices?
-
Chess online apk is designed for Android devices, but you can also play it on other devices that support Android applications. For example, you can play chess online apk on your Windows PC or laptop by using an Android emulator like [BlueStacks](https://www.bluestacks.com/) or [NoxPlayer](https://www.bignox.com/). You can also play chess online apk on your iOS device by using an app like [iAndroid](https://apps.apple.com/us/app/iandroid/id1447357030) or [Appetize.io](https://appetize.io/). However, you may experience some compatibility issues or performance problems when playing chess online apk on other devices.
-
What are some alternatives to chess online apk?
-
If you are looking for some alternatives to chess online apk, you may want to try some of these apps:
-
-
[Lichess](https://lichess.org/): A free and open-source chess app that offers a variety of game modes, features, and benefits. You can play with millions of players from around the world, join tournaments, watch live games, learn from lessons, and more.
-
[Chess24](https://chess24.com/): A premium chess app that offers a high-quality chess experience. You can play with top players from around the world, watch live broadcasts, access a huge database of games, and more.
-
[Chess.com](https://www.chess.com/): The #1 free chess app in the world that also has an official app for Android devices. You can enjoy all the features and benefits of chess online apk and more with this app.
-
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 APK MOD Data for Android (Free Shopping).md b/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 APK MOD Data for Android (Free Shopping).md
deleted file mode 100644
index 56528d6251440220f0ae4300f910e9e5f87579a9..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download NBA 2K20 APK MOD Data for Android (Free Shopping).md
+++ /dev/null
@@ -1,88 +0,0 @@
-
-
Download NBA 2K20 Mod APK Revdl: How to Enjoy the Best Basketball Game on Your Android Device
-
If you are a fan of basketball and want to experience the thrill of playing with your favorite NBA stars on your mobile device, then you should definitely check out NBA 2K20. This is one of the most popular and realistic basketball games available for Android devices, with stunning graphics, smooth gameplay, and tons of features. However, if you want to enjoy the game to the fullest, you might want to download NBA 2K20 Mod APK Revdl, which is a modified version of the game that offers free shopping, unlimited VC, and more. In this article, we will tell you what NBA 2K20 is, what are the benefits of NBA 2K20 Mod APK Revdl, and how to download and install it on your device.
-
What is NBA 2K20?
-
NBA 2K20 is a basketball simulation game developed by Visual Concepts and published by 2K Sports. It is the 21st installment in the NBA 2K franchise and the successor to NBA 2K19. The game features various modes that allow you to play as your favorite NBA players, create your own custom player, or run your own team. Some of the modes include:
-
Features of NBA 2K20
-
- 5 new NBA Stories
-
This mode lets you follow the careers of five legendary NBA players, such as Kobe Bryant, Tim Duncan, Kevin Garnett, Dirk Nowitzki, and Dwyane Wade. You can relive their memorable moments, challenges, and achievements in this mode.
-
- A new MyCAREER storyline
-
This mode lets you create your own custom player and guide him through his journey from high school to college to the NBA. You can choose your position, skills, attributes, appearance, and personality. You can also interact with other characters, make decisions that affect your career path, and earn endorsements and fans.
-
download nba 2k20 mod apk rexdl
-download nba 2k20 mod apk unlimited money
-download nba 2k20 mod apk obb
-download nba 2k20 mod apk android 1
-download nba 2k20 mod apk latest version
-download nba 2k20 mod apk offline
-download nba 2k20 mod apk free shopping
-download nba 2k20 mod apk data
-download nba 2k20 mod apk no verification
-download nba 2k20 mod apk andropalace
-download nba 2k20 mod apk highly compressed
-download nba 2k20 mod apk unlimited vc
-download nba 2k20 mod apk full unlocked
-download nba 2k20 mod apk for pc
-download nba 2k20 mod apk happymod
-download nba 2k20 mod apk with commentary
-download nba 2k20 mod apk mega
-download nba 2k20 mod apk mediafıre
-download nba 2k20 mod apk all star
-download nba 2k20 mod apk zippyshare
-download nba 2k20 mod apk update
-download nba 2k20 mod apk cheat
-download nba 2k20 mod apk real faces
-download nba 2k20 mod apk blacktop mode
-download nba 2k20 mod apk pure
-download nba 2k20 mod apk mirror
-download nba 2k20 mod apk gameplay
-download nba 2k20 mod apk hack
-download nba 2k20 mod apk cracked
-download nba 2k20 mod apk original
-download nba 2k20 mod apk vip
-download nba 2k20 mod apk new roster
-download nba 2k20 mod apk direct link
-download nba 2k20 mod apk google drive
-download nba 2k20 mod apk best settings
-download nba 2k20 mod apk no root
-download nba 2k20 mod apk unlimited coins
-download nba 2k20 mod apk file
-download nba 2k20 mod apk online mode
-download nba 2k20 mod apk for ios
-
- An all-new Run The Streets mode
-
This mode lets you take your MyPLAYER to the streets and compete in 3-on-3 tournaments around the world. You can earn rewards, upgrade your skills, and climb the leaderboards in this mode.
-
- 5-on-5 basketball with current or all-time great NBA teams
-
This mode lets you play as any of the current or historic NBA teams, such as the Los Angeles Lakers, the Chicago Bulls, the Boston Celtics, and more. You can also create your own custom teams and rosters in this mode.
-
- Streetball in Blacktop mode
-
This mode lets you play streetball with your own rules and settings. You can choose the number of players, the court, the time limit, and the difficulty level. You can also use your MyPLAYER or any NBA player in this mode.
-
- Customization options for players, teams, and courts
-
This mode lets you customize various aspects of the game, such as the appearance, skills, attributes, animations, accessories, and outfits of your players. You can also edit the logos, jerseys, arenas, and courts of your teams.
-
- Online multiplayer and leaderboards
-
This mode lets you play online with or against other players from around the world. You can join or create online matches, leagues, tournaments, and events. You can also compete for rankings, rewards, and bragging rights in this mode.
-
What is NBA 2K20 Mod APK Revdl?
-
NBA 2K20 Mod APK Revdl is a modified version of NBA 2K20 that offers some additional features and benefits that are not available in the original game. NBA 2K20 Mod APK Revdl is created by REXDL, which is a website that provides free download links for various modded games and apps for Android devices. NBA 2K20 Mod APK Revdl is one of the most popular and downloaded modded games on REXDL.
-
Benefits of NBA 2K20 Mod APK Revdl
-
- Free shopping for clothes, shoes, accessories, and more
-
One of the benefits of NBA 2K20 Mod APK Revdl is that it allows you to shop for free in the game. You can buy any clothes, shoes, accessories, and other items that you want for your players without spending any real money or virtual currency. You can also unlock all the premium items that are normally locked behind a paywall.
-
- Unlimited virtual currency (VC) to upgrade your skills and attributes
-
Another benefit of NBA 2K20 Mod APK Revdl is that it gives you unlimited VC, which is the main currency used in the game. You can use VC to upgrade your skills and attributes, such as shooting, passing, dribbling, defense, rebounding, and more. You can also use VC to buy packs, cards, boosts, and other items in the game.
-
- No ads or in-app purchases
-
A third benefit of NBA 2K20 Mod APK Revdl is that it removes all the ads and in-app purchases from the game. You can enjoy the game without any interruptions or distractions from annoying ads or pop-ups. You can also avoid spending any real money on in-app purchases that might affect your gameplay or progress.
-
- Easy installation and compatibility with most Android devices
-
A fourth benefit of NBA 2K20 Mod APK Revdl is that it is easy to install and compatible with most Android devices. You do not need to root your device or use any special tools or methods to install the modded game. You just need to download the APK file and the OBB data file from REXDL and follow some simple steps to install them on your device. You can also run the game smoothly on most Android devices without any lag or glitches.
As a bonus, we have also prepared a table that compares the features of NBA 2K20 and NBA 2K20 Mod APK Revdl. You can use this table to see the differences and similarities between the two versions of the game.
Table 3: Comparison of NBA 2K20 and NBA 2K20 Mod APK Revdl

| Feature | NBA 2K20 | NBA 2K20 Mod APK Revdl |
| --- | --- | --- |
| Graphics | High-quality graphics with realistic animations and effects | Same as NBA 2K20 |
| Gameplay | Smooth and responsive gameplay with various modes and options | Same as NBA 2K20 |
| Features | 5 new NBA Stories, a new MyCAREER storyline, an all-new Run The Streets mode, 5-on-5 basketball with current or all-time great NBA teams, streetball in Blacktop mode, customization options for players, teams, and courts, online multiplayer and leaderboards | Same as NBA 2K20, plus free shopping for clothes, shoes, accessories, and more, unlimited virtual currency (VC) to upgrade your skills and attributes, no ads or in-app purchases |
| Installation | Requires downloading from the Google Play Store or other trusted sources, may require additional data download after installation, may require payment for some features or items | Requires downloading from the REXDL website, requires downloading both the APK file and the OBB data file, requires enabling installation of apps from unknown sources, requires extracting the OBB data file to the Android/OBB folder, does not require payment for any features or items |
| Compatibility | Compatible with most Android devices with Android 4.3 or higher, may require high-end devices for optimal performance | Compatible with most Android devices with Android 4.3 or higher, may require high-end devices for optimal performance |
We hope that this table has been useful and informative for you. If you have any questions or feedback, please feel free to leave a comment below.
-
Now that you have learned everything you need to know about NBA 2K20 Mod APK Revdl, it is time to download it and enjoy the best basketball game on your Android device. You will not regret it!
-
FAQs
-
Here are some frequently asked questions and answers about NBA 2K20 Mod APK Revdl that you might find helpful.
-
Q: Is NBA 2K20 Mod APK Revdl safe to download and install?
-
A: Yes, NBA 2K20 Mod APK Revdl is safe to download and install. It does not contain any viruses, malware, or spyware that might harm your device or compromise your privacy. However, you should always download it from the official website of REXDL and not from any other sources that might be unreliable or malicious.
-
Q: Is NBA 2K20 Mod APK Revdl legal to use?
-
A: Yes, NBA 2K20 Mod APK Revdl is legal to use. It does not violate any laws or regulations that might prohibit the use of modded games or apps. However, you should always use it at your own risk and discretion. We are not responsible for any consequences that might arise from using NBA 2K20 Mod APK Revdl.
-
Q: Will NBA 2K20 Mod APK Revdl affect my original game progress or data?
-
A: No, NBA 2K20 Mod APK Revdl will not affect your original game progress or data. It will create a separate folder and file for the modded game on your device. You can still play the original game without any interference or conflict with the modded game. You can also switch between the two versions of the game as you wish.
-
Q: Can I play online with other players using NBA 2K20 Mod APK Revdl?
-
A: Yes, you can play online with other players using NBA 2K20 Mod APK Revdl. However, you should be aware that some players might not appreciate playing with someone who has an unfair advantage over them. You might also face some issues or errors while playing online with the modded game. Therefore, we recommend that you play online with other players who are also using NBA 2K20 Mod APK Revdl or play offline with the modded game.
-
Q: Can I update NBA 2K20 Mod APK Revdl to the latest version?
-
A: Yes, you can update NBA 2K20 Mod APK Revdl to the latest version. However, you should always check the official website of REXDL for the latest update and download link. You should also backup your modded game data before updating to avoid losing any progress or settings. You should also follow the same steps as before to install the updated version of the modded game. You should also disable the automatic update of the game from the Google Play Store or other sources to avoid overwriting the modded game.
-
-
\ No newline at end of file
diff --git a/spaces/1phancelerku/anime-remove-background/Download for Spider Solitaire The Original and Best Solitaire Game.md b/spaces/1phancelerku/anime-remove-background/Download for Spider Solitaire The Original and Best Solitaire Game.md
deleted file mode 100644
index c50f3f0dbd05ad17678bf2cf2af3a9b3b64d439a..0000000000000000000000000000000000000000
--- a/spaces/1phancelerku/anime-remove-background/Download for Spider Solitaire The Original and Best Solitaire Game.md
+++ /dev/null
@@ -1,120 +0,0 @@
-
-
Download for Spider Solitaire: How to Play the Classic Card Game Online and for Free
-
Spider Solitaire is one of the most popular and addictive card games in the world. It is a fun and challenging way to test your logic, patience, and concentration skills. But did you know that you can download and play Spider Solitaire online and for free? In this article, we will tell you everything you need to know about Spider Solitaire, including its history, rules, variations, benefits, tips, tricks, and more. We will also show you the best websites to download and play Spider Solitaire online and for free. So, if you are ready to join millions of other Spider Solitaire fans, read on!
-
What is Spider Solitaire?
-
Spider Solitaire is a type of solitaire game that uses two decks of cards. The goal is to arrange all the cards in the tableau (the playing area) into eight piles of cards, each pile containing cards of the same suit in descending order from King to Ace. Once a pile is completed, it is moved to the foundation (the area at the top) and removed from the game. The game is won when all eight piles are completed.
Spider Solitaire was first introduced in 1949 by F. R. Simon in his book "The Playing Card". It was originally called "Scorpion" and had some different rules. The name "Spider" was given by Thomas Warfield, who created a computer version of the game in 1996. He also added some variations, such as different levels of difficulty based on the number of suits used (one, two, or four). Since then, Spider Solitaire has become one of the most popular solitaire games in the world, especially after it was included in Microsoft Windows in 2001. According to some estimates, more than 100 million people play Spider Solitaire every day.
-
The rules and variations of Spider Solitaire
-
The basic rules of Spider Solitaire are as follows (a short code sketch after this list illustrates the move and pile rules):
-
-
The game starts with 54 cards dealt face up into 10 columns in the tableau. The first four columns have six cards each, and the last six columns have five cards each.
-
The remaining 50 cards are placed face down in the stock (the area at the bottom left). You can deal a new card onto each tableau column by clicking on the stock.
-
You can move any card or a sequence of cards that are in descending order and of the same suit from one column to another. For example, you can move a 9 of Spades onto a 10 of Spades, or a 7-6-5 of Hearts onto an 8 of Hearts.
-
You can also move any card or a sequence of cards to an empty column.
-
When you have a complete pile of cards from King to Ace of the same suit in a column, it is automatically moved to the foundation and removed from the game.
-
You can change the level of difficulty by choosing how many suits you want to play with: one (easy), two (medium), or four (hard).
-
-
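To make the two central rules above concrete, namely which runs may be moved together and when a pile leaves the tableau, here is a small Python sketch of my own; the card encoding is just an illustration, not taken from any particular implementation of the game:

```python
# Cards are (rank, suit) pairs: rank 13 = King down to 1 = Ace, suit is a letter.
def is_movable_run(cards):
    """A run can move as one unit only if it descends by one and stays in one suit."""
    return all(
        higher[0] == lower[0] + 1 and higher[1] == lower[1]
        for higher, lower in zip(cards, cards[1:])
    )

def pile_completed(column):
    """A column sheds a pile when its last 13 cards form King-to-Ace of one suit."""
    tail = column[-13:]
    return len(tail) == 13 and tail[0][0] == 13 and is_movable_run(tail)

print(is_movable_run([(9, "S"), (8, "S"), (7, "S")]))  # True: 9-8-7 of Spades
print(is_movable_run([(9, "S"), (8, "H"), (7, "S")]))  # False: suits differ
```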
There are also some variations of Spider Solitaire that have different rules or features. For example:
-
-
Some versions allow you to undo your moves, reshuffle the stock, or use hints.
-
Some versions have different layouts, themes, backgrounds, or card designs.
-
Some versions have different scoring systems, achievements, leaderboards, or statistics.
-
Some versions have different modes, such as timed, relaxed, or challenge.
-
-
How to download and play Spider Solitaire online and for free?
-
If you want to download and play Spider Solitaire online and for free, you have many options to choose from. You can play Spider Solitaire on your computer, smartphone, tablet, or any other device that has an internet connection and a web browser. You can also download Spider Solitaire apps or software for your device if you prefer. Here are some of the benefits of playing Spider Solitaire online and for free:
-
The benefits of playing Spider Solitaire online and for free
-
-
You can play Spider Solitaire anytime and anywhere you want, without any limitations or restrictions.
-
You can play Spider Solitaire with different levels of difficulty, variations, features, and options, according to your preference and skill level.
-
You can play Spider Solitaire with high-quality graphics, sounds, and animations, that enhance your gaming experience and enjoyment.
-
You can play Spider Solitaire with other players from around the world, compete with them, chat with them, or learn from them.
-
You can play Spider Solitaire without spending any money, downloading any files, or risking any viruses or malware.
-
-
The best websites to download and play Spider Solitaire online and for free
-
There are many websites that offer Spider Solitaire online and for free, but not all of them are reliable, safe, or fun. To help you find the best websites to download and play Spider Solitaire online and for free, we have selected three of them that we think are the best. Here they are:
-
Spider Solitaire - Play Online
-
This website is one of the most popular and trusted websites to play Spider Solitaire online and for free. It has a simple and user-friendly interface, smooth and fast gameplay, and a variety of options and features. You can choose from one, two, or four suits, undo your moves, use hints, change the background color, adjust the sound volume, and more. You can also see your score, time, moves, and statistics. You can play Spider Solitaire on this website on any device that has a web browser.
-
download for spider solitaire collection free
-download for spider solitaire card games
-download for spider solitaire classic 2022
-download for spider solitaire HD free
-download for spider solitaire pro
-download for spider solitaire windows 10
-download for spider solitaire windows 8.1
-download for spider solitaire offline
-download for spider solitaire no ads
-download for spider solitaire with hints
-download for spider solitaire one suit
-download for spider solitaire two suits
-download for spider solitaire four suits
-download for spider solitaire scorpion
-download for spider solitaire mobilityware
-download for spider solitaire treecardgames
-download for spider solitaire microsoft store
-download for spider solitaire google play
-download for spider solitaire app store
-download for spider solitaire apk
-download for spider solitaire pc
-download for spider solitaire mac
-download for spider solitaire android
-download for spider solitaire ios
-download for spider solitaire iphone
-download for spider solitaire ipad
-download for spider solitaire laptop
-download for spider solitaire desktop
-download for spider solitaire online
-download for spider solitaire free play
-download for spider solitaire unlimited undo
-download for spider solitaire daily challenge
-download for spider solitaire winning deals
-download for spider solitaire autoplay option
-download for spider solitaire statistics tracking
-download for spider solitaire save game progress
-download for spider solitaire beautiful graphics
-download for spider solitaire smooth animations
-download for spider solitaire many card sets
-download for spider solitaire many backgrounds
-download for spider solitaire easy to use interface
-download for spider solitaire fun and addictive gameplay
-download for spider solitaire best brain exercise
-download for spider solitaire how to play guide
-download for spider solitaire tips and tricks
-download for spider solitaire latest version
-download for spider solitaire update
-download for spider solitaire support
-
Spider Solitaire: free online card game, play full-screen without download
-
This website is another great option to play Spider Solitaire online and for free. It has a beautiful and elegant design, responsive and fluid gameplay, and a range of options and features. You can choose from one, two, or four suits, undo your moves, use hints, change the card style, switch to full-screen mode, and more. You can also see your score, time, moves, and statistics. You can play Spider Solitaire on this website on any device that has a web browser.
-
Spider Solitaire (2 Suits)
-
This website is a good choice to play Spider Solitaire online and for free if you are looking for a medium level of difficulty. It offers Spider Solitaire with two suits only, which is more challenging than one suit but easier than four suits. It has a clean and simple interface, smooth and fast gameplay, and some basic options and features. You can undo your moves, use hints, change the background color, and adjust the sound volume. You can also see your score, time, moves, and statistics. You can play Spider Solitaire on this website on any device that has a web browser.
-
How to improve your skills and strategies in Spider Solitaire?
-
Spider Solitaire is not only a game of luck but also a game of skill and strategy. If you want to improve your skills and strategies in Spider Solitaire, you need to practice regularly, learn from your mistakes, and apply some tips and tricks. Here are some of them:
-
The tips and tricks for winning Spider Solitaire
-
-
Try to expose the hidden cards as soon as possible, especially the ones in the longer columns.
-
Try to create empty columns as soon as possible, as they give you more flexibility and space to move cards around.
-
Try to build piles of cards of the same suit in descending order as much as possible, as they are easier to move and complete.
-
Try to avoid moving cards that are not of the same suit unless it is necessary or beneficial.
-
Try to plan ahead and anticipate the consequences of your moves before you make them.
-
Try to use the stock wisely and only when you are stuck or need new cards.
-
-
The common mistakes and pitfalls to avoid in Spider Solitaire
-
-
Don't move cards without a purpose or a plan.
-
Don't waste your undo moves or hints on trivial or obvious moves.
-
Don't ignore the cards in the foundation or the stock, as they may be useful later.
-
Don't get frustrated or give up if you lose or get stuck, as every game is different and solvable.
-
-
Conclusion
-
Spider Solitaire is a classic card game that you can download and play online and for free. It is a fun and challenging way to test your logic, patience, and concentration skills. It also has many benefits, such as improving your memory, mood, and mental health. In this article, we have told you everything you need to know about Spider Solitaire, including its history, rules, variations, benefits, tips, tricks, and more. We have also shown you the best websites to download and play Spider Solitaire online and for free. We hope you have enjoyed reading this article and learned something new. Now it's time to put your knowledge into practice and start playing Spider Solitaire online and for free!
-
Call to action
-
If you are ready to download and play Spider Solitaire online and for free, click on one of the links below and start having fun!
- Demo for 22h Diffusion v0-1 Stable Diffusion model.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
-
- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"}
- """)
-
-demo.queue(concurrency_count=1)
-demo.launch()
\ No newline at end of file
diff --git a/spaces/ADOPLE/ResumeSummarizer/README.md b/spaces/ADOPLE/ResumeSummarizer/README.md
deleted file mode 100644
index 93e79737e1cfd8e1281657df3ca307b9d156e68a..0000000000000000000000000000000000000000
--- a/spaces/ADOPLE/ResumeSummarizer/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: ResumeSummarizer
-emoji: 👁
-colorFrom: green
-colorTo: gray
-sdk: gradio
-sdk_version: 3.35.2
-app_file: app.py
-pinned: false
-duplicated_from: randstad/ResumeSummarizer
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/wav_processors/__init__.py b/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/wav_processors/__init__.py
deleted file mode 100644
index 4be97b377dcb95a0e6bceb876ac0ce93c8290249..0000000000000000000000000000000000000000
--- a/spaces/AIGC-Audio/AudioGPT/text_to_speech/data_gen/tts/wav_processors/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from . import base_processor
-from . import common_processors
diff --git a/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/README.md b/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/README.md
deleted file mode 100644
index 277ea157607d38b0c354d6c41600f058e43d1ad2..0000000000000000000000000000000000000000
--- a/spaces/AIZero2Hero4Health/2-BiomedEntityRecognition-GR/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: 2 BiomedEntityRecognition GR
-emoji: ⚡
-colorFrom: red
-colorTo: pink
-sdk: gradio
-sdk_version: 3.8.2
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/settings/+page.server.ts b/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/settings/+page.server.ts
deleted file mode 100644
index 9084fa8f983c65b2aa8f90c553e1b69006a6d2d1..0000000000000000000000000000000000000000
--- a/spaces/AchyuthGamer/OpenGPT-Chat-UI/src/routes/settings/+page.server.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-import { base } from "$app/paths";
-import { collections } from "$lib/server/database";
-import { redirect } from "@sveltejs/kit";
-import { z } from "zod";
-import { models, validateModel } from "$lib/server/models";
-import { authCondition } from "$lib/server/auth";
-import { DEFAULT_SETTINGS } from "$lib/types/Settings";
-
-export const actions = {
- default: async function ({ request, locals }) {
- const formData = await request.formData();
-
- const { ethicsModalAccepted, ...settings } = z
- .object({
- shareConversationsWithModelAuthors: z
- .union([z.literal("true"), z.literal("on"), z.literal("false"), z.null()])
- .transform((value) => {
- return value === "true" || value === "on";
- }),
- ethicsModalAccepted: z.boolean({ coerce: true }).optional(),
- activeModel: validateModel(models),
- customPrompts: z.record(z.string()).default({}),
- })
- .parse({
- shareConversationsWithModelAuthors: formData.get("shareConversationsWithModelAuthors"),
- ethicsModalAccepted: formData.get("ethicsModalAccepted"),
- activeModel: formData.get("activeModel") ?? DEFAULT_SETTINGS.activeModel,
- customPrompts: JSON.parse(formData.get("customPrompts")?.toString() ?? "{}"),
- });
-
- throw redirect(303, request.headers.get("referer") || `${base}/`);
- },
-};
diff --git a/spaces/Adapter/T2I-Adapter/ldm/modules/encoders/modules.py b/spaces/Adapter/T2I-Adapter/ldm/modules/encoders/modules.py
deleted file mode 100644
index d59229ac1c97980e811e3b808f3431311c4f3b7d..0000000000000000000000000000000000000000
--- a/spaces/Adapter/T2I-Adapter/ldm/modules/encoders/modules.py
+++ /dev/null
@@ -1,441 +0,0 @@
-import torch
-import torch.nn as nn
-import math
-from torch.utils.checkpoint import checkpoint
-
-from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel, CLIPModel
-
-import open_clip
-import re
-from ldm.util import default, count_params
-
-
-class AbstractEncoder(nn.Module):
- def __init__(self):
- super().__init__()
-
- def encode(self, *args, **kwargs):
- raise NotImplementedError
-
-
-class IdentityEncoder(AbstractEncoder):
-
- def encode(self, x):
- return x
-
-
-class ClassEmbedder(nn.Module):
- def __init__(self, embed_dim, n_classes=1000, key='class'):
- super().__init__()
- self.key = key
- self.embedding = nn.Embedding(n_classes, embed_dim)
-
- def forward(self, batch, key=None):
- if key is None:
- key = self.key
- # this is for use in crossattn
- c = batch[key][:, None]
- c = self.embedding(c)
- return c
-
-
-class FrozenT5Embedder(AbstractEncoder):
- """Uses the T5 transformer encoder for text"""
- def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
- super().__init__()
- self.tokenizer = T5Tokenizer.from_pretrained(version)
- self.transformer = T5EncoderModel.from_pretrained(version)
- self.device = device
- self.max_length = max_length # TODO: typical value?
- if freeze:
- self.freeze()
-
- def freeze(self):
- self.transformer = self.transformer.eval()
- #self.train = disabled_train
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, text):
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
- tokens = batch_encoding["input_ids"].to(self.device)
- outputs = self.transformer(input_ids=tokens)
-
- z = outputs.last_hidden_state
- return z
-
- def encode(self, text):
- return self(text)
-
-
-class FrozenCLIPEmbedder(AbstractEncoder):
- """Uses the CLIP transformer encoder for text (from huggingface)"""
- def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77,
- freeze=True, layer="last"): # clip-vit-base-patch32
- super().__init__()
- self.tokenizer = CLIPTokenizer.from_pretrained(version)
- self.transformer = CLIPModel.from_pretrained(version).text_model
- self.device = device
- self.max_length = max_length
- if freeze:
- self.freeze()
- self.layer = layer
-
- def freeze(self):
- self.transformer = self.transformer.eval()
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, text):
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
- tokens = batch_encoding["input_ids"].to(self.device)
- outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer != 'last')
-
- if self.layer == 'penultimate':
- z = outputs.hidden_states[-2]
- z = self.transformer.final_layer_norm(z)
- else:
- z = outputs.last_hidden_state
- return z
-
- def encode(self, text):
- return self(text)
-
-
-class FrozenOpenCLIPEmbedder(AbstractEncoder):
- """
- Uses the OpenCLIP transformer encoder for text
- """
- LAYERS = [
- #"pooled",
- "last",
- "penultimate"
- ]
- def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", max_length=77,
- freeze=True, layer="last"):
- super().__init__()
- assert layer in self.LAYERS
- model, _, _ = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
- del model.visual
- self.model = model
-
- self.device = device
- self.max_length = max_length
- if freeze:
- self.freeze()
- self.layer = layer
- if self.layer == "last":
- self.layer_idx = 0
- elif self.layer == "penultimate":
- self.layer_idx = 1
- else:
- raise NotImplementedError()
-
- def freeze(self):
- self.model = self.model.eval()
- for param in self.parameters():
- param.requires_grad = False
-
- def forward(self, text):
- tokens = open_clip.tokenize(text)
- z = self.encode_with_transformer(tokens.to(self.device))
- return z
-
- def encode_with_transformer(self, text):
- x = self.model.token_embedding(text) # [batch_size, n_ctx, d_model]
- x = x + self.model.positional_embedding
- x = x.permute(1, 0, 2) # NLD -> LND
- x = self.text_transformer_forward(x, attn_mask=self.model.attn_mask)
- x = x.permute(1, 0, 2) # LND -> NLD
- x = self.model.ln_final(x)
- return x
-
- def text_transformer_forward(self, x: torch.Tensor, attn_mask = None):
- for i, r in enumerate(self.model.transformer.resblocks):
- if i == len(self.model.transformer.resblocks) - self.layer_idx:
- break
- if self.model.transformer.grad_checkpointing and not torch.jit.is_scripting():
- x = checkpoint(r, x, attn_mask)
- else:
- x = r(x, attn_mask=attn_mask)
- return x
-
- def encode(self, text):
- return self(text)
-
-
-class FrozenCLIPT5Encoder(AbstractEncoder):
- def __init__(self, clip_version="openai/clip-vit-large-patch14", t5_version="google/t5-v1_1-xl", device="cuda",
- clip_max_length=77, t5_max_length=77):
- super().__init__()
- self.clip_encoder = FrozenCLIPEmbedder(clip_version, device, max_length=clip_max_length)
- self.t5_encoder = FrozenT5Embedder(t5_version, device, max_length=t5_max_length)
- print(f"{self.clip_encoder.__class__.__name__} has {count_params(self.clip_encoder)*1.e-6:.2f} M parameters, "
- f"{self.t5_encoder.__class__.__name__} comes with {count_params(self.t5_encoder)*1.e-6:.2f} M params.")
-
- def encode(self, text):
- return self(text)
-
- def forward(self, text):
- clip_z = self.clip_encoder.encode(text)
- t5_z = self.t5_encoder.encode(text)
- return [clip_z, t5_z]
-
-
-# code from sd-webui
-re_attention = re.compile(r"""
-\\\(|
-\\\)|
-\\\[|
-\\]|
-\\\\|
-\\|
-\(|
-\[|
-:([+-]?[.\d]+)\)|
-\)|
-]|
-[^\\()\[\]:]+|
-:
-""", re.X)
-
-
-def parse_prompt_attention(text):
- """
- Parses a string with attention tokens and returns a list of pairs: text and its associated weight.
- Accepted tokens are:
- (abc) - increases attention to abc by a multiplier of 1.1
- (abc:3.12) - increases attention to abc by a multiplier of 3.12
- [abc] - decreases attention to abc by a multiplier of 1.1
- \( - literal character '('
- \[ - literal character '['
- \) - literal character ')'
- \] - literal character ']'
- \\ - literal character '\'
- anything else - just text
-
- >>> parse_prompt_attention('normal text')
- [['normal text', 1.0]]
- >>> parse_prompt_attention('an (important) word')
- [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
- >>> parse_prompt_attention('(unbalanced')
- [['unbalanced', 1.1]]
- >>> parse_prompt_attention('\(literal\]')
- [['(literal]', 1.0]]
- >>> parse_prompt_attention('(unnecessary)(parens)')
- [['unnecessaryparens', 1.1]]
- >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
- [['a ', 1.0],
- ['house', 1.5730000000000004],
- [' ', 1.1],
- ['on', 1.0],
- [' a ', 1.1],
- ['hill', 0.55],
- [', sun, ', 1.1],
- ['sky', 1.4641000000000006],
- ['.', 1.1]]
- """
-
- res = []
- round_brackets = []
- square_brackets = []
-
- round_bracket_multiplier = 1.1
- square_bracket_multiplier = 1 / 1.1
-
- def multiply_range(start_position, multiplier):
- for p in range(start_position, len(res)):
- res[p][1] *= multiplier
-
- for m in re_attention.finditer(text):
- text = m.group(0)
- weight = m.group(1)
-
- if text.startswith('\\'):
- res.append([text[1:], 1.0])
- elif text == '(':
- round_brackets.append(len(res))
- elif text == '[':
- square_brackets.append(len(res))
- elif weight is not None and len(round_brackets) > 0:
- multiply_range(round_brackets.pop(), float(weight))
- elif text == ')' and len(round_brackets) > 0:
- multiply_range(round_brackets.pop(), round_bracket_multiplier)
- elif text == ']' and len(square_brackets) > 0:
- multiply_range(square_brackets.pop(), square_bracket_multiplier)
- else:
- res.append([text, 1.0])
-
- for pos in round_brackets:
- multiply_range(pos, round_bracket_multiplier)
-
- for pos in square_brackets:
- multiply_range(pos, square_bracket_multiplier)
-
- if len(res) == 0:
- res = [["", 1.0]]
-
- # merge runs of identical weights
- i = 0
- while i + 1 < len(res):
- if res[i][1] == res[i + 1][1]:
- res[i][0] += res[i + 1][0]
- res.pop(i + 1)
- else:
- i += 1
-
- return res
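As a quick illustration of how the bracket weights compound (a sketch, assuming the parse_prompt_attention function above is in scope; the prompt string is arbitrary):

# each extra pair of round brackets multiplies the weight by another 1.1,
# square brackets divide by 1.1; adjacent spans with equal weight are merged
chunks = parse_prompt_attention("a ((cat)) in [rain]")
# expected (up to float rounding):
# [['a ', 1.0], ['cat', 1.21], [' in ', 1.0], ['rain', 0.909]]
for text, weight in chunks:
    print(repr(text), round(weight, 3))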
-
-class WebUIFrozenCLIPEmebedder(AbstractEncoder):
- def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", freeze=True, layer="penultimate"):
- super(WebUIFrozenCLIPEmebedder, self).__init__()
- self.tokenizer = CLIPTokenizer.from_pretrained(version)
- self.transformer = CLIPModel.from_pretrained(version).text_model
- self.device = device
- self.layer = layer
- if freeze:
- self.freeze()
-
- # token id of ',' in the CLIP vocab, used to find safe places to split a prompt
- self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0]
- # if a 75-token chunk boundary lands within this many tokens of the last comma,
- # everything after that comma is pushed into the next chunk (see tokenize_line)
- self.comma_padding_backtrack = 20
-
- def freeze(self):
- self.transformer = self.transformer.eval()
- for param in self.parameters():
- param.requires_grad = False
-
- def tokenize(self, texts):
- tokenized = self.tokenizer(texts, truncation=False, add_special_tokens=False)["input_ids"]
- return tokenized
-
- def encode_with_transformers(self, tokens):
- outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer!='last')
-
- if self.layer == 'penultimate':
- z = outputs.hidden_states[-2]
- z = self.transformer.final_layer_norm(z)
- else:
- z = outputs.last_hidden_state
-
- return z
-
- def tokenize_line(self, line):
- parsed = parse_prompt_attention(line)
- # print(parsed)
-
- tokenized = self.tokenize([text for text, _ in parsed])
-
- remade_tokens = []
- multipliers = []
- last_comma = -1
-
- for tokens, (text, weight) in zip(tokenized, parsed):
- i = 0
- while i < len(tokens):
- token = tokens[i]
-
- if token == self.comma_token:
- last_comma = len(remade_tokens)
- elif (self.comma_padding_backtrack != 0
- and max(len(remade_tokens), 1) % 75 == 0
- and last_comma != -1
- and len(remade_tokens) - last_comma <= self.comma_padding_backtrack):
- last_comma += 1
- reloc_tokens = remade_tokens[last_comma:]
- reloc_mults = multipliers[last_comma:]
-
- remade_tokens = remade_tokens[:last_comma]
- length = len(remade_tokens)
-
- rem = int(math.ceil(length / 75)) * 75 - length
- remade_tokens += [self.tokenizer.eos_token_id] * rem + reloc_tokens
- multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults
-
- remade_tokens.append(token)
- multipliers.append(weight)
- i += 1
-
- token_count = len(remade_tokens)
- prompt_target_length = math.ceil(max(token_count, 1) / 75) * 75
- tokens_to_add = prompt_target_length - len(remade_tokens)
-
- remade_tokens = remade_tokens + [self.tokenizer.eos_token_id] * tokens_to_add
- multipliers = multipliers + [1.0] * tokens_to_add
-
- return remade_tokens, multipliers, token_count
-
- def process_text(self, texts):
- remade_batch_tokens = []
- token_count = 0
-
- cache = {}
- batch_multipliers = []
- for line in texts:
- if line in cache:
- remade_tokens, multipliers = cache[line]
- else:
- remade_tokens, multipliers, current_token_count = self.tokenize_line(line)
- token_count = max(current_token_count, token_count)
-
- cache[line] = (remade_tokens, multipliers)
-
- remade_batch_tokens.append(remade_tokens)
- batch_multipliers.append(multipliers)
-
- return batch_multipliers, remade_batch_tokens, token_count
-
- def process_tokens(self, remade_batch_tokens, batch_multipliers):
- remade_batch_tokens = [[self.tokenizer.bos_token_id] + x[:75] + [self.tokenizer.eos_token_id] for x in remade_batch_tokens]
- batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers]
-
- tokens = torch.asarray(remade_batch_tokens).to(self.device)
-
- z = self.encode_with_transformers(tokens)
-
- # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
- batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers]
- batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(self.device)
- original_mean = z.mean()
- z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
- new_mean = z.mean()
- z *= original_mean / new_mean
-
- return z
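The rescaling at the end of process_tokens simply restores the pre-weighting mean of the embeddings while keeping the relative emphasis; the same arithmetic on a toy tensor (illustrative numbers only, assuming the module's own torch import):

z = torch.tensor([[1.0, 3.0]])            # stand-in embeddings, mean = 2.0
mult = torch.tensor([[2.0, 1.0]])         # per-token weights
weighted = z * mult                       # mean becomes 2.5
weighted *= z.mean() / weighted.mean()    # rescale: mean is 2.0 again, ratios kept
print(weighted)                           # tensor([[1.6000, 2.4000]])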
-
- def forward(self, text):
- batch_multipliers, remade_batch_tokens, token_count = self.process_text(text)
-
- z = None
- i = 0
- while max(map(len, remade_batch_tokens)) != 0:
- rem_tokens = [x[75:] for x in remade_batch_tokens]
- rem_multipliers = [x[75:] for x in batch_multipliers]
-
- tokens = []
- multipliers = []
- for j in range(len(remade_batch_tokens)):
- if len(remade_batch_tokens[j]) > 0:
- tokens.append(remade_batch_tokens[j][:75])
- multipliers.append(batch_multipliers[j][:75])
- else:
- tokens.append([self.tokenizer.eos_token_id] * 75)
- multipliers.append([1.0] * 75)
-
- z1 = self.process_tokens(tokens, multipliers)
- z = z1 if z is None else torch.cat((z, z1), axis=-2)
-
- remade_batch_tokens = rem_tokens
- batch_multipliers = rem_multipliers
- i += 1
-
- return z
-
- def encode(self, text):
- return self(text)
-
-
-
-if __name__ == "__main__":
- model = FrozenCLIPEmbedder()
- count_params(model, verbose=True)
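For orientation, all the encoders above expose the same encode(text) interface and return per-token embeddings that downstream cross-attention consumes. A rough sketch of the expected shapes, using the default checkpoint named in FrozenCLIPEmbedder (running it requires the transformers weights to be downloadable):

embedder = FrozenCLIPEmbedder(device="cpu")           # default: openai/clip-vit-large-patch14
z = embedder.encode(["a photograph of an astronaut"])
print(z.shape)   # expected: torch.Size([1, 77, 768]) -- 77 padded tokens, 768-dim hidden states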
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/moveto-plugin.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/moveto-plugin.d.ts
deleted file mode 100644
index bb12e9c1d7cc0ca65c3631c998ecfbf2f390580d..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/moveto-plugin.d.ts
+++ /dev/null
@@ -1,9 +0,0 @@
-import MoveTo from './moveto';
-
-export default class MoveToPlugin extends Phaser.Plugins.BasePlugin {
- add(
- gameObject: Phaser.GameObjects.GameObject,
- config?: MoveTo.IConfig
- ): MoveTo;
-
-}
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.js
deleted file mode 100644
index 70cf6df876d6e5f733628ebf2fb62652c1504e9d..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/custom/Factory.js
+++ /dev/null
@@ -1,13 +0,0 @@
-import Custom from './Custom.js';
-import ObjectFactory from '../ObjectFactory.js';
-import SetValue from '../../../plugins/utils/object/SetValue.js';
-
-ObjectFactory.register('custom', function (config) {
- var gameObject = new Custom(this.scene, config);
- this.scene.add.existing(gameObject);
- return gameObject;
-});
-
-SetValue(window, 'RexPlugins.Spinner.Custom', Custom);
-
-export default Custom;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunWidthWrap.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunWidthWrap.js
deleted file mode 100644
index 1c489fdb634c903570b49e55f967fb7920406a67..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/RunWidthWrap.js
+++ /dev/null
@@ -1,23 +0,0 @@
-// Default method
-var RunWidthWrap = function (parentWidth) {
- var child, childWidth;
- for (var i in this.sizerChildren) {
- child = this.sizerChildren[i];
- if (
- (!child) ||
- (child.isRexSizer && child.ignoreLayout) ||
- (!child.runWidthWrap)
- ) {
- continue;
- }
-
- childWidth = this.getExpandedChildWidth(child, parentWidth);
- if (child.isRexSizer) {
- childWidth = child.resolveWidth(childWidth);
- }
- child.runWidthWrap(childWidth);
- }
- return this;
-}
-
-export default RunWidthWrap;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenSizers.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenSizers.js
deleted file mode 100644
index 179c8baf0167d1378a80f701ca13048229791775..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/gridsizer/GetChildrenSizers.js
+++ /dev/null
@@ -1,15 +0,0 @@
-var GetChildrenSizers = function (out) {
- if (out === undefined) {
- out = [];
- }
- var children = this.sizerChildren,
- child;
- for (var i = 0, cnt = children.length; i < cnt; i++) {
- child = children[i];
- if (child && child.isRexSizer) {
- out.push(child);
- }
- }
- return out;
-}
-export default GetChildrenSizers;
\ No newline at end of file
diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollBar.js b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollBar.js
deleted file mode 100644
index 5805834b863db6072a9a296fe90bb7ea2ecc505d..0000000000000000000000000000000000000000
--- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateScrollBar.js
+++ /dev/null
@@ -1,26 +0,0 @@
-import MergeStyle from './utils/MergeStyle.js';
-import ScrollBar from '../../scrollbar/ScrollBar.js';
-import CreateChild from './utils/CreateChild.js';
-import ReplaceSliderConfig from './utils/ReplaceSliderConfig.js';
-
-var CreateScrollBar = function (scene, data, view, styles, customBuilders) {
- data = MergeStyle(data, styles);
-
- // Replace data by child game object
- CreateChild(scene, data, 'background', view, styles, customBuilders);
- ReplaceSliderConfig(scene, data.slider, view, styles, customBuilders);
-
- var buttonsConfig = data.buttons;
- if (buttonsConfig) {
- CreateChild(scene, buttonsConfig, 'top', view, styles, customBuilders);
- CreateChild(scene, buttonsConfig, 'bottom', view, styles, customBuilders);
- CreateChild(scene, buttonsConfig, 'left', view, styles, customBuilders);
- CreateChild(scene, buttonsConfig, 'right', view, styles, customBuilders);
- }
-
- var gameObject = new ScrollBar(scene, data);
- scene.add.existing(gameObject);
- return gameObject;
-};
-
-export default CreateScrollBar;
\ No newline at end of file
diff --git a/spaces/Aki004/herta-so-vits/preprocess_hubert_f0.py b/spaces/Aki004/herta-so-vits/preprocess_hubert_f0.py
deleted file mode 100644
index 763fb0d65540ed4d62b269914e81c740f3ff6bba..0000000000000000000000000000000000000000
--- a/spaces/Aki004/herta-so-vits/preprocess_hubert_f0.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import math
-import multiprocessing
-import os
-import argparse
-from random import shuffle
-
-import torch
-from glob import glob
-from tqdm import tqdm
-from modules.mel_processing import spectrogram_torch
-
-import utils
-import logging
-
-logging.getLogger("numba").setLevel(logging.WARNING)
-import librosa
-import numpy as np
-
-hps = utils.get_hparams_from_file("configs/config.json")
-sampling_rate = hps.data.sampling_rate
-hop_length = hps.data.hop_length
-
-
-def process_one(filename, hmodel):
- # print(filename)
- wav, sr = librosa.load(filename, sr=sampling_rate)
- soft_path = filename + ".soft.pt"
- if not os.path.exists(soft_path):
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- wav16k = librosa.resample(wav, orig_sr=sampling_rate, target_sr=16000)
- wav16k = torch.from_numpy(wav16k).to(device)
- c = utils.get_hubert_content(hmodel, wav_16k_tensor=wav16k)
- torch.save(c.cpu(), soft_path)
-
- f0_path = filename + ".f0.npy"
- if not os.path.exists(f0_path):
- f0 = utils.compute_f0_dio(
- wav, sampling_rate=sampling_rate, hop_length=hop_length
- )
- np.save(f0_path, f0)
-
- spec_path = filename.replace(".wav", ".spec.pt")
- if not os.path.exists(spec_path):
- # Process spectrogram
- # The following code can't be replaced by torch.FloatTensor(wav)
- # because load_wav_to_torch returns a tensor that needs to be normalized
-
- audio, sr = utils.load_wav_to_torch(filename)
- if sr != hps.data.sampling_rate:
- raise ValueError(
- "{} SR doesn't match target {} SR".format(
- sr, hps.data.sampling_rate
- )
- )
-
- audio_norm = audio / hps.data.max_wav_value
- audio_norm = audio_norm.unsqueeze(0)
-
- spec = spectrogram_torch(
- audio_norm,
- hps.data.filter_length,
- hps.data.sampling_rate,
- hps.data.hop_length,
- hps.data.win_length,
- center=False,
- )
- spec = torch.squeeze(spec, 0)
- torch.save(spec, spec_path)
-
-
-def process_batch(filenames):
- print("Loading hubert for content...")
- device = "cuda" if torch.cuda.is_available() else "cpu"
- hmodel = utils.get_hubert_model().to(device)
- print("Loaded hubert.")
- for filename in tqdm(filenames):
- process_one(filename, hmodel)
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument(
- "--in_dir", type=str, default="dataset/44k", help="path to input dir"
- )
-
- args = parser.parse_args()
- filenames = glob(f"{args.in_dir}/*/*.wav", recursive=True) # [:10]
- shuffle(filenames)
- multiprocessing.set_start_method("spawn", force=True)
-
- num_processes = 1
- chunk_size = int(math.ceil(len(filenames) / num_processes))
- chunks = [
- filenames[i : i + chunk_size] for i in range(0, len(filenames), chunk_size)
- ]
- print([len(c) for c in chunks])
- processes = [
- multiprocessing.Process(target=process_batch, args=(chunk,)) for chunk in chunks
- ]
- for p in processes:
- p.start()
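Each worker writes three sidecar files next to every input wav, following the process_one logic above; a small sketch of what to expect after a run (the speaker/clip path is hypothetical, the dataset root is the script's default):

from pathlib import Path

wav = Path("dataset/44k/speaker0/000001.wav")             # hypothetical clip
expected = [
    wav.parent / (wav.name + ".soft.pt"),                 # HuBERT content features
    wav.parent / (wav.name + ".f0.npy"),                  # F0 curve from compute_f0_dio
    wav.parent / wav.name.replace(".wav", ".spec.pt"),    # linear spectrogram
]
print([p.exists() for p in expected])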
diff --git a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/optimizer.py b/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/optimizer.py
deleted file mode 100644
index cae5ffff3d11aaccd705d6936e080175ab97dd0e..0000000000000000000000000000000000000000
--- a/spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/tflib/optimizer.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Helper wrapper for a Tensorflow optimizer."""
-
-import platform
-import numpy as np
-import tensorflow as tf
-
-from collections import OrderedDict
-from typing import List, Union
-
-from . import autosummary
-from . import tfutil
-from .. import util
-
-from .tfutil import TfExpression, TfExpressionEx
-
-_collective_ops_warning_printed = False
-_collective_ops_group_key = 831766147
-_collective_ops_instance_key = 436340067
-
-class Optimizer:
- """A Wrapper for tf.train.Optimizer.
-
- Automatically takes care of:
- - Gradient averaging for multi-GPU training.
- - Gradient accumulation for arbitrarily large minibatches.
- - Dynamic loss scaling and typecasts for FP16 training.
- - Ignoring corrupted gradients that contain NaNs/Infs.
- - Reporting statistics.
- - Well-chosen default settings.
- """
-
- def __init__(self,
- name: str = "Train", # Name string that will appear in TensorFlow graph.
- tf_optimizer: str = "tf.train.AdamOptimizer", # Underlying optimizer class.
- learning_rate: TfExpressionEx = 0.001, # Learning rate. Can vary over time.
- minibatch_multiplier: TfExpressionEx = None, # Treat N consecutive minibatches as one by accumulating gradients.
- share: "Optimizer" = None, # Share internal state with a previously created optimizer?
- use_loss_scaling: bool = False, # Enable dynamic loss scaling for robust mixed-precision training?
- loss_scaling_init: float = 64.0, # Log2 of initial loss scaling factor.
- loss_scaling_inc: float = 0.0005, # Log2 of per-minibatch loss scaling increment when there is no overflow.
- loss_scaling_dec: float = 1.0, # Log2 of per-minibatch loss scaling decrement when there is an overflow.
- report_mem_usage: bool = False, # Report fine-grained memory usage statistics in TensorBoard?
- **kwargs):
-
- # Public fields.
- self.name = name
- self.learning_rate = learning_rate
- self.minibatch_multiplier = minibatch_multiplier
- self.id = self.name.replace("/", ".")
- self.scope = tf.get_default_graph().unique_name(self.id)
- self.optimizer_class = util.get_obj_by_name(tf_optimizer)
- self.optimizer_kwargs = dict(kwargs)
- self.use_loss_scaling = use_loss_scaling
- self.loss_scaling_init = loss_scaling_init
- self.loss_scaling_inc = loss_scaling_inc
- self.loss_scaling_dec = loss_scaling_dec
-
- # Private fields.
- self._updates_applied = False
- self._devices = OrderedDict() # device_name => EasyDict()
- self._shared_optimizers = OrderedDict() # device_name => optimizer_class
- self._gradient_shapes = None # [shape, ...]
- self._report_mem_usage = report_mem_usage
-
- # Validate arguments.
- assert callable(self.optimizer_class)
-
- # Share internal state if requested.
- if share is not None:
- assert isinstance(share, Optimizer)
- assert self.optimizer_class is share.optimizer_class
- assert self.learning_rate is share.learning_rate
- assert self.optimizer_kwargs == share.optimizer_kwargs
- self._shared_optimizers = share._shared_optimizers # pylint: disable=protected-access
-
- def _get_device(self, device_name: str):
- """Get internal state for the given TensorFlow device."""
- tfutil.assert_tf_initialized()
- if device_name in self._devices:
- return self._devices[device_name]
-
- # Initialize fields.
- device = util.EasyDict()
- device.name = device_name
- device.optimizer = None # Underlying optimizer: optimizer_class
- device.loss_scaling_var = None # Log2 of loss scaling: tf.Variable
- device.grad_raw = OrderedDict() # Raw gradients: var => [grad, ...]
- device.grad_clean = OrderedDict() # Clean gradients: var => grad
- device.grad_acc_vars = OrderedDict() # Accumulation sums: var => tf.Variable
- device.grad_acc_count = None # Accumulation counter: tf.Variable
- device.grad_acc = OrderedDict() # Accumulated gradients: var => grad
-
- # Setup TensorFlow objects.
- with tfutil.absolute_name_scope(self.scope + "/Devices"), tf.device(device_name), tf.control_dependencies(None):
- if device_name not in self._shared_optimizers:
- optimizer_name = self.scope.replace("/", "_") + "_opt%d" % len(self._shared_optimizers)
- self._shared_optimizers[device_name] = self.optimizer_class(name=optimizer_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)
- device.optimizer = self._shared_optimizers[device_name]
- if self.use_loss_scaling:
- device.loss_scaling_var = tf.Variable(np.float32(self.loss_scaling_init), trainable=False, name="loss_scaling_var")
-
- # Register device.
- self._devices[device_name] = device
- return device
-
- def register_gradients(self, loss: TfExpression, trainable_vars: Union[List, dict]) -> None:
- """Register the gradients of the given loss function with respect to the given variables.
- Intended to be called once per GPU."""
- tfutil.assert_tf_initialized()
- assert not self._updates_applied
- device = self._get_device(loss.device)
-
- # Validate trainables.
- if isinstance(trainable_vars, dict):
- trainable_vars = list(trainable_vars.values()) # allow passing in Network.trainables as vars
- assert isinstance(trainable_vars, list) and len(trainable_vars) >= 1
- assert all(tfutil.is_tf_expression(expr) for expr in trainable_vars + [loss])
- assert all(var.device == device.name for var in trainable_vars)
-
- # Validate shapes.
- if self._gradient_shapes is None:
- self._gradient_shapes = [var.shape.as_list() for var in trainable_vars]
- assert len(trainable_vars) == len(self._gradient_shapes)
- assert all(var.shape.as_list() == var_shape for var, var_shape in zip(trainable_vars, self._gradient_shapes))
-
- # Report memory usage if requested.
- deps = [loss]
- if self._report_mem_usage:
- self._report_mem_usage = False
- try:
- with tf.name_scope(self.id + '_mem'), tf.device(device.name), tf.control_dependencies([loss]):
- deps.append(autosummary.autosummary(self.id + "/mem_usage_gb", tf.contrib.memory_stats.BytesInUse() / 2**30))
- except tf.errors.NotFoundError:
- pass
-
- # Compute gradients.
- with tf.name_scope(self.id + "_grad"), tf.device(device.name), tf.control_dependencies(deps):
- loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))
- gate = tf.train.Optimizer.GATE_NONE # disable gating to reduce memory usage
- grad_list = device.optimizer.compute_gradients(loss=loss, var_list=trainable_vars, gate_gradients=gate)
-
- # Register gradients.
- for grad, var in grad_list:
- if var not in device.grad_raw:
- device.grad_raw[var] = []
- device.grad_raw[var].append(grad)
-
- def apply_updates(self, allow_no_op: bool = False) -> tf.Operation:
- """Construct training op to update the registered variables based on their gradients."""
- tfutil.assert_tf_initialized()
- assert not self._updates_applied
- self._updates_applied = True
- all_ops = []
-
- # Check for no-op.
- if allow_no_op and len(self._devices) == 0:
- with tfutil.absolute_name_scope(self.scope):
- return tf.no_op(name='TrainingOp')
-
- # Clean up gradients.
- for device_idx, device in enumerate(self._devices.values()):
- with tfutil.absolute_name_scope(self.scope + "/Clean%d" % device_idx), tf.device(device.name):
- for var, grad in device.grad_raw.items():
-
- # Filter out disconnected gradients and convert to float32.
- grad = [g for g in grad if g is not None]
- grad = [tf.cast(g, tf.float32) for g in grad]
-
- # Sum within the device.
- if len(grad) == 0:
- grad = tf.zeros(var.shape) # No gradients => zero.
- elif len(grad) == 1:
- grad = grad[0] # Single gradient => use as is.
- else:
- grad = tf.add_n(grad) # Multiple gradients => sum.
-
- # Scale as needed.
- scale = 1.0 / len(device.grad_raw[var]) / len(self._devices)
- scale = tf.constant(scale, dtype=tf.float32, name="scale")
- if self.minibatch_multiplier is not None:
- scale /= tf.cast(self.minibatch_multiplier, tf.float32)
- scale = self.undo_loss_scaling(scale)
- device.grad_clean[var] = grad * scale
-
- # Sum gradients across devices.
- if len(self._devices) > 1:
- with tfutil.absolute_name_scope(self.scope + "/Broadcast"), tf.device(None):
- if platform.system() == "Windows": # Windows => NCCL ops are not available.
- self._broadcast_fallback()
- elif tf.VERSION.startswith("1.15."): # TF 1.15 => NCCL ops are broken: https://github.com/tensorflow/tensorflow/issues/41539
- self._broadcast_fallback()
- else: # Otherwise => NCCL ops are safe to use.
- self._broadcast_nccl()
-
- # Apply updates separately on each device.
- for device_idx, device in enumerate(self._devices.values()):
- with tfutil.absolute_name_scope(self.scope + "/Apply%d" % device_idx), tf.device(device.name):
- # pylint: disable=cell-var-from-loop
-
- # Accumulate gradients over time.
- if self.minibatch_multiplier is None:
- acc_ok = tf.constant(True, name='acc_ok')
- device.grad_acc = OrderedDict(device.grad_clean)
- else:
- # Create variables.
- with tf.control_dependencies(None):
- for var in device.grad_clean.keys():
- device.grad_acc_vars[var] = tf.Variable(tf.zeros(var.shape), trainable=False, name="grad_acc_var")
- device.grad_acc_count = tf.Variable(tf.zeros([]), trainable=False, name="grad_acc_count")
-
- # Track counter.
- count_cur = device.grad_acc_count + 1.0
- count_inc_op = lambda: tf.assign(device.grad_acc_count, count_cur)
- count_reset_op = lambda: tf.assign(device.grad_acc_count, tf.zeros([]))
- acc_ok = (count_cur >= tf.cast(self.minibatch_multiplier, tf.float32))
- all_ops.append(tf.cond(acc_ok, count_reset_op, count_inc_op))
-
- # Track gradients.
- for var, grad in device.grad_clean.items():
- acc_var = device.grad_acc_vars[var]
- acc_cur = acc_var + grad
- device.grad_acc[var] = acc_cur
- with tf.control_dependencies([acc_cur]):
- acc_inc_op = lambda: tf.assign(acc_var, acc_cur)
- acc_reset_op = lambda: tf.assign(acc_var, tf.zeros(var.shape))
- all_ops.append(tf.cond(acc_ok, acc_reset_op, acc_inc_op))
-
- # No overflow => apply gradients.
- all_ok = tf.reduce_all(tf.stack([acc_ok] + [tf.reduce_all(tf.is_finite(g)) for g in device.grad_acc.values()]))
- apply_op = lambda: device.optimizer.apply_gradients([(tf.cast(grad, var.dtype), var) for var, grad in device.grad_acc.items()])
- all_ops.append(tf.cond(all_ok, apply_op, tf.no_op))
-
- # Adjust loss scaling.
- if self.use_loss_scaling:
- ls_inc_op = lambda: tf.assign_add(device.loss_scaling_var, self.loss_scaling_inc)
- ls_dec_op = lambda: tf.assign_sub(device.loss_scaling_var, self.loss_scaling_dec)
- ls_update_op = lambda: tf.group(tf.cond(all_ok, ls_inc_op, ls_dec_op))
- all_ops.append(tf.cond(acc_ok, ls_update_op, tf.no_op))
-
- # Last device => report statistics.
- if device_idx == len(self._devices) - 1:
- all_ops.append(autosummary.autosummary(self.id + "/learning_rate", tf.convert_to_tensor(self.learning_rate)))
- all_ops.append(autosummary.autosummary(self.id + "/overflow_frequency", tf.where(all_ok, 0, 1), condition=acc_ok))
- if self.use_loss_scaling:
- all_ops.append(autosummary.autosummary(self.id + "/loss_scaling_log2", device.loss_scaling_var))
-
- # Initialize variables.
- self.reset_optimizer_state()
- if self.use_loss_scaling:
- tfutil.init_uninitialized_vars([device.loss_scaling_var for device in self._devices.values()])
- if self.minibatch_multiplier is not None:
- tfutil.run([var.initializer for device in self._devices.values() for var in list(device.grad_acc_vars.values()) + [device.grad_acc_count]])
-
- # Group everything into a single op.
- with tfutil.absolute_name_scope(self.scope):
- return tf.group(*all_ops, name="TrainingOp")
-
- def reset_optimizer_state(self) -> None:
- """Reset internal state of the underlying optimizer."""
- tfutil.assert_tf_initialized()
- tfutil.run([var.initializer for device in self._devices.values() for var in device.optimizer.variables()])
-
- def get_loss_scaling_var(self, device: str) -> Union[tf.Variable, None]:
- """Get or create variable representing log2 of the current dynamic loss scaling factor."""
- return self._get_device(device).loss_scaling_var
-
- def apply_loss_scaling(self, value: TfExpression) -> TfExpression:
- """Apply dynamic loss scaling for the given expression."""
- assert tfutil.is_tf_expression(value)
- if not self.use_loss_scaling:
- return value
- return value * tfutil.exp2(self.get_loss_scaling_var(value.device))
-
- def undo_loss_scaling(self, value: TfExpression) -> TfExpression:
- """Undo the effect of dynamic loss scaling for the given expression."""
- assert tfutil.is_tf_expression(value)
- if not self.use_loss_scaling:
- return value
- return value * tfutil.exp2(-self.get_loss_scaling_var(value.device)) # pylint: disable=invalid-unary-operand-type
-
- def _broadcast_nccl(self):
- """Sum gradients across devices using NCCL ops (fast path)."""
- from tensorflow.python.ops import nccl_ops # pylint: disable=no-name-in-module
- for all_vars in zip(*[device.grad_clean.keys() for device in self._devices.values()]):
- if any(x.shape.num_elements() > 0 for x in all_vars):
- all_grads = [device.grad_clean[var] for device, var in zip(self._devices.values(), all_vars)]
- all_grads = nccl_ops.all_sum(all_grads)
- for device, var, grad in zip(self._devices.values(), all_vars, all_grads):
- device.grad_clean[var] = grad
-
- def _broadcast_fallback(self):
- """Sum gradients across devices using TensorFlow collective ops (slow fallback path)."""
- from tensorflow.python.ops import collective_ops # pylint: disable=no-name-in-module
- global _collective_ops_warning_printed, _collective_ops_group_key, _collective_ops_instance_key
- if all(x.shape.num_elements() == 0 for device in self._devices.values() for x in device.grad_clean.values()):
- return
- if not _collective_ops_warning_printed:
- print("------------------------------------------------------------------------")
- print("WARNING: Using slow fallback implementation for inter-GPU communication.")
- print("Please use TensorFlow 1.14 on Linux for optimal training performance.")
- print("------------------------------------------------------------------------")
- _collective_ops_warning_printed = True
- for device in self._devices.values():
- with tf.device(device.name):
- combo = [tf.reshape(x, [x.shape.num_elements()]) for x in device.grad_clean.values()]
- combo = tf.concat(combo, axis=0)
- combo = collective_ops.all_reduce(combo, merge_op='Add', final_op='Id',
- group_size=len(self._devices), group_key=_collective_ops_group_key,
- instance_key=_collective_ops_instance_key)
- cur_ofs = 0
- for var, grad_old in device.grad_clean.items():
- grad_new = tf.reshape(combo[cur_ofs : cur_ofs + grad_old.shape.num_elements()], grad_old.shape)
- cur_ofs += grad_old.shape.num_elements()
- device.grad_clean[var] = grad_new
- _collective_ops_instance_key += 1
-
-
-class SimpleAdam:
- """Simplified version of tf.train.AdamOptimizer that behaves identically when used with dnnlib.tflib.Optimizer."""
-
- def __init__(self, name="Adam", learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
- self.name = name
- self.learning_rate = learning_rate
- self.beta1 = beta1
- self.beta2 = beta2
- self.epsilon = epsilon
- self.all_state_vars = []
-
- def variables(self):
- return self.all_state_vars
-
- def compute_gradients(self, loss, var_list, gate_gradients=tf.train.Optimizer.GATE_NONE):
- assert gate_gradients == tf.train.Optimizer.GATE_NONE
- return list(zip(tf.gradients(loss, var_list), var_list))
-
- def apply_gradients(self, grads_and_vars):
- with tf.name_scope(self.name):
- state_vars = []
- update_ops = []
-
- # Adjust learning rate to deal with startup bias.
- with tf.control_dependencies(None):
- b1pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False)
- b2pow_var = tf.Variable(dtype=tf.float32, initial_value=1, trainable=False)
- state_vars += [b1pow_var, b2pow_var]
- b1pow_new = b1pow_var * self.beta1
- b2pow_new = b2pow_var * self.beta2
- update_ops += [tf.assign(b1pow_var, b1pow_new), tf.assign(b2pow_var, b2pow_new)]
- lr_new = self.learning_rate * tf.sqrt(1 - b2pow_new) / (1 - b1pow_new)
-
- # Construct ops to update each variable.
- for grad, var in grads_and_vars:
- with tf.control_dependencies(None):
- m_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
- v_var = tf.Variable(dtype=tf.float32, initial_value=tf.zeros_like(var), trainable=False)
- state_vars += [m_var, v_var]
- m_new = self.beta1 * m_var + (1 - self.beta1) * grad
- v_new = self.beta2 * v_var + (1 - self.beta2) * tf.square(grad)
- var_delta = lr_new * m_new / (tf.sqrt(v_new) + self.epsilon)
- update_ops += [tf.assign(m_var, m_new), tf.assign(v_var, v_new), tf.assign_sub(var, var_delta)]
-
- # Group everything together.
- self.all_state_vars += state_vars
- return tf.group(*update_ops)
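The wrapper's intended call pattern follows from its own docstrings (register_gradients once per GPU, then a single apply_updates). A rough per-GPU sketch assuming the module's own tensorflow import; num_gpus, build_loss_for_gpu and trainable_vars are placeholders, not names defined in this repository:

opt = Optimizer(name="TrainG", learning_rate=0.002, use_loss_scaling=True)   # FP16-friendly
for gpu in range(num_gpus):                        # placeholder GPU count
    with tf.device(f"/gpu:{gpu}"):
        loss = build_loss_for_gpu(gpu)             # placeholder graph-building helper
        opt.register_gradients(loss, trainable_vars)   # placeholder variable list, one call per GPU
train_op = opt.apply_updates()                     # one op that averages, scales and applies gradients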
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cpu/Dockerfile b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cpu/Dockerfile
deleted file mode 100644
index 127c61a719c5f43cf10561e1e64123799ce62402..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docker/diffusers-pytorch-cpu/Dockerfile
+++ /dev/null
@@ -1,45 +0,0 @@
-FROM ubuntu:20.04
-LABEL maintainer="Hugging Face"
-LABEL repository="diffusers"
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-RUN apt update && \
- apt install -y bash \
- build-essential \
- git \
- git-lfs \
- curl \
- ca-certificates \
- libsndfile1-dev \
- python3.8 \
- python3-pip \
- libgl1 \
- python3.8-venv && \
- rm -rf /var/lib/apt/lists
-
-# make sure to use venv
-RUN python3 -m venv /opt/venv
-ENV PATH="/opt/venv/bin:$PATH"
-
-# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py)
-RUN python3 -m pip install --no-cache-dir --upgrade pip && \
- python3 -m pip install --no-cache-dir \
- torch \
- torchvision \
- torchaudio \
- invisible_watermark \
- --extra-index-url https://download.pytorch.org/whl/cpu && \
- python3 -m pip install --no-cache-dir \
- accelerate \
- datasets \
- hf-doc-builder \
- huggingface-hub \
- Jinja2 \
- librosa \
- numpy \
- scipy \
- tensorboard \
- transformers
-
-CMD ["/bin/bash"]
diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
deleted file mode 100644
index 92d42bf0c75eb060548176f3bb8d003bc7bf7afd..0000000000000000000000000000000000000000
--- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/pipeline_unclip.py
+++ /dev/null
@@ -1,489 +0,0 @@
-# Copyright 2023 Kakao Brain and The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import inspect
-from typing import List, Optional, Tuple, Union
-
-import torch
-from torch.nn import functional as F
-from transformers import CLIPTextModelWithProjection, CLIPTokenizer
-from transformers.models.clip.modeling_clip import CLIPTextModelOutput
-
-from ...models import PriorTransformer, UNet2DConditionModel, UNet2DModel
-from ...schedulers import UnCLIPScheduler
-from ...utils import logging, randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-from .text_proj import UnCLIPTextProjModel
-
-
-logger = logging.get_logger(__name__) # pylint: disable=invalid-name
-
-
-class UnCLIPPipeline(DiffusionPipeline):
- """
- Pipeline for text-to-image generation using unCLIP.
-
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
-
- Args:
- text_encoder ([`~transformers.CLIPTextModelWithProjection`]):
- Frozen text-encoder.
- tokenizer ([`~transformers.CLIPTokenizer`]):
- A `CLIPTokenizer` to tokenize text.
- prior ([`PriorTransformer`]):
- The canonical unCLIP prior to approximate the image embedding from the text embedding.
- text_proj ([`UnCLIPTextProjModel`]):
- Utility class to prepare and combine the embeddings before they are passed to the decoder.
- decoder ([`UNet2DConditionModel`]):
- The decoder to invert the image embedding into an image.
- super_res_first ([`UNet2DModel`]):
- Super resolution UNet. Used in all but the last step of the super resolution diffusion process.
- super_res_last ([`UNet2DModel`]):
- Super resolution UNet. Used in the last step of the super resolution diffusion process.
- prior_scheduler ([`UnCLIPScheduler`]):
- Scheduler used in the prior denoising process (a modified [`DDPMScheduler`]).
- decoder_scheduler ([`UnCLIPScheduler`]):
- Scheduler used in the decoder denoising process (a modified [`DDPMScheduler`]).
- super_res_scheduler ([`UnCLIPScheduler`]):
- Scheduler used in the super resolution denoising process (a modified [`DDPMScheduler`]).
-
- """
-
- _exclude_from_cpu_offload = ["prior"]
-
- prior: PriorTransformer
- decoder: UNet2DConditionModel
- text_proj: UnCLIPTextProjModel
- text_encoder: CLIPTextModelWithProjection
- tokenizer: CLIPTokenizer
- super_res_first: UNet2DModel
- super_res_last: UNet2DModel
-
- prior_scheduler: UnCLIPScheduler
- decoder_scheduler: UnCLIPScheduler
- super_res_scheduler: UnCLIPScheduler
-
- def __init__(
- self,
- prior: PriorTransformer,
- decoder: UNet2DConditionModel,
- text_encoder: CLIPTextModelWithProjection,
- tokenizer: CLIPTokenizer,
- text_proj: UnCLIPTextProjModel,
- super_res_first: UNet2DModel,
- super_res_last: UNet2DModel,
- prior_scheduler: UnCLIPScheduler,
- decoder_scheduler: UnCLIPScheduler,
- super_res_scheduler: UnCLIPScheduler,
- ):
- super().__init__()
-
- self.register_modules(
- prior=prior,
- decoder=decoder,
- text_encoder=text_encoder,
- tokenizer=tokenizer,
- text_proj=text_proj,
- super_res_first=super_res_first,
- super_res_last=super_res_last,
- prior_scheduler=prior_scheduler,
- decoder_scheduler=decoder_scheduler,
- super_res_scheduler=super_res_scheduler,
- )
-
- def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
- if latents is None:
- latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
- else:
- if latents.shape != shape:
- raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
- latents = latents.to(device)
-
- latents = latents * scheduler.init_noise_sigma
- return latents
-
- def _encode_prompt(
- self,
- prompt,
- device,
- num_images_per_prompt,
- do_classifier_free_guidance,
- text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
- text_attention_mask: Optional[torch.Tensor] = None,
- ):
- if text_model_output is None:
- batch_size = len(prompt) if isinstance(prompt, list) else 1
- # get prompt text embeddings
- text_inputs = self.tokenizer(
- prompt,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- text_input_ids = text_inputs.input_ids
- text_mask = text_inputs.attention_mask.bool().to(device)
-
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
- text_input_ids, untruncated_ids
- ):
- removed_text = self.tokenizer.batch_decode(
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
- )
- logger.warning(
- "The following part of your input was truncated because CLIP can only handle sequences up to"
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
- )
- text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
-
- text_encoder_output = self.text_encoder(text_input_ids.to(device))
-
- prompt_embeds = text_encoder_output.text_embeds
- text_encoder_hidden_states = text_encoder_output.last_hidden_state
-
- else:
- batch_size = text_model_output[0].shape[0]
- prompt_embeds, text_encoder_hidden_states = text_model_output[0], text_model_output[1]
- text_mask = text_attention_mask
-
- prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
- text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
- text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
- if do_classifier_free_guidance:
- uncond_tokens = [""] * batch_size
-
- uncond_input = self.tokenizer(
- uncond_tokens,
- padding="max_length",
- max_length=self.tokenizer.model_max_length,
- truncation=True,
- return_tensors="pt",
- )
- uncond_text_mask = uncond_input.attention_mask.bool().to(device)
- negative_prompt_embeds_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device))
-
- negative_prompt_embeds = negative_prompt_embeds_text_encoder_output.text_embeds
- uncond_text_encoder_hidden_states = negative_prompt_embeds_text_encoder_output.last_hidden_state
-
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-
- seq_len = negative_prompt_embeds.shape[1]
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
-
- seq_len = uncond_text_encoder_hidden_states.shape[1]
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
- uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
- batch_size * num_images_per_prompt, seq_len, -1
- )
- uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
-
- # done duplicates
-
- # For classifier free guidance, we need to do two forward passes.
- # Here we concatenate the unconditional and text embeddings into a single batch
- # to avoid doing two forward passes
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
- text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
-
- text_mask = torch.cat([uncond_text_mask, text_mask])
-
- return prompt_embeds, text_encoder_hidden_states, text_mask
-
- @torch.no_grad()
- def __call__(
- self,
- prompt: Optional[Union[str, List[str]]] = None,
- num_images_per_prompt: int = 1,
- prior_num_inference_steps: int = 25,
- decoder_num_inference_steps: int = 25,
- super_res_num_inference_steps: int = 7,
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
- prior_latents: Optional[torch.FloatTensor] = None,
- decoder_latents: Optional[torch.FloatTensor] = None,
- super_res_latents: Optional[torch.FloatTensor] = None,
- text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None,
- text_attention_mask: Optional[torch.Tensor] = None,
- prior_guidance_scale: float = 4.0,
- decoder_guidance_scale: float = 8.0,
- output_type: Optional[str] = "pil",
- return_dict: bool = True,
- ):
- """
- The call function to the pipeline for generation.
-
- Args:
- prompt (`str` or `List[str]`):
- The prompt or prompts to guide image generation. This can only be left undefined if `text_model_output`
- and `text_attention_mask` are passed.
- num_images_per_prompt (`int`, *optional*, defaults to 1):
- The number of images to generate per prompt.
- prior_num_inference_steps (`int`, *optional*, defaults to 25):
- The number of denoising steps for the prior. More denoising steps usually lead to a higher quality
- image at the expense of slower inference.
- decoder_num_inference_steps (`int`, *optional*, defaults to 25):
- The number of denoising steps for the decoder. More denoising steps usually lead to a higher quality
- image at the expense of slower inference.
- super_res_num_inference_steps (`int`, *optional*, defaults to 7):
- The number of denoising steps for super resolution. More denoising steps usually lead to a higher
- quality image at the expense of slower inference.
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
- generation deterministic.
- prior_latents (`torch.FloatTensor` of shape (batch size, embeddings dimension), *optional*):
- Pre-generated noisy latents to be used as inputs for the prior.
- decoder_latents (`torch.FloatTensor` of shape (batch size, channels, height, width), *optional*):
- Pre-generated noisy latents to be used as inputs for the decoder.
- super_res_latents (`torch.FloatTensor` of shape (batch size, channels, super res height, super res width), *optional*):
- Pre-generated noisy latents to be used as inputs for the super resolution UNet.
- prior_guidance_scale (`float`, *optional*, defaults to 4.0):
- A higher guidance scale value encourages the model to generate images closely linked to the text
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- decoder_guidance_scale (`float`, *optional*, defaults to 8.0):
- A higher guidance scale value encourages the model to generate images closely linked to the text
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
- text_model_output (`CLIPTextModelOutput`, *optional*):
- Pre-defined [`CLIPTextModel`] outputs that can be derived from the text encoder. Pre-defined text
- outputs can be passed for tasks like text embedding interpolations. Make sure to also pass
- `text_attention_mask` in this case. `prompt` can then be left `None`.
- text_attention_mask (`torch.Tensor`, *optional*):
- Pre-defined CLIP text attention mask that can be derived from the tokenizer. Pre-defined text attention
- masks are necessary when passing `text_model_output`.
- output_type (`str`, *optional*, defaults to `"pil"`):
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
- return_dict (`bool`, *optional*, defaults to `True`):
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
-
- Returns:
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
- returned where the first element is a list with the generated images.
- """
- if prompt is not None:
- if isinstance(prompt, str):
- batch_size = 1
- elif isinstance(prompt, list):
- batch_size = len(prompt)
- else:
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
- else:
- batch_size = text_model_output[0].shape[0]
-
- device = self._execution_device
-
- batch_size = batch_size * num_images_per_prompt
-
- do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0
-
- prompt_embeds, text_encoder_hidden_states, text_mask = self._encode_prompt(
- prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask
- )
-
- # prior
-
- self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device)
- prior_timesteps_tensor = self.prior_scheduler.timesteps
-
- embedding_dim = self.prior.config.embedding_dim
-
- prior_latents = self.prepare_latents(
- (batch_size, embedding_dim),
- prompt_embeds.dtype,
- device,
- generator,
- prior_latents,
- self.prior_scheduler,
- )
-
- for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents
-
- predicted_image_embedding = self.prior(
- latent_model_input,
- timestep=t,
- proj_embedding=prompt_embeds,
- encoder_hidden_states=text_encoder_hidden_states,
- attention_mask=text_mask,
- ).predicted_image_embedding
-
- if do_classifier_free_guidance:
- predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2)
- predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * (
- predicted_image_embedding_text - predicted_image_embedding_uncond
- )
-
- if i + 1 == prior_timesteps_tensor.shape[0]:
- prev_timestep = None
- else:
- prev_timestep = prior_timesteps_tensor[i + 1]
-
- prior_latents = self.prior_scheduler.step(
- predicted_image_embedding,
- timestep=t,
- sample=prior_latents,
- generator=generator,
- prev_timestep=prev_timestep,
- ).prev_sample
-
- prior_latents = self.prior.post_process_latents(prior_latents)
-
- image_embeddings = prior_latents
-
- # done prior
-
- # decoder
-
- text_encoder_hidden_states, additive_clip_time_embeddings = self.text_proj(
- image_embeddings=image_embeddings,
- prompt_embeds=prompt_embeds,
- text_encoder_hidden_states=text_encoder_hidden_states,
- do_classifier_free_guidance=do_classifier_free_guidance,
- )
-
- if device.type == "mps":
- # HACK: MPS: There is a panic when padding bool tensors,
- # so cast to int tensor for the pad and back to bool afterwards
- text_mask = text_mask.type(torch.int)
- decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=1)
- decoder_text_mask = decoder_text_mask.type(torch.bool)
- else:
- decoder_text_mask = F.pad(text_mask, (self.text_proj.clip_extra_context_tokens, 0), value=True)
-
- self.decoder_scheduler.set_timesteps(decoder_num_inference_steps, device=device)
- decoder_timesteps_tensor = self.decoder_scheduler.timesteps
-
- num_channels_latents = self.decoder.config.in_channels
- height = self.decoder.config.sample_size
- width = self.decoder.config.sample_size
-
- decoder_latents = self.prepare_latents(
- (batch_size, num_channels_latents, height, width),
- text_encoder_hidden_states.dtype,
- device,
- generator,
- decoder_latents,
- self.decoder_scheduler,
- )
-
- for i, t in enumerate(self.progress_bar(decoder_timesteps_tensor)):
- # expand the latents if we are doing classifier free guidance
- latent_model_input = torch.cat([decoder_latents] * 2) if do_classifier_free_guidance else decoder_latents
-
- noise_pred = self.decoder(
- sample=latent_model_input,
- timestep=t,
- encoder_hidden_states=text_encoder_hidden_states,
- class_labels=additive_clip_time_embeddings,
- attention_mask=decoder_text_mask,
- ).sample
-
- if do_classifier_free_guidance:
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
- noise_pred_uncond, _ = noise_pred_uncond.split(latent_model_input.shape[1], dim=1)
- noise_pred_text, predicted_variance = noise_pred_text.split(latent_model_input.shape[1], dim=1)
- noise_pred = noise_pred_uncond + decoder_guidance_scale * (noise_pred_text - noise_pred_uncond)
- noise_pred = torch.cat([noise_pred, predicted_variance], dim=1)
-
- if i + 1 == decoder_timesteps_tensor.shape[0]:
- prev_timestep = None
- else:
- prev_timestep = decoder_timesteps_tensor[i + 1]
-
- # compute the previous noisy sample x_t -> x_t-1
- decoder_latents = self.decoder_scheduler.step(
- noise_pred, t, decoder_latents, prev_timestep=prev_timestep, generator=generator
- ).prev_sample
-
- decoder_latents = decoder_latents.clamp(-1, 1)
-
- image_small = decoder_latents
-
- # done decoder
-
- # super res
-
- self.super_res_scheduler.set_timesteps(super_res_num_inference_steps, device=device)
- super_res_timesteps_tensor = self.super_res_scheduler.timesteps
-
- channels = self.super_res_first.config.in_channels // 2
- height = self.super_res_first.config.sample_size
- width = self.super_res_first.config.sample_size
-
- super_res_latents = self.prepare_latents(
- (batch_size, channels, height, width),
- image_small.dtype,
- device,
- generator,
- super_res_latents,
- self.super_res_scheduler,
- )
-
- if device.type == "mps":
- # MPS does not support many interpolations
- image_upscaled = F.interpolate(image_small, size=[height, width])
- else:
- interpolate_antialias = {}
- if "antialias" in inspect.signature(F.interpolate).parameters:
- interpolate_antialias["antialias"] = True
-
- image_upscaled = F.interpolate(
- image_small, size=[height, width], mode="bicubic", align_corners=False, **interpolate_antialias
- )
-
- for i, t in enumerate(self.progress_bar(super_res_timesteps_tensor)):
- # no classifier free guidance
-
- if i == super_res_timesteps_tensor.shape[0] - 1:
- unet = self.super_res_last
- else:
- unet = self.super_res_first
-
- latent_model_input = torch.cat([super_res_latents, image_upscaled], dim=1)
-
- noise_pred = unet(
- sample=latent_model_input,
- timestep=t,
- ).sample
-
- if i + 1 == super_res_timesteps_tensor.shape[0]:
- prev_timestep = None
- else:
- prev_timestep = super_res_timesteps_tensor[i + 1]
-
- # compute the previous noisy sample x_t -> x_t-1
- super_res_latents = self.super_res_scheduler.step(
- noise_pred, t, super_res_latents, prev_timestep=prev_timestep, generator=generator
- ).prev_sample
-
- image = super_res_latents
- # done super res
-
- # post processing
-
- image = image * 0.5 + 0.5
- image = image.clamp(0, 1)
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-
- if output_type == "pil":
- image = self.numpy_to_pil(image)
-
- if not return_dict:
- return (image,)
-
- return ImagePipelineOutput(images=image)
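As context for the pipeline assembled above, diffusers exposes it through the usual from_pretrained/call pattern; a minimal sketch, where the checkpoint name is the commonly published Karlo unCLIP release (verify availability) and the prompt is arbitrary:

import torch
from diffusers import UnCLIPPipeline

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
# prior -> decoder -> two-stage super resolution, the same flow as __call__ above
image = pipe("a high-quality photo of a corgi wearing sunglasses").images[0]
image.save("corgi.png")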
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py
deleted file mode 100644
index d2edab113649c38cac3c7dc3ff425462f7c40ffd..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py
+++ /dev/null
@@ -1,2 +0,0 @@
-_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
-model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
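Because this config only overrides the backbone of its _base_, the effective settings come from merging the two files; a quick way to inspect the merged result (a sketch assuming the mmcv 1.x / mmdet 2.x versions these configs target, with the path relative to the mmdetection config root):

from mmcv import Config

cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py')
# _base_ is resolved first, then the overrides above are merged on top
print(cfg.model.backbone.depth)        # expected: 101
print(cfg.model.pretrained)            # expected: 'torchvision://resnet101'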
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
deleted file mode 100644
index 93b7d51912abaaab55ceac5263737d02cd4e99fa..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
+++ /dev/null
@@ -1,61 +0,0 @@
-_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
-model = dict(
- pretrained='open-mmlab://detectron2/resnext101_32x8d',
- backbone=dict(
- type='ResNeXt',
- depth=101,
- groups=32,
- base_width=8,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- frozen_stages=1,
- norm_cfg=dict(type='BN', requires_grad=False),
- style='pytorch'))
-
-dataset_type = 'CocoDataset'
-data_root = 'data/coco/'
-img_norm_cfg = dict(
- mean=[103.530, 116.280, 123.675],
- std=[57.375, 57.120, 58.395],
- to_rgb=False)
-train_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='LoadAnnotations',
- with_bbox=True,
- with_mask=True,
- poly2mask=False),
- dict(
- type='Resize',
- img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
- (1333, 768), (1333, 800)],
- multiscale_mode='value',
- keep_ratio=True),
- dict(type='RandomFlip', flip_ratio=0.5),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='DefaultFormatBundle'),
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
-]
-test_pipeline = [
- dict(type='LoadImageFromFile'),
- dict(
- type='MultiScaleFlipAug',
- img_scale=(1333, 800),
- flip=False,
- transforms=[
- dict(type='Resize', keep_ratio=True),
- dict(type='RandomFlip'),
- dict(type='Normalize', **img_norm_cfg),
- dict(type='Pad', size_divisor=32),
- dict(type='ImageToTensor', keys=['img']),
- dict(type='Collect', keys=['img']),
- ])
-]
-data = dict(
- train=dict(pipeline=train_pipeline),
- val=dict(pipeline=test_pipeline),
- test=dict(pipeline=test_pipeline))
-
-lr_config = dict(step=[28, 34])
-runner = dict(type='EpochBasedRunner', max_epochs=36)
diff --git a/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_8x8_coco.py b/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_8x8_coco.py
deleted file mode 100644
index b3adcb74a6155a0ab7303ab9ae90ee120f3eb4ad..0000000000000000000000000000000000000000
--- a/spaces/Andy1621/uniformer_image_detection/configs/yolact/yolact_r50_8x8_coco.py
+++ /dev/null
@@ -1,11 +0,0 @@
-_base_ = 'yolact_r50_1x8_coco.py'
-
-optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4)
-optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
-# learning policy
-lr_config = dict(
- policy='step',
- warmup='linear',
- warmup_iters=1000,
- warmup_ratio=0.1,
- step=[20, 42, 49, 52])
diff --git a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/ext_loader.py b/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/ext_loader.py
deleted file mode 100644
index 08132d2c1b9a1c28880e4bab4d4fa1ba39d9d083..0000000000000000000000000000000000000000
--- a/spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/ext_loader.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import importlib
-import os
-import pkgutil
-import warnings
-from collections import namedtuple
-
-import torch
-
-if torch.__version__ != 'parrots':
-
- def load_ext(name, funcs):
- ext = importlib.import_module('mmcv.' + name)
- for fun in funcs:
-        assert hasattr(ext, fun), f'{fun} is missing in module {name}'
- return ext
-else:
- from parrots import extension
- from parrots.base import ParrotsException
-
- has_return_value_ops = [
- 'nms',
- 'softnms',
- 'nms_match',
- 'nms_rotated',
- 'top_pool_forward',
- 'top_pool_backward',
- 'bottom_pool_forward',
- 'bottom_pool_backward',
- 'left_pool_forward',
- 'left_pool_backward',
- 'right_pool_forward',
- 'right_pool_backward',
- 'fused_bias_leakyrelu',
- 'upfirdn2d',
- 'ms_deform_attn_forward',
- 'pixel_group',
- 'contour_expand',
- ]
-
- def get_fake_func(name, e):
-
- def fake_func(*args, **kwargs):
-            warnings.warn(f'{name} is not yet supported in parrots')
- raise e
-
- return fake_func
-
- def load_ext(name, funcs):
- ExtModule = namedtuple('ExtModule', funcs)
- ext_list = []
- lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
- for fun in funcs:
- try:
- ext_fun = extension.load(fun, name, lib_dir=lib_root)
- except ParrotsException as e:
- if 'No element registered' not in e.message:
- warnings.warn(e.message)
- ext_fun = get_fake_func(fun, e)
- ext_list.append(ext_fun)
- else:
- if fun in has_return_value_ops:
- ext_list.append(ext_fun.op)
- else:
- ext_list.append(ext_fun.op_)
- return ExtModule(*ext_list)
-
-
-def check_ops_exist():
- ext_loader = pkgutil.find_loader('mmcv._ext')
- return ext_loader is not None
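For context, mmcv's op wrappers consume this loader roughly as follows (a sketch, not code from this repo; the op names are illustrative and must correspond to symbols actually compiled into mmcv._ext, and the import path assumes the vendored location above):

    from annotator.uniformer.mmcv.utils import ext_loader

    if ext_loader.check_ops_exist():
        # load_ext imports mmcv._ext and returns it (or, under parrots, a
        # namedtuple whose fields wrap the individually loaded extension ops).
        ext_module = ext_loader.load_ext('_ext', ['nms', 'softnms'])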
diff --git a/spaces/Artrajz/vits-simple-api/utils/data_utils.py b/spaces/Artrajz/vits-simple-api/utils/data_utils.py
deleted file mode 100644
index f1abf47484f4705d00f7e9a719e0f2577cffda22..0000000000000000000000000000000000000000
--- a/spaces/Artrajz/vits-simple-api/utils/data_utils.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import os
-import logging
-
-
-class HParams():
- def __init__(self, **kwargs):
- for k, v in kwargs.items():
- if type(v) == dict:
- v = HParams(**v)
- self[k] = v
-
- def keys(self):
- return self.__dict__.keys()
-
- def items(self):
- return self.__dict__.items()
-
- def values(self):
- return self.__dict__.values()
-
- def __len__(self):
- return len(self.__dict__)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def __setitem__(self, key, value):
- return setattr(self, key, value)
-
- def __contains__(self, key):
- return key in self.__dict__
-
- def __repr__(self):
- return self.__dict__.__repr__()
-
-
-def load_checkpoint(checkpoint_path, model):
- from torch import load
- checkpoint_dict = load(checkpoint_path, map_location='cpu')
- iteration = checkpoint_dict.get('iteration', None)
- saved_state_dict = checkpoint_dict['model']
- if hasattr(model, 'module'):
- state_dict = model.module.state_dict()
- else:
- state_dict = model.state_dict()
- new_state_dict = {}
- for k, v in state_dict.items():
- try:
- new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
- logging.info(f"{k} is not in the checkpoint")
- new_state_dict[k] = v
- if hasattr(model, 'module'):
- model.module.load_state_dict(new_state_dict)
- else:
- model.load_state_dict(new_state_dict)
- if iteration:
- logging.info(f"Loaded checkpoint '{checkpoint_path}' (iteration {iteration})")
- else:
- logging.info(f"Loaded checkpoint '{checkpoint_path}'")
- return
-
-
-def get_hparams_from_file(config_path):
- from json import loads
- with open(config_path, 'r', encoding='utf-8') as f:
- data = f.read()
- config = loads(data)
-
- hparams = HParams(**config)
- return hparams
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
- import librosa
- from torch import FloatTensor
- from numpy import float32
- audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
- return FloatTensor(audio.astype(float32))
-
-
-def clean_folder(folder_path):
- for filename in os.listdir(folder_path):
- file_path = os.path.join(folder_path, filename)
-        # If the entry is a file, delete it; if it is a directory, skip it.
- if os.path.isfile(file_path):
- os.remove(file_path)
-
-
-def check_is_none(item) -> bool:
-    # None, an empty string, or a whitespace-only string -> True; anything else -> False
-    return item is None or (isinstance(item, str) and item.isspace()) or str(item) == ""
-
-
-def save_audio(audio, path):
- with open(path, "wb") as f:
- f.write(audio)
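For orientation, the deleted helpers above are typically driven like this (a sketch; the config path and key names are hypothetical, not taken from this repo):

    from utils.data_utils import HParams, get_hparams_from_file, check_is_none

    hps = get_hparams_from_file("config.json")   # hypothetical JSON config
    assert isinstance(hps, HParams)
    # Nested dicts become nested HParams, so both access styles work:
    # hps.train.batch_size  and  hps["train"]["batch_size"]  (hypothetical keys)
    print(check_is_none(""))   # True: empty strings count as "none" here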
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/align.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/align.py
deleted file mode 100644
index c310b66e783820e5596bee9e4d92e531d59d6dc9..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/align.py
+++ /dev/null
@@ -1,311 +0,0 @@
-import sys
-from itertools import chain
-from typing import TYPE_CHECKING, Iterable, Optional
-
-if sys.version_info >= (3, 8):
- from typing import Literal
-else:
- from pip._vendor.typing_extensions import Literal # pragma: no cover
-
-from .constrain import Constrain
-from .jupyter import JupyterMixin
-from .measure import Measurement
-from .segment import Segment
-from .style import StyleType
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderableType, RenderResult
-
-AlignMethod = Literal["left", "center", "right"]
-VerticalAlignMethod = Literal["top", "middle", "bottom"]
-
-
-class Align(JupyterMixin):
- """Align a renderable by adding spaces if necessary.
-
- Args:
- renderable (RenderableType): A console renderable.
- align (AlignMethod): One of "left", "center", or "right""
- style (StyleType, optional): An optional style to apply to the background.
-        vertical (Optional[VerticalAlignMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None.
- pad (bool, optional): Pad the right with spaces. Defaults to True.
- width (int, optional): Restrict contents to given width, or None to use default width. Defaults to None.
- height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None.
-
- Raises:
- ValueError: if ``align`` is not one of the expected values.
- """
-
- def __init__(
- self,
- renderable: "RenderableType",
- align: AlignMethod = "left",
- style: Optional[StyleType] = None,
- *,
- vertical: Optional[VerticalAlignMethod] = None,
- pad: bool = True,
- width: Optional[int] = None,
- height: Optional[int] = None,
- ) -> None:
- if align not in ("left", "center", "right"):
- raise ValueError(
- f'invalid value for align, expected "left", "center", or "right" (not {align!r})'
- )
- if vertical is not None and vertical not in ("top", "middle", "bottom"):
- raise ValueError(
- f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})'
- )
- self.renderable = renderable
- self.align = align
- self.style = style
- self.vertical = vertical
- self.pad = pad
- self.width = width
- self.height = height
-
- def __repr__(self) -> str:
- return f"Align({self.renderable!r}, {self.align!r})"
-
- @classmethod
- def left(
- cls,
- renderable: "RenderableType",
- style: Optional[StyleType] = None,
- *,
- vertical: Optional[VerticalAlignMethod] = None,
- pad: bool = True,
- width: Optional[int] = None,
- height: Optional[int] = None,
- ) -> "Align":
- """Align a renderable to the left."""
- return cls(
- renderable,
- "left",
- style=style,
- vertical=vertical,
- pad=pad,
- width=width,
- height=height,
- )
-
- @classmethod
- def center(
- cls,
- renderable: "RenderableType",
- style: Optional[StyleType] = None,
- *,
- vertical: Optional[VerticalAlignMethod] = None,
- pad: bool = True,
- width: Optional[int] = None,
- height: Optional[int] = None,
- ) -> "Align":
- """Align a renderable to the center."""
- return cls(
- renderable,
- "center",
- style=style,
- vertical=vertical,
- pad=pad,
- width=width,
- height=height,
- )
-
- @classmethod
- def right(
- cls,
- renderable: "RenderableType",
- style: Optional[StyleType] = None,
- *,
- vertical: Optional[VerticalAlignMethod] = None,
- pad: bool = True,
- width: Optional[int] = None,
- height: Optional[int] = None,
- ) -> "Align":
- """Align a renderable to the right."""
- return cls(
- renderable,
- "right",
- style=style,
- vertical=vertical,
- pad=pad,
- width=width,
- height=height,
- )
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- align = self.align
- width = console.measure(self.renderable, options=options).maximum
- rendered = console.render(
- Constrain(
- self.renderable, width if self.width is None else min(width, self.width)
- ),
- options.update(height=None),
- )
- lines = list(Segment.split_lines(rendered))
- width, height = Segment.get_shape(lines)
- lines = Segment.set_shape(lines, width, height)
- new_line = Segment.line()
- excess_space = options.max_width - width
- style = console.get_style(self.style) if self.style is not None else None
-
- def generate_segments() -> Iterable[Segment]:
- if excess_space <= 0:
- # Exact fit
- for line in lines:
- yield from line
- yield new_line
-
- elif align == "left":
- # Pad on the right
- pad = Segment(" " * excess_space, style) if self.pad else None
- for line in lines:
- yield from line
- if pad:
- yield pad
- yield new_line
-
- elif align == "center":
- # Pad left and right
- left = excess_space // 2
- pad = Segment(" " * left, style)
- pad_right = (
- Segment(" " * (excess_space - left), style) if self.pad else None
- )
- for line in lines:
- if left:
- yield pad
- yield from line
- if pad_right:
- yield pad_right
- yield new_line
-
- elif align == "right":
- # Padding on left
- pad = Segment(" " * excess_space, style)
- for line in lines:
- yield pad
- yield from line
- yield new_line
-
- blank_line = (
- Segment(f"{' ' * (self.width or options.max_width)}\n", style)
- if self.pad
- else Segment("\n")
- )
-
- def blank_lines(count: int) -> Iterable[Segment]:
- if count > 0:
- for _ in range(count):
- yield blank_line
-
- vertical_height = self.height or options.height
- iter_segments: Iterable[Segment]
- if self.vertical and vertical_height is not None:
- if self.vertical == "top":
- bottom_space = vertical_height - height
- iter_segments = chain(generate_segments(), blank_lines(bottom_space))
- elif self.vertical == "middle":
- top_space = (vertical_height - height) // 2
- bottom_space = vertical_height - top_space - height
- iter_segments = chain(
- blank_lines(top_space),
- generate_segments(),
- blank_lines(bottom_space),
- )
- else: # self.vertical == "bottom":
- top_space = vertical_height - height
- iter_segments = chain(blank_lines(top_space), generate_segments())
- else:
- iter_segments = generate_segments()
- if self.style:
- style = console.get_style(self.style)
- iter_segments = Segment.apply_style(iter_segments, style)
- yield from iter_segments
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> Measurement:
- measurement = Measurement.get(console, options, self.renderable)
- return measurement
-
-
-class VerticalCenter(JupyterMixin):
- """Vertically aligns a renderable.
-
- Warn:
- This class is deprecated and may be removed in a future version. Use Align class with
- `vertical="middle"`.
-
- Args:
- renderable (RenderableType): A renderable object.
- """
-
- def __init__(
- self,
- renderable: "RenderableType",
- style: Optional[StyleType] = None,
- ) -> None:
- self.renderable = renderable
- self.style = style
-
- def __repr__(self) -> str:
- return f"VerticalCenter({self.renderable!r})"
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- style = console.get_style(self.style) if self.style is not None else None
- lines = console.render_lines(
- self.renderable, options.update(height=None), pad=False
- )
- width, _height = Segment.get_shape(lines)
- new_line = Segment.line()
- height = options.height or options.size.height
- top_space = (height - len(lines)) // 2
- bottom_space = height - top_space - len(lines)
- blank_line = Segment(f"{' ' * width}", style)
-
- def blank_lines(count: int) -> Iterable[Segment]:
- for _ in range(count):
- yield blank_line
- yield new_line
-
- if top_space > 0:
- yield from blank_lines(top_space)
- for line in lines:
- yield from line
- yield new_line
- if bottom_space > 0:
- yield from blank_lines(bottom_space)
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> Measurement:
- measurement = Measurement.get(console, options, self.renderable)
- return measurement
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich.console import Console, Group
- from pip._vendor.rich.highlighter import ReprHighlighter
- from pip._vendor.rich.panel import Panel
-
- highlighter = ReprHighlighter()
- console = Console()
-
- panel = Panel(
- Group(
- Align.left(highlighter("align='left'")),
- Align.center(highlighter("align='center'")),
- Align.right(highlighter("align='right'")),
- ),
- width=60,
- style="on dark_blue",
- title="Align",
- )
-
- console.print(
- Align.center(panel, vertical="middle", style="on red", height=console.height)
- )
diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/styled.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/styled.py
deleted file mode 100644
index 91cd0db31c14e30d4c1e2e9f36382b7a5e022870..0000000000000000000000000000000000000000
--- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/styled.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from typing import TYPE_CHECKING
-
-from .measure import Measurement
-from .segment import Segment
-from .style import StyleType
-
-if TYPE_CHECKING:
- from .console import Console, ConsoleOptions, RenderResult, RenderableType
-
-
-class Styled:
- """Apply a style to a renderable.
-
- Args:
- renderable (RenderableType): Any renderable.
- style (StyleType): A style to apply across the entire renderable.
- """
-
- def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
- self.renderable = renderable
- self.style = style
-
- def __rich_console__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> "RenderResult":
- style = console.get_style(self.style)
- rendered_segments = console.render(self.renderable, options)
- segments = Segment.apply_style(rendered_segments, style)
- return segments
-
- def __rich_measure__(
- self, console: "Console", options: "ConsoleOptions"
- ) -> Measurement:
- return Measurement.get(console, options, self.renderable)
-
-
-if __name__ == "__main__": # pragma: no cover
- from pip._vendor.rich import print
- from pip._vendor.rich.panel import Panel
-
- panel = Styled(Panel("hello"), "on blue")
- print(panel)
diff --git a/spaces/Autopixel/blurry-faces/app.py b/spaces/Autopixel/blurry-faces/app.py
deleted file mode 100644
index aec60251cf39fc9bbad7ddb0dd3d711ae5e5495a..0000000000000000000000000000000000000000
--- a/spaces/Autopixel/blurry-faces/app.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import cv2
-import gradio as gr
-from typing import Union, Tuple
-from PIL import Image, ImageOps
-import numpy as np
-import torch
-
-model = torch.jit.load('./model/model.pt').eval()
-
-def resize_with_padding(img: Image.Image, expected_size: Tuple[int, int]) -> Tuple[Image.Image, Tuple[int, int, int, int]]:
- img.thumbnail((expected_size[0], expected_size[1]))
- delta_width = expected_size[0] - img.size[0]
- delta_height = expected_size[1] - img.size[1]
- pad_width = delta_width // 2
- pad_height = delta_height // 2
- padding = (pad_width, pad_height, delta_width - pad_width, delta_height - pad_height)
- return ImageOps.expand(img, padding), padding
-
-def preprocess_image(img: Image.Image, size: int = 512) -> Tuple[Image.Image, torch.Tensor, Tuple[int, int, int, int]]:
- pil_img, padding = resize_with_padding(img, (size, size))
-
- img = (np.array(pil_img).astype(np.float32) / 255) - np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape(1, 1, 3)
- img = img / np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape(1, 1, 3)
- img = np.transpose(img, (2, 0, 1))
-
- return pil_img, torch.tensor(img[None]), padding
-
-def soft_blur_with_mask(image: Image.Image, mask: np.ndarray, padding: Tuple[int, int, int, int]) -> Image.Image:
- image = np.array(image)
- # Create a blurred copy of the original image.
- blurred_image = cv2.GaussianBlur(image, (221, 221), sigmaX=20, sigmaY=20)
- image_height, image_width = image.shape[:2]
- mask = cv2.resize(mask.astype(np.uint8), (image_width, image_height), interpolation=cv2.INTER_NEAREST)
- # Blurring the mask itself to get a softer mask with no firm edges
- mask = cv2.GaussianBlur(mask.astype(np.float32), (11, 11), 10, 10)[:, :, None]
-
-    # Take the blurred image where the mask is positive, and the original image elsewhere
- image = (mask * blurred_image + (1.0 - mask) * image)
- pad_w, pad_h, _, _ = padding
-    img_h, img_w, _ = image.shape  # image.shape is (height, width, channels)
- image = image[(pad_h):(img_h-pad_h), (pad_w):(img_w-pad_w), :]
- return Image.fromarray(image.astype(np.uint8))
-
-def run(image, size):
- pil_image, torch_image, padding = preprocess_image(image, size=size)
-
- with torch.inference_mode():
- mask = model(torch_image)
- mask = mask.argmax(dim=1).numpy().squeeze()
-
- return soft_blur_with_mask(pil_image, mask, padding)
-
-content_image_input = gr.inputs.Image(label="Entrada", type="pil")
-model_image_size = gr.inputs.Radio([256, 384, 512, 1024], type="value", default=512, label="Ajustar nivel de inferencia")
-
-app_interface = gr.Interface(fn=run,
- inputs=[content_image_input, model_image_size],
- outputs="image")
-app_interface.launch()
\ No newline at end of file
diff --git a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/model_zoo.py b/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/model_zoo.py
deleted file mode 100644
index 5b90bc9a165ea46ada72ed0e71f1e80e71ea9f40..0000000000000000000000000000000000000000
--- a/spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/model_zoo/model_zoo.py
+++ /dev/null
@@ -1,213 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import os
-from typing import Optional
-import pkg_resources
-import torch
-
-from detectron2.checkpoint import DetectionCheckpointer
-from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
-from detectron2.modeling import build_model
-
-
-class _ModelZooUrls(object):
- """
- Mapping from names to officially released Detectron2 pre-trained models.
- """
-
- S3_PREFIX = "https://dl.fbaipublicfiles.com/detectron2/"
-
- # format: {config_path.yaml} -> model_id/model_final_{commit}.pkl
- CONFIG_PATH_TO_URL_SUFFIX = {
- # COCO Detection with Faster R-CNN
- "COCO-Detection/faster_rcnn_R_50_C4_1x": "137257644/model_final_721ade.pkl",
- "COCO-Detection/faster_rcnn_R_50_DC5_1x": "137847829/model_final_51d356.pkl",
- "COCO-Detection/faster_rcnn_R_50_FPN_1x": "137257794/model_final_b275ba.pkl",
- "COCO-Detection/faster_rcnn_R_50_C4_3x": "137849393/model_final_f97cb7.pkl",
- "COCO-Detection/faster_rcnn_R_50_DC5_3x": "137849425/model_final_68d202.pkl",
- "COCO-Detection/faster_rcnn_R_50_FPN_3x": "137849458/model_final_280758.pkl",
- "COCO-Detection/faster_rcnn_R_101_C4_3x": "138204752/model_final_298dad.pkl",
- "COCO-Detection/faster_rcnn_R_101_DC5_3x": "138204841/model_final_3e0943.pkl",
- "COCO-Detection/faster_rcnn_R_101_FPN_3x": "137851257/model_final_f6e8b1.pkl",
- "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x": "139173657/model_final_68b088.pkl",
- # COCO Detection with RetinaNet
- "COCO-Detection/retinanet_R_50_FPN_1x": "190397773/model_final_bfca0b.pkl",
- "COCO-Detection/retinanet_R_50_FPN_3x": "190397829/model_final_5bd44e.pkl",
- "COCO-Detection/retinanet_R_101_FPN_3x": "190397697/model_final_971ab9.pkl",
- # COCO Detection with RPN and Fast R-CNN
- "COCO-Detection/rpn_R_50_C4_1x": "137258005/model_final_450694.pkl",
- "COCO-Detection/rpn_R_50_FPN_1x": "137258492/model_final_02ce48.pkl",
- "COCO-Detection/fast_rcnn_R_50_FPN_1x": "137635226/model_final_e5f7ce.pkl",
- # COCO Instance Segmentation Baselines with Mask R-CNN
- "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x": "137259246/model_final_9243eb.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_1x": "137260150/model_final_4f86c3.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "137260431/model_final_a54504.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_50_C4_3x": "137849525/model_final_4ce675.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_50_DC5_3x": "137849551/model_final_84107b.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x": "137849600/model_final_f10217.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x": "138363239/model_final_a2914c.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_101_DC5_3x": "138363294/model_final_0464b7.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x": "138205316/model_final_a3ec72.pkl",
- "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x": "139653917/model_final_2d9806.pkl", # noqa
- # New baselines using Large-Scale Jitter and Longer Training Schedule
- "new_baselines/mask_rcnn_R_50_FPN_100ep_LSJ": "42047764/model_final_bb69de.pkl",
- "new_baselines/mask_rcnn_R_50_FPN_200ep_LSJ": "42047638/model_final_89a8d3.pkl",
- "new_baselines/mask_rcnn_R_50_FPN_400ep_LSJ": "42019571/model_final_14d201.pkl",
- "new_baselines/mask_rcnn_R_101_FPN_100ep_LSJ": "42025812/model_final_4f7b58.pkl",
- "new_baselines/mask_rcnn_R_101_FPN_200ep_LSJ": "42131867/model_final_0bb7ae.pkl",
- "new_baselines/mask_rcnn_R_101_FPN_400ep_LSJ": "42073830/model_final_f96b26.pkl",
- "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_100ep_LSJ": "42047771/model_final_b7fbab.pkl", # noqa
- "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_200ep_LSJ": "42132721/model_final_5d87c1.pkl", # noqa
- "new_baselines/mask_rcnn_regnetx_4gf_dds_FPN_400ep_LSJ": "42025447/model_final_f1362d.pkl", # noqa
- "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_100ep_LSJ": "42047784/model_final_6ba57e.pkl", # noqa
- "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_200ep_LSJ": "42047642/model_final_27b9c1.pkl", # noqa
- "new_baselines/mask_rcnn_regnety_4gf_dds_FPN_400ep_LSJ": "42045954/model_final_ef3a80.pkl", # noqa
- # COCO Person Keypoint Detection Baselines with Keypoint R-CNN
- "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x": "137261548/model_final_04e291.pkl",
- "COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x": "137849621/model_final_a6e10b.pkl",
- "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x": "138363331/model_final_997cc7.pkl",
- "COCO-Keypoints/keypoint_rcnn_X_101_32x8d_FPN_3x": "139686956/model_final_5ad38f.pkl",
- # COCO Panoptic Segmentation Baselines with Panoptic FPN
- "COCO-PanopticSegmentation/panoptic_fpn_R_50_1x": "139514544/model_final_dbfeb4.pkl",
- "COCO-PanopticSegmentation/panoptic_fpn_R_50_3x": "139514569/model_final_c10459.pkl",
- "COCO-PanopticSegmentation/panoptic_fpn_R_101_3x": "139514519/model_final_cafdb1.pkl",
- # LVIS Instance Segmentation Baselines with Mask R-CNN
- "LVISv0.5-InstanceSegmentation/mask_rcnn_R_50_FPN_1x": "144219072/model_final_571f7c.pkl", # noqa
- "LVISv0.5-InstanceSegmentation/mask_rcnn_R_101_FPN_1x": "144219035/model_final_824ab5.pkl", # noqa
- "LVISv0.5-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_1x": "144219108/model_final_5e3439.pkl", # noqa
- # Cityscapes & Pascal VOC Baselines
- "Cityscapes/mask_rcnn_R_50_FPN": "142423278/model_final_af9cf5.pkl",
- "PascalVOC-Detection/faster_rcnn_R_50_C4": "142202221/model_final_b1acc2.pkl",
- # Other Settings
- "Misc/mask_rcnn_R_50_FPN_1x_dconv_c3-c5": "138602867/model_final_65c703.pkl",
- "Misc/mask_rcnn_R_50_FPN_3x_dconv_c3-c5": "144998336/model_final_821d0b.pkl",
- "Misc/cascade_mask_rcnn_R_50_FPN_1x": "138602847/model_final_e9d89b.pkl",
- "Misc/cascade_mask_rcnn_R_50_FPN_3x": "144998488/model_final_480dd8.pkl",
- "Misc/mask_rcnn_R_50_FPN_3x_syncbn": "169527823/model_final_3b3c51.pkl",
- "Misc/mask_rcnn_R_50_FPN_3x_gn": "138602888/model_final_dc5d9e.pkl",
- "Misc/scratch_mask_rcnn_R_50_FPN_3x_gn": "138602908/model_final_01ca85.pkl",
- "Misc/scratch_mask_rcnn_R_50_FPN_9x_gn": "183808979/model_final_da7b4c.pkl",
- "Misc/scratch_mask_rcnn_R_50_FPN_9x_syncbn": "184226666/model_final_5ce33e.pkl",
- "Misc/panoptic_fpn_R_101_dconv_cascade_gn_3x": "139797668/model_final_be35db.pkl",
- "Misc/cascade_mask_rcnn_X_152_32x8d_FPN_IN5k_gn_dconv": "18131413/model_0039999_e76410.pkl", # noqa
- # D1 Comparisons
- "Detectron1-Comparisons/faster_rcnn_R_50_FPN_noaug_1x": "137781054/model_final_7ab50c.pkl", # noqa
- "Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x": "137781281/model_final_62ca52.pkl", # noqa
- "Detectron1-Comparisons/keypoint_rcnn_R_50_FPN_1x": "137781195/model_final_cce136.pkl",
- }
-
- @staticmethod
- def query(config_path: str) -> Optional[str]:
- """
- Args:
- config_path: relative config filename
- """
- name = config_path.replace(".yaml", "").replace(".py", "")
- if name in _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX:
- suffix = _ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX[name]
- return _ModelZooUrls.S3_PREFIX + name + "/" + suffix
- return None
-
-
-def get_checkpoint_url(config_path):
- """
- Returns the URL to the model trained using the given config
-
- Args:
- config_path (str): config file name relative to detectron2's "configs/"
- directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
-
- Returns:
- str: a URL to the model
- """
- url = _ModelZooUrls.query(config_path)
- if url is None:
- raise RuntimeError("Pretrained model for {} is not available!".format(config_path))
- return url
-
-
-def get_config_file(config_path):
- """
- Returns path to a builtin config file.
-
- Args:
- config_path (str): config file name relative to detectron2's "configs/"
- directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
-
- Returns:
- str: the real path to the config file.
- """
- cfg_file = pkg_resources.resource_filename(
- "detectron2.model_zoo", os.path.join("configs", config_path)
- )
- if not os.path.exists(cfg_file):
- raise RuntimeError("{} not available in Model Zoo!".format(config_path))
- return cfg_file
-
-
-def get_config(config_path, trained: bool = False):
- """
- Returns a config object for a model in model zoo.
-
- Args:
- config_path (str): config file name relative to detectron2's "configs/"
- directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
- trained (bool): If True, will set ``MODEL.WEIGHTS`` to trained model zoo weights.
- If False, the checkpoint specified in the config file's ``MODEL.WEIGHTS`` is used
- instead; this will typically (though not always) initialize a subset of weights using
- an ImageNet pre-trained model, while randomly initializing the other weights.
-
- Returns:
- CfgNode or omegaconf.DictConfig: a config object
- """
- cfg_file = get_config_file(config_path)
- if cfg_file.endswith(".yaml"):
- cfg = get_cfg()
- cfg.merge_from_file(cfg_file)
- if trained:
- cfg.MODEL.WEIGHTS = get_checkpoint_url(config_path)
- return cfg
- elif cfg_file.endswith(".py"):
- cfg = LazyConfig.load(cfg_file)
- if trained:
- url = get_checkpoint_url(config_path)
- if "train" in cfg and "init_checkpoint" in cfg.train:
- cfg.train.init_checkpoint = url
- else:
- raise NotImplementedError
- return cfg
-
-
-def get(config_path, trained: bool = False, device: Optional[str] = None):
- """
- Get a model specified by relative path under Detectron2's official ``configs/`` directory.
-
- Args:
- config_path (str): config file name relative to detectron2's "configs/"
- directory, e.g., "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"
- trained (bool): see :func:`get_config`.
- device (str or None): overwrite the device in config, if given.
-
- Returns:
- nn.Module: a detectron2 model. Will be in training mode.
-
- Example:
- ::
- from detectron2 import model_zoo
- model = model_zoo.get("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml", trained=True)
- """
- cfg = get_config(config_path, trained)
- if device is None and not torch.cuda.is_available():
- device = "cpu"
- if device is not None and isinstance(cfg, CfgNode):
- cfg.MODEL.DEVICE = device
-
- if isinstance(cfg, CfgNode):
- model = build_model(cfg)
- DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
- else:
- model = instantiate(cfg.model)
- if device is not None:
- model = model.to(device)
- if "train" in cfg and "init_checkpoint" in cfg.train:
- DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
- return model
diff --git a/spaces/Banbri/zcvzcv/src/lib/replaceWhiteWithTransparent.ts b/spaces/Banbri/zcvzcv/src/lib/replaceWhiteWithTransparent.ts
deleted file mode 100644
index cee490fc1a0b19b2192ce86d6c8f9867a3a6a6d9..0000000000000000000000000000000000000000
--- a/spaces/Banbri/zcvzcv/src/lib/replaceWhiteWithTransparent.ts
+++ /dev/null
@@ -1,37 +0,0 @@
-export function replaceWhiteWithTransparent(imageBase64: string): Promise<string> {
-  return new Promise<string>((resolve, reject) => {
- const img = new Image();
- img.onload = () => {
- const canvas = document.createElement('canvas');
- canvas.width = img.width;
- canvas.height = img.height;
-
- const ctx = canvas.getContext('2d');
- if (!ctx) {
- reject('Unable to get canvas 2D context');
- return;
- }
-
- ctx.drawImage(img, 0, 0);
-
- const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
- const data = imageData.data;
-
- for (let i = 0; i < data.length; i += 4) {
- if (data[i] === 255 && data[i + 1] === 255 && data[i + 2] === 255) {
- data[i + 3] = 0;
- }
- }
-
- ctx.putImageData(imageData, 0, 0);
-
- resolve(canvas.toDataURL());
- };
-
- img.onerror = (err) => {
- reject(err);
- };
-
- img.src = imageBase64;
- });
-}
\ No newline at end of file
diff --git a/spaces/BigSalmon/GPT2Mask/app.py b/spaces/BigSalmon/GPT2Mask/app.py
deleted file mode 100644
index 6b62f405dad179d361ec33f607cbd97e88edc9f6..0000000000000000000000000000000000000000
--- a/spaces/BigSalmon/GPT2Mask/app.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import streamlit as st
-import numpy as np
-import pandas as pd
-import os
-import torch
-import torch.nn as nn
-from transformers.activations import get_activation
-from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import AutoTokenizer, AutoModel
-from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast
-
-
-st.title('GPT2:')
-
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-@st.cache(allow_output_mutation=True)
-def get_model():
- #tokenizer = AutoTokenizer.from_pretrained("BigSalmon/MASKGPT2")
- #model = AutoModelForCausalLM.from_pretrained("BigSalmon/MASKGPT2")
- #tokenizer = GPTNeoXTokenizerFast.from_pretrained("CarperAI/FIM-NeoX-1.3B")
- #model = GPTNeoXForCausalLM.from_pretrained("BigSalmon/FormalInformalConcise-FIM-NeoX-1.3B")
- tokenizer = AutoTokenizer.from_pretrained("BigSalmon/FamilyFeud")
- model = AutoModelForCausalLM.from_pretrained("BigSalmon/FamilyFeud")
- return model, tokenizer
-
-model, tokenizer = get_model()
-
-g = """***
-
-original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick.
-infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick.
-
-***
-
-original:"""
-
-def prefix_format(sentence):
- words = sentence.split()
- if "[MASK]" in sentence:
- words2 = words.index("[MASK]")
- #print(words2)
- output = ("<|SUF|> " + ' '.join(words[words2+1:]) + " <|PRE|> " + ' '.join(words[:words2]) + " <|MID|>")
- st.write(output)
- else:
- st.write("Add [MASK] to sentence")
-
-with st.form(key='my_form'):
- prompt = st.text_area(label='Enter sentence', value=g)
- submit_button = st.form_submit_button(label='Submit')
- submit_button6 = st.form_submit_button(label='Turn Into Infill Format. Just add [MASK] where you want it infilled')
- if submit_button:
- with torch.no_grad():
- text = tokenizer.encode(prompt)
- myinput, past_key_values = torch.tensor([text]), None
- myinput = myinput
- myinput= myinput
- logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)
- logits = logits[0,-1]
-            probabilities = torch.nn.functional.softmax(logits, dim=-1)
- best_logits, best_indices = logits.topk(250)
- best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
- text.append(best_indices[0].item())
- best_probabilities = probabilities[best_indices].tolist()
- words = []
- st.write(best_words)
- if submit_button6:
- prefix_format(prompt)
\ No newline at end of file
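To make the prefix_format transformation above concrete, here is a worked example (an illustrative sentence, not one from the app):

    sentence = "the startup [MASK] a new round of funding"
    # words.index("[MASK]") == 2, so prefix_format writes:
    # <|SUF|> a new round of funding <|PRE|> the startup <|MID|>
    # i.e. suffix first, then prefix, then the <|MID|> tag where the model should infill.
    # Note: "[MASK]" must be whitespace-separated; a token like "[MASK]," would make
    # words.index raise ValueError even though the substring check passes.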
diff --git a/spaces/CVH-vn1210/make_hair/minigpt4/tasks/image_text_pretrain.py b/spaces/CVH-vn1210/make_hair/minigpt4/tasks/image_text_pretrain.py
deleted file mode 100644
index a2214a2e887799fa5236f165ac7329b60bc81d8f..0000000000000000000000000000000000000000
--- a/spaces/CVH-vn1210/make_hair/minigpt4/tasks/image_text_pretrain.py
+++ /dev/null
@@ -1,18 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-from minigpt4.common.registry import registry
-from minigpt4.tasks.base_task import BaseTask
-
-
-@registry.register_task("image_text_pretrain")
-class ImageTextPretrainTask(BaseTask):
- def __init__(self):
- super().__init__()
-
- def evaluation(self, model, data_loader, cuda_enabled=True):
- pass
diff --git a/spaces/CVPR/WALT/mmdet/core/bbox/coder/__init__.py b/spaces/CVPR/WALT/mmdet/core/bbox/coder/__init__.py
deleted file mode 100644
index ae455ba8fc0e0727e2d581cdc8f20fceededf99a..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/core/bbox/coder/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from .base_bbox_coder import BaseBBoxCoder
-from .bucketing_bbox_coder import BucketingBBoxCoder
-from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
-from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
-from .pseudo_bbox_coder import PseudoBBoxCoder
-from .tblr_bbox_coder import TBLRBBoxCoder
-from .yolo_bbox_coder import YOLOBBoxCoder
-
-__all__ = [
- 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
- 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
- 'BucketingBBoxCoder'
-]
diff --git a/spaces/CVPR/WALT/mmdet/models/roi_heads/dynamic_roi_head.py b/spaces/CVPR/WALT/mmdet/models/roi_heads/dynamic_roi_head.py
deleted file mode 100644
index 89427a931f45f5a920c0e66fd88058bf9fa05f5c..0000000000000000000000000000000000000000
--- a/spaces/CVPR/WALT/mmdet/models/roi_heads/dynamic_roi_head.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import numpy as np
-import torch
-
-from mmdet.core import bbox2roi
-from mmdet.models.losses import SmoothL1Loss
-from ..builder import HEADS
-from .standard_roi_head import StandardRoIHead
-
-EPS = 1e-15
-
-
-@HEADS.register_module()
-class DynamicRoIHead(StandardRoIHead):
- """RoI head for `Dynamic R-CNN `_."""
-
- def __init__(self, **kwargs):
- super(DynamicRoIHead, self).__init__(**kwargs)
- assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
- # the IoU history of the past `update_iter_interval` iterations
- self.iou_history = []
- # the beta history of the past `update_iter_interval` iterations
- self.beta_history = []
-
- def forward_train(self,
- x,
- img_metas,
- proposal_list,
- gt_bboxes,
- gt_labels,
- gt_bboxes_ignore=None,
- gt_masks=None):
- """Forward function for training.
-
- Args:
- x (list[Tensor]): list of multi-level img features.
-
- img_metas (list[dict]): list of image info dict where each dict
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
- For details on the values of these keys see
- `mmdet/datasets/pipelines/formatting.py:Collect`.
-
- proposals (list[Tensors]): list of region proposals.
-
- gt_bboxes (list[Tensor]): each item are the truth boxes for each
- image in [tl_x, tl_y, br_x, br_y] format.
-
- gt_labels (list[Tensor]): class indices corresponding to each box
-
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
- boxes can be ignored when computing the loss.
-
- gt_masks (None | Tensor) : true segmentation masks for each box
- used if the architecture supports a segmentation task.
-
- Returns:
- dict[str, Tensor]: a dictionary of loss components
- """
- # assign gts and sample proposals
- if self.with_bbox or self.with_mask:
- num_imgs = len(img_metas)
- if gt_bboxes_ignore is None:
- gt_bboxes_ignore = [None for _ in range(num_imgs)]
- sampling_results = []
- cur_iou = []
- for i in range(num_imgs):
- assign_result = self.bbox_assigner.assign(
- proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
- gt_labels[i])
- sampling_result = self.bbox_sampler.sample(
- assign_result,
- proposal_list[i],
- gt_bboxes[i],
- gt_labels[i],
- feats=[lvl_feat[i][None] for lvl_feat in x])
- # record the `iou_topk`-th largest IoU in an image
- iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
- len(assign_result.max_overlaps))
- ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
- cur_iou.append(ious[-1].item())
- sampling_results.append(sampling_result)
- # average the current IoUs over images
- cur_iou = np.mean(cur_iou)
- self.iou_history.append(cur_iou)
-
- losses = dict()
- # bbox head forward and loss
- if self.with_bbox:
- bbox_results = self._bbox_forward_train(x, sampling_results,
- gt_bboxes, gt_labels,
- img_metas)
- losses.update(bbox_results['loss_bbox'])
-
- # mask head forward and loss
- if self.with_mask:
- mask_results = self._mask_forward_train(x, sampling_results,
- bbox_results['bbox_feats'],
- gt_masks, img_metas)
- losses.update(mask_results['loss_mask'])
-
- # update IoU threshold and SmoothL1 beta
- update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
- if len(self.iou_history) % update_iter_interval == 0:
- new_iou_thr, new_beta = self.update_hyperparameters()
-
- return losses
-
- def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
- img_metas):
- num_imgs = len(img_metas)
- rois = bbox2roi([res.bboxes for res in sampling_results])
- bbox_results = self._bbox_forward(x, rois)
-
- bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
- gt_labels, self.train_cfg)
- # record the `beta_topk`-th smallest target
- # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
- # and bbox_weights, respectively
- pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
- num_pos = len(pos_inds)
- cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
- beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
- num_pos)
- cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
- self.beta_history.append(cur_target)
- loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
- bbox_results['bbox_pred'], rois,
- *bbox_targets)
-
- bbox_results.update(loss_bbox=loss_bbox)
- return bbox_results
-
- def update_hyperparameters(self):
- """Update hyperparameters like IoU thresholds for assigner and beta for
- SmoothL1 loss based on the training statistics.
-
- Returns:
- tuple[float]: the updated ``iou_thr`` and ``beta``.
- """
- new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
- np.mean(self.iou_history))
- self.iou_history = []
- self.bbox_assigner.pos_iou_thr = new_iou_thr
- self.bbox_assigner.neg_iou_thr = new_iou_thr
- self.bbox_assigner.min_pos_iou = new_iou_thr
- if (np.median(self.beta_history) < EPS):
- # avoid 0 or too small value for new_beta
- new_beta = self.bbox_head.loss_bbox.beta
- else:
- new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
- np.median(self.beta_history))
- self.beta_history = []
- self.bbox_head.loss_bbox.beta = new_beta
- return new_iou_thr, new_beta
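The hyperparameter update above can be read in isolation (a sketch with made-up numbers, not statistics from any training run; 0.4 and 1.0 stand in for train_cfg.dynamic_rcnn.initial_iou and initial_beta):

    import numpy as np

    iou_history = [0.55, 0.62, 0.58]    # per-image "iou_topk-th" IoUs collected during training
    beta_history = [0.08, 0.05, 0.06]   # per-iteration "beta_topk-th" smallest regression targets
    new_iou_thr = max(0.4, np.mean(iou_history))   # raise the assigner thresholds as proposals improve
    new_beta = min(1.0, np.median(beta_history))   # tighten SmoothL1 beta as targets shrink
    print(new_iou_thr, new_beta)                   # ~0.583 and 0.06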
diff --git a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/env.py b/spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/env.py
deleted file mode 100644
index 2bdbc95d4f7a8bad8fd4f5eef657e2b51d946056..0000000000000000000000000000000000000000
--- a/spaces/ChrisPreston/diff-svc_minato_aqua/modules/nsf_hifigan/env.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-import shutil
-
-
-class AttrDict(dict):
- def __init__(self, *args, **kwargs):
- super(AttrDict, self).__init__(*args, **kwargs)
- self.__dict__ = self
-
-
-def build_env(config, config_name, path):
- t_path = os.path.join(path, config_name)
- if config != t_path:
- os.makedirs(path, exist_ok=True)
- shutil.copyfile(config, os.path.join(path, config_name))
diff --git a/spaces/CosmicSage/Linaqruf-anything-v3.0/app.py b/spaces/CosmicSage/Linaqruf-anything-v3.0/app.py
deleted file mode 100644
index 16e8131a0bbf7b06956e69e2b7758fa01e4eb51f..0000000000000000000000000000000000000000
--- a/spaces/CosmicSage/Linaqruf-anything-v3.0/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/Linaqruf/anything-v3.0").launch()
\ No newline at end of file
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/D_S_I_G_.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/D_S_I_G_.py
deleted file mode 100644
index d902a29080aff5a275f530c7658d3c9eb4498034..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/D_S_I_G_.py
+++ /dev/null
@@ -1,151 +0,0 @@
-from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval
-from fontTools.misc import sstruct
-from . import DefaultTable
-import base64
-
-DSIG_HeaderFormat = """
- > # big endian
- ulVersion: L
- usNumSigs: H
- usFlag: H
-"""
-# followed by an array of usNumSigs DSIG_Signature records
-DSIG_SignatureFormat = """
- > # big endian
- ulFormat: L
- ulLength: L # length includes DSIG_SignatureBlock header
- ulOffset: L
-"""
-# followed by an array of usNumSigs DSIG_SignatureBlock records,
-# each followed immediately by the pkcs7 bytes
-DSIG_SignatureBlockFormat = """
- > # big endian
- usReserved1: H
- usReserved2: H
- cbSignature: l # length of following raw pkcs7 data
-"""
-
-#
-# NOTE
-# the DSIG table format allows for SignatureBlocks residing
-# anywhere in the table and possibly in a different order as
-# listed in the array after the first table header
-#
-# this implementation does not keep track of any gaps and/or data
-# before or after the actual signature blocks while decompiling,
-# and puts them in the same physical order as listed in the header
-# on compilation with no padding whatsoever.
-#
-
-
-class table_D_S_I_G_(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
- assert self.ulVersion == 1, "DSIG ulVersion must be 1"
- assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
- self.signatureRecords = sigrecs = []
- for n in range(self.usNumSigs):
- sigrec, newData = sstruct.unpack2(
- DSIG_SignatureFormat, newData, SignatureRecord()
- )
- assert sigrec.ulFormat == 1, (
- "DSIG signature record #%d ulFormat must be 1" % n
- )
- sigrecs.append(sigrec)
- for sigrec in sigrecs:
- dummy, newData = sstruct.unpack2(
- DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
- )
- assert sigrec.usReserved1 == 0, (
- "DSIG signature record #%d usReserverd1 must be 0" % n
- )
- assert sigrec.usReserved2 == 0, (
- "DSIG signature record #%d usReserverd2 must be 0" % n
- )
- sigrec.pkcs7 = newData[: sigrec.cbSignature]
-
- def compile(self, ttFont):
- packed = sstruct.pack(DSIG_HeaderFormat, self)
- headers = [packed]
- offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
- data = []
- for sigrec in self.signatureRecords:
- # first pack signature block
- sigrec.cbSignature = len(sigrec.pkcs7)
- packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
- data.append(packed)
- # update redundant length field
- sigrec.ulLength = len(packed)
- # update running table offset
- sigrec.ulOffset = offset
- headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
- offset += sigrec.ulLength
- if offset % 2:
- # Pad to even bytes
- data.append(b"\0")
- return bytesjoin(headers + data)
-
- def toXML(self, xmlWriter, ttFont):
- xmlWriter.comment(
- "note that the Digital Signature will be invalid after recompilation!"
- )
- xmlWriter.newline()
- xmlWriter.simpletag(
- "tableHeader",
- version=self.ulVersion,
- numSigs=self.usNumSigs,
- flag="0x%X" % self.usFlag,
- )
- for sigrec in self.signatureRecords:
- xmlWriter.newline()
- sigrec.toXML(xmlWriter, ttFont)
- xmlWriter.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name == "tableHeader":
- self.signatureRecords = []
- self.ulVersion = safeEval(attrs["version"])
- self.usNumSigs = safeEval(attrs["numSigs"])
- self.usFlag = safeEval(attrs["flag"])
- return
- if name == "SignatureRecord":
- sigrec = SignatureRecord()
- sigrec.fromXML(name, attrs, content, ttFont)
- self.signatureRecords.append(sigrec)
-
-
-pem_spam = lambda l, spam={
- "-----BEGIN PKCS7-----": True,
- "-----END PKCS7-----": True,
- "": True,
-}: not spam.get(l.strip())
-
-
-def b64encode(b):
- s = base64.b64encode(b)
- # Line-break at 76 chars.
- items = []
- while s:
- items.append(tostr(s[:76]))
- items.append("\n")
- s = s[76:]
- return strjoin(items)
-
-
-class SignatureRecord(object):
- def __repr__(self):
- return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
-
- def toXML(self, writer, ttFont):
- writer.begintag(self.__class__.__name__, format=self.ulFormat)
- writer.newline()
- writer.write_noindent("-----BEGIN PKCS7-----\n")
- writer.write_noindent(b64encode(self.pkcs7))
- writer.write_noindent("-----END PKCS7-----\n")
- writer.endtag(self.__class__.__name__)
-
- def fromXML(self, name, attrs, content, ttFont):
- self.ulFormat = safeEval(attrs["format"])
- self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
- self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
- self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
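For context, a minimal sketch of reaching this table through fontTools' public API (the font path is hypothetical; the DSIG table only exists in signed fonts):

    from fontTools.ttLib import TTFont

    font = TTFont("signed_font.ttf")          # hypothetical signed font
    if "DSIG" in font:
        dsig = font["DSIG"]                   # an instance of table_D_S_I_G_ above
        print(dsig.ulVersion, dsig.usNumSigs)
        print([len(rec.pkcs7) for rec in dsig.signatureRecords])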
diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/base.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/base.py
deleted file mode 100644
index 5306b4f3e26d83bd84aa8555485292598e65b3f8..0000000000000000000000000000000000000000
--- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/base.py
+++ /dev/null
@@ -1,1815 +0,0 @@
-from __future__ import annotations
-
-import json
-import re
-import tempfile
-import textwrap
-from pathlib import Path
-from typing import Iterable
-
-import huggingface_hub
-import requests
-import semantic_version as semver
-from gradio_client.documentation import document, set_documentation_group
-from huggingface_hub import CommitOperationAdd
-
-from gradio.themes.utils import (
- colors,
- fonts,
- get_matching_version,
- get_theme_assets,
- sizes,
-)
-from gradio.themes.utils.readme_content import README_CONTENT
-
-set_documentation_group("themes")
-
-
-class ThemeClass:
- def __init__(self):
- self._stylesheets = []
- self.name = None
-
- def _get_theme_css(self):
- css = {}
- dark_css = {}
-
- for attr, val in self.__dict__.items():
- if attr.startswith("_"):
- continue
- if val is None:
- if attr.endswith("_dark"):
- dark_css[attr[:-5]] = None
- continue
- else:
- raise ValueError(
- f"Cannot set '{attr}' to None - only dark mode variables can be None."
- )
- val = str(val)
- pattern = r"(\*)([\w_]+)(\b)"
-
- def repl_func(match):
- full_match = match.group(0)
- if full_match.startswith("*") and full_match.endswith("_dark"):
- raise ValueError(
- f"Cannot refer '{attr}' to '{val}' - dark variable references are automatically used for dark mode attributes, so do not use the _dark suffix in the value."
- )
- if (
- attr.endswith("_dark")
- and full_match.startswith("*")
- and attr[:-5] == full_match[1:]
- ):
- raise ValueError(
- f"Cannot refer '{attr}' to '{val}' - if dark and light mode values are the same, set dark mode version to None."
- )
-
- word = match.group(2)
- word = word.replace("_", "-")
- return f"var(--{word})"
-
- val = re.sub(pattern, repl_func, val)
-
- attr = attr.replace("_", "-")
-
- if attr.endswith("-dark"):
- attr = attr[:-5]
- dark_css[attr] = val
- else:
- css[attr] = val
-
- for attr, val in css.items():
- if attr not in dark_css:
- dark_css[attr] = val
-
- css_code = (
- ":root {\n"
- + "\n".join([f" --{attr}: {val};" for attr, val in css.items()])
- + "\n}"
- )
- dark_css_code = (
- ".dark {\n"
- + "\n".join([f" --{attr}: {val};" for attr, val in dark_css.items()])
- + "\n}"
- )
-
- return f"{css_code}\n{dark_css_code}"
-
- def to_dict(self):
- """Convert the theme into a python dictionary."""
- schema = {"theme": {}}
- for prop in dir(self):
- if (
- not prop.startswith("_")
- or prop.startswith("_font")
- or prop == "_stylesheets"
- or prop == "name"
- ) and isinstance(getattr(self, prop), (list, str)):
- schema["theme"][prop] = getattr(self, prop)
- return schema
-
- @classmethod
- def load(cls, path: str) -> ThemeClass:
- """Load a theme from a json file.
-
- Parameters:
- path: The filepath to read.
- """
- with open(path) as fp:
- return cls.from_dict(json.load(fp, object_hook=fonts.as_font))
-
- @classmethod
- def from_dict(cls, theme: dict[str, dict[str, str]]) -> ThemeClass:
- """Create a theme instance from a dictionary representation.
-
- Parameters:
- theme: The dictionary representation of the theme.
- """
- new_theme = cls()
- for prop, value in theme["theme"].items():
- setattr(new_theme, prop, value)
-
- # For backwards compatibility, load attributes in base theme not in the loaded theme from the base theme.
- base = Base()
- for attr in base.__dict__:
- if not attr.startswith("_") and not hasattr(new_theme, attr):
- setattr(new_theme, attr, getattr(base, attr))
-
- return new_theme
-
- def dump(self, filename: str):
- """Write the theme to a json file.
-
- Parameters:
- filename: The path to write the theme too
- """
- Path(filename).write_text(json.dumps(self.to_dict(), cls=fonts.FontEncoder))
-
- @classmethod
- def from_hub(cls, repo_name: str, hf_token: str | None = None):
- """Load a theme from the hub.
-
- This DOES NOT require a HuggingFace account for downloading publicly available themes.
-
- Parameters:
-            repo_name: string of the form author_name/theme_repo_name@semantic_version_expression. If a semantic version expression is omitted, the latest version will be fetched.
- hf_token: HuggingFace Token. Only needed to download private themes.
- """
- if "@" not in repo_name:
- name, version = repo_name, None
- else:
- name, version = repo_name.split("@")
-
- api = huggingface_hub.HfApi(token=hf_token)
-
- try:
- space_info = api.space_info(name)
- except requests.HTTPError as e:
- raise ValueError(f"The space {name} does not exist") from e
-
- assets = get_theme_assets(space_info)
- matching_version = get_matching_version(assets, version)
-
- if not matching_version:
- raise ValueError(
- f"Cannot find a matching version for expression {version} "
- f"from files {[f.filename for f in assets]}"
- )
-
- theme_file = huggingface_hub.hf_hub_download(
- repo_id=name,
- repo_type="space",
- filename=f"themes/theme_schema@{matching_version.version}.json",
- )
- theme = cls.load(theme_file)
- theme.name = name
- return theme
-
- @staticmethod
- def _get_next_version(space_info: huggingface_hub.hf_api.SpaceInfo) -> str:
- assets = get_theme_assets(space_info)
- latest_version = max(assets, key=lambda asset: asset.version).version
- return str(latest_version.next_patch())
-
- @staticmethod
- def _theme_version_exists(
- space_info: huggingface_hub.hf_api.SpaceInfo, version: str
- ) -> bool:
- assets = get_theme_assets(space_info)
- return any(a.version == semver.Version(version) for a in assets)
-
- def push_to_hub(
- self,
- repo_name: str,
- org_name: str | None = None,
- version: str | None = None,
- hf_token: str | None = None,
- theme_name: str | None = None,
- description: str | None = None,
- private: bool = False,
- ):
- """Upload a theme to the HuggingFace hub.
-
- This requires a HuggingFace account.
-
- Parameters:
- repo_name: The name of the repository to store the theme assets, e.g. 'my_theme' or 'sunset'.
-            org_name: The name of the org to save the space in. If None (the default), the username corresponding to the logged in user, or hf_token is used.
- version: A semantic version tag for theme. Bumping the version tag lets you publish updates to a theme without changing the look of applications that already loaded your theme.
- hf_token: API token for your HuggingFace account
-            theme_name: Name for the theme. If None, defaults to repo_name
-            description: A long form description of your theme.
- """
-
- from gradio import __version__
-
- api = huggingface_hub.HfApi()
-
- if not hf_token:
- try:
- author = huggingface_hub.whoami()["name"]
- except OSError as e:
- raise ValueError(
- "In order to push to hub, log in via `huggingface-cli login` "
- "or provide a theme_token to push_to_hub. For more information "
- "see https://huggingface.co/docs/huggingface_hub/quick-start#login"
- ) from e
- else:
- author = huggingface_hub.whoami(token=hf_token)["name"]
-
- space_id = f"{org_name or author}/{repo_name}"
-
- try:
- space_info = api.space_info(space_id)
- except requests.HTTPError:
- space_info = None
-
- space_exists = space_info is not None
-
- # If no version, set the version to next patch release
- if not version:
- version = self._get_next_version(space_info) if space_exists else "0.0.1"
- else:
- _ = semver.Version(version)
-
- if space_exists and self._theme_version_exists(space_info, version):
- raise ValueError(
- f"The space {space_id} already has a "
- f"theme with version {version}. See: themes/theme_schema@{version}.json. "
- "To manually override this version, use the HuggingFace hub UI."
- )
-
- theme_name = theme_name or repo_name
-
- with tempfile.NamedTemporaryFile(
- mode="w", delete=False, suffix=".json"
- ) as css_file:
- contents = self.to_dict()
- contents["version"] = version
- json.dump(contents, css_file, cls=fonts.FontEncoder)
- with tempfile.NamedTemporaryFile(mode="w", delete=False) as readme_file:
- readme_content = README_CONTENT.format(
- theme_name=theme_name,
- description=description or "Add a description of this theme here!",
- author=author,
- gradio_version=__version__,
- )
- readme_file.write(textwrap.dedent(readme_content))
- with tempfile.NamedTemporaryFile(mode="w", delete=False) as app_file:
- contents = (Path(__file__).parent / "app.py").read_text()
- contents = re.sub(
- r"theme=gr.themes.Default\(\)",
- f"theme='{space_id}'",
- contents,
- )
- contents = re.sub(r"{THEME}", theme_name or repo_name, contents)
- contents = re.sub(r"{AUTHOR}", org_name or author, contents)
- contents = re.sub(r"{SPACE_NAME}", repo_name, contents)
- app_file.write(contents)
-
- operations = [
- CommitOperationAdd(
- path_in_repo=f"themes/theme_schema@{version}.json",
- path_or_fileobj=css_file.name,
- ),
- CommitOperationAdd(
- path_in_repo="README.md", path_or_fileobj=readme_file.name
- ),
- CommitOperationAdd(path_in_repo="app.py", path_or_fileobj=app_file.name),
- ]
-
- huggingface_hub.create_repo(
- space_id,
- repo_type="space",
- space_sdk="gradio",
- token=hf_token,
- exist_ok=True,
- private=private,
- )
-
- api.create_commit(
- repo_id=space_id,
- commit_message="Updating theme",
- repo_type="space",
- operations=operations,
- token=hf_token,
- )
- url = f"https://huggingface.co/spaces/{space_id}"
- print(f"See your theme here! {url}")
- return url
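- # Usage sketch (hypothetical repo name; requires a logged-in HuggingFace account
- # or an explicit hf_token):
- #
- #     url = theme.push_to_hub(repo_name="my_theme", version="0.0.1",
- #                             description="A hypothetical example theme")
- #
- # The theme is committed as themes/theme_schema@<version>.json in a Gradio space,
- # alongside a generated README.md and demo app.py.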
-
-
-@document("push_to_hub", "from_hub", "load", "dump", "from_dict", "to_dict")
-class Base(ThemeClass):
- def __init__(
- self,
- *,
- primary_hue: colors.Color | str = colors.blue,
- secondary_hue: colors.Color | str = colors.blue,
- neutral_hue: colors.Color | str = colors.gray,
- text_size: sizes.Size | str = sizes.text_md,
- spacing_size: sizes.Size | str = sizes.spacing_md,
- radius_size: sizes.Size | str = sizes.radius_md,
- font: fonts.Font
- | str
- | Iterable[fonts.Font | str] = (
- fonts.GoogleFont("Source Sans Pro"),
- "ui-sans-serif",
- "system-ui",
- "sans-serif",
- ),
- font_mono: fonts.Font
- | str
- | Iterable[fonts.Font | str] = (
- fonts.GoogleFont("IBM Plex Mono"),
- "ui-monospace",
- "Consolas",
- "monospace",
- ),
- ):
- """
- Parameters:
- primary_hue: The primary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object.
- secondary_hue: The secondary hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object.
- neutral_hue: The neutral hue of the theme. Load a preset, like gradio.themes.colors.green (or just the string "green"), or pass your own gradio.themes.utils.Color object.
- text_size: The size of the text. Load a preset, like gradio.themes.sizes.text_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object.
- spacing_size: The size of the spacing. Load a preset, like gradio.themes.sizes.spacing_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object.
- radius_size: The radius size of corners. Load a preset, like gradio.themes.sizes.radius_sm (or just the string "sm"), or pass your own gradio.themes.utils.Size object.
- font: The primary font to use for the theme. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks.
- font_mono: The monospace font to use for the theme, applies to code. Pass a string for a system font, or a gradio.themes.font.GoogleFont object to load a font from Google Fonts. Pass a list of fonts for fallbacks.
- """
-
- self.name = "base"
-
- def expand_shortcut(shortcut, mode="color", prefix=None):
- if not isinstance(shortcut, str):
- return shortcut
- if mode == "color":
- for color in colors.Color.all:
- if color.name == shortcut:
- return color
- raise ValueError(f"Color shortcut {shortcut} not found.")
- elif mode == "size":
- for size in sizes.Size.all:
- if size.name == f"{prefix}_{shortcut}":
- return size
- raise ValueError(f"Size shortcut {shortcut} not found.")
-
- primary_hue = expand_shortcut(primary_hue, mode="color")
- secondary_hue = expand_shortcut(secondary_hue, mode="color")
- neutral_hue = expand_shortcut(neutral_hue, mode="color")
- text_size = expand_shortcut(text_size, mode="size", prefix="text")
- spacing_size = expand_shortcut(spacing_size, mode="size", prefix="spacing")
- radius_size = expand_shortcut(radius_size, mode="size", prefix="radius")
-
- # Hue ranges
- self.primary_50 = primary_hue.c50
- self.primary_100 = primary_hue.c100
- self.primary_200 = primary_hue.c200
- self.primary_300 = primary_hue.c300
- self.primary_400 = primary_hue.c400
- self.primary_500 = primary_hue.c500
- self.primary_600 = primary_hue.c600
- self.primary_700 = primary_hue.c700
- self.primary_800 = primary_hue.c800
- self.primary_900 = primary_hue.c900
- self.primary_950 = primary_hue.c950
-
- self.secondary_50 = secondary_hue.c50
- self.secondary_100 = secondary_hue.c100
- self.secondary_200 = secondary_hue.c200
- self.secondary_300 = secondary_hue.c300
- self.secondary_400 = secondary_hue.c400
- self.secondary_500 = secondary_hue.c500
- self.secondary_600 = secondary_hue.c600
- self.secondary_700 = secondary_hue.c700
- self.secondary_800 = secondary_hue.c800
- self.secondary_900 = secondary_hue.c900
- self.secondary_950 = secondary_hue.c950
-
- self.neutral_50 = neutral_hue.c50
- self.neutral_100 = neutral_hue.c100
- self.neutral_200 = neutral_hue.c200
- self.neutral_300 = neutral_hue.c300
- self.neutral_400 = neutral_hue.c400
- self.neutral_500 = neutral_hue.c500
- self.neutral_600 = neutral_hue.c600
- self.neutral_700 = neutral_hue.c700
- self.neutral_800 = neutral_hue.c800
- self.neutral_900 = neutral_hue.c900
- self.neutral_950 = neutral_hue.c950
-
- # Spacing
- self.spacing_xxs = spacing_size.xxs
- self.spacing_xs = spacing_size.xs
- self.spacing_sm = spacing_size.sm
- self.spacing_md = spacing_size.md
- self.spacing_lg = spacing_size.lg
- self.spacing_xl = spacing_size.xl
- self.spacing_xxl = spacing_size.xxl
-
- self.radius_xxs = radius_size.xxs
- self.radius_xs = radius_size.xs
- self.radius_sm = radius_size.sm
- self.radius_md = radius_size.md
- self.radius_lg = radius_size.lg
- self.radius_xl = radius_size.xl
- self.radius_xxl = radius_size.xxl
-
- self.text_xxs = text_size.xxs
- self.text_xs = text_size.xs
- self.text_sm = text_size.sm
- self.text_md = text_size.md
- self.text_lg = text_size.lg
- self.text_xl = text_size.xl
- self.text_xxl = text_size.xxl
-
- # Font
- if not isinstance(font, Iterable):
- font = [font]
- self._font = [
- fontfam if isinstance(fontfam, fonts.Font) else fonts.Font(fontfam)
- for fontfam in font
- ]
- if not isinstance(font_mono, Iterable):
- font_mono = [font_mono]
- self._font_mono = [
- fontfam if isinstance(fontfam, fonts.Font) else fonts.Font(fontfam)
- for fontfam in font_mono
- ]
- self.font = ", ".join(str(font) for font in self._font)
- self.font_mono = ", ".join(str(font) for font in self._font_mono)
-
- self._stylesheets = []
- for font in self._font + self._font_mono:
- font_stylesheet = font.stylesheet()
- if font_stylesheet:
- self._stylesheets.append(font_stylesheet)
-
- self.set()
-
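- # Construction sketch (assumes gradio is importable as gr; "green" and "sm" are
- # preset shortcuts resolved by expand_shortcut below):
- #
- #     import gradio as gr
- #     theme = gr.themes.Base(primary_hue="green", radius_size="sm",
- #                            font=[gr.themes.GoogleFont("Inter"), "sans-serif"])
- #     demo = gr.Blocks(theme=theme)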
- def set(
- self,
- *,
- # Body Attributes: These set the values for the entire body of the app.
- body_background_fill=None,
- body_background_fill_dark=None,
- body_text_color=None,
- body_text_color_dark=None,
- body_text_size=None,
- body_text_color_subdued=None,
- body_text_color_subdued_dark=None,
- body_text_weight=None,
- embed_radius=None,
- # Element Colors: These set the colors for common elements.
- background_fill_primary=None,
- background_fill_primary_dark=None,
- background_fill_secondary=None,
- background_fill_secondary_dark=None,
- border_color_accent=None,
- border_color_accent_dark=None,
- border_color_primary=None,
- border_color_primary_dark=None,
- color_accent=None,
- color_accent_soft=None,
- color_accent_soft_dark=None,
- # Text: This sets the text styling for text elements.
- link_text_color=None,
- link_text_color_dark=None,
- link_text_color_active=None,
- link_text_color_active_dark=None,
- link_text_color_hover=None,
- link_text_color_hover_dark=None,
- link_text_color_visited=None,
- link_text_color_visited_dark=None,
- prose_text_size=None,
- prose_text_weight=None,
- prose_header_text_weight=None,
- # Shadows: These set the high-level shadow rendering styles. These variables are often referenced by other component-specific shadow variables.
- shadow_drop=None,
- shadow_drop_lg=None,
- shadow_inset=None,
- shadow_spread=None,
- shadow_spread_dark=None,
- # Layout Atoms: These set the style for common layout elements, such as the blocks that wrap components.
- block_background_fill=None,
- block_background_fill_dark=None,
- block_border_color=None,
- block_border_color_dark=None,
- block_border_width=None,
- block_border_width_dark=None,
- block_info_text_color=None,
- block_info_text_color_dark=None,
- block_info_text_size=None,
- block_info_text_weight=None,
- block_label_background_fill=None,
- block_label_background_fill_dark=None,
- block_label_border_color=None,
- block_label_border_color_dark=None,
- block_label_border_width=None,
- block_label_border_width_dark=None,
- block_label_shadow=None,
- block_label_text_color=None,
- block_label_text_color_dark=None,
- block_label_margin=None,
- block_label_padding=None,
- block_label_radius=None,
- block_label_right_radius=None,
- block_label_text_size=None,
- block_label_text_weight=None,
- block_padding=None,
- block_radius=None,
- block_shadow=None,
- block_shadow_dark=None,
- block_title_background_fill=None,
- block_title_background_fill_dark=None,
- block_title_border_color=None,
- block_title_border_color_dark=None,
- block_title_border_width=None,
- block_title_border_width_dark=None,
- block_title_text_color=None,
- block_title_text_color_dark=None,
- block_title_padding=None,
- block_title_radius=None,
- block_title_text_size=None,
- block_title_text_weight=None,
- container_radius=None,
- form_gap_width=None,
- layout_gap=None,
- panel_background_fill=None,
- panel_background_fill_dark=None,
- panel_border_color=None,
- panel_border_color_dark=None,
- panel_border_width=None,
- panel_border_width_dark=None,
- section_header_text_size=None,
- section_header_text_weight=None,
- # Component Atoms: These set the style for elements within components.
- chatbot_code_background_color=None,
- chatbot_code_background_color_dark=None,
- checkbox_background_color=None,
- checkbox_background_color_dark=None,
- checkbox_background_color_focus=None,
- checkbox_background_color_focus_dark=None,
- checkbox_background_color_hover=None,
- checkbox_background_color_hover_dark=None,
- checkbox_background_color_selected=None,
- checkbox_background_color_selected_dark=None,
- checkbox_border_color=None,
- checkbox_border_color_dark=None,
- checkbox_border_color_focus=None,
- checkbox_border_color_focus_dark=None,
- checkbox_border_color_hover=None,
- checkbox_border_color_hover_dark=None,
- checkbox_border_color_selected=None,
- checkbox_border_color_selected_dark=None,
- checkbox_border_radius=None,
- checkbox_border_width=None,
- checkbox_border_width_dark=None,
- checkbox_check=None,
- radio_circle=None,
- checkbox_shadow=None,
- checkbox_label_background_fill=None,
- checkbox_label_background_fill_dark=None,
- checkbox_label_background_fill_hover=None,
- checkbox_label_background_fill_hover_dark=None,
- checkbox_label_background_fill_selected=None,
- checkbox_label_background_fill_selected_dark=None,
- checkbox_label_border_color=None,
- checkbox_label_border_color_dark=None,
- checkbox_label_border_color_hover=None,
- checkbox_label_border_color_hover_dark=None,
- checkbox_label_border_width=None,
- checkbox_label_border_width_dark=None,
- checkbox_label_gap=None,
- checkbox_label_padding=None,
- checkbox_label_shadow=None,
- checkbox_label_text_size=None,
- checkbox_label_text_weight=None,
- checkbox_label_text_color=None,
- checkbox_label_text_color_dark=None,
- checkbox_label_text_color_selected=None,
- checkbox_label_text_color_selected_dark=None,
- error_background_fill=None,
- error_background_fill_dark=None,
- error_border_color=None,
- error_border_color_dark=None,
- error_border_width=None,
- error_border_width_dark=None,
- error_text_color=None,
- error_text_color_dark=None,
- error_icon_color=None,
- error_icon_color_dark=None,
- input_background_fill=None,
- input_background_fill_dark=None,
- input_background_fill_focus=None,
- input_background_fill_focus_dark=None,
- input_background_fill_hover=None,
- input_background_fill_hover_dark=None,
- input_border_color=None,
- input_border_color_dark=None,
- input_border_color_focus=None,
- input_border_color_focus_dark=None,
- input_border_color_hover=None,
- input_border_color_hover_dark=None,
- input_border_width=None,
- input_border_width_dark=None,
- input_padding=None,
- input_placeholder_color=None,
- input_placeholder_color_dark=None,
- input_radius=None,
- input_shadow=None,
- input_shadow_dark=None,
- input_shadow_focus=None,
- input_shadow_focus_dark=None,
- input_text_size=None,
- input_text_weight=None,
- loader_color=None,
- loader_color_dark=None,
- slider_color=None,
- slider_color_dark=None,
- stat_background_fill=None,
- stat_background_fill_dark=None,
- table_border_color=None,
- table_border_color_dark=None,
- table_even_background_fill=None,
- table_even_background_fill_dark=None,
- table_odd_background_fill=None,
- table_odd_background_fill_dark=None,
- table_radius=None,
- table_row_focus=None,
- table_row_focus_dark=None,
- # Buttons: These set the style for buttons.
- button_border_width=None,
- button_border_width_dark=None,
- button_shadow=None,
- button_shadow_active=None,
- button_shadow_hover=None,
- button_transition=None,
- button_large_padding=None,
- button_large_radius=None,
- button_large_text_size=None,
- button_large_text_weight=None,
- button_small_padding=None,
- button_small_radius=None,
- button_small_text_size=None,
- button_small_text_weight=None,
- button_primary_background_fill=None,
- button_primary_background_fill_dark=None,
- button_primary_background_fill_hover=None,
- button_primary_background_fill_hover_dark=None,
- button_primary_border_color=None,
- button_primary_border_color_dark=None,
- button_primary_border_color_hover=None,
- button_primary_border_color_hover_dark=None,
- button_primary_text_color=None,
- button_primary_text_color_dark=None,
- button_primary_text_color_hover=None,
- button_primary_text_color_hover_dark=None,
- button_secondary_background_fill=None,
- button_secondary_background_fill_dark=None,
- button_secondary_background_fill_hover=None,
- button_secondary_background_fill_hover_dark=None,
- button_secondary_border_color=None,
- button_secondary_border_color_dark=None,
- button_secondary_border_color_hover=None,
- button_secondary_border_color_hover_dark=None,
- button_secondary_text_color=None,
- button_secondary_text_color_dark=None,
- button_secondary_text_color_hover=None,
- button_secondary_text_color_hover_dark=None,
- button_cancel_background_fill=None,
- button_cancel_background_fill_dark=None,
- button_cancel_background_fill_hover=None,
- button_cancel_background_fill_hover_dark=None,
- button_cancel_border_color=None,
- button_cancel_border_color_dark=None,
- button_cancel_border_color_hover=None,
- button_cancel_border_color_hover_dark=None,
- button_cancel_text_color=None,
- button_cancel_text_color_dark=None,
- button_cancel_text_color_hover=None,
- button_cancel_text_color_hover_dark=None,
- ) -> Base:
- """
- Parameters:
- body_background_fill: The background of the entire app.
- body_background_fill_dark: The background of the entire app in dark mode.
- body_text_color: The default text color.
- body_text_color_dark: The default text color in dark mode.
- body_text_size: The default text size.
- body_text_color_subdued: The text color used for softer, less important text.
- body_text_color_subdued_dark: The text color used for softer, less important text in dark mode.
- body_text_weight: The default text weight.
- embed_radius: The corner radius used for embedding when the app is embedded within a page.
- background_fill_primary: The background primarily used for items placed directly on the page.
- background_fill_primary_dark: The background primarily used for items placed directly on the page in dark mode.
- background_fill_secondary: The background primarily used for items placed on top of another item.
- background_fill_secondary_dark: The background primarily used for items placed on top of another item in dark mode.
- border_color_accent: The border color used for accented items.
- border_color_accent_dark: The border color used for accented items in dark mode.
- border_color_primary: The border color primarily used for items placed directly on the page.
- border_color_primary_dark: The border color primarily used for items placed directly on the page in dark mode.
- color_accent: The color used for accented items.
- color_accent_soft: The softer color used for accented items.
- color_accent_soft_dark: The softer color used for accented items in dark mode.
- link_text_color: The text color used for links.
- link_text_color_dark: The text color used for links in dark mode.
- link_text_color_active: The text color used for links when they are active.
- link_text_color_active_dark: The text color used for links when they are active in dark mode.
- link_text_color_hover: The text color used for links when they are hovered over.
- link_text_color_hover_dark: The text color used for links when they are hovered over in dark mode.
- link_text_color_visited: The text color used for links when they have been visited.
- link_text_color_visited_dark: The text color used for links when they have been visited in dark mode.
- prose_text_size: The text size used for markdown and other prose.
- prose_text_weight: The text weight used for markdown and other prose.
- prose_header_text_weight: The text weight of a header used for markdown and other prose.
- shadow_drop: Drop shadow used by other shadowed items.
- shadow_drop_lg: Larger drop shadow used by other shadowed items.
- shadow_inset: Inset shadow used by other shadowed items.
- shadow_spread: Size of shadow spread used by shadowed items.
- shadow_spread_dark: Size of shadow spread used by shadowed items in dark mode.
- block_background_fill: The background around an item.
- block_background_fill_dark: The background around an item in dark mode.
- block_border_color: The border color around an item.
- block_border_color_dark: The border color around an item in dark mode.
- block_border_width: The border width around an item.
- block_border_width_dark: The border width around an item in dark mode.
- block_info_text_color: The color of the info text.
- block_info_text_color_dark: The color of the info text in dark mode.
- block_info_text_size: The size of the info text.
- block_info_text_weight: The weight of the info text.
- block_label_background_fill: The background of the title label of a media element (e.g. image).
- block_label_background_fill_dark: The background of the title label of a media element (e.g. image) in dark mode.
- block_label_border_color: The border color of the title label of a media element (e.g. image).
- block_label_border_color_dark: The border color of the title label of a media element (e.g. image) in dark mode.
- block_label_border_width: The border width of the title label of a media element (e.g. image).
- block_label_border_width_dark: The border width of the title label of a media element (e.g. image) in dark mode.
- block_label_shadow: The shadow of the title label of a media element (e.g. image).
- block_label_text_color: The text color of the title label of a media element (e.g. image).
- block_label_text_color_dark: The text color of the title label of a media element (e.g. image) in dark mode.
- block_label_margin: The margin of the title label of a media element (e.g. image) from its surrounding container.
- block_label_padding: The padding of the title label of a media element (e.g. image).
- block_label_radius: The corner radius of the title label of a media element (e.g. image).
- block_label_right_radius: The corner radius of a right-aligned helper label.
- block_label_text_size: The text size of the title label of a media element (e.g. image).
- block_label_text_weight: The text weight of the title label of a media element (e.g. image).
- block_padding: The padding around an item.
- block_radius: The corner radius around an item.
- block_shadow: The shadow under an item.
- block_shadow_dark: The shadow under an item in dark mode.
- block_title_background_fill: The background of the title of a form element (e.g. textbox).
- block_title_background_fill_dark: The background of the title of a form element (e.g. textbox) in dark mode.
- block_title_border_color: The border color of the title of a form element (e.g. textbox).
- block_title_border_color_dark: The border color of the title of a form element (e.g. textbox) in dark mode.
- block_title_border_width: The border width of the title of a form element (e.g. textbox).
- block_title_border_width_dark: The border width of the title of a form element (e.g. textbox) in dark mode.
- block_title_text_color: The text color of the title of a form element (e.g. textbox).
- block_title_text_color_dark: The text color of the title of a form element (e.g. textbox) in dark mode.
- block_title_padding: The padding of the title of a form element (e.g. textbox).
- block_title_radius: The corner radius of the title of a form element (e.g. textbox).
- block_title_text_size: The text size of the title of a form element (e.g. textbox).
- block_title_text_weight: The text weight of the title of a form element (e.g. textbox).
- container_radius: The corner radius of a layout component that holds other content.
- form_gap_width: The border gap between form elements (e.g. consecutive textboxes).
- layout_gap: The gap between items within a row or column.
- panel_background_fill: The background of a panel.
- panel_background_fill_dark: The background of a panel in dark mode.
- panel_border_color: The border color of a panel.
- panel_border_color_dark: The border color of a panel in dark mode.
- panel_border_width: The border width of a panel.
- panel_border_width_dark: The border width of a panel in dark mode.
- section_header_text_size: The text size of a section header (e.g. tab name).
- section_header_text_weight: The text weight of a section header (e.g. tab name).
- chatbot_code_background_color: The background color of code blocks in the chatbot.
- chatbot_code_background_color_dark: The background color of code blocks in the chatbot in dark mode.
- checkbox_background_color: The background of a checkbox square or radio circle.
- checkbox_background_color_dark: The background of a checkbox square or radio circle in dark mode.
- checkbox_background_color_focus: The background of a checkbox square or radio circle when focused.
- checkbox_background_color_focus_dark: The background of a checkbox square or radio circle when focused in dark mode.
- checkbox_background_color_hover: The background of a checkbox square or radio circle when hovered over.
- checkbox_background_color_hover_dark: The background of a checkbox square or radio circle when hovered over in dark mode.
- checkbox_background_color_selected: The background of a checkbox square or radio circle when selected.
- checkbox_background_color_selected_dark: The background of a checkbox square or radio circle when selected in dark mode.
- checkbox_border_color: The border color of a checkbox square or radio circle.
- checkbox_border_color_dark: The border color of a checkbox square or radio circle in dark mode.
- checkbox_border_color_focus: The border color of a checkbox square or radio circle when focused.
- checkbox_border_color_focus_dark: The border color of a checkbox square or radio circle when focused in dark mode.
- checkbox_border_color_hover: The border color of a checkbox square or radio circle when hovered over.
- checkbox_border_color_hover_dark: The border color of a checkbox square or radio circle when hovered over in dark mode.
- checkbox_border_color_selected: The border color of a checkbox square or radio circle when selected.
- checkbox_border_color_selected_dark: The border color of a checkbox square or radio circle when selected in dark mode.
- checkbox_border_radius: The corner radius of a checkbox square.
- checkbox_border_width: The border width of a checkbox square or radio circle.
- checkbox_border_width_dark: The border width of a checkbox square or radio circle in dark mode.
- checkbox_check: The checkmark visual of a checkbox square.
- radio_circle: The circle visual of a radio circle.
- checkbox_shadow: The shadow of a checkbox square or radio circle.
- checkbox_label_background_fill: The background of the surrounding button of a checkbox or radio element.
- checkbox_label_background_fill_dark: The background of the surrounding button of a checkbox or radio element in dark mode.
- checkbox_label_background_fill_hover: The background of the surrounding button of a checkbox or radio element when hovered over.
- checkbox_label_background_fill_hover_dark: The background of the surrounding button of a checkbox or radio element when hovered over in dark mode.
- checkbox_label_background_fill_selected: The background of the surrounding button of a checkbox or radio element when selected.
- checkbox_label_background_fill_selected_dark: The background of the surrounding button of a checkbox or radio element when selected in dark mode.
- checkbox_label_border_color: The border color of the surrounding button of a checkbox or radio element.
- checkbox_label_border_color_dark: The border color of the surrounding button of a checkbox or radio element in dark mode.
- checkbox_label_border_color_hover: The border color of the surrounding button of a checkbox or radio element when hovered over.
- checkbox_label_border_color_hover_dark: The border color of the surrounding button of a checkbox or radio element when hovered over in dark mode.
- checkbox_label_border_width: The border width of the surrounding button of a checkbox or radio element.
- checkbox_label_border_width_dark: The border width of the surrounding button of a checkbox or radio element in dark mode.
- checkbox_label_gap: The gap between consecutive checkbox or radio elements.
- checkbox_label_padding: The padding of the surrounding button of a checkbox or radio element.
- checkbox_label_shadow: The shadow of the surrounding button of a checkbox or radio element.
- checkbox_label_text_size: The text size of the label accompanying a checkbox or radio element.
- checkbox_label_text_weight: The text weight of the label accompanying a checkbox or radio element.
- checkbox_label_text_color: The text color of the label accompanying a checkbox or radio element.
- checkbox_label_text_color_dark: The text color of the label accompanying a checkbox or radio element in dark mode.
- checkbox_label_text_color_selected: The text color of the label accompanying a checkbox or radio element when selected.
- checkbox_label_text_color_selected_dark: The text color of the label accompanying a checkbox or radio element when selected in dark mode.
- error_background_fill: The background of an error message.
- error_background_fill_dark: The background of an error message in dark mode.
- error_border_color: The border color of an error message.
- error_border_color_dark: The border color of an error message in dark mode.
- error_border_width: The border width of an error message.
- error_border_width_dark: The border width of an error message in dark mode.
- error_text_color: The text color of an error message.
- error_text_color_dark: The text color of an error message in dark mode.
- input_background_fill: The background of an input field.
- input_background_fill_dark: The background of an input field in dark mode.
- input_background_fill_focus: The background of an input field when focused.
- input_background_fill_focus_dark: The background of an input field when focused in dark mode.
- input_background_fill_hover: The background of an input field when hovered over.
- input_background_fill_hover_dark: The background of an input field when hovered over in dark mode.
- input_border_color: The border color of an input field.
- input_border_color_dark: The border color of an input field in dark mode.
- input_border_color_focus: The border color of an input field when focused.
- input_border_color_focus_dark: The border color of an input field when focused in dark mode.
- input_border_color_hover: The border color of an input field when hovered over.
- input_border_color_hover_dark: The border color of an input field when hovered over in dark mode.
- input_border_width: The border width of an input field.
- input_border_width_dark: The border width of an input field in dark mode.
- input_padding: The padding of an input field.
- input_placeholder_color: The placeholder text color of an input field.
- input_placeholder_color_dark: The placeholder text color of an input field in dark mode.
- input_radius: The corner radius of an input field.
- input_shadow: The shadow of an input field.
- input_shadow_dark: The shadow of an input field in dark mode.
- input_shadow_focus: The shadow of an input field when focused.
- input_shadow_focus_dark: The shadow of an input field when focused in dark mode.
- input_text_size: The text size of an input field.
- input_text_weight: The text weight of an input field.
- loader_color: The color of the loading animation while a request is pending.
- loader_color_dark: The color of the loading animation while a request is pending in dark mode.
- slider_color: The color of the slider in a range element.
- slider_color_dark: The color of the slider in a range element in dark mode.
- stat_background_fill: The background used for stats visuals (e.g. confidence bars in label).
- stat_background_fill_dark: The background used for stats visuals (e.g. confidence bars in label) in dark mode.
- table_border_color: The border color of a table.
- table_border_color_dark: The border color of a table in dark mode.
- table_even_background_fill: The background of even rows in a table.
- table_even_background_fill_dark: The background of even rows in a table in dark mode.
- table_odd_background_fill: The background of odd rows in a table.
- table_odd_background_fill_dark: The background of odd rows in a table in dark mode.
- table_radius: The corner radius of a table.
- table_row_focus: The background of a focused row in a table.
- table_row_focus_dark: The background of a focused row in a table in dark mode.
- button_border_width: The border width of a button.
- button_border_width_dark: The border width of a button in dark mode.
- button_cancel_background_fill: The background of a button of "cancel" variant.
- button_cancel_background_fill_dark: The background of a button of "cancel" variant in dark mode.
- button_cancel_background_fill_hover: The background of a button of "cancel" variant when hovered over.
- button_cancel_background_fill_hover_dark: The background of a button of "cancel" variant when hovered over in dark mode.
- button_cancel_border_color: The border color of a button of "cancel" variant.
- button_cancel_border_color_dark: The border color of a button of "cancel" variant in dark mode.
- button_cancel_border_color_hover: The border color of a button of "cancel" variant when hovered over.
- button_cancel_border_color_hover_dark: The border color of a button of "cancel" variant when hovered over in dark mode.
- button_cancel_text_color: The text color of a button of "cancel" variant.
- button_cancel_text_color_dark: The text color of a button of "cancel" variant in dark mode.
- button_cancel_text_color_hover: The text color of a button of "cancel" variant when hovered over.
- button_cancel_text_color_hover_dark: The text color of a button of "cancel" variant when hovered over in dark mode.
- button_large_padding: The padding of a button with the default "large" size.
- button_large_radius: The corner radius of a button with the default "large" size.
- button_large_text_size: The text size of a button with the default "large" size.
- button_large_text_weight: The text weight of a button with the default "large" size.
- button_primary_background_fill: The background of a button of "primary" variant.
- button_primary_background_fill_dark: The background of a button of "primary" variant in dark mode.
- button_primary_background_fill_hover: The background of a button of "primary" variant when hovered over.
- button_primary_background_fill_hover_dark: The background of a button of "primary" variant when hovered over in dark mode.
- button_primary_border_color: The border color of a button of "primary" variant.
- button_primary_border_color_dark: The border color of a button of "primary" variant in dark mode.
- button_primary_border_color_hover: The border color of a button of "primary" variant when hovered over.
- button_primary_border_color_hover_dark: The border color of a button of "primary" variant when hovered over in dark mode.
- button_primary_text_color: The text color of a button of "primary" variant.
- button_primary_text_color_dark: The text color of a button of "primary" variant in dark mode.
- button_primary_text_color_hover: The text color of a button of "primary" variant when hovered over.
- button_primary_text_color_hover_dark: The text color of a button of "primary" variant when hovered over in dark mode.
- button_secondary_background_fill: The background of a button of default "secondary" variant.
- button_secondary_background_fill_dark: The background of a button of default "secondary" variant in dark mode.
- button_secondary_background_fill_hover: The background of a button of default "secondary" variant when hovered over.
- button_secondary_background_fill_hover_dark: The background of a button of default "secondary" variant when hovered over in dark mode.
- button_secondary_border_color: The border color of a button of default "secondary" variant.
- button_secondary_border_color_dark: The border color of a button of default "secondary" variant in dark mode.
- button_secondary_border_color_hover: The border color of a button of default "secondary" variant when hovered over.
- button_secondary_border_color_hover_dark: The border color of a button of default "secondary" variant when hovered over in dark mode.
- button_secondary_text_color: The text color of a button of default "secondary" variant.
- button_secondary_text_color_dark: The text color of a button of default "secondary" variant in dark mode.
- button_secondary_text_color_hover: The text color of a button of default "secondary" variant when hovered over.
- button_secondary_text_color_hover_dark: The text color of a button of default "secondary" variant when hovered over in dark mode.
- button_shadow: The shadow under a button.
- button_shadow_active: The shadow under a button when pressed.
- button_shadow_hover: The shadow under a button when hovered over.
- button_small_padding: The padding of a button set to "small" size.
- button_small_radius: The corner radius of a button set to "small" size.
- button_small_text_size: The text size of a button set to "small" size.
- button_small_text_weight: The text weight of a button set to "small" size.
- button_transition: The transition animation duration of a button between regular, hover, and focused states.
- """
-
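- # Each keyword below either overrides its variable directly or references another
- # theme variable via the "*variable_name" syntax, e.g. a hypothetical call:
- #
- #     theme.set(button_primary_background_fill="*primary_600",
- #               button_primary_background_fill_hover="*primary_500")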
- # Body
- self.body_background_fill = body_background_fill or getattr(
- self, "body_background_fill", "*background_fill_primary"
- )
- self.body_background_fill_dark = body_background_fill_dark or getattr(
- self, "body_background_fill_dark", "*background_fill_primary"
- )
- self.body_text_color = body_text_color or getattr(
- self, "body_text_color", "*neutral_800"
- )
- self.body_text_color_dark = body_text_color_dark or getattr(
- self, "body_text_color_dark", "*neutral_100"
- )
- self.body_text_size = body_text_size or getattr(
- self, "body_text_size", "*text_md"
- )
- self.body_text_weight = body_text_weight or getattr(
- self, "body_text_weight", "400"
- )
- self.embed_radius = embed_radius or getattr(self, "embed_radius", "*radius_lg")
- # Core Colors
- self.color_accent = color_accent or getattr(
- self, "color_accent", "*primary_500"
- )
- self.color_accent_soft = color_accent_soft or getattr(
- self, "color_accent_soft", "*primary_50"
- )
- self.color_accent_soft_dark = color_accent_soft_dark or getattr(
- self, "color_accent_soft_dark", "*neutral_700"
- )
- self.background_fill_primary = background_fill_primary or getattr(
- self, "background_primary", "white"
- )
- self.background_fill_primary_dark = background_fill_primary_dark or getattr(
- self, "background_primary_dark", "*neutral_950"
- )
- self.background_fill_secondary = background_fill_secondary or getattr(
- self, "background_secondary", "*neutral_50"
- )
- self.background_fill_secondary_dark = background_fill_secondary_dark or getattr(
- self, "background_secondary_dark", "*neutral_900"
- )
- self.border_color_accent = border_color_accent or getattr(
- self, "border_color_accent", "*primary_300"
- )
- self.border_color_accent_dark = border_color_accent_dark or getattr(
- self, "border_color_accent_dark", "*neutral_600"
- )
- self.border_color_primary = border_color_primary or getattr(
- self, "border_color_primary", "*neutral_200"
- )
- self.border_color_primary_dark = border_color_primary_dark or getattr(
- self, "border_color_primary_dark", "*neutral_700"
- )
- # Text Colors
- self.link_text_color = link_text_color or getattr(
- self, "link_text_color", "*secondary_600"
- )
- self.link_text_color_active = link_text_color_active or getattr(
- self, "link_text_color_active", "*secondary_600"
- )
- self.link_text_color_active_dark = link_text_color_active_dark or getattr(
- self, "link_text_color_active_dark", "*secondary_500"
- )
- self.link_text_color_dark = link_text_color_dark or getattr(
- self, "link_text_color_dark", "*secondary_500"
- )
- self.link_text_color_hover = link_text_color_hover or getattr(
- self, "link_text_color_hover", "*secondary_700"
- )
- self.link_text_color_hover_dark = link_text_color_hover_dark or getattr(
- self, "link_text_color_hover_dark", "*secondary_400"
- )
- self.link_text_color_visited = link_text_color_visited or getattr(
- self, "link_text_color_visited", "*secondary_500"
- )
- self.link_text_color_visited_dark = link_text_color_visited_dark or getattr(
- self, "link_text_color_visited_dark", "*secondary_600"
- )
- self.body_text_color_subdued = body_text_color_subdued or getattr(
- self, "body_text_color_subdued", "*neutral_400"
- )
- self.body_text_color_subdued_dark = body_text_color_subdued_dark or getattr(
- self, "body_text_color_subdued_dark", "*neutral_400"
- )
- # Shadows
- self.shadow_drop = shadow_drop or getattr(
- self, "shadow_drop", "rgba(0,0,0,0.05) 0px 1px 2px 0px"
- )
- self.shadow_drop_lg = shadow_drop_lg or getattr(
- self,
- "shadow_drop_lg",
- "0 1px 3px 0 rgb(0 0 0 / 0.1), 0 1px 2px -1px rgb(0 0 0 / 0.1)",
- )
- self.shadow_inset = shadow_inset or getattr(
- self, "shadow_inset", "rgba(0,0,0,0.05) 0px 2px 4px 0px inset"
- )
- self.shadow_spread = shadow_spread or getattr(self, "shadow_spread", "3px")
- self.shadow_spread_dark = shadow_spread_dark or getattr(
- self, "shadow_spread_dark", "1px"
- )
- # Layout Atoms
- self.block_background_fill = block_background_fill or getattr(
- self, "block_background_fill", "*background_fill_primary"
- )
- self.block_background_fill_dark = block_background_fill_dark or getattr(
- self, "block_background_fill_dark", "*neutral_800"
- )
- self.block_border_color = block_border_color or getattr(
- self, "block_border_color", "*border_color_primary"
- )
- self.block_border_color_dark = block_border_color_dark or getattr(
- self, "block_border_color_dark", "*border_color_primary"
- )
- self.block_border_width = block_border_width or getattr(
- self, "block_border_width", "1px"
- )
- self.block_border_width_dark = block_border_width_dark or getattr(
- self, "block_border_width_dark", None
- )
- self.block_info_text_color = block_info_text_color or getattr(
- self, "block_info_text_color", "*body_text_color_subdued"
- )
- self.block_info_text_color_dark = block_info_text_color_dark or getattr(
- self, "block_info_text_color_dark", "*body_text_color_subdued"
- )
- self.block_info_text_size = block_info_text_size or getattr(
- self, "block_info_text_size", "*text_sm"
- )
- self.block_info_text_weight = block_info_text_weight or getattr(
- self, "block_info_text_weight", "400"
- )
- self.block_label_background_fill = block_label_background_fill or getattr(
- self, "block_label_background_fill", "*background_fill_primary"
- )
- self.block_label_background_fill_dark = (
- block_label_background_fill_dark
- or getattr(
- self, "block_label_background_fill_dark", "*background_fill_secondary"
- )
- )
- self.block_label_border_color = block_label_border_color or getattr(
- self, "block_label_border_color", "*border_color_primary"
- )
- self.block_label_border_color_dark = block_label_border_color_dark or getattr(
- self, "block_label_border_color_dark", "*border_color_primary"
- )
- self.block_label_border_width = block_label_border_width or getattr(
- self, "block_label_border_width", "1px"
- )
- self.block_label_border_width_dark = block_label_border_width_dark or getattr(
- self, "block_label_border_width_dark", None
- )
- self.block_label_shadow = block_label_shadow or getattr(
- self, "block_label_shadow", "*block_shadow"
- )
- self.block_label_text_color = block_label_text_color or getattr(
- self, "block_label_text_color", "*neutral_500"
- )
- self.block_label_text_color_dark = block_label_text_color_dark or getattr(
- self, "block_label_text_color_dark", "*neutral_200"
- )
- self.block_label_margin = block_label_margin or getattr(
- self, "block_label_margin", "0"
- )
- self.block_label_padding = block_label_padding or getattr(
- self, "block_label_padding", "*spacing_sm *spacing_lg"
- )
- self.block_label_radius = block_label_radius or getattr(
- self,
- "block_label_radius",
- "calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px) 0",
- )
- self.block_label_right_radius = block_label_right_radius or getattr(
- self,
- "block_label_right_radius",
- "0 calc(*radius_lg - 1px) 0 calc(*radius_lg - 1px)",
- )
- self.block_label_text_size = block_label_text_size or getattr(
- self, "block_label_text_size", "*text_sm"
- )
- self.block_label_text_weight = block_label_text_weight or getattr(
- self, "block_label_text_weight", "400"
- )
- self.block_padding = block_padding or getattr(
- self, "block_padding", "*spacing_xl calc(*spacing_xl + 2px)"
- )
- self.block_radius = block_radius or getattr(self, "block_radius", "*radius_lg")
- self.block_shadow = block_shadow or getattr(self, "block_shadow", "none")
- self.block_shadow_dark = block_shadow_dark or getattr(
- self, "block_shadow_dark", None
- )
- self.block_title_background_fill = block_title_background_fill or getattr(
- self, "block_title_background_fill", "none"
- )
- self.block_title_background_fill_dark = (
- block_title_background_fill_dark
- or getattr(self, "block_title_background_fill_dark", None)
- )
- self.block_title_border_color = block_title_border_color or getattr(
- self, "block_title_border_color", "none"
- )
- self.block_title_border_color_dark = block_title_border_color_dark or getattr(
- self, "block_title_border_color_dark", None
- )
- self.block_title_border_width = block_title_border_width or getattr(
- self, "block_title_border_width", "0px"
- )
- self.block_title_border_width_dark = block_title_border_width_dark or getattr(
- self, "block_title_border_width_dark", None
- )
- self.block_title_text_color = block_title_text_color or getattr(
- self, "block_title_text_color", "*neutral_500"
- )
- self.block_title_text_color_dark = block_title_text_color_dark or getattr(
- self, "block_title_text_color_dark", "*neutral_200"
- )
- self.block_title_padding = block_title_padding or getattr(
- self, "block_title_padding", "0"
- )
- self.block_title_radius = block_title_radius or getattr(
- self, "block_title_radius", "none"
- )
- self.block_title_text_size = block_title_text_size or getattr(
- self, "block_title_text_size", "*text_md"
- )
- self.block_title_text_weight = block_title_text_weight or getattr(
- self, "block_title_text_weight", "400"
- )
- self.container_radius = container_radius or getattr(
- self, "container_radius", "*radius_lg"
- )
- self.form_gap_width = form_gap_width or getattr(self, "form_gap_width", "0px")
- self.layout_gap = layout_gap or getattr(self, "layout_gap", "*spacing_xxl")
- self.panel_background_fill = panel_background_fill or getattr(
- self, "panel_background_fill", "*background_fill_secondary"
- )
- self.panel_background_fill_dark = panel_background_fill_dark or getattr(
- self, "panel_background_fill_dark", "*background_fill_secondary"
- )
- self.panel_border_color = panel_border_color or getattr(
- self, "panel_border_color", "*border_color_primary"
- )
- self.panel_border_color_dark = panel_border_color_dark or getattr(
- self, "panel_border_color_dark", "*border_color_primary"
- )
- self.panel_border_width = panel_border_width or getattr(
- self, "panel_border_width", "0"
- )
- self.panel_border_width_dark = panel_border_width_dark or getattr(
- self, "panel_border_width_dark", None
- )
- self.section_header_text_size = section_header_text_size or getattr(
- self, "section_header_text_size", "*text_md"
- )
- self.section_header_text_weight = section_header_text_weight or getattr(
- self, "section_header_text_weight", "400"
- )
- # Component Atoms
- self.chatbot_code_background_color = chatbot_code_background_color or getattr(
- self, "chatbot_code_background_color", "*neutral_100"
- )
- self.chatbot_code_background_color_dark = (
- chatbot_code_background_color_dark
- or getattr(self, "chatbot_code_background_color_dark", "*neutral_800")
- )
- self.checkbox_background_color = checkbox_background_color or getattr(
- self, "checkbox_background_color", "*background_fill_primary"
- )
- self.checkbox_background_color_dark = checkbox_background_color_dark or getattr(
- self, "checkbox_background_color_dark", "*neutral_800"
- )
- self.checkbox_background_color_focus = (
- checkbox_background_color_focus
- or getattr(
- self, "checkbox_background_color_focus", "*checkbox_background_color"
- )
- )
- self.checkbox_background_color_focus_dark = (
- checkbox_background_color_focus_dark
- or getattr(
- self,
- "checkbox_background_color_focus_dark",
- "*checkbox_background_color",
- )
- )
- self.checkbox_background_color_hover = (
- checkbox_background_color_hover
- or getattr(
- self, "checkbox_background_color_hover", "*checkbox_background_color"
- )
- )
- self.checkbox_background_color_hover_dark = (
- checkbox_background_color_hover_dark
- or getattr(
- self,
- "checkbox_background_color_hover_dark",
- "*checkbox_background_color",
- )
- )
- self.checkbox_background_color_selected = (
- checkbox_background_color_selected
- or getattr(self, "checkbox_background_color_selected", "*secondary_600")
- )
- self.checkbox_background_color_selected_dark = (
- checkbox_background_color_selected_dark
- or getattr(
- self, "checkbox_background_color_selected_dark", "*secondary_600"
- )
- )
- self.checkbox_border_color = checkbox_border_color or getattr(
- self, "checkbox_border_color", "*neutral_300"
- )
- self.checkbox_border_color_dark = checkbox_border_color_dark or getattr(
- self, "checkbox_border_color_dark", "*neutral_700"
- )
- self.checkbox_border_color_focus = checkbox_border_color_focus or getattr(
- self, "checkbox_border_color_focus", "*secondary_500"
- )
- self.checkbox_border_color_focus_dark = (
- checkbox_border_color_focus_dark
- or getattr(self, "checkbox_border_color_focus_dark", "*secondary_500")
- )
- self.checkbox_border_color_hover = checkbox_border_color_hover or getattr(
- self, "checkbox_border_color_hover", "*neutral_300"
- )
- self.checkbox_border_color_hover_dark = (
- checkbox_border_color_hover_dark
- or getattr(self, "checkbox_border_color_hover_dark", "*neutral_600")
- )
- self.checkbox_border_color_selected = checkbox_border_color_selected or getattr(
- self, "checkbox_border_color_selected", "*secondary_600"
- )
- self.checkbox_border_color_selected_dark = (
- checkbox_border_color_selected_dark
- or getattr(self, "checkbox_border_color_selected_dark", "*secondary_600")
- )
- self.checkbox_border_radius = checkbox_border_radius or getattr(
- self, "checkbox_border_radius", "*radius_sm"
- )
- self.checkbox_border_width = checkbox_border_width or getattr(
- self, "checkbox_border_width", "*input_border_width"
- )
- self.checkbox_border_width_dark = checkbox_border_width_dark or getattr(
- self, "checkbox_border_width_dark", "*input_border_width"
- )
- self.checkbox_label_background_fill = checkbox_label_background_fill or getattr(
- self, "checkbox_label_background_fill", "*button_secondary_background_fill"
- )
- self.checkbox_label_background_fill_dark = (
- checkbox_label_background_fill_dark
- or getattr(
- self,
- "checkbox_label_background_fill_dark",
- "*button_secondary_background_fill",
- )
- )
- self.checkbox_label_background_fill_hover = (
- checkbox_label_background_fill_hover
- or getattr(
- self,
- "checkbox_label_background_fill_hover",
- "*button_secondary_background_fill_hover",
- )
- )
- self.checkbox_label_background_fill_hover_dark = (
- checkbox_label_background_fill_hover_dark
- or getattr(
- self,
- "checkbox_label_background_fill_hover_dark",
- "*button_secondary_background_fill_hover",
- )
- )
- self.checkbox_label_background_fill_selected = (
- checkbox_label_background_fill_selected
- or getattr(
- self,
- "checkbox_label_background_fill_selected",
- "*checkbox_label_background_fill",
- )
- )
- self.checkbox_label_background_fill_selected_dark = (
- checkbox_label_background_fill_selected_dark
- or getattr(
- self,
- "checkbox_label_background_fill_selected_dark",
- "*checkbox_label_background_fill",
- )
- )
- self.checkbox_label_border_color = checkbox_label_border_color or getattr(
- self, "checkbox_label_border_color", "*border_color_primary"
- )
- self.checkbox_label_border_color_dark = (
- checkbox_label_border_color_dark
- or getattr(
- self, "checkbox_label_border_color_dark", "*border_color_primary"
- )
- )
- self.checkbox_label_border_color_hover = (
- checkbox_label_border_color_hover
- or getattr(
- self,
- "checkbox_label_border_color_hover",
- "*checkbox_label_border_color",
- )
- )
- self.checkbox_label_border_color_hover_dark = (
- checkbox_label_border_color_hover_dark
- or getattr(
- self,
- "checkbox_label_border_color_hover_dark",
- "*checkbox_label_border_color",
- )
- )
- self.checkbox_label_border_width = checkbox_label_border_width or getattr(
- self, "checkbox_label_border_width", "*input_border_width"
- )
- self.checkbox_label_border_width_dark = (
- checkbox_label_border_width_dark
- or getattr(self, "checkbox_label_border_width_dark", "*input_border_width")
- )
- self.checkbox_label_gap = checkbox_label_gap or getattr(
- self, "checkbox_label_gap", "*spacing_lg"
- )
- self.checkbox_label_padding = checkbox_label_padding or getattr(
- self, "checkbox_label_padding", "*spacing_md calc(2 * *spacing_md)"
- )
- self.checkbox_label_shadow = checkbox_label_shadow or getattr(
- self, "checkbox_label_shadow", "none"
- )
- self.checkbox_label_text_size = checkbox_label_text_size or getattr(
- self, "checkbox_label_text_size", "*text_md"
- )
- self.checkbox_label_text_weight = checkbox_label_text_weight or getattr(
- self, "checkbox_label_text_weight", "400"
- )
- self.checkbox_check = checkbox_check or getattr(
- self,
- "checkbox_check",
- """url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e")""",
- )
- self.radio_circle = radio_circle or getattr(
- self,
- "radio_circle",
- """url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e")""",
- )
- self.checkbox_shadow = checkbox_shadow or getattr(
- self, "checkbox_shadow", "*input_shadow"
- )
- self.checkbox_label_text_color = checkbox_label_text_color or getattr(
- self, "checkbox_label_text_color", "*body_text_color"
- )
- self.checkbox_label_text_color_dark = checkbox_label_text_color_dark or getattr(
- self, "checkbox_label_text_color_dark", "*body_text_color"
- )
- self.checkbox_label_text_color_selected = (
- checkbox_label_text_color_selected
- or getattr(
- self, "checkbox_label_text_color_selected", "*checkbox_label_text_color"
- )
- )
- self.checkbox_label_text_color_selected_dark = (
- checkbox_label_text_color_selected_dark
- or getattr(
- self,
- "checkbox_label_text_color_selected_dark",
- "*checkbox_label_text_color",
- )
- )
- self.error_background_fill = error_background_fill or getattr(
- self, "error_background_fill", colors.red.c50
- )
- self.error_background_fill_dark = error_background_fill_dark or getattr(
- self, "error_background_fill_dark", "*background_fill_primary"
- )
- self.error_border_color = error_border_color or getattr(
- self, "error_border_color", colors.red.c700
- )
- self.error_border_color_dark = error_border_color_dark or getattr(
- self, "error_border_color_dark", colors.red.c500
- )
- self.error_border_width = error_border_width or getattr(
- self, "error_border_width", "1px"
- )
- self.error_border_width_dark = error_border_width_dark or getattr(
- self, "error_border_width_dark", None
- )
- self.error_text_color = error_text_color or getattr(
- self, "error_text_color", colors.red.c700
- )
- self.error_text_color_dark = error_text_color_dark or getattr(
- self, "error_text_color_dark", colors.red.c50
- )
- self.error_icon_color = error_icon_color or getattr(
- self, "error_icon_color", colors.red.c700
- )
- self.error_icon_color_dark = error_icon_color_dark or getattr(
- self, "error_icon_color_dark", colors.red.c500
- )
- self.input_background_fill = input_background_fill or getattr(
- self, "input_background_fill", "*neutral_100"
- )
- self.input_background_fill_dark = input_background_fill_dark or getattr(
- self, "input_background_fill_dark", "*neutral_700"
- )
- self.input_background_fill_focus = input_background_fill_focus or getattr(
- self, "input_background_fill_focus", "*secondary_500"
- )
- self.input_background_fill_focus_dark = (
- input_background_fill_focus_dark
- or getattr(self, "input_background_fill_focus_dark", "*secondary_600")
- )
- self.input_background_fill_hover = input_background_fill_hover or getattr(
- self, "input_background_fill_hover", "*input_background_fill"
- )
- self.input_background_fill_hover_dark = (
- input_background_fill_hover_dark
- or getattr(
- self, "input_background_fill_hover_dark", "*input_background_fill"
- )
- )
- self.input_border_color = input_border_color or getattr(
- self, "input_border_color", "*border_color_primary"
- )
- self.input_border_color_dark = input_border_color_dark or getattr(
- self, "input_border_color_dark", "*border_color_primary"
- )
- self.input_border_color_focus = input_border_color_focus or getattr(
- self, "input_border_color_focus", "*secondary_300"
- )
- self.input_border_color_focus_dark = input_border_color_focus_dark or getattr(
- self, "input_border_color_focus_dark", "*neutral_700"
- )
- self.input_border_color_hover = input_border_color_hover or getattr(
- self, "input_border_color_hover", "*input_border_color"
- )
- self.input_border_color_hover_dark = input_border_color_hover_dark or getattr(
- self, "input_border_color_hover_dark", "*input_border_color"
- )
- self.input_border_width = input_border_width or getattr(
- self, "input_border_width", "0px"
- )
- self.input_border_width_dark = input_border_width_dark or getattr(
- self, "input_border_width_dark", None
- )
- self.input_padding = input_padding or getattr(
- self, "input_padding", "*spacing_xl"
- )
- self.input_placeholder_color = input_placeholder_color or getattr(
- self, "input_placeholder_color", "*neutral_400"
- )
- self.input_placeholder_color_dark = input_placeholder_color_dark or getattr(
- self, "input_placeholder_color_dark", "*neutral_500"
- )
- self.input_radius = input_radius or getattr(self, "input_radius", "*radius_lg")
- self.input_shadow = input_shadow or getattr(self, "input_shadow", "none")
- self.input_shadow_dark = input_shadow_dark or getattr(
- self, "input_shadow_dark", None
- )
- self.input_shadow_focus = input_shadow_focus or getattr(
- self, "input_shadow_focus", "*input_shadow"
- )
- self.input_shadow_focus_dark = input_shadow_focus_dark or getattr(
- self, "input_shadow_focus_dark", None
- )
- self.input_text_size = input_text_size or getattr(
- self, "input_text_size", "*text_md"
- )
- self.input_text_weight = input_text_weight or getattr(
- self, "input_text_weight", "400"
- )
- self.loader_color = loader_color or getattr(
- self, "loader_color", "*color_accent"
- )
- self.loader_color_dark = loader_color_dark or getattr(
- self, "loader_color_dark", None
- )
- self.prose_text_size = prose_text_size or getattr(
- self, "prose_text_size", "*text_md"
- )
- self.prose_text_weight = prose_text_weight or getattr(
- self, "prose_text_weight", "400"
- )
- self.prose_header_text_weight = prose_header_text_weight or getattr(
- self, "prose_header_text_weight", "600"
- )
- self.slider_color = slider_color or getattr(self, "slider_color", "auto")
- self.slider_color_dark = slider_color_dark or getattr(
- self, "slider_color_dark", None
- )
- self.stat_background_fill = stat_background_fill or getattr(
- self, "stat_background_fill", "*primary_300"
- )
- self.stat_background_fill_dark = stat_background_fill_dark or getattr(
- self, "stat_background_fill_dark", "*primary_500"
- )
- self.table_border_color = table_border_color or getattr(
- self, "table_border_color", "*neutral_300"
- )
- self.table_border_color_dark = table_border_color_dark or getattr(
- self, "table_border_color_dark", "*neutral_700"
- )
- self.table_even_background_fill = table_even_background_fill or getattr(
- self, "table_even_background_fill", "white"
- )
- self.table_even_background_fill_dark = (
- table_even_background_fill_dark
- or getattr(self, "table_even_background_fill_dark", "*neutral_950")
- )
- self.table_odd_background_fill = table_odd_background_fill or getattr(
- self, "table_odd_background_fill", "*neutral_50"
- )
- self.table_odd_background_fill_dark = table_odd_background_fill_dark or getattr(
- self, "table_odd_background_fill_dark", "*neutral_900"
- )
- self.table_radius = table_radius or getattr(self, "table_radius", "*radius_lg")
- self.table_row_focus = table_row_focus or getattr(
- self, "table_row_focus", "*color_accent_soft"
- )
- self.table_row_focus_dark = table_row_focus_dark or getattr(
- self, "table_row_focus_dark", "*color_accent_soft"
- )
- # Buttons
- self.button_border_width = button_border_width or getattr(
- self, "button_border_width", "*input_border_width"
- )
- self.button_border_width_dark = button_border_width_dark or getattr(
- self, "button_border_width_dark", "*input_border_width"
- )
- self.button_cancel_background_fill = button_cancel_background_fill or getattr(
- self, "button_cancel_background_fill", "*button_secondary_background_fill"
- )
- self.button_cancel_background_fill_dark = (
- button_cancel_background_fill_dark
- or getattr(
- self,
- "button_cancel_background_fill_dark",
- "*button_secondary_background_fill",
- )
- )
- self.button_cancel_background_fill_hover = (
- button_cancel_background_fill_hover
- or getattr(
- self,
- "button_cancel_background_fill_hover",
- "*button_cancel_background_fill",
- )
- )
- self.button_cancel_background_fill_hover_dark = (
- button_cancel_background_fill_hover_dark
- or getattr(
- self,
- "button_cancel_background_fill_hover_dark",
- "*button_cancel_background_fill",
- )
- )
- self.button_cancel_border_color = button_cancel_border_color or getattr(
- self, "button_cancel_border_color", "*button_secondary_border_color"
- )
- self.button_cancel_border_color_dark = (
- button_cancel_border_color_dark
- or getattr(
- self,
- "button_cancel_border_color_dark",
- "*button_secondary_border_color",
- )
- )
- self.button_cancel_border_color_hover = (
- button_cancel_border_color_hover
- or getattr(
- self,
- "button_cancel_border_color_hover",
- "*button_cancel_border_color",
- )
- )
- self.button_cancel_border_color_hover_dark = (
- button_cancel_border_color_hover_dark
- or getattr(
- self,
- "button_cancel_border_color_hover_dark",
- "*button_cancel_border_color",
- )
- )
- self.button_cancel_text_color = button_cancel_text_color or getattr(
- self, "button_cancel_text_color", "*button_secondary_text_color"
- )
- self.button_cancel_text_color_dark = button_cancel_text_color_dark or getattr(
- self, "button_cancel_text_color_dark", "*button_secondary_text_color"
- )
- self.button_cancel_text_color_hover = button_cancel_text_color_hover or getattr(
- self, "button_cancel_text_color_hover", "*button_cancel_text_color"
- )
- self.button_cancel_text_color_hover_dark = (
- button_cancel_text_color_hover_dark
- or getattr(
- self, "button_cancel_text_color_hover_dark", "*button_cancel_text_color"
- )
- )
- self.button_large_padding = button_large_padding or getattr(
- self, "button_large_padding", "*spacing_lg calc(2 * *spacing_lg)"
- )
- self.button_large_radius = button_large_radius or getattr(
- self, "button_large_radius", "*radius_lg"
- )
- self.button_large_text_size = button_large_text_size or getattr(
- self, "button_large_text_size", "*text_lg"
- )
- self.button_large_text_weight = button_large_text_weight or getattr(
- self, "button_large_text_weight", "600"
- )
- self.button_primary_background_fill = button_primary_background_fill or getattr(
- self, "button_primary_background_fill", "*primary_200"
- )
- self.button_primary_background_fill_dark = (
- button_primary_background_fill_dark
- or getattr(self, "button_primary_background_fill_dark", "*primary_700")
- )
- self.button_primary_background_fill_hover = (
- button_primary_background_fill_hover
- or getattr(
- self,
- "button_primary_background_fill_hover",
- "*button_primary_background_fill",
- )
- )
- self.button_primary_background_fill_hover_dark = (
- button_primary_background_fill_hover_dark
- or getattr(
- self,
- "button_primary_background_fill_hover_dark",
- "*button_primary_background_fill",
- )
- )
- self.button_primary_border_color = button_primary_border_color or getattr(
- self, "button_primary_border_color", "*primary_200"
- )
- self.button_primary_border_color_dark = (
- button_primary_border_color_dark
- or getattr(self, "button_primary_border_color_dark", "*primary_600")
- )
- self.button_primary_border_color_hover = (
- button_primary_border_color_hover
- or getattr(
- self,
- "button_primary_border_color_hover",
- "*button_primary_border_color",
- )
- )
- self.button_primary_border_color_hover_dark = (
- button_primary_border_color_hover_dark
- or getattr(
- self,
- "button_primary_border_color_hover_dark",
- "*button_primary_border_color",
- )
- )
- self.button_primary_text_color = button_primary_text_color or getattr(
- self, "button_primary_text_color", "*primary_600"
- )
- self.button_primary_text_color_dark = button_primary_text_color_dark or getattr(
- self, "button_primary_text_color_dark", "white"
- )
- self.button_primary_text_color_hover = (
- button_primary_text_color_hover
- or getattr(
- self, "button_primary_text_color_hover", "*button_primary_text_color"
- )
- )
- self.button_primary_text_color_hover_dark = (
- button_primary_text_color_hover_dark
- or getattr(
- self,
- "button_primary_text_color_hover_dark",
- "*button_primary_text_color",
- )
- )
- self.button_secondary_background_fill = (
- button_secondary_background_fill
- or getattr(self, "button_secondary_background_fill", "*neutral_200")
- )
- self.button_secondary_background_fill_dark = (
- button_secondary_background_fill_dark
- or getattr(self, "button_secondary_background_fill_dark", "*neutral_600")
- )
- self.button_secondary_background_fill_hover = (
- button_secondary_background_fill_hover
- or getattr(
- self,
- "button_secondary_background_fill_hover",
- "*button_secondary_background_fill",
- )
- )
- self.button_secondary_background_fill_hover_dark = (
- button_secondary_background_fill_hover_dark
- or getattr(
- self,
- "button_secondary_background_fill_hover_dark",
- "*button_secondary_background_fill",
- )
- )
- self.button_secondary_border_color = button_secondary_border_color or getattr(
- self, "button_secondary_border_color", "*neutral_200"
- )
- self.button_secondary_border_color_dark = (
- button_secondary_border_color_dark
- or getattr(self, "button_secondary_border_color_dark", "*neutral_600")
- )
- self.button_secondary_border_color_hover = (
- button_secondary_border_color_hover
- or getattr(
- self,
- "button_secondary_border_color_hover",
- "*button_secondary_border_color",
- )
- )
- self.button_secondary_border_color_hover_dark = (
- button_secondary_border_color_hover_dark
- or getattr(
- self,
- "button_secondary_border_color_hover_dark",
- "*button_secondary_border_color",
- )
- )
- self.button_secondary_text_color = button_secondary_text_color or getattr(
- self, "button_secondary_text_color", "*neutral_700"
- )
- self.button_secondary_text_color_dark = (
- button_secondary_text_color_dark
- or getattr(self, "button_secondary_text_color_dark", "white")
- )
- self.button_secondary_text_color_hover = (
- button_secondary_text_color_hover
- or getattr(
- self,
- "button_secondary_text_color_hover",
- "*button_secondary_text_color",
- )
- )
- self.button_secondary_text_color_hover_dark = (
- button_secondary_text_color_hover_dark
- or getattr(
- self,
- "button_secondary_text_color_hover_dark",
- "*button_secondary_text_color",
- )
- )
- self.button_shadow = button_shadow or getattr(self, "button_shadow", "none")
- self.button_shadow_active = button_shadow_active or getattr(
- self, "button_shadow_active", "none"
- )
- self.button_shadow_hover = button_shadow_hover or getattr(
- self, "button_shadow_hover", "none"
- )
- self.button_small_padding = button_small_padding or getattr(
- self, "button_small_padding", "*spacing_sm calc(2 * *spacing_sm)"
- )
- self.button_small_radius = button_small_radius or getattr(
- self, "button_small_radius", "*radius_lg"
- )
- self.button_small_text_size = button_small_text_size or getattr(
- self, "button_small_text_size", "*text_md"
- )
- self.button_small_text_weight = button_small_text_weight or getattr(
- self, "button_small_text_weight", "400"
- )
- self.button_transition = button_transition or getattr(
- self, "button_transition", "background-color 0.2s ease"
- )
- return self
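Every assignment in the `set()` body above follows the same idiom: an explicit keyword argument wins, otherwise the previously stored attribute is kept, otherwise a hard-coded default is used, and `return self` allows chained calls. A minimal, self-contained sketch of that fallback pattern (the class and attribute names below are hypothetical, not part of Gradio's API):

```python
class MiniTheme:
    def set(self, button_radius=None, button_fill=None):
        # explicit argument > previously set attribute > built-in default
        self.button_radius = button_radius or getattr(self, "button_radius", "8px")
        self.button_fill = button_fill or getattr(self, "button_fill", "*primary_500")
        return self  # returning self enables chained .set() calls

theme = MiniTheme().set(button_fill="#222")    # radius falls back to "8px"
theme.set(button_radius="12px")                # fill keeps the earlier "#222"
print(theme.button_radius, theme.button_fill)  # -> 12px #222
```

One side effect of using `or` rather than an explicit `is None` check is that falsy values such as `""` or `0` can never override an earlier setting.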
diff --git a/spaces/DeepFloyd/IF/style.css b/spaces/DeepFloyd/IF/style.css
deleted file mode 100644
index 17fb109b7cef35a21bc44dcefba0ea3c2913f0ee..0000000000000000000000000000000000000000
--- a/spaces/DeepFloyd/IF/style.css
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
-This CSS file is modified from:
-https://huggingface.co/spaces/stabilityai/stable-diffusion/blob/2794a3c3ba66115c307075098e713f572b08bf80/app.py
-*/
-
-h1 {
- text-align: center;
-}
-
-.gradio-container {
- font-family: 'IBM Plex Sans', sans-serif;
-}
-
-.gr-button {
- color: white;
- border-color: black;
- background: black;
-}
-
-input[type='range'] {
- accent-color: black;
-}
-
-.dark input[type='range'] {
- accent-color: #dfdfdf;
-}
-
-.container {
- max-width: 730px;
- margin: auto;
- padding-top: 1.5rem;
-}
-
-#gallery {
- min-height: auto;
- height: 185px;
- margin-top: 15px;
- margin-left: auto;
- margin-right: auto;
- border-bottom-right-radius: .5rem !important;
- border-bottom-left-radius: .5rem !important;
-}
-#gallery .grid-wrap, #gallery .empty{
- height: 185px;
- min-height: 185px;
-}
-#gallery .preview{
- height: 185px;
- min-height: 185px!important;
-}
-#gallery>div>.h-full {
- min-height: 20rem;
-}
-
-.details:hover {
- text-decoration: underline;
-}
-
-.gr-button {
- white-space: nowrap;
-}
-
-.gr-button:focus {
- border-color: rgb(147 197 253 / var(--tw-border-opacity));
- outline: none;
- box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
- --tw-border-opacity: 1;
- --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
- --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
- --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
- --tw-ring-opacity: .5;
-}
-
-#advanced-btn {
- font-size: .7rem !important;
- line-height: 19px;
- margin-top: 12px;
- margin-bottom: 12px;
- padding: 2px 8px;
- border-radius: 14px !important;
-}
-
-#advanced-options {
- display: none;
- margin-bottom: 20px;
-}
-
-.footer {
- margin-bottom: 45px;
- margin-top: 35px;
- text-align: center;
- border-bottom: 1px solid #e5e5e5;
-}
-
-.footer>p {
- font-size: .8rem;
- display: inline-block;
- padding: 0 10px;
- transform: translateY(10px);
- background: white;
-}
-
-.dark .footer {
- border-color: #303030;
-}
-
-.dark .footer>p {
- background: #0b0f19;
-}
-
-.acknowledgments h4 {
- margin: 1.25em 0 .25em 0;
- font-weight: bold;
- font-size: 115%;
-}
-
-.animate-spin {
- animation: spin 1s linear infinite;
-}
-
-@keyframes spin {
- from {
- transform: rotate(0deg);
- }
-
- to {
- transform: rotate(360deg);
- }
-}
-
-#share-btn-container {
- display: flex;
- padding-left: 0.5rem !important;
- padding-right: 0.5rem !important;
- background-color: #000000;
- justify-content: center;
- align-items: center;
- border-radius: 9999px !important;
- width: 13rem;
- margin-top: 10px;
- margin-left: auto;
-}
-
-#share-btn {
- all: initial;
- color: #ffffff;
- font-weight: 600;
- cursor: pointer;
- font-family: 'IBM Plex Sans', sans-serif;
- margin-left: 0.5rem !important;
- padding-top: 0.25rem !important;
- padding-bottom: 0.25rem !important;
- right: 0;
-}
-
-#share-btn * {
- all: unset;
-}
-
-#share-btn-container div:nth-child(-n+2) {
- width: auto !important;
- min-height: 0px !important;
-}
-
-#share-btn-container .wrap {
- display: none !important;
-}
-
-.gr-form {
- flex: 1 1 50%;
- border-top-right-radius: 0;
- border-bottom-right-radius: 0;
-}
-
-#prompt-container {
- gap: 0;
-}
-
-#prompt-text-input,
-#negative-prompt-text-input {
- padding: .45rem 0.625rem
-}
-
-#component-16 {
- border-top-width: 1px !important;
- margin-top: 1em
-}
-
-.image_duplication {
- position: absolute;
- width: 100px;
- left: 50px
-}
-
-#component-0 {
- max-width: 730px;
- margin: auto;
- padding-top: 1.5rem;
-}
-
-#upscaled-image img {
- object-fit: scale-down;
-}
-/* share button */
-#share-btn-container {
- display: flex;
- padding-left: 0.5rem !important;
- padding-right: 0.5rem !important;
- background-color: #000000;
- justify-content: center;
- align-items: center;
- border-radius: 9999px !important;
- width: 13rem;
- margin-top: 10px;
- margin-left: auto;
- flex: unset !important;
-}
-#share-btn {
- all: initial;
- color: #ffffff;
- font-weight: 600;
- cursor: pointer;
- font-family: 'IBM Plex Sans', sans-serif;
- margin-left: 0.5rem !important;
- padding-top: 0.25rem !important;
- padding-bottom: 0.25rem !important;
- right:0;
-}
-#share-btn * {
- all: unset !important;
-}
-#share-btn-container div:nth-child(-n+2){
- width: auto !important;
- min-height: 0px !important;
-}
-#share-btn-container .wrap {
- display: none !important;
-}
\ No newline at end of file
diff --git a/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/README.md b/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/README.md
deleted file mode 100644
index 38496e90a265885345a4de29fe0f50fe02a8c6c9..0000000000000000000000000000000000000000
--- a/spaces/DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: 語音質檢+噪音去除:Meta Denoiser
-emoji: 📉
-colorFrom: gray
-colorTo: purple
-sdk: gradio
-sdk_version: 3.22.1
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/__init__.py b/spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/__init__.py
deleted file mode 100644
index d2a7efe79d871852affd9de7b46f726a7942f218..0000000000000000000000000000000000000000
--- a/spaces/DragGan/DragGan/stylegan_human/torch_utils/op_edit/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) SenseTime Research. All rights reserved.
-
-from .fused_act import FusedLeakyReLU, fused_leaky_relu
-from .upfirdn2d import upfirdn2d
diff --git a/spaces/Dragonnnext/Unicorn-proxy/README.md b/spaces/Dragonnnext/Unicorn-proxy/README.md
deleted file mode 100644
index 5a6e3ea331ab5ff4b3da0715437b17f97d3c7f93..0000000000000000000000000000000000000000
--- a/spaces/Dragonnnext/Unicorn-proxy/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Unicorn OAI Proxy
-emoji: 🦄
-sdk: docker
-colorFrom: gray
-colorTo: gray
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
\ No newline at end of file
diff --git a/spaces/Duskfallcrew/duskfall-s-general-digital-art-model/app.py b/spaces/Duskfallcrew/duskfall-s-general-digital-art-model/app.py
deleted file mode 100644
index 9fed0cde7af517d619b8c498061b2f76f312a53b..0000000000000000000000000000000000000000
--- a/spaces/Duskfallcrew/duskfall-s-general-digital-art-model/app.py
+++ /dev/null
@@ -1,137 +0,0 @@
-from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
-import gradio as gr
-import torch
-from PIL import Image
-
-model_id = 'MultiversexPeeps/duskfall-s-general-digital-art-model'
-prefix = 'gendigi'
-
-scheduler = DPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
-
-pipe = StableDiffusionPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(
- model_id,
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
- scheduler=scheduler)
-
-if torch.cuda.is_available():
- pipe = pipe.to("cuda")
- pipe_i2i = pipe_i2i.to("cuda")
-
-def error_str(error, title="Error"):
- return f"""#### {title}
- {error}""" if error else ""
-
-def inference(prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt="", auto_prefix=False):
-
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
- prompt = f"{prefix} {prompt}" if auto_prefix else prompt
-
- try:
- if img is not None:
- return img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
- else:
- return txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator), None
- except Exception as e:
- return None, error_str(e)
-
-def txt_to_img(prompt, neg_prompt, guidance, steps, width, height, generator):
-
- result = pipe(
- prompt,
- negative_prompt = neg_prompt,
- num_inference_steps = int(steps),
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-def img_to_img(prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
-
- ratio = min(height / img.height, width / img.width)
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
- result = pipe_i2i(
- prompt,
- negative_prompt = neg_prompt,
- init_image = img,
- num_inference_steps = int(steps),
- strength = strength,
- guidance_scale = guidance,
- width = width,
- height = height,
- generator = generator)
-
- return result.images[0]
-
-css = """.main-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.main-div div h1{font-weight:900;margin-bottom:7px}.main-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
-"""
-with gr.Blocks(css=css) as demo:
- gr.HTML(
- f"""
-
-
-
- Duskfall S General Digital Art Model
-
-
- Demo for Duskfall S General Digital Art Model Stable Diffusion model. If you want to donate towards costs and don't want to subscribe: https://ko-fi.com/DUSKFALLcrew / All samples and info are here: https://civitai.com/user/duskfallcrew Use " gendigi " as your prefix token.
- {"Add the following tokens to your prompts for the model to work properly: prefix" if prefix else ""}
-
- Running on {"GPU 🔥" if torch.cuda.is_available() else f"CPU 🥶. For faster inference it is recommended to upgrade to GPU in Settings"} after duplicating the space
-@article{Ao2021SpeechT5,
- title = {SpeechT5: Unified-Modal Encoder-Decoder Pre-training for Spoken Language Processing},
- author = {Junyi Ao and Rui Wang and Long Zhou and Chengyi Wang and Shuo Ren and Yu Wu and Shujie Liu and Tom Ko and Qing Li and Yu Zhang and Zhihua Wei and Yao Qian and Jinyu Li and Furu Wei},
- eprint={2110.07205},
- archivePrefix={arXiv},
- primaryClass={eess.AS},
- year={2021}
-}
-
-
-
- Example sound credits:
-
-
-
"Hmm, I don't know" from InspectorJ (CC BY 4.0 license)
-
"Henry V" excerpt from acclivity (CC BY-NC 4.0 license)
-
"You can see it in the eyes" from JoyOhJoy (CC0 license)
-
"We yearn for time" from Sample_Me (CC0 license)
-
-
-
-"""
-
-examples = [
- ["examples/hmm_i_dont_know.wav", None],
- ["examples/henry5.mp3", None],
- ["examples/yearn_for_time.mp3", None],
- ["examples/see_in_eyes.wav", None],
-]
-
-gr.Interface(
- fn=predict,
- inputs=[
- gr.Audio(label="Upload Speech", source="upload", type="numpy"),
- gr.Audio(label="Record Speech", source="microphone", type="numpy"),
- ],
- outputs=[
- gr.Text(label="Transcription"),
- ],
- title=title,
- description=description,
- article=article,
- examples=examples,
-).launch()
diff --git a/spaces/MetaWabbit/Auto-GPT/autogpt/__main__.py b/spaces/MetaWabbit/Auto-GPT/autogpt/__main__.py
deleted file mode 100644
index 128f9eea4900429e88276abdde3419b806001ac7..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/autogpt/__main__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Auto-GPT: A GPT powered AI Assistant"""
-import autogpt.cli
-
-if __name__ == "__main__":
- autogpt.cli.main()
diff --git a/spaces/MetaWabbit/Auto-GPT/tests/test_prompt_generator.py b/spaces/MetaWabbit/Auto-GPT/tests/test_prompt_generator.py
deleted file mode 100644
index 6a0bfd6c7bbdbfaa3750e9dee621bd25e17a448b..0000000000000000000000000000000000000000
--- a/spaces/MetaWabbit/Auto-GPT/tests/test_prompt_generator.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from unittest import TestCase
-
-from autogpt.promptgenerator import PromptGenerator
-
-
-class TestPromptGenerator(TestCase):
- """
- Test cases for the PromptGenerator class, which is responsible for generating
- prompts for the AI with constraints, commands, resources, and performance evaluations.
- """
-
- @classmethod
- def setUpClass(cls):
- """
- Set up the initial state for each test method by creating an instance of PromptGenerator.
- """
- cls.generator = PromptGenerator()
-
- # Test whether the add_constraint() method adds a constraint to the generator's constraints list
- def test_add_constraint(self):
- """
- Test if the add_constraint() method adds a constraint to the generator's constraints list.
- """
- constraint = "Constraint1"
- self.generator.add_constraint(constraint)
- self.assertIn(constraint, self.generator.constraints)
-
- # Test whether the add_command() method adds a command to the generator's commands list
- def test_add_command(self):
- """
- Test if the add_command() method adds a command to the generator's commands list.
- """
- command_label = "Command Label"
- command_name = "command_name"
- args = {"arg1": "value1", "arg2": "value2"}
- self.generator.add_command(command_label, command_name, args)
- command = {
- "label": command_label,
- "name": command_name,
- "args": args,
- }
- self.assertIn(command, self.generator.commands)
-
- def test_add_resource(self):
- """
- Test if the add_resource() method adds a resource to the generator's resources list.
- """
- resource = "Resource1"
- self.generator.add_resource(resource)
- self.assertIn(resource, self.generator.resources)
-
- def test_add_performance_evaluation(self):
- """
- Test if the add_performance_evaluation() method adds an evaluation to the generator's
- performance_evaluation list.
- """
- evaluation = "Evaluation1"
- self.generator.add_performance_evaluation(evaluation)
- self.assertIn(evaluation, self.generator.performance_evaluation)
-
- def test_generate_prompt_string(self):
- """
- Test if the generate_prompt_string() method generates a prompt string with all the added
- constraints, commands, resources, and evaluations.
- """
- # Define the test data
- constraints = ["Constraint1", "Constraint2"]
- commands = [
- {
- "label": "Command1",
- "name": "command_name1",
- "args": {"arg1": "value1"},
- },
- {
- "label": "Command2",
- "name": "command_name2",
- "args": {},
- },
- ]
- resources = ["Resource1", "Resource2"]
- evaluations = ["Evaluation1", "Evaluation2"]
-
- # Add test data to the generator
- for constraint in constraints:
- self.generator.add_constraint(constraint)
- for command in commands:
- self.generator.add_command(
- command["label"], command["name"], command["args"]
- )
- for resource in resources:
- self.generator.add_resource(resource)
- for evaluation in evaluations:
- self.generator.add_performance_evaluation(evaluation)
-
- # Generate the prompt string and verify its correctness
- prompt_string = self.generator.generate_prompt_string()
- self.assertIsNotNone(prompt_string)
-
- # Check if all constraints, commands, resources, and evaluations are present in the prompt string
- for constraint in constraints:
- self.assertIn(constraint, prompt_string)
- for command in commands:
- self.assertIn(command["name"], prompt_string)
- for key, value in command["args"].items():
- self.assertIn(f'"{key}": "{value}"', prompt_string)
- for resource in resources:
- self.assertIn(resource, prompt_string)
- for evaluation in evaluations:
- self.assertIn(evaluation, prompt_string)
-
- self.assertIn("constraints", prompt_string.lower())
- self.assertIn("commands", prompt_string.lower())
- self.assertIn("resources", prompt_string.lower())
- self.assertIn("performance evaluation", prompt_string.lower())
diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/satrn/README.md b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/satrn/README.md
deleted file mode 100644
index f0421c9e04e4ea4719cf953ed5871808685cb907..0000000000000000000000000000000000000000
--- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/satrn/README.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# SATRN
-
-> [On Recognizing Texts of Arbitrary Shapes with 2D Self-Attention](https://arxiv.org/abs/1910.04396)
-
-
-
-## Abstract
-
-Scene text recognition (STR) is the task of recognizing character sequences in natural scenes. While there have been great advances in STR methods, current methods still fail to recognize texts in arbitrary shapes, such as heavily curved or rotated texts, which are abundant in daily life (e.g. restaurant signs, product labels, company logos, etc). This paper introduces a novel architecture to recognizing texts of arbitrary shapes, named Self-Attention Text Recognition Network (SATRN), which is inspired by the Transformer. SATRN utilizes the self-attention mechanism to describe two-dimensional (2D) spatial dependencies of characters in a scene text image. Exploiting the full-graph propagation of self-attention, SATRN can recognize texts with arbitrary arrangements and large inter-character spacing. As a result, SATRN outperforms existing STR models by a large margin of 5.7 pp on average in "irregular text" benchmarks. We provide empirical analyses that illustrate the inner mechanisms and the extent to which the model is applicable (e.g. rotated and multi-line text). We will open-source the code.
-
-
-
-
-
-## Dataset
-
-### Train Dataset
-
-| trainset | instance_num | repeat_num | source |
-| :-------: | :----------: | :--------: | :----: |
-| SynthText | 7266686 | 1 | synth |
-| Syn90k | 8919273 | 1 | synth |
-
-### Test Dataset
-
-| testset | instance_num | type |
-| :-----: | :----------: | :-------: |
-| IIIT5K | 3000 | regular |
-| SVT | 647 | regular |
-| IC13 | 1015 | regular |
-| IC15 | 2077 | irregular |
-| SVTP | 645 | irregular |
-| CT80 | 288 | irregular |
-
-## Results and Models
-
-| Methods | | Regular Text | | | | Irregular Text | | download |
-| :--------------------------------------------------------------------: | :----: | :----------: | :-------: | :-: | :-------: | :------------: | :----: | :---------------------------------------------------------------------: |
-| | IIIT5K | SVT | IC13-1015 | | IC15-2077 | SVTP | CT80 | |
-| [Satrn](/configs/textrecog/satrn/satrn_shallow_5e_st_mj.py) | 0.9600 | 0.9181 | 0.9606 | | 0.8045 | 0.8837 | 0.8993 | [model](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow_5e_st_mj/satrn_shallow_5e_st_mj_20220915_152443-5fd04a4c.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow_5e_st_mj/20220915_152443.log) |
-| [Satrn-TTA](/configs/textrecog/satrn/satrn_shallow_5e_st_mj.py) | 0.9530 | 0.9181 | 0.9527 | | 0.8276 | 0.8884 | 0.9028 | |
-| [Satrn_small](/configs/textrecog/satrn/satrn_shallow-small_5e_st_mj.py) | 0.9423 | 0.9011 | 0.9567 | | 0.7886 | 0.8574 | 0.8472 | [model](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow-small_5e_st_mj/satrn_shallow-small_5e_st_mj_20220915_152442-5591bf27.pth) \| [log](https://download.openmmlab.com/mmocr/textrecog/satrn/satrn_shallow-small_5e_st_mj/20220915_152442.log) |
-| [Satrn_small-TTA](/configs/textrecog/satrn/satrn_shallow-small_5e_st_mj.py) | 0.9380 | 0.8995 | 0.9488 | | 0.8122 | 0.8620 | 0.8507 | |
-
-## Citation
-
-```bibtex
-@article{junyeop2019recognizing,
- title={On Recognizing Texts of Arbitrary Shapes with 2D Self-Attention},
- author={Junyeop Lee, Sungrae Park, Jeonghun Baek, Seong Joon Oh, Seonghyeon Kim, Hwalsuk Lee},
- year={2019}
-}
-```
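For reference, the checkpoints in the results table can also be run directly through MMOCR's inference API. A minimal sketch, assuming an MMOCR 1.x installation where `TextRecInferencer` is available; the config path and weights URL are the ones listed above, while the image name is a placeholder:

```python
from mmocr.apis import TextRecInferencer

inferencer = TextRecInferencer(
    model='configs/textrecog/satrn/satrn_shallow_5e_st_mj.py',
    weights='https://download.openmmlab.com/mmocr/textrecog/satrn/'
            'satrn_shallow_5e_st_mj/satrn_shallow_5e_st_mj_20220915_152443-5fd04a4c.pth',
)

result = inferencer('demo_text_recog.jpg')
print(result['predictions'][0]['text'])
```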
diff --git a/spaces/MuGeminorum/insecta/khandy/split_utils.py b/spaces/MuGeminorum/insecta/khandy/split_utils.py
deleted file mode 100644
index a524e817d865a30f99c540e0cc52be230b7ad469..0000000000000000000000000000000000000000
--- a/spaces/MuGeminorum/insecta/khandy/split_utils.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import numbers
-from collections.abc import Sequence
-
-import numpy as np
-
-
-def split_by_num(x, num_splits, strict=True):
- """
- Args:
- num_splits: an integer indicating the number of splits
-
- References:
- numpy.split and numpy.array_split
- """
- # NB: np.ndarray is not Sequence
- assert isinstance(x, (Sequence, np.ndarray))
- assert isinstance(num_splits, numbers.Integral)
-
- if strict:
- assert len(x) % num_splits == 0
- split_size = (len(x) + num_splits - 1) // num_splits
- out_list = []
- for i in range(0, len(x), split_size):
- out_list.append(x[i: i + split_size])
- return out_list
-
-
-def split_by_size(x, sizes):
- """
- References:
- tf.split
- https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/misc.py
- """
- # NB: np.ndarray is not Sequence
- assert isinstance(x, (Sequence, np.ndarray))
- assert isinstance(sizes, (list, tuple))
-
- assert sum(sizes) == len(x)
- out_list = []
- start_index = 0
- for size in sizes:
- out_list.append(x[start_index: start_index + size])
- start_index += size
- return out_list
-
-
-def split_by_slice(x, slices):
- """
- References:
- SliceLayer in Caffe, and numpy.split
- """
- # NB: np.ndarray is not Sequence
- assert isinstance(x, (Sequence, np.ndarray))
- assert isinstance(slices, (list, tuple))
-
- out_list = []
- indices = [0] + list(slices) + [len(x)]
- for i in range(len(slices) + 1):
- out_list.append(x[indices[i]: indices[i + 1]])
- return out_list
-
-
-def split_by_ratio(x, ratios):
- # NB: np.ndarray is not Sequence
- assert isinstance(x, (Sequence, np.ndarray))
- assert isinstance(ratios, (list, tuple))
-
- pdf = [k / sum(ratios) for k in ratios]
- cdf = [sum(pdf[:k]) for k in range(len(pdf) + 1)]
- indices = [int(round(len(x) * k)) for k in cdf]
- return [x[indices[i]: indices[i + 1]] for i in range(len(ratios))]
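A quick usage sketch of the four helpers above (assuming they are re-exported at the top level of the `khandy` package); the inputs are arbitrary examples:

```python
import numpy as np
from khandy import split_by_num, split_by_size, split_by_slice, split_by_ratio

x = list(range(6))
print(split_by_num(x, 3))           # [[0, 1], [2, 3], [4, 5]]
print(split_by_size(x, [1, 2, 3]))  # [[0], [1, 2], [3, 4, 5]]
print(split_by_slice(x, [2, 4]))    # [[0, 1], [2, 3], [4, 5]]

# split_by_ratio rounds cumulative ratios onto indices, so the pieces only
# approximate the requested proportions for short sequences.
print(split_by_ratio(np.arange(10), [1, 1, 2]))
```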
diff --git a/spaces/NCTCMumbai/NCTC/models/official/utils/testing/scripts/builds_common.sh b/spaces/NCTCMumbai/NCTC/models/official/utils/testing/scripts/builds_common.sh
deleted file mode 100644
index 3cf08bb510d2a8ba0b06b1d38ccd1294b159ce15..0000000000000000000000000000000000000000
--- a/spaces/NCTCMumbai/NCTC/models/official/utils/testing/scripts/builds_common.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env bash
-# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-#
-# Common Bash functions used by build scripts
-
-COLOR_NC='\033[0m'
-COLOR_BOLD='\033[1m'
-COLOR_LIGHT_GRAY='\033[0;37m'
-COLOR_GREEN='\033[0;32m'
-COLOR_RED='\033[0;31m'
-
-die() {
- # Print a message and exit with code 1.
- #
- # Usage: die
- # e.g., die "Something bad happened."
-
- echo $@
- exit 1
-}
-
-num_cpus() {
- # Get the number of CPUs
- N_CPUS=$(grep -c ^processor /proc/cpuinfo)
- if [[ -z ${N_CPUS} ]]; then
- die "ERROR: Unable to determine the number of CPUs"
- fi
-
- echo ${N_CPUS}
-}
-
-# List files changed (i.e., added, or revised) from
-# the common ancestor of HEAD and the latest master branch.
-# Usage: get_changed_files_from_master_branch
-get_changed_files_from_master_branch() {
- ANCESTOR=$(git merge-base HEAD master origin/master)
- git diff ${ANCESTOR} --diff-filter=d --name-only "$@"
-}
-
-# List python files changed that still exist,
-# i.e., not removed.
-# Usage: get_py_files_to_check [--incremental]
-get_py_files_to_check() {
- if [[ "$1" == "--incremental" ]]; then
- get_changed_files_from_master_branch -- '*.py'
- elif [[ -z "$1" ]]; then
- find official/ -name '*.py'
- else
- die "Found unsupported args: $@ for get_py_files_to_check."
- fi
-}
diff --git a/spaces/Namit2111/id_verfiy/face_extract.py b/spaces/Namit2111/id_verfiy/face_extract.py
deleted file mode 100644
index 780475813c76d705a1516028b0eb3e04caa5c714..0000000000000000000000000000000000000000
--- a/spaces/Namit2111/id_verfiy/face_extract.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import cv2
-import sys
-
-
-
-
-def extract(face):
- imagePath = face
-
- image = cv2.imread(imagePath)
- gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-
- faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
- faces = faceCascade.detectMultiScale(
- gray,
- scaleFactor=1.3,
- minNeighbors=3,
- minSize=(30, 30)
- )
-
- # print("[INFO] Found {0} Faces.".format(len(faces)))
-
- for (x, y, w, h) in faces:
- cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
- roi_color = image[y:y + h, x:x + w]
-
- name = str(w)+str(h)+'_faces.jpg'
- cv2.imwrite(str(w) + str(h) + '_faces.jpg', roi_color)
- return name
-# status = cv2.imwrite('faces_detected.jpg', image)
-# print("[INFO] Image faces_detected.jpg written to filesystem: ", status)
\ No newline at end of file
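A brief usage note on `extract()` above: it writes one crop per detected face, but because `name` is assigned inside the loop and returned afterwards, only the filename of the last crop is returned (and the call raises if no face is found). A hedged usage sketch with a hypothetical input path:

```python
from face_extract import extract

# Writes one '<w><h>_faces.jpg' crop per detected face into the working
# directory and returns the filename of the last crop written.
crop_path = extract("id_card_photo.jpg")
print(crop_path)
```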
diff --git a/spaces/NbAiLab/whisper-norwegian-small/app.py b/spaces/NbAiLab/whisper-norwegian-small/app.py
deleted file mode 100644
index 0b831ad481d02520215df9bbad8534d0a3811f6a..0000000000000000000000000000000000000000
--- a/spaces/NbAiLab/whisper-norwegian-small/app.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import torch
-
-import gradio as gr
-import pytube as pt
-from transformers import pipeline
-from huggingface_hub import model_info
-
-MODEL_NAME = "NbAiLab/whisper-norwegian-small-test" #this always needs to stay in line 8 :D sorry for the hackiness
-lang = "no"
-
-device = 0 if torch.cuda.is_available() else "cpu"
-pipe = pipeline(
- task="automatic-speech-recognition",
- model=MODEL_NAME,
- chunk_length_s=30,
- device=device,
-)
-
-pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
-
-def transcribe(microphone, file_upload):
- warn_output = ""
- if (microphone is not None) and (file_upload is not None):
- warn_output = (
- "WARNING: You've uploaded an audio file and used the microphone. "
- "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
- )
-
- elif (microphone is None) and (file_upload is None):
- return "ERROR: You have to either use the microphone or upload an audio file"
-
- file = microphone if microphone is not None else file_upload
-
- text = pipe(file)["text"]
-
- return warn_output + text
-
-
-def _return_yt_html_embed(yt_url):
- video_id = yt_url.split("?v=")[-1]
- HTML_str = (
- f'<center> <iframe width="500" height="320" src="https://www.youtube.com/embed/{video_id}"> </iframe>'
- " </center>"
- )
- return HTML_str
-
-
-def yt_transcribe(yt_url):
- yt = pt.YouTube(yt_url)
- html_embed_str = _return_yt_html_embed(yt_url)
- stream = yt.streams.filter(only_audio=True)[0]
- stream.download(filename="audio.mp3")
-
- text = pipe("audio.mp3")["text"]
-
- return html_embed_str, text
-
-
-demo = gr.Blocks()
-
-mf_transcribe = gr.Interface(
- fn=transcribe,
- inputs=[
- gr.inputs.Audio(source="microphone", type="filepath", optional=True),
- gr.inputs.Audio(source="upload", type="filepath", optional=True),
- ],
- outputs="text",
- layout="horizontal",
- theme="huggingface",
- title="Norwegian Whisper Small Demo: Transcribe Audio",
- description=(
- "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the the fine-tuned"
- f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
- " of arbitrary length."
- ),
- allow_flagging="never",
-)
-
-yt_transcribe = gr.Interface(
- fn=yt_transcribe,
- inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")],
- outputs=["html", "text"],
- layout="horizontal",
- theme="huggingface",
- title="Norwegian Whisper Small Demo: Transcribe YouTube",
- description=(
- "Transcribe long-form YouTube videos with the click of a button! Demo uses the the fine-tuned checkpoint:"
- f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files of"
- " arbitrary length."
- ),
- allow_flagging="never",
-)
-
-with demo:
- gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"])
-
-demo.launch(enable_queue=True)
diff --git a/spaces/NechkaP/arxiv-streamlit-lab/app.py b/spaces/NechkaP/arxiv-streamlit-lab/app.py
deleted file mode 100644
index 64c63062991f36d0384568ab1ba4dc61bb0a74a0..0000000000000000000000000000000000000000
--- a/spaces/NechkaP/arxiv-streamlit-lab/app.py
+++ /dev/null
@@ -1,224 +0,0 @@
-import streamlit as st
-
-import warnings
-warnings.simplefilter('ignore')
-import numpy as np
-import pandas as pd
-from tqdm import tqdm
-from sklearn import metrics
-import transformers
-import torch
-import json
-import pandas as pd
-from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
-from sklearn.model_selection import train_test_split
-from transformers import DistilBertTokenizer, DistilBertModel
-import logging
-logging.basicConfig(level=logging.ERROR)
-from torch import cuda
-device = 'cuda' if cuda.is_available() else 'cpu'
-
-
-st.markdown("## arXiv classificator")
-# st.markdown("", unsafe_allow_html=True)
-st.markdown("Please type the article's title and abstract below")
-
-title = st.text_input("Title")
-abstract = st.text_input("Abstract")
-
-
-def is_good(tag: str) -> bool:
- return "stat." in tag\
- or "cs." in tag\
- or "math." in tag\
- or "ph." in tag\
- or "fin." in tag\
- or "bio." in tag\
- or "eess." in tag\
- or "econ." in tag
-
-
-def get_all_tags(tag_str: str):
- tag_json = tag_str.replace("'", '"').replace("None", '"None"')
- return [elem["term"] for elem in json.loads(tag_json) if is_good(elem["term"])]
-
-def join_title_and_summary(row) -> str:
- return row["title"].replace("\n", " ") + " " + row["summary"].replace("\n", " ")
-
-class MultiLabelDataset(Dataset):
- def __init__(self, dataframe, tokenizer, max_len):
- self.tokenizer = tokenizer
- self.data = dataframe
- self.text = self.data["Text"]
- self.targets = self.data["Labels"]
- self.max_len = max_len
-
- def __len__(self):
- return len(self.text)
-
- def __getitem__(self, index):
- text = str(self.text[index])
- text = " ".join(text.split())
-
- inputs = self.tokenizer.encode_plus(
- text,
- truncation=True,
- add_special_tokens=True,
- max_length=self.max_len,
- pad_to_max_length=True,
- return_token_type_ids=True
- )
- ids = inputs['input_ids']
- mask = inputs['attention_mask']
- token_type_ids = inputs["token_type_ids"]
-
- return {
- 'ids': torch.tensor(ids, dtype=torch.long),
- 'mask': torch.tensor(mask, dtype=torch.long),
- 'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
- 'targets': torch.tensor(self.targets[index], dtype=torch.float)
- }
-
-class DistilBERTClass(torch.nn.Module):
- def __init__(self):
- super(DistilBERTClass, self).__init__()
- self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
- self.pre_classifier = torch.nn.Linear(768, 768)
- self.dropout = torch.nn.Dropout(0.1)
- self.classifier = torch.nn.Linear(768, 124)
-
- def forward(self, input_ids, attention_mask, token_type_ids):
- output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
- hidden_state = output_1[0]
- pooler = hidden_state[:, 0]
- pooler = self.pre_classifier(pooler)
- pooler = torch.nn.Tanh()(pooler)
- pooler = self.dropout(pooler)
- output = self.classifier(pooler)
- return output
-
-def loss_fn(outputs, targets):
- return torch.nn.BCEWithLogitsLoss()(outputs, targets)
-
-@st.cache
-def prepare_model():
- with open("./arxivData.json", 'r') as fp:
- data = json.load(fp)
- data = pd.DataFrame(data)
- data.drop(['id', "month", "author", "day", "year", "link"], inplace=True, axis=1)
- labels = data["tag"].map(get_all_tags)
- good_tags = set()
- for tags in labels:
- for tag in tags:
- good_tags.add(tag)
- enum_tags = dict()
- enum_tags_reverse = [None for _ in range(len(good_tags))]
- for idx, tag in enumerate(good_tags):
- enum_tags[tag] = idx
- enum_tags_reverse[idx] = tag
- def map_tags_to_target_vector(tags):
- target_vector = [0.0] * len(enum_tags)
- for tag in tags:
- idx = enum_tags[tag]
- target_vector[idx] = 1.0 / len(tags)
-
- assert np.allclose(np.sum(target_vector), 1.0, 0.000001)
- return target_vector
-
- vectors = labels.map(map_tags_to_target_vector)
- texts = data.apply(join_title_and_summary, axis=1)
- preprocessed_data = pd.DataFrame({
- "Labels": vectors,
- "Text": texts
- })
- MAX_LEN = 512
- TRAIN_BATCH_SIZE = 4
- VALID_BATCH_SIZE = 4
- EPOCHS = 1
- LEARNING_RATE = 1e-05
- tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased', truncation=True,
-to_lower_case=True)
-
- train_data, test_data = train_test_split(preprocessed_data, train_size=0.8)
- train_data.reset_index(drop=True, inplace=True)
- test_data.reset_index(drop=True, inplace=True)
-
- training_set = MultiLabelDataset(train_data, tokenizer, MAX_LEN)
- testing_set = MultiLabelDataset(test_data, tokenizer, MAX_LEN)
-
- train_params = {'batch_size': TRAIN_BATCH_SIZE,
- 'shuffle': True,
- 'num_workers': 0
- }
-
- test_params = {'batch_size': VALID_BATCH_SIZE,
- 'shuffle': True,
- 'num_workers': 0
- }
-
- training_loader = DataLoader(training_set, **train_params)
- testing_loader = DataLoader(testing_set, **test_params)
-
- model = DistilBERTClass()
- model.to(device)
-
- optimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)
-
- def train(epoch):
- model.train()
- for _, data in tqdm(enumerate(training_loader, 0)):
- ids = data['ids'].to(device, dtype = torch.long)
- mask = data['mask'].to(device, dtype = torch.long)
- token_type_ids = data['token_type_ids'].to(device, dtype = torch.long)
- targets = data['targets'].to(device, dtype = torch.float)
- outputs = model(ids, mask, token_type_ids)
-
- optimizer.zero_grad()
- loss = loss_fn(outputs, targets)
- if _ % 100==0:
- print(f'Epoch: {epoch}, Loss: {loss.item()}')
-
- loss.backward()
- optimizer.step()
- for epoch in range(EPOCHS):
- train(epoch)
-
- def predict(text, abstract):
- text += " " + abstract
- text = " ".join(text.split())
-
- inputs = tokenizer.encode_plus(
- text,
- truncation=True,
- add_special_tokens=True,
- max_length=MAX_LEN,
- pad_to_max_length=True,
- return_token_type_ids=True
- )
-
- ids = torch.tensor(inputs['input_ids'], dtype=torch.long).to(device, dtype = torch.long)
- mask = torch.tensor(inputs['attention_mask'], dtype=torch.long).to(device, dtype = torch.long)
- token_type_ids = torch.tensor(inputs["token_type_ids"], dtype=torch.long).to(device, dtype = torch.long)
-
- with torch.no_grad():
- logits = model(ids, attention_mask=mask, token_type_ids=token_type_ids)
-
- argmax = logits.cpu().detach().numpy().argmax()
- return enum_tags_reverse[argmax]
-
- return predict
-
-predict_function = prepare_model()
-
-try:
- raw_predictions = predict_function(title, abstract)
- st.markdown(f"The most likely arXiv tag for this article is:")# {raw_predictions}")
- if raw_predictions:
- st.markdown(f"* {raw_predictions}")
- elif (title or abstract):
- st.markdown("* cs.CV")
- else:
- st.markdown("Oops... your input is empty")
-except:
- st.markdown("Oops... something went wrong")
\ No newline at end of file
diff --git a/spaces/NeilRokad/dreambooth-training/train_dreambooth.py b/spaces/NeilRokad/dreambooth-training/train_dreambooth.py
deleted file mode 100644
index f4ff135e549f0d6c72f733092f3df817cb178e01..0000000000000000000000000000000000000000
--- a/spaces/NeilRokad/dreambooth-training/train_dreambooth.py
+++ /dev/null
@@ -1,889 +0,0 @@
-import argparse
-import itertools
-import math
-import os
-from pathlib import Path
-from typing import Optional
-import subprocess
-import sys
-import gc
-import random
-
-import torch
-import torch.nn.functional as F
-import torch.utils.checkpoint
-from torch.utils.data import Dataset
-
-from accelerate import Accelerator
-from accelerate.logging import get_logger
-from accelerate.utils import set_seed
-from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
-from diffusers.utils.import_utils import is_xformers_available
-from diffusers.optimization import get_scheduler
-from huggingface_hub import HfFolder, Repository, whoami
-from PIL import Image
-from torchvision import transforms
-from tqdm.auto import tqdm
-from transformers import CLIPTextModel, CLIPTokenizer
-
-
-logger = get_logger(__name__)
-
-
-def parse_args():
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
- parser.add_argument(
- "--pretrained_model_name_or_path",
- type=str,
- default=None,
- #required=True,
- help="Path to pretrained model or model identifier from huggingface.co/models.",
- )
- parser.add_argument(
- "--tokenizer_name",
- type=str,
- default=None,
- help="Pretrained tokenizer name or path if not the same as model_name",
- )
- parser.add_argument(
- "--instance_data_dir",
- type=str,
- default=None,
- #required=True,
- help="A folder containing the training data of instance images.",
- )
- parser.add_argument(
- "--class_data_dir",
- type=str,
- default=None,
- #required=False,
- help="A folder containing the training data of class images.",
- )
- parser.add_argument(
- "--instance_prompt",
- type=str,
- default=None,
- help="The prompt with identifier specifying the instance",
- )
- parser.add_argument(
- "--class_prompt",
- type=str,
- default="",
- help="The prompt to specify images in the same class as provided instance images.",
- )
- parser.add_argument(
- "--with_prior_preservation",
- default=False,
- action="store_true",
- help="Flag to add prior preservation loss.",
- )
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
- parser.add_argument(
- "--num_class_images",
- type=int,
- default=100,
- help=(
- "Minimal class images for prior preservation loss. If not have enough images, additional images will be"
- " sampled with class_prompt."
- ),
- )
- parser.add_argument(
- "--output_dir",
- type=str,
- default="",
- help="The output directory where the model predictions and checkpoints will be written.",
- )
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
- parser.add_argument(
- "--resolution",
- type=int,
- default=512,
- help=(
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
- " resolution"
- ),
- )
- parser.add_argument(
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
- )
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
- parser.add_argument(
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
- )
- parser.add_argument(
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
- )
- parser.add_argument("--num_train_epochs", type=int, default=1)
- parser.add_argument(
- "--max_train_steps",
- type=int,
- default=None,
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
- )
- parser.add_argument(
- "--gradient_accumulation_steps",
- type=int,
- default=1,
- help="Number of updates steps to accumulate before performing a backward/update pass.",
- )
- parser.add_argument(
- "--gradient_checkpointing",
- action="store_true",
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
- )
- parser.add_argument(
- "--learning_rate",
- type=float,
- default=5e-6,
- help="Initial learning rate (after the potential warmup period) to use.",
- )
- parser.add_argument(
- "--scale_lr",
- action="store_true",
- default=False,
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
- )
- parser.add_argument(
- "--lr_scheduler",
- type=str,
- default="constant",
- help=(
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
- ' "constant", "constant_with_warmup"]'
- ),
- )
- parser.add_argument(
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
- )
- parser.add_argument(
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
- )
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
- parser.add_argument(
- "--hub_model_id",
- type=str,
- default=None,
- help="The name of the repository to keep in sync with the local `output_dir`.",
- )
- parser.add_argument(
- "--logging_dir",
- type=str,
- default="logs",
- help=(
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
- ),
- )
- parser.add_argument(
- "--mixed_precision",
- type=str,
- default="no",
- choices=["no", "fp16", "bf16"],
- help=(
- "Whether to use mixed precision. Choose"
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
- "and an Nvidia Ampere GPU."
- ),
- )
-
- parser.add_argument(
- "--save_n_steps",
- type=int,
- default=1,
- help=("Save the model every n global_steps"),
- )
-
-
- parser.add_argument(
- "--save_starting_step",
- type=int,
- default=1,
- help=("The step from which it starts saving intermediary checkpoints"),
- )
-
- parser.add_argument(
- "--stop_text_encoder_training",
- type=int,
- default=1000000,
- help=("The step at which the text_encoder is no longer trained"),
- )
-
-
- parser.add_argument(
- "--image_captions_filename",
- action="store_true",
- help="Get captions from filename",
- )
-
-
- parser.add_argument(
- "--dump_only_text_encoder",
- action="store_true",
- default=False,
- help="Dump only text encoder",
- )
-
- parser.add_argument(
- "--train_only_unet",
- action="store_true",
- default=False,
- help="Train only the unet",
- )
-
- parser.add_argument(
- "--cache_latents",
- action="store_true",
- default=False,
- help="Train only the unet",
- )
-
- parser.add_argument(
- "--Session_dir",
- type=str,
- default="",
- help="Current session directory",
- )
-
-
-
-
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
-
- args = parser.parse_args()
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
- if env_local_rank != -1 and env_local_rank != args.local_rank:
- args.local_rank = env_local_rank
-
- #if args.instance_data_dir is None:
- # raise ValueError("You must specify a train data directory.")
-
- #if args.with_prior_preservation:
- # if args.class_data_dir is None:
- # raise ValueError("You must specify a data directory for class images.")
- # if args.class_prompt is None:
- # raise ValueError("You must specify prompt for class images.")
-
- return args
-
-
-class DreamBoothDataset(Dataset):
- """
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
- It pre-processes the images and tokenizes the prompts.
- """
-
- def __init__(
- self,
- instance_data_root,
- instance_prompt,
- tokenizer,
- args,
- class_data_root=None,
- class_prompt=None,
- size=512,
- center_crop=False,
- ):
- self.size = size
- self.center_crop = center_crop
- self.tokenizer = tokenizer
- self.image_captions_filename = None
-
- self.instance_data_root = Path(instance_data_root)
- if not self.instance_data_root.exists():
- raise ValueError("Instance images root doesn't exists.")
-
- self.instance_images_path = list(Path(instance_data_root).iterdir())
- self.num_instance_images = len(self.instance_images_path)
- self.instance_prompt = instance_prompt
- self._length = self.num_instance_images
-
- if args.image_captions_filename:
- self.image_captions_filename = True
-
- if class_data_root is not None:
- self.class_data_root = Path(class_data_root)
- self.class_data_root.mkdir(parents=True, exist_ok=True)
- self.class_images_path = list(self.class_data_root.iterdir())
- random.shuffle(self.class_images_path)
- self.num_class_images = len(self.class_images_path)
- self._length = max(self.num_class_images, self.num_instance_images)
- self.class_prompt = class_prompt
- else:
- self.class_data_root = None
-
- self.image_transforms = transforms.Compose(
- [
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
- transforms.ToTensor(),
- transforms.Normalize([0.5], [0.5]),
- ]
- )
-
- def __len__(self):
- return self._length
-
- def __getitem__(self, index):
- example = {}
- path = self.instance_images_path[index % self.num_instance_images]
- instance_image = Image.open(path)
- if not instance_image.mode == "RGB":
- instance_image = instance_image.convert("RGB")
-
- instance_prompt = self.instance_prompt
-
- if self.image_captions_filename:
- filename = Path(path).stem
- pt=''.join([i for i in filename if not i.isdigit()])
- pt=pt.replace("_"," ")
- pt=pt.replace("(","")
- pt=pt.replace(")","")
- pt=pt.replace("-","")
- instance_prompt = pt
- sys.stdout.write(" [0;32m" +instance_prompt+" [0m")
- sys.stdout.flush()
-
-
- example["instance_images"] = self.image_transforms(instance_image)
- example["instance_prompt_ids"] = self.tokenizer(
- instance_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- if self.class_data_root:
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
- if not class_image.mode == "RGB":
- class_image = class_image.convert("RGB")
- example["class_images"] = self.image_transforms(class_image)
- example["class_prompt_ids"] = self.tokenizer(
- self.class_prompt,
- padding="do_not_pad",
- truncation=True,
- max_length=self.tokenizer.model_max_length,
- ).input_ids
-
- return example
-
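The `image_captions_filename` branch above derives the training prompt from each image's filename by stripping digits, underscores, parentheses, and dashes. A tiny sketch of that rule on a hypothetical filename:

```python
from pathlib import Path

stem = Path("a_photo_of_sks_dog (3).jpg").stem            # "a_photo_of_sks_dog (3)"
pt = "".join(c for c in stem if not c.isdigit())          # drop digits
pt = pt.replace("_", " ").replace("(", "").replace(")", "").replace("-", "")
print(repr(pt))  # 'a photo of sks dog '  (note the trailing space)
```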
-
-
-class PromptDataset(Dataset):
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
-
- def __init__(self, prompt, num_samples):
- self.prompt = prompt
- self.num_samples = num_samples
-
- def __len__(self):
- return self.num_samples
-
- def __getitem__(self, index):
- example = {}
- example["prompt"] = self.prompt
- example["index"] = index
- return example
-
-class LatentsDataset(Dataset):
- def __init__(self, latents_cache, text_encoder_cache):
- self.latents_cache = latents_cache
- self.text_encoder_cache = text_encoder_cache
-
- def __len__(self):
- return len(self.latents_cache)
-
- def __getitem__(self, index):
- return self.latents_cache[index], self.text_encoder_cache[index]
-
-def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
- if token is None:
- token = HfFolder.get_token()
- if organization is None:
- username = whoami(token)["name"]
- return f"{username}/{model_id}"
- else:
- return f"{organization}/{model_id}"
-
-def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
- """
-    Starts from the base ``starting_dict`` and then adds the key/value pairs from ``updater_dict``,
-    replacing the values of any colliding keys with those from ``updater_dict``.
-
-    For later: how does d = {**d1, **d2} resolve collisions?
-
-    :param starting_dict: base dict whose values may be overridden.
-    :param updater_dict: dict whose key/value pairs take precedence on collisions.
-    :return: the merged dict.
- """
- new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict
- new_dict.update(updater_dict) # modifies starting_dict with keys and values of updater_dict
- return new_dict
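-
-# Note on the docstring question above: both ``{**d1, **d2}`` and ``dict.update`` resolve key
-# collisions in favour of the right-hand (updater) dict, e.g.
-#   merge_two_dicts({"lr": 1e-4, "seed": 0}, {"lr": 5e-5}) == {"lr": 5e-5, "seed": 0}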
-
-def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
- """
-    Merge two ``argparse.Namespace`` objects into one; on key collisions the values from ``args2`` take precedence.
-
-    ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
-    :param args1: base namespace.
-    :param args2: namespace whose values override those in ``args1``.
-    :return: the merged namespace.
- """
- # - the merged args
- # The vars() function returns the __dict__ attribute to values of the given object e.g {field:value}.
- merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
- args = argparse.Namespace(**merged_key_values_for_namespace)
- return args
-
-def run_training(args_imported):
- args_default = parse_args()
- args = merge_args(args_default, args_imported)
- print(args)
- logging_dir = Path(args.output_dir, args.logging_dir)
- i=args.save_starting_step
- accelerator = Accelerator(
- gradient_accumulation_steps=args.gradient_accumulation_steps,
- mixed_precision=args.mixed_precision,
- log_with="tensorboard",
- logging_dir=logging_dir,
- )
-
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
- raise ValueError(
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
- )
-
- if args.seed is not None:
- set_seed(args.seed)
-
- if args.with_prior_preservation:
- class_images_dir = Path(args.class_data_dir)
- if not class_images_dir.exists():
- class_images_dir.mkdir(parents=True)
- cur_class_images = len(list(class_images_dir.iterdir()))
-
- if cur_class_images < args.num_class_images:
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype
- )
- pipeline.set_progress_bar_config(disable=True)
-
- num_new_images = args.num_class_images - cur_class_images
- logger.info(f"Number of class images to sample: {num_new_images}.")
-
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
- sample_dataloader = accelerator.prepare(sample_dataloader)
- pipeline.to(accelerator.device)
-
- for example in tqdm(
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
- ):
- with torch.autocast("cuda"):
- images = pipeline(example["prompt"]).images
-
- for i, image in enumerate(images):
- image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
-
- del pipeline
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # Handle the repository creation
- if accelerator.is_main_process:
- if args.push_to_hub:
- if args.hub_model_id is None:
- repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
- else:
- repo_name = args.hub_model_id
- repo = Repository(args.output_dir, clone_from=repo_name)
-
- with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
- if "step_*" not in gitignore:
- gitignore.write("step_*\n")
- if "epoch_*" not in gitignore:
- gitignore.write("epoch_*\n")
- elif args.output_dir is not None:
- os.makedirs(args.output_dir, exist_ok=True)
-
- # Load the tokenizer
- if args.tokenizer_name:
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
- elif args.pretrained_model_name_or_path:
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
-
- # Load models and create wrapper for stable diffusion
- if args.train_only_unet:
- if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
- elif os.path.exists(str(args.output_dir+"/text_encoder")):
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- else:
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
- if is_xformers_available():
- try:
- print("Enabling memory efficient attention with xformers...")
- unet.enable_xformers_memory_efficient_attention()
- except Exception as e:
- logger.warning(
- f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
- )
- vae.requires_grad_(False)
- if not args.train_text_encoder:
- text_encoder.requires_grad_(False)
-
- if args.gradient_checkpointing:
- unet.enable_gradient_checkpointing()
- if args.train_text_encoder:
- text_encoder.gradient_checkpointing_enable()
-
- if args.scale_lr:
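-        # Linearly scale the learning rate with the effective total batch size
-        # (per-device batch size x gradient accumulation steps x number of processes).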
- args.learning_rate = (
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
- )
-
-    # Use 8-bit Adam for lower memory usage or to fine-tune the model on 16GB GPUs
- if args.use_8bit_adam:
- try:
- import bitsandbytes as bnb
- except ImportError:
- raise ImportError(
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
- )
-
- optimizer_class = bnb.optim.AdamW8bit
- else:
- optimizer_class = torch.optim.AdamW
-
- params_to_optimize = (
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
- )
- optimizer = optimizer_class(
- params_to_optimize,
- lr=args.learning_rate,
- betas=(args.adam_beta1, args.adam_beta2),
- weight_decay=args.adam_weight_decay,
- eps=args.adam_epsilon,
- )
-
- noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
-
- train_dataset = DreamBoothDataset(
- instance_data_root=args.instance_data_dir,
- instance_prompt=args.instance_prompt,
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
- class_prompt=args.class_prompt,
- tokenizer=tokenizer,
- size=args.resolution,
- center_crop=args.center_crop,
- args=args,
- )
-
- def collate_fn(examples):
- input_ids = [example["instance_prompt_ids"] for example in examples]
- pixel_values = [example["instance_images"] for example in examples]
-
- # Concat class and instance examples for prior preservation.
- # We do this to avoid doing two forward passes.
- if args.with_prior_preservation:
- input_ids += [example["class_prompt_ids"] for example in examples]
- pixel_values += [example["class_images"] for example in examples]
-
- pixel_values = torch.stack(pixel_values)
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
-
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
-
- batch = {
- "input_ids": input_ids,
- "pixel_values": pixel_values,
- }
- return batch
-
- train_dataloader = torch.utils.data.DataLoader(
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
- )
-
- # Scheduler and math around the number of training steps.
- overrode_max_train_steps = False
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if args.max_train_steps is None:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- overrode_max_train_steps = True
-
- lr_scheduler = get_scheduler(
- args.lr_scheduler,
- optimizer=optimizer,
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
- )
-
- if args.train_text_encoder:
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
- )
- else:
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
- unet, optimizer, train_dataloader, lr_scheduler
- )
-
- weight_dtype = torch.float32
- if args.mixed_precision == "fp16":
- weight_dtype = torch.float16
- elif args.mixed_precision == "bf16":
- weight_dtype = torch.bfloat16
-
-    # Move text_encoder and vae to GPU.
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
- # as these models are only used for inference, keeping weights in full precision is not required.
- vae.to(accelerator.device, dtype=weight_dtype)
- if not args.train_text_encoder:
- text_encoder.to(accelerator.device, dtype=weight_dtype)
-
-
- if args.cache_latents:
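-        # Pre-compute the VAE latents (and, when the text encoder is frozen, its hidden
-        # states) once up front, so the VAE can be deleted below instead of being re-run
-        # on every training step.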
- latents_cache = []
- text_encoder_cache = []
- for batch in tqdm(train_dataloader, desc="Caching latents"):
- with torch.no_grad():
- batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype)
- batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True)
- latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
- if args.train_text_encoder:
- text_encoder_cache.append(batch["input_ids"])
- else:
- text_encoder_cache.append(text_encoder(batch["input_ids"])[0])
- train_dataset = LatentsDataset(latents_cache, text_encoder_cache)
- train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True)
-
- del vae
- #if not args.train_text_encoder:
- # del text_encoder
- if torch.cuda.is_available():
- torch.cuda.empty_cache()
-
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
- if overrode_max_train_steps:
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
- # Afterwards we recalculate our number of training epochs
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
- # We need to initialize the trackers we use, and also store our configuration.
-    # The trackers initialize automatically on the main process.
- if accelerator.is_main_process:
- accelerator.init_trackers("dreambooth", config=vars(args))
-
- def bar(prg):
- br='|'+'█' * prg + ' ' * (25-prg)+'|'
- return br
-
- # Train!
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
- logger.info("***** Running training *****")
- logger.info(f" Num examples = {len(train_dataset)}")
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
- logger.info(f" Num Epochs = {args.num_train_epochs}")
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
- logger.info(f" Total optimization steps = {args.max_train_steps}")
- # Only show the progress bar once on each machine.
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
- global_step = 0
-
- for epoch in range(args.num_train_epochs):
- unet.train()
- if args.train_text_encoder:
- text_encoder.train()
- for step, batch in enumerate(train_dataloader):
- with accelerator.accumulate(unet):
- # Convert images to latent space
- with torch.no_grad():
- if args.cache_latents:
- latents_dist = batch[0][0]
- else:
- latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist
- latents = latents_dist.sample() * 0.18215
-
- # Sample noise that we'll add to the latents
- noise = torch.randn_like(latents)
- bsz = latents.shape[0]
- # Sample a random timestep for each image
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
- timesteps = timesteps.long()
-
- # Add noise to the latents according to the noise magnitude at each timestep
- # (this is the forward diffusion process)
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
- # Get the text embedding for conditioning
- if(args.cache_latents):
- if args.train_text_encoder:
- encoder_hidden_states = text_encoder(batch[0][1])[0]
- else:
- encoder_hidden_states = batch[0][1]
- else:
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
- # Predict the noise residual
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
- # Get the target for loss depending on the prediction type
- if noise_scheduler.config.prediction_type == "epsilon":
- target = noise
- elif noise_scheduler.config.prediction_type == "v_prediction":
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
- else:
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
- if args.with_prior_preservation:
- # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
- target, target_prior = torch.chunk(target, 2, dim=0)
-
- # Compute instance loss
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
-
- # Compute prior loss
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
- # Add the prior loss to the instance loss.
- loss = loss + args.prior_loss_weight * prior_loss
- else:
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
- accelerator.backward(loss)
- if accelerator.sync_gradients:
- params_to_clip = (
- itertools.chain(unet.parameters(), text_encoder.parameters())
- if args.train_text_encoder
- else unet.parameters()
- )
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
- optimizer.step()
- lr_scheduler.step()
- optimizer.zero_grad()
-
- # Checks if the accelerator has performed an optimization step behind the scenes
- if accelerator.sync_gradients:
- progress_bar.update(1)
- global_step += 1
-
- fll=round((global_step*100)/args.max_train_steps)
- fll=round(fll/4)
- pr=bar(fll)
-
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
- progress_bar.set_postfix(**logs)
- progress_bar.set_description_str("Progress:"+pr)
- accelerator.log(logs, step=global_step)
-
- if global_step >= args.max_train_steps:
- break
-
- if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
- if accelerator.is_main_process:
-                    print(" \033[0;32m" +" Freezing the text_encoder ..."+" \033[0m")
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if os.path.exists(frz_dir):
- subprocess.call('rm -r '+ frz_dir, shell=True)
- os.mkdir(frz_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(frz_dir)
-
- if args.save_n_steps >= 200:
- if global_step < args.max_train_steps and global_step+1==i:
- ckpt_name = "_step_" + str(global_step+1)
- save_dir = Path(args.output_dir+ckpt_name)
- save_dir=str(save_dir)
- save_dir=save_dir.replace(" ", "_")
- if not os.path.exists(save_dir):
- os.mkdir(save_dir)
- inst=save_dir[16:]
- inst=inst.replace(" ", "_")
-                    print(" \033[1;32mSAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt")
- # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(save_dir)
- frz_dir=args.output_dir + "/text_encoder_frozen"
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
- subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
- chkpth=args.Session_dir+"/"+inst+".ckpt"
- subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
- subprocess.call('rm -r '+ save_dir, shell=True)
- i=i+args.save_n_steps
-
- accelerator.wait_for_everyone()
-
-    # Create the pipeline using the trained modules and save it.
- if accelerator.is_main_process:
- if args.dump_only_text_encoder:
- txt_dir=args.output_dir + "/text_encoder_trained"
- if not os.path.exists(txt_dir):
- os.mkdir(txt_dir)
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.text_encoder.save_pretrained(txt_dir)
-
- elif args.train_only_unet:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- pipeline.save_pretrained(args.output_dir)
- txt_dir=args.output_dir + "/text_encoder_trained"
- subprocess.call('rm -r '+txt_dir, shell=True)
-
- else:
- pipeline = StableDiffusionPipeline.from_pretrained(
- args.pretrained_model_name_or_path,
- unet=accelerator.unwrap_model(unet),
- text_encoder=accelerator.unwrap_model(text_encoder),
- )
- frz_dir=args.output_dir + "/text_encoder_frozen"
- pipeline.save_pretrained(args.output_dir)
- if args.train_text_encoder and os.path.exists(frz_dir):
- subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
- subprocess.call('rm -r '+ frz_dir, shell=True)
-
- if args.push_to_hub:
- repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
- accelerator.end_training()
- del pipeline
- torch.cuda.empty_cache()
- gc.collect()
-if __name__ == "__main__":
- pass
- #main()
-
diff --git a/spaces/Ninjagolover69/text_generator1/README.md b/spaces/Ninjagolover69/text_generator1/README.md
deleted file mode 100644
index 0114fe0b8319c43e207b532377b72d98ef892950..0000000000000000000000000000000000000000
--- a/spaces/Ninjagolover69/text_generator1/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Text Generator1
-emoji: 📉
-colorFrom: pink
-colorTo: blue
-sdk: gradio
-sdk_version: 3.15.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/OAOA/DifFace/basicsr/archs/rrdbnet_arch.py b/spaces/OAOA/DifFace/basicsr/archs/rrdbnet_arch.py
deleted file mode 100644
index 63d07080c2ec1305090c59b7bfbbda2b003b18e4..0000000000000000000000000000000000000000
--- a/spaces/OAOA/DifFace/basicsr/archs/rrdbnet_arch.py
+++ /dev/null
@@ -1,119 +0,0 @@
-import torch
-from torch import nn as nn
-from torch.nn import functional as F
-
-from basicsr.utils.registry import ARCH_REGISTRY
-from .arch_util import default_init_weights, make_layer, pixel_unshuffle
-
-
-class ResidualDenseBlock(nn.Module):
- """Residual Dense Block.
-
- Used in RRDB block in ESRGAN.
-
- Args:
- num_feat (int): Channel number of intermediate features.
- num_grow_ch (int): Channels for each growth.
- """
-
- def __init__(self, num_feat=64, num_grow_ch=32):
- super(ResidualDenseBlock, self).__init__()
- self.conv1 = nn.Conv2d(num_feat, num_grow_ch, 3, 1, 1)
- self.conv2 = nn.Conv2d(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
- self.conv3 = nn.Conv2d(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
- self.conv4 = nn.Conv2d(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
- self.conv5 = nn.Conv2d(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
-
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-
- # initialization
- default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)
-
- def forward(self, x):
- x1 = self.lrelu(self.conv1(x))
- x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
- x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
- x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
- x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
- # Empirically, we use 0.2 to scale the residual for better performance
- return x5 * 0.2 + x
-
-
-class RRDB(nn.Module):
- """Residual in Residual Dense Block.
-
- Used in RRDB-Net in ESRGAN.
-
- Args:
- num_feat (int): Channel number of intermediate features.
- num_grow_ch (int): Channels for each growth.
- """
-
- def __init__(self, num_feat, num_grow_ch=32):
- super(RRDB, self).__init__()
- self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
- self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
- self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)
-
- def forward(self, x):
- out = self.rdb1(x)
- out = self.rdb2(out)
- out = self.rdb3(out)
- # Empirically, we use 0.2 to scale the residual for better performance
- return out * 0.2 + x
-
-
-@ARCH_REGISTRY.register()
-class RRDBNet(nn.Module):
- """Networks consisting of Residual in Residual Dense Block, which is used
- in ESRGAN.
-
- ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
-
- We extend ESRGAN for scale x2 and scale x1.
- Note: This is one option for scale 1, scale 2 in RRDBNet.
-    We first employ the pixel-unshuffle (an inverse operation of pixel-shuffle) to reduce the spatial size
-    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.
-
- Args:
- num_in_ch (int): Channel number of inputs.
- num_out_ch (int): Channel number of outputs.
- num_feat (int): Channel number of intermediate features.
- Default: 64
-        num_block (int): Number of blocks in the trunk network. Default: 23.
- num_grow_ch (int): Channels for each growth. Default: 32.
- """
-
- def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
- super(RRDBNet, self).__init__()
- self.scale = scale
- if scale == 2:
- num_in_ch = num_in_ch * 4
- elif scale == 1:
- num_in_ch = num_in_ch * 16
- self.conv_first = nn.Conv2d(num_in_ch, num_feat, 3, 1, 1)
- self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
- self.conv_body = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- # upsample
- self.conv_up1 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_up2 = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_hr = nn.Conv2d(num_feat, num_feat, 3, 1, 1)
- self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1)
-
- self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-
- def forward(self, x):
- if self.scale == 2:
- feat = pixel_unshuffle(x, scale=2)
- elif self.scale == 1:
- feat = pixel_unshuffle(x, scale=4)
- else:
- feat = x
- feat = self.conv_first(feat)
- body_feat = self.conv_body(self.body(feat))
- feat = feat + body_feat
- # upsample
- feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
- feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
- out = self.conv_last(self.lrelu(self.conv_hr(feat)))
- return out
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py
deleted file mode 100644
index a1f0d902acf0756580a1f4604feee8fc499a9a63..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/hubert/simple_kmeans/dump_w2v2_feature.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-import sys
-
-import fairseq
-import soundfile as sf
-import torch
-import torch.nn.functional as F
-
-from feature_utils import get_path_iterator, dump_feature
-
-
-logging.basicConfig(
- format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S",
- level=os.environ.get("LOGLEVEL", "INFO").upper(),
- stream=sys.stdout,
-)
-logger = logging.getLogger("dump_w2v2_feature")
-
-
-class Wav2Vec2FeatureReader(object):
- def __init__(self, ckpt_path, layer, max_chunk=1600000):
- (
- model,
- cfg,
- task,
- ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
- self.model = model[0].eval().cuda()
- self.task = task
- self.layer = layer # assume this is 1-based like HuBERT
- self.max_chunk = max_chunk
- logger.info(f"TASK CONFIG:\n{self.task.cfg}")
- logger.info(f" max_chunk = {self.max_chunk}")
- logger.info(f" model:\n{self.model}")
-
- def read_audio(self, path, ref_len=None):
- wav, sr = sf.read(path)
- assert sr == self.task.cfg.sample_rate, sr
- if wav.ndim == 2:
- wav = wav.mean(-1)
- assert wav.ndim == 1, wav.ndim
- if ref_len is not None and abs(ref_len - len(wav)) > 160:
- logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
- return wav
-
- def get_feats(self, path, ref_len=None):
- x = self.read_audio(path, ref_len)
- with torch.no_grad():
- x = torch.from_numpy(x).float().cuda()
- if self.task.cfg.normalize:
- x = F.layer_norm(x, x.shape)
- x = x.view(1, -1)
-
- feat = []
- for start in range(0, x.size(1), self.max_chunk):
- x_chunk = x[:, start: start + self.max_chunk]
- res = self.model.extract_features(
- source=x_chunk,
- padding_mask=None,
- mask=False,
- layer=self.layer - 1,
- )
- feat_chunk = res["x"]
- feat.append(feat_chunk)
- return torch.cat(feat, 1).squeeze(0)
-
-
-def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk):
- reader = Wav2Vec2FeatureReader(ckpt_path, layer, max_chunk)
- generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
- dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
-
-
-if __name__ == "__main__":
- import argparse
-
- parser = argparse.ArgumentParser()
- parser.add_argument("tsv_dir")
- parser.add_argument("split")
- parser.add_argument("ckpt_path")
- parser.add_argument("layer", type=int)
- parser.add_argument("nshard", type=int)
- parser.add_argument("rank", type=int)
- parser.add_argument("feat_dir")
- parser.add_argument("--max_chunk", type=int, default=1600000)
- args = parser.parse_args()
- logger.info(args)
-
- main(**vars(args))
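-
-    # Example invocation (placeholder paths/values):
-    #   python dump_w2v2_feature.py data/tsv train ckpt/wav2vec2.pt 14 1 0 data/feat --max_chunk 1600000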
diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/simultaneous_translation/tests/test_text_models.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/simultaneous_translation/tests/test_text_models.py
deleted file mode 100644
index 127adfa6337333ba5ae598fcd158956def0d520f..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/simultaneous_translation/tests/test_text_models.py
+++ /dev/null
@@ -1,407 +0,0 @@
-import argparse
-import unittest
-from typing import Any, Dict
-
-import torch
-from examples.simultaneous_translation.models import (
- transformer_monotonic_attention
-)
-
-
-from tests.test_roberta import FakeTask
-
-
-DEFAULT_CONFIG = {
- "attention_eps": 1e-6,
- "mass_preservation": True,
- "noise_type": "flat",
- "noise_mean": 0.0,
- "noise_var": 1.0,
- "energy_bias_init": -2,
- "energy_bias": True
-}
-
-
-PAD_INDEX = 1
-
-
-def generate_config(overrides_kv):
- new_dict = {key: value for key, value in DEFAULT_CONFIG.items()}
- for key, value in overrides_kv.items():
- new_dict[key] = value
- return new_dict
-
-
-def make_sample_with_padding(longer_src=False) -> Dict[str, Any]:
- tokens_1 = torch.LongTensor(
- [
- [2, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15, 2],
- [
- 2, 11, 12, 14, 15, 10, 11, 12, 13, 14, 15, 2,
- PAD_INDEX, PAD_INDEX
- ],
- ]
- )
- tokens_2 = torch.LongTensor(
- [
- [2, 11, 12, 13, 14, 2, PAD_INDEX, PAD_INDEX],
- [2, 11, 22, 33, 2, PAD_INDEX, PAD_INDEX, PAD_INDEX]
- ]
- )
- if longer_src:
- src_tokens = tokens_1[:, 1:]
- prev_output_tokens = tokens_2
- else:
- src_tokens = tokens_2[:, 1:8]
- prev_output_tokens = tokens_1
-
- src_lengths = src_tokens.ne(PAD_INDEX).sum(dim=1).long()
-
- sample = {
- "net_input": {
- "src_tokens": src_tokens,
- "prev_output_tokens": prev_output_tokens,
- "src_lengths": src_lengths,
- },
- "target": prev_output_tokens[:, 1:],
- }
- return sample
-
-
-def build_transformer_monotonic_attention(**extra_args: Any):
- overrides = {
-        # Use characteristic dimensions
- "encoder_embed_dim": 12,
- "encoder_ffn_embed_dim": 14,
- "decoder_embed_dim": 12,
- "decoder_ffn_embed_dim": 14,
- # Disable dropout so we have comparable tests.
- "dropout": 0,
- "attention_dropout": 0,
- "activation_dropout": 0,
- "encoder_layerdrop": 0,
- }
- overrides.update(extra_args)
- # Overrides the defaults from the parser
- args = argparse.Namespace(**overrides)
- transformer_monotonic_attention.monotonic_tiny_architecture(args)
-
- torch.manual_seed(0)
- task = FakeTask(args)
- return (
- transformer_monotonic_attention
- .TransformerModelSimulTrans
- .build_model(args, task)
- )
-
-
-def expected_alignment_formula(
- p_choose,
- mass_perservation=True,
- padding_mask=None
-):
- # Online and Linear-Time Attention by Enforcing Monotonic Alignments
- # https://arxiv.org/pdf/1704.00784.pdf
- # Eq 18, 19
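-    # As implemented by the loops below:
-    #   alpha[i, j] = p_choose[i, j] * sum_{k <= j} alpha[i - 1, k] * prod_{l = k}^{j - 1} (1 - p_choose[i, l])
-    # with alpha[-1, k] taken to be 1 for k == 0 and 0 otherwise (the i == 0 special case).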
- bsz, tgt_len, src_len = p_choose.size()
- alpha = torch.zeros_like(p_choose)
-
- if padding_mask is not None:
- bsz_pad = padding_mask.size(0)
- num_heads = int(bsz / bsz_pad)
- padding_mask = (
- padding_mask
- .unsqueeze(1)
- .expand([bsz_pad, num_heads, src_len])
- .contiguous()
- .view(-1, src_len)
- )
-
- p_choose = p_choose.masked_fill(padding_mask.unsqueeze(1), 0)
-
- for bsz_i in range(bsz):
- for i in range(tgt_len):
- for j in range(src_len):
- if i == 0:
- if j == 0:
- # First source token
- alpha[bsz_i, i, j] = p_choose[bsz_i, i, j]
- else:
- # First target token
- alpha[bsz_i, i, j] = (
- p_choose[bsz_i, i, j]
- * torch.prod(
- 1 - p_choose[bsz_i, i, :j]
- )
- )
- else:
- alpha[bsz_i, i, j] = alpha[bsz_i, i - 1, j]
- for k in range(j):
- alpha[bsz_i, i, j] += (
- alpha[bsz_i, i - 1, k]
- * torch.prod(
- 1 - p_choose[bsz_i, i, k:j]
- )
- )
- alpha[bsz_i, i, j] *= p_choose[bsz_i, i, j]
-
-    if padding_mask is not None:
-        alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0)
-
- if mass_perservation:
- alpha = mass_perservation_formula(alpha, False, padding_mask)
-
- return alpha
-
-
-def mass_perservation_formula(alpha, left_padding=False, padding_mask=None):
- if padding_mask is None or alpha.size(-1) == 1:
- if alpha.size(-1) > 1:
- alpha[:, :, -1] = 1 - alpha[:, :, :-1].sum(dim=-1)
- return alpha
-
- src_lens = (padding_mask.logical_not()).sum(dim=1).long()
-
- bsz, tgt_len, src_len = alpha.size()
-
- assert (
- not left_padding
- or (left_padding and (not padding_mask[:, 0].any()))
- )
-
- alpha = alpha.masked_fill(padding_mask.unsqueeze(1), 0)
-
- for bsz_i in range(bsz):
- if left_padding:
- alpha[bsz_i, :, -1] = (
- 1 - alpha[bsz_i, :, :-1].sum(dim=-1)
- )
- else:
- alpha[bsz_i, :, src_lens[bsz_i] - 1] = (
- 1 - alpha[bsz_i, :, :src_lens[bsz_i] - 1].sum(dim=-1)
- )
-
- return alpha
-
-
-def expected_soft_attention_formula(
- alpha,
- soft_energy,
- padding_mask=None,
- chunksize=1e10,
-):
- # Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
- # https://arxiv.org/pdf/1906.05218.pdf
- # Eq 14
-
- # Monotonic Chunkwise Attention
- # https://arxiv.org/abs/1712.05382
- # Eq 17
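-    # As implemented below, with chunk size c:
-    #   beta[i, j] = sum_{k = j}^{j + c - 1} alpha[i, k] * exp(soft_energy[i, j]) / sum_{l = max(0, k - c + 1)}^{k} exp(soft_energy[i, l])
-    # i.e. the monotonic attention mass at k is redistributed over the chunk of source positions ending at k.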
- bsz, tgt_len, src_len = alpha.size()
- beta = torch.zeros_like(alpha)
-
- if padding_mask is not None:
- bsz_pad = padding_mask.size(0)
- num_heads = int(bsz / bsz_pad)
- # Expanding for potential head dimension
- padding_mask = (
- padding_mask
- .unsqueeze(1)
- .expand([bsz_pad, num_heads, src_len])
- .contiguous()
- .view(-1, src_len)
- )
- soft_energy = soft_energy.masked_fill(padding_mask.unsqueeze(1), float('-inf'))
-
- for bsz_i in range(bsz):
- for i in range(tgt_len):
- for j in range(src_len):
- for k in range(j, min([src_len, j + chunksize])):
- if not padding_mask[bsz_i, j]:
- beta[bsz_i, i, j] += (
- alpha[bsz_i, i, k] * torch.exp(soft_energy[bsz_i, i, j])
- / torch.sum(torch.exp(soft_energy[bsz_i, i, max([0, k - chunksize + 1]):k + 1]))
- )
- return beta
-
-
-class MonotonicAttentionTestAbstractClass(object):
- def test_forward(self):
- sample = make_sample_with_padding()
- out, _ = self.model.forward(**sample["net_input"])
- loss = out.sum()
- loss.backward()
-
- def test_p_choose(self):
- sample = make_sample_with_padding()
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- self.assertTrue(p_choose.le(1.0).all())
- self.assertTrue(p_choose.ge(0.0).all())
-
- def test_expected_alignment(self):
- for longer_src in [True, False]:
- sample = make_sample_with_padding(longer_src)
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- alpha_system = item["alpha"]
- self.assertTrue(p_choose.size() == alpha_system.size())
- bsz, num_head, tgt_len, src_len = alpha_system.size()
- alpha_system = alpha_system.view(-1, tgt_len, src_len)
- p_choose = p_choose.view(-1, tgt_len, src_len)
-
- alpha_real = expected_alignment_formula(
- p_choose,
- self.model.decoder.layers[0].encoder_attn.mass_preservation,
- sample["net_input"]["src_tokens"].eq(PAD_INDEX)
- )
-
- self.assertTrue(
- torch.abs(alpha_system - alpha_real).le(5e-5).all(),
- )
-
-
-class HardMonotonicAttentionTestCase(
- unittest.TestCase,
- MonotonicAttentionTestAbstractClass
-):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config({"simul_type": "hard_aligned"})
- )
-
-
-class InfiniteLookbackTestCase(
- unittest.TestCase,
- MonotonicAttentionTestAbstractClass
-):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "infinite_lookback"
- }
- )
- )
- self.model.train()
-
- def test_fp16_for_long_input(self):
- sample = {
- "net_input": {
- "src_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0),
- "prev_output_tokens": torch.LongTensor([7] * 1000 + [2]).cuda().unsqueeze(0),
- "src_lengths": torch.LongTensor([1000]).cuda(),
- },
- "target": torch.LongTensor([2] + [7] * 1000).unsqueeze(0).cuda()
- }
- self.model.cuda().half()
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- for key in ["p_choose", "alpha", "beta", "soft_energy"]:
- self.assertFalse(torch.isnan(item[key]).any())
-
- def test_expected_attention(self):
- for longer_src in [True, False]:
- sample = make_sample_with_padding(longer_src)
- _, extra_out = self.model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- alpha_system = item["alpha"]
- beta_system = item["beta"]
- soft_energy_system = item["soft_energy"]
- self.assertTrue(beta_system.size() == alpha_system.size())
- self.assertTrue(p_choose.size() == alpha_system.size())
-
- bsz, num_head, tgt_len, src_len = alpha_system.size()
-
- alpha_system = alpha_system.view(-1, tgt_len, src_len)
- beta_system = beta_system.view(-1, tgt_len, src_len)
- p_choose = p_choose.view(-1, tgt_len, src_len)
- soft_energy_system = soft_energy_system.view(-1, tgt_len, src_len)
-
- alpha_real = expected_alignment_formula(
- p_choose,
- self.model.decoder.layers[0].encoder_attn.mass_preservation,
- sample["net_input"]["src_tokens"].eq(PAD_INDEX)
- )
-
- beta_real = expected_soft_attention_formula(
- alpha_real,
- soft_energy_system,
- sample["net_input"]["src_tokens"].eq(PAD_INDEX),
- chunksize=getattr(
- self.model.decoder.layers[0].encoder_attn,
- "chunk_size",
- int(1e10)
- )
- )
-
- self.assertTrue(
- torch.abs(beta_system - beta_real).le(1e-5).all(),
- )
-
-
-class ChunkwiseTestCase(
- InfiniteLookbackTestCase
-):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "chunkwise",
- "mocha_chunk_size": 3
- }
- )
- )
-
-
-class WaitkTestCase(InfiniteLookbackTestCase):
- def setUp(self):
- self.model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "waitk",
- "waitk_lagging": 3,
- }
- )
- )
-
- def check_waitk(self, p_choose, lagging, padding_mask):
- bsz, tgt_len, src_len = p_choose.size()
- for bsz_i in range(bsz):
- for i in range(tgt_len):
- for j in range(src_len):
- if not padding_mask[bsz_i, j]:
- if j - i == lagging - 1:
- self.assertTrue(p_choose[bsz_i, i, j] == 1)
- else:
- self.assertTrue(p_choose[bsz_i, i, j] == 0)
-
- def test_waitk_p_choose(self):
- for longer_src in [True, False]:
- for k in [1, 3, 10, 20, 100]:
- sample = make_sample_with_padding(longer_src)
- model = build_transformer_monotonic_attention(
- **generate_config(
- {
- "simul_type": "waitk",
- "waitk_lagging": k,
- }
- )
- )
- model.train()
- _, extra_out = model.forward(**sample["net_input"])
- for item in extra_out.attn_list:
- p_choose = item["p_choose"]
- bsz, num_heads, tgt_len, src_len = p_choose.size()
- padding_mask = sample["net_input"]["src_tokens"].eq(PAD_INDEX)
- padding_mask = (
- padding_mask
- .unsqueeze(1)
- .expand([bsz, num_heads, src_len])
- .contiguous()
- .view(-1, src_len)
- )
- p_choose = p_choose.view(bsz * num_heads, tgt_len, src_len)
- self.check_waitk(p_choose, k, padding_mask)
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py
deleted file mode 100644
index 5c7b67f8b1967ca515c5f7606253b46f903ea37e..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/hubert/simple_kmeans/dump_hubert_feature.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import logging
-import os
-import sys
-
-import fairseq
-import soundfile as sf
-import torch
-import torch.nn.functional as F
-
-from feature_utils import get_path_iterator, dump_feature
-
-
-logging.basicConfig(
- format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
- datefmt="%Y-%m-%d %H:%M:%S",
- level=os.environ.get("LOGLEVEL", "INFO").upper(),
- stream=sys.stdout,
-)
-logger = logging.getLogger("dump_hubert_feature")
-
-
-class HubertFeatureReader(object):
- def __init__(self, ckpt_path, layer, max_chunk=1600000):
- (
- model,
- cfg,
- task,
- ) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
- self.model = model[0].eval().cuda()
- self.task = task
- self.layer = layer
- self.max_chunk = max_chunk
- logger.info(f"TASK CONFIG:\n{self.task.cfg}")
- logger.info(f" max_chunk = {self.max_chunk}")
-
- def read_audio(self, path, ref_len=None):
- wav, sr = sf.read(path)
- assert sr == self.task.cfg.sample_rate, sr
- if wav.ndim == 2:
- wav = wav.mean(-1)
- assert wav.ndim == 1, wav.ndim
- if ref_len is not None and abs(ref_len - len(wav)) > 160:
- logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
- return wav
-
- def get_feats(self, path, ref_len=None):
- x = self.read_audio(path, ref_len)
- with torch.no_grad():
- x = torch.from_numpy(x).float().cuda()
- if self.task.cfg.normalize:
- x = F.layer_norm(x, x.shape)
- x = x.view(1, -1)
-
- feat = []
- for start in range(0, x.size(1), self.max_chunk):
- x_chunk = x[:, start: start + self.max_chunk]
- feat_chunk, _ = self.model.extract_features(
- source=x_chunk,
- padding_mask=None,
- mask=False,
- output_layer=self.layer,
- )
- feat.append(feat_chunk)
- return torch.cat(feat, 1).squeeze(0)
-
-
-def main(tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk):
- reader = HubertFeatureReader(ckpt_path, layer, max_chunk)
- generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
- dump_feature(reader, generator, num, split, nshard, rank, feat_dir)
-
-
-if __name__ == "__main__":
- import argparse
-
- parser = argparse.ArgumentParser()
- parser.add_argument("tsv_dir")
- parser.add_argument("split")
- parser.add_argument("ckpt_path")
- parser.add_argument("layer", type=int)
- parser.add_argument("nshard", type=int)
- parser.add_argument("rank", type=int)
- parser.add_argument("feat_dir")
- parser.add_argument("--max_chunk", type=int, default=1600000)
- args = parser.parse_args()
- logger.info(args)
-
- main(**vars(args))
diff --git a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py b/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py
deleted file mode 100644
index 7cf844a95a075ee9ad318dc11dd71537d1ef6a5b..0000000000000000000000000000000000000000
--- a/spaces/OFA-Sys/OFA-vqa/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/cluster_kmeans.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-#
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import argparse
-import logging
-import os
-import time
-
-import numpy as np
-from sklearn.cluster import MiniBatchKMeans
-
-import joblib
-from examples.textless_nlp.gslm.speech2unit.pretrained.utils import (
- get_and_dump_features,
- get_features,
-)
-
-
-def get_logger():
- log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
- logging.basicConfig(format=log_format, level=logging.INFO)
- logger = logging.getLogger(__name__)
- return logger
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description="Learn K-means clustering over acoustic features."
- )
-
- # Features arguments
- parser.add_argument(
- "--in_features_path", type=str, default=None, help="Features file path"
- )
- parser.add_argument(
- "--feature_type",
- type=str,
- choices=["logmel", "hubert", "w2v2", "cpc"],
- default=None,
- help="Acoustic feature type",
- )
- parser.add_argument(
- "--manifest_path",
- type=str,
- default=None,
- help="Manifest file containing the root dir and file names",
- )
- parser.add_argument(
- "--out_features_path",
- type=str,
- default=None,
- help="Features file path to write to",
- )
- parser.add_argument(
- "--checkpoint_path",
- type=str,
- help="Pretrained acoustic model checkpoint",
- )
- parser.add_argument(
- "--layer",
- type=int,
- help="The layer of the pretrained model to extract features from",
- default=-1,
- )
- parser.add_argument(
- "--sample_pct",
- type=float,
- help="Percent data to use for K-means training",
- default=0.1,
- )
-
- # K-means arguments
- parser.add_argument(
-        "--num_clusters", type=int, help="Number of clusters", default=50
- )
- parser.add_argument("--init", default="k-means++")
- parser.add_argument(
- "--max_iter",
- type=int,
- help="Maximum number of iterations for K-means training",
- default=150,
- )
- parser.add_argument(
- "--batch_size",
- type=int,
- help="Batch size for K-means training",
- default=10000,
- )
- parser.add_argument("--tol", default=0.0, type=float)
- parser.add_argument("--max_no_improvement", default=100, type=int)
- parser.add_argument("--n_init", default=20, type=int)
- parser.add_argument("--reassignment_ratio", default=0.5, type=float)
- parser.add_argument(
- "--out_kmeans_model_path",
- type=str,
- required=True,
- help="Path to save K-means model",
- )
-
- # Leftovers
- parser.add_argument(
- "--seed",
- type=int,
- help="Random seed to use for K-means training",
- default=1369,
- )
-
- return parser
-
-
-def get_kmeans_model(
- n_clusters,
- init,
- max_iter,
- batch_size,
- tol,
- max_no_improvement,
- n_init,
- reassignment_ratio,
- random_state,
-):
- return MiniBatchKMeans(
- n_clusters=n_clusters,
- init=init,
- max_iter=max_iter,
- batch_size=batch_size,
- tol=tol,
- max_no_improvement=max_no_improvement,
- n_init=n_init,
- reassignment_ratio=reassignment_ratio,
- random_state=random_state,
- verbose=1,
- compute_labels=True,
- init_size=None,
- )
-
-
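-# Example (hypothetical values): get_kmeans_model(50, "k-means++", 150, 10000, 0.0, 100, 20, 0.5, 1369)
-# returns an unfitted sklearn MiniBatchKMeans; train_kmeans() below then fits it on the feature matrix.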
-def train_kmeans(kmeans_model, features_batch):
- start_time = time.time()
- kmeans_model.fit(features_batch)
-    time_taken = round((time.time() - start_time) / 60, 2)
- return kmeans_model, time_taken
-
-
-def main(args, logger):
- # Features loading/extraction for K-means
- if args.in_features_path:
- # Feature loading
- logger.info(f"Loading features from {args.in_features_path}...")
- features_batch = np.load(args.in_features_path, allow_pickle=True)
- else:
- # Feature extraction
- logger.info(f"Extracting {args.feature_type} acoustic features...")
- features_batch = (
- get_features(
- feature_type=args.feature_type,
- checkpoint_path=args.checkpoint_path,
- layer=args.layer,
- manifest_path=args.manifest_path,
- sample_pct=args.sample_pct,
- flatten=True,
- )
- if not args.out_features_path
- else get_and_dump_features(
- feature_type=args.feature_type,
- checkpoint_path=args.checkpoint_path,
- layer=args.layer,
- manifest_path=args.manifest_path,
- sample_pct=args.sample_pct,
- flatten=True,
- out_features_path=args.out_features_path,
- )
- )
- if args.out_features_path:
- logger.info(
- f"Saved extracted features at {args.out_features_path}"
- )
- logger.info(f"Features shape = {features_batch.shape}\n")
-
- # Learn and save K-means model
- kmeans_model = get_kmeans_model(
- n_clusters=args.num_clusters,
- init=args.init,
- max_iter=args.max_iter,
- batch_size=args.batch_size,
- tol=args.tol,
- max_no_improvement=args.max_no_improvement,
- n_init=args.n_init,
- reassignment_ratio=args.reassignment_ratio,
- random_state=args.seed,
- )
- logger.info("Starting k-means training...")
- kmeans_model, time_taken = train_kmeans(
- kmeans_model=kmeans_model, features_batch=features_batch
- )
- logger.info(f"...done k-means training in {time_taken} minutes")
- inertia = -kmeans_model.score(features_batch) / len(features_batch)
-    logger.info(f"Total inertia: {round(inertia, 2)}\n")
-
- logger.info(f"Saving k-means model to {args.out_kmeans_model_path}")
- os.makedirs(os.path.dirname(args.out_kmeans_model_path), exist_ok=True)
-    joblib.dump(kmeans_model, args.out_kmeans_model_path)  # joblib handles opening and closing the file
-
-
-if __name__ == "__main__":
- parser = get_parser()
- args = parser.parse_args()
- logger = get_logger()
- logger.info(args)
- main(args, logger)
diff --git a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/build.py b/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/build.py
deleted file mode 100644
index 3427215746c9a146bd902f22ea9b26d121c36b27..0000000000000000000000000000000000000000
--- a/spaces/OpenGVLab/InternGPT/iGPT/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/build.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import torch
-
-from detectron2.utils.logger import _log_api_usage
-from detectron2.utils.registry import Registry
-
-META_ARCH_REGISTRY = Registry("META_ARCH") # noqa F401 isort:skip
-META_ARCH_REGISTRY.__doc__ = """
-Registry for meta-architectures, i.e. the whole model.
-
-The registered object will be called with `obj(cfg)`
-and expected to return a `nn.Module` object.
-"""
-
-
-def build_model(cfg):
- """
- Build the whole model architecture, defined by ``cfg.MODEL.META_ARCHITECTURE``.
- Note that it does not load any weights from ``cfg``.
- """
- meta_arch = cfg.MODEL.META_ARCHITECTURE
- model = META_ARCH_REGISTRY.get(meta_arch)(cfg)
- model.to(torch.device(cfg.MODEL.DEVICE))
- _log_api_usage("modeling.meta_arch." + meta_arch)
- return model
diff --git a/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/__init__.py b/spaces/OpenGVLab/InternGPT/third-party/lama/bin/saicinpainting/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/OptimalScale/Robin-7b/lmflow/models/decoder_model.py b/spaces/OptimalScale/Robin-7b/lmflow/models/decoder_model.py
deleted file mode 100644
index 19f3c41eb810a890ab37662e8ca0622f3a93b79c..0000000000000000000000000000000000000000
--- a/spaces/OptimalScale/Robin-7b/lmflow/models/decoder_model.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-# coding=utf-8
-"""A one-line summary of the module or program, terminated by a period.
-
-Leave one blank line. The rest of this docstring should contain an
-overall description of the module or program. Optionally, it may also
-contain a brief description of exported classes and functions and/or usage
-examples.
-
-Typical usage example:
-
- foo = ClassFoo()
- bar = foo.FunctionBar()
-"""
-
-from lmflow.models.base_model import BaseModel
-
-
-class DecoderModel(BaseModel):
-
- def __init__(self, *args, **kwargs):
- pass
diff --git a/spaces/Owechada/roopfaceswapr/roop/processors/frame/face_enhancer.py b/spaces/Owechada/roopfaceswapr/roop/processors/frame/face_enhancer.py
deleted file mode 100644
index b1501d574fccb5bc80f12b7783f9505cacc48e06..0000000000000000000000000000000000000000
--- a/spaces/Owechada/roopfaceswapr/roop/processors/frame/face_enhancer.py
+++ /dev/null
@@ -1,89 +0,0 @@
-from typing import Any, List, Callable
-import cv2
-import threading
-import gfpgan
-
-import roop.globals
-import roop.processors.frame.core
-from roop.core import update_status
-from roop.face_analyser import get_one_face
-from roop.typing import Frame, Face
-from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
-import torch
-
-FACE_ENHANCER = None
-THREAD_SEMAPHORE = threading.Semaphore()
-THREAD_LOCK = threading.Lock()
-NAME = 'ROOP.FACE-ENHANCER'
-frame_name = 'face_enhancer'
-
-if torch.cuda.is_available():
- device='cuda'
-else:
- device='cpu'
-
-
-def get_face_enhancer() -> Any:
- global FACE_ENHANCER
-
- with THREAD_LOCK:
- if FACE_ENHANCER is None:
- model_path = resolve_relative_path('../models/GFPGANv1.4.pth')
- # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399
- FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1,device=device) # type: ignore[attr-defined]
- return FACE_ENHANCER
-
-
-def pre_check() -> bool:
- download_directory_path = resolve_relative_path('../models')
- # conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth'])
- conditional_download(download_directory_path, ['https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth'])
- return True
-
-
-def pre_start() -> bool:
- if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
- update_status('Select an image or video for target path.', NAME)
- return False
- return True
-
-
-def post_process() -> None:
- global FACE_ENHANCER
-
- FACE_ENHANCER = None
-
-
-def enhance_face(temp_frame: Frame) -> Frame:
- with THREAD_SEMAPHORE:
- _, _, temp_frame = get_face_enhancer().enhance(
- temp_frame,
- paste_back=True
- )
- return temp_frame
-
-
-def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
- target_face = get_one_face(temp_frame)
- if target_face:
- temp_frame = enhance_face(temp_frame)
- return temp_frame
-
-
-def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
- for temp_frame_path in temp_frame_paths:
- temp_frame = cv2.imread(temp_frame_path)
- result = process_frame(None, temp_frame)
- cv2.imwrite(temp_frame_path, result)
- if update:
- update()
-
-
-def process_image(source_path: str, target_path: str, output_path: str) -> None:
- target_frame = cv2.imread(target_path)
- result = process_frame(None, target_frame)
- cv2.imwrite(output_path, result)
-
-
-def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
- roop.processors.frame.core.process_video(None, temp_frame_paths, process_frames)
diff --git a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/readme.md b/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/readme.md
deleted file mode 100644
index 5421bfe3e67b7b6cbd7baf96b741b539d65bb0fd..0000000000000000000000000000000000000000
--- a/spaces/PKUWilliamYang/VToonify/vtoonify/model/encoder/readme.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Encoding in Style: a StyleGAN Encoder for Image-to-Image Translation
-
-## Description
-Official Implementation of pSp paper for both training and evaluation. The pSp method extends the StyleGAN model to
-allow solving different image-to-image translation problems using its encoder.
-
-Fork from [https://github.com/eladrich/pixel2style2pixel](https://github.com/eladrich/pixel2style2pixel).
-
-In VToonify, we modify pSp to accept z+ latent code.
diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/simple.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/simple.go
deleted file mode 100644
index f33e17f8a5a341ef4e3a22a9ae8732068ce679c7..0000000000000000000000000000000000000000
Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/sxml/simple.go and /dev/null differ
diff --git a/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_diff_dyn_complexity.py b/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_diff_dyn_complexity.py
deleted file mode 100644
index 0f0e07917828cf080f3b08fb2e769b1776e8d61c..0000000000000000000000000000000000000000
--- a/spaces/PeepDaSlan9/De-limiter/eval_delimit/score_diff_dyn_complexity.py
+++ /dev/null
@@ -1,87 +0,0 @@
-import os
-import argparse
-import csv
-import json
-import glob
-
-import tqdm
-import numpy as np
-import librosa
-import musdb
-import pyloudnorm as pyln
-
-from utils import str2bool, db2linear
-
-parser = argparse.ArgumentParser(description="Compute dynamic complexity differences between estimated and reference tracks.")
-
-parser.add_argument(
- "--target",
- type=str,
- default="all",
- help="target source. all, vocals, bass, drums, other.",
-)
-parser.add_argument(
- "--root",
- type=str,
- default="/path/to/musdb18hq_loudnorm",
-)
-parser.add_argument(
- "--output_directory",
- type=str,
- default="/path/to/results",
-)
-parser.add_argument("--exp_name", type=str, default="convtasnet_6_s")
-parser.add_argument(
- "--calc_results",
- type=str2bool,
- default=True,
- help="Set this True when you want to calculate the results of the test set. Set this False when calculating musdb-hq vs musdb-XL. (top row in Table 1.)",
-)
-
-args, _ = parser.parse_known_args()
-
-args.sample_rate = 44100
-meter = pyln.Meter(args.sample_rate)
-
-if args.calc_results:
- args.test_output_dir = f"{args.output_directory}/test/{args.exp_name}"
-else:
- args.test_output_dir = f"{args.output_directory}/{args.exp_name}"
-
-
-est_track_list = glob.glob(f"{args.test_output_dir}/*/{args.target}.wav")
-f = open(
- f"{args.test_output_dir}/score_feature_{args.target}.json",
- encoding="UTF-8",
-)
-dict_song_score_est = json.loads(f.read())
-
-if args.target == "all":
- ref_track_list = glob.glob(f"{args.root}/*/mixture.wav")
- f = open(f"{args.root}/score_feature.json", encoding="UTF-8")
- dict_song_score_ref = json.loads(f.read())
-else:
- ref_track_list = glob.glob(f"{args.root}/*/{args.target}.wav")
- f = open(f"{args.root}/score_feature_{args.target}.json", encoding="UTF-8")
- dict_song_score_ref = json.loads(f.read())
-
-i = 0
-
-dict_song_score = {}
-list_diff_dynamic_complexity = []
-
-for track in tqdm.tqdm(ref_track_list):
- audio_name = os.path.basename(os.path.dirname(track))
- ref_dyn_complexity = dict_song_score_ref[audio_name]["dynamic_complexity_score"]
- est_dyn_complexity = dict_song_score_est[audio_name]["dynamic_complexity_score"]
-
- list_diff_dynamic_complexity.append(est_dyn_complexity - ref_dyn_complexity)
-
- i += 1
-
-print(
- f"Dynamic complexity difference {args.exp_name} vs {os.path.basename(args.root)} on {args.target}"
-)
-print("mean: ", np.mean(list_diff_dynamic_complexity))
-print("median: ", np.median(list_diff_dynamic_complexity))
-print("std: ", np.std(list_diff_dynamic_complexity))
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/builder.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/builder.py
deleted file mode 100644
index 7567316c566bd3aca6d8f65a84b00e9e890948a7..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/builder.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-from ..runner import Sequential
-from ..utils import Registry, build_from_cfg
-
-
-def build_model_from_cfg(cfg, registry, default_args=None):
- """Build a PyTorch model from config dict(s). Different from
- ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.
-
- Args:
-        cfg (dict, list[dict]): The config of modules. It is either a config
-            dict or a list of config dicts. If cfg is a list, the built
-            modules will be wrapped with ``nn.Sequential``.
- registry (:obj:`Registry`): A registry the module belongs to.
- default_args (dict, optional): Default arguments to build the module.
- Defaults to None.
-
- Returns:
- nn.Module: A built nn module.
- """
- if isinstance(cfg, list):
- modules = [
- build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
- ]
- return Sequential(*modules)
- else:
- return build_from_cfg(cfg, registry, default_args)
-
-
-MODELS = Registry('model', build_func=build_model_from_cfg)
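For reference, a minimal self-contained sketch of the registry pattern that `build_model_from_cfg` relies on. This is not the mmcv implementation; the tiny `_REGISTRY` stand-in, `ConvBlock`, and the config dicts below are illustrative assumptions.

```python
import torch.nn as nn

_REGISTRY = {}  # hypothetical stand-in for mmcv's Registry


def register(cls):
    _REGISTRY[cls.__name__] = cls
    return cls


def build_from_cfg(cfg):
    # a config dict names the class via its 'type' key; the rest are kwargs
    cfg = dict(cfg)
    cls = _REGISTRY[cfg.pop('type')]
    return cls(**cfg)


def build_model_from_cfg(cfg):
    # a list of config dicts is built into an nn.Sequential, as above
    if isinstance(cfg, list):
        return nn.Sequential(*[build_from_cfg(c) for c in cfg])
    return build_from_cfg(cfg)


@register
class ConvBlock(nn.Module):
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 3, padding=1)

    def forward(self, x):
        return self.conv(x)


model = build_model_from_cfg([
    dict(type='ConvBlock', in_ch=3, out_ch=16),
    dict(type='ConvBlock', in_ch=16, out_ch=32),
])
print(model)  # nn.Sequential of two ConvBlocks
```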
diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/uniformer.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/uniformer.py
deleted file mode 100644
index 0c4bb88e4c928540cca9ab609988b916520f5b7a..0000000000000000000000000000000000000000
--- a/spaces/Pie31415/control-animation/annotator/uniformer/mmseg/models/backbones/uniformer.py
+++ /dev/null
@@ -1,422 +0,0 @@
-# --------------------------------------------------------
-# UniFormer
-# Copyright (c) 2022 SenseTime X-Lab
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Kunchang Li
-# --------------------------------------------------------
-
-from collections import OrderedDict
-import math
-
-from functools import partial
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-import numpy as np
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-from annotator.uniformer.mmcv_custom import load_checkpoint
-from annotator.uniformer.mmseg.utils import get_root_logger
-from ..builder import BACKBONES
-
-
-class Mlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Linear(in_features, hidden_features)
- self.act = act_layer()
- self.fc2 = nn.Linear(hidden_features, out_features)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class CMlp(nn.Module):
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
- super().__init__()
- out_features = out_features or in_features
- hidden_features = hidden_features or in_features
- self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
- self.act = act_layer()
- self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
- self.drop = nn.Dropout(drop)
-
- def forward(self, x):
- x = self.fc1(x)
- x = self.act(x)
- x = self.drop(x)
- x = self.fc2(x)
- x = self.drop(x)
- return x
-
-
-class CBlock(nn.Module):
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
- self.norm1 = nn.BatchNorm2d(dim)
- self.conv1 = nn.Conv2d(dim, dim, 1)
- self.conv2 = nn.Conv2d(dim, dim, 1)
- self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = nn.BatchNorm2d(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x):
- x = x + self.pos_embed(x)
- x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- return x
-
-
-class Attention(nn.Module):
- def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
- super().__init__()
- self.num_heads = num_heads
- head_dim = dim // num_heads
- # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
- self.scale = qk_scale or head_dim ** -0.5
-
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
- self.attn_drop = nn.Dropout(attn_drop)
- self.proj = nn.Linear(dim, dim)
- self.proj_drop = nn.Dropout(proj_drop)
-
- def forward(self, x):
- B, N, C = x.shape
- qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
-
- attn = (q @ k.transpose(-2, -1)) * self.scale
- attn = attn.softmax(dim=-1)
- attn = self.attn_drop(attn)
-
- x = (attn @ v).transpose(1, 2).reshape(B, N, C)
- x = self.proj(x)
- x = self.proj_drop(x)
- return x
-
-
-class SABlock(nn.Module):
- def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
- self.norm1 = norm_layer(dim)
- self.attn = Attention(
- dim,
- num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
- attn_drop=attn_drop, proj_drop=drop)
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x):
- x = x + self.pos_embed(x)
- B, N, H, W = x.shape
- x = x.flatten(2).transpose(1, 2)
- x = x + self.drop_path(self.attn(self.norm1(x)))
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- x = x.transpose(1, 2).reshape(B, N, H, W)
- return x
-
-
-def window_partition(x, window_size):
- """
- Args:
- x: (B, H, W, C)
- window_size (int): window size
- Returns:
- windows: (num_windows*B, window_size, window_size, C)
- """
- B, H, W, C = x.shape
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
- return windows
-
-
-def window_reverse(windows, window_size, H, W):
- """
- Args:
- windows: (num_windows*B, window_size, window_size, C)
- window_size (int): Window size
- H (int): Height of image
- W (int): Width of image
- Returns:
- x: (B, H, W, C)
- """
- B = int(windows.shape[0] / (H * W / window_size / window_size))
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
- return x
-
-
-class SABlock_Windows(nn.Module):
- def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
- drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
- super().__init__()
- self.window_size=window_size
- self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
- self.norm1 = norm_layer(dim)
- self.attn = Attention(
- dim,
- num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
- attn_drop=attn_drop, proj_drop=drop)
- # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
- self.norm2 = norm_layer(dim)
- mlp_hidden_dim = int(dim * mlp_ratio)
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
- def forward(self, x):
- x = x + self.pos_embed(x)
- x = x.permute(0, 2, 3, 1)
- B, H, W, C = x.shape
- shortcut = x
- x = self.norm1(x)
-
- pad_l = pad_t = 0
- pad_r = (self.window_size - W % self.window_size) % self.window_size
- pad_b = (self.window_size - H % self.window_size) % self.window_size
- x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
- _, Hp, Wp, _ = x.shape
-
- x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
-
- # W-MSA/SW-MSA
- attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C
-
- # merge windows
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
- x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
-
-        # remove padding
- if pad_r > 0 or pad_b > 0:
- x = x[:, :H, :W, :].contiguous()
-
- x = shortcut + self.drop_path(x)
- x = x + self.drop_path(self.mlp(self.norm2(x)))
- x = x.permute(0, 3, 1, 2).reshape(B, C, H, W)
- return x
-
-
-class PatchEmbed(nn.Module):
- """ Image to Patch Embedding
- """
- def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
- super().__init__()
- img_size = to_2tuple(img_size)
- patch_size = to_2tuple(patch_size)
- num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
- self.img_size = img_size
- self.patch_size = patch_size
- self.num_patches = num_patches
- self.norm = nn.LayerNorm(embed_dim)
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-
- def forward(self, x):
- B, _, H, W = x.shape
- x = self.proj(x)
- B, _, H, W = x.shape
- x = x.flatten(2).transpose(1, 2)
- x = self.norm(x)
- x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
- return x
-
-
-@BACKBONES.register_module()
-class UniFormer(nn.Module):
- """ Vision Transformer
- A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
- https://arxiv.org/abs/2010.11929
- """
- def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512],
- head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
- drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
- pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0],
- windows=False, hybrid=False, window_size=14):
- """
- Args:
-            layers (list): number of blocks in each layer
- img_size (int, tuple): input image size
- in_chans (int): number of input channels
- num_classes (int): number of classes for classification head
- embed_dim (int): embedding dimension
- head_dim (int): dimension of attention heads
- mlp_ratio (int): ratio of mlp hidden dim to embedding dim
- qkv_bias (bool): enable bias for qkv if True
- qk_scale (float): override default qk scale of head_dim ** -0.5 if set
- representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
- drop_rate (float): dropout rate
- attn_drop_rate (float): attention dropout rate
- drop_path_rate (float): stochastic depth rate
- norm_layer (nn.Module): normalization layer
- pretrained_path (str): path of pretrained model
- use_checkpoint (bool): whether use checkpoint
- checkpoint_num (list): index for using checkpoint in every stage
- windows (bool): whether use window MHRA
- hybrid (bool): whether use hybrid MHRA
- window_size (int): size of window (>14)
- """
- super().__init__()
- self.num_classes = num_classes
- self.use_checkpoint = use_checkpoint
- self.checkpoint_num = checkpoint_num
- self.windows = windows
- print(f'Use Checkpoint: {self.use_checkpoint}')
- print(f'Checkpoint Number: {self.checkpoint_num}')
- self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
- norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
-
- self.patch_embed1 = PatchEmbed(
- img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
- self.patch_embed2 = PatchEmbed(
- img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
- self.patch_embed3 = PatchEmbed(
- img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
- self.patch_embed4 = PatchEmbed(
- img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])
-
- self.pos_drop = nn.Dropout(p=drop_rate)
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule
- num_heads = [dim // head_dim for dim in embed_dim]
- self.blocks1 = nn.ModuleList([
- CBlock(
- dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
- for i in range(layers[0])])
- self.norm1=norm_layer(embed_dim[0])
- self.blocks2 = nn.ModuleList([
- CBlock(
- dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer)
- for i in range(layers[1])])
- self.norm2 = norm_layer(embed_dim[1])
- if self.windows:
- print('Use local window for all blocks in stage3')
- self.blocks3 = nn.ModuleList([
- SABlock_Windows(
- dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
- for i in range(layers[2])])
- elif hybrid:
- print('Use hybrid window for blocks in stage3')
- block3 = []
- for i in range(layers[2]):
- if (i + 1) % 4 == 0:
- block3.append(SABlock(
- dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
- else:
- block3.append(SABlock_Windows(
- dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
- self.blocks3 = nn.ModuleList(block3)
- else:
- print('Use global window for all blocks in stage3')
- self.blocks3 = nn.ModuleList([
- SABlock(
- dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
- for i in range(layers[2])])
- self.norm3 = norm_layer(embed_dim[2])
- self.blocks4 = nn.ModuleList([
- SABlock(
- dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
- drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer)
- for i in range(layers[3])])
- self.norm4 = norm_layer(embed_dim[3])
-
- # Representation layer
- if representation_size:
- self.num_features = representation_size
- self.pre_logits = nn.Sequential(OrderedDict([
-                ('fc', nn.Linear(embed_dim[-1], representation_size)),
- ('act', nn.Tanh())
- ]))
- else:
- self.pre_logits = nn.Identity()
-
- self.apply(self._init_weights)
- self.init_weights(pretrained=pretrained_path)
-
- def init_weights(self, pretrained):
- if isinstance(pretrained, str):
- logger = get_root_logger()
- load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
- print(f'Load pretrained model from {pretrained}')
- def _init_weights(self, m):
- if isinstance(m, nn.Linear):
- trunc_normal_(m.weight, std=.02)
- if isinstance(m, nn.Linear) and m.bias is not None:
- nn.init.constant_(m.bias, 0)
- elif isinstance(m, nn.LayerNorm):
- nn.init.constant_(m.bias, 0)
- nn.init.constant_(m.weight, 1.0)
-
- @torch.jit.ignore
- def no_weight_decay(self):
- return {'pos_embed', 'cls_token'}
-
- def get_classifier(self):
- return self.head
-
- def reset_classifier(self, num_classes, global_pool=''):
- self.num_classes = num_classes
- self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
-
- def forward_features(self, x):
- out = []
- x = self.patch_embed1(x)
- x = self.pos_drop(x)
- for i, blk in enumerate(self.blocks1):
- if self.use_checkpoint and i < self.checkpoint_num[0]:
- x = checkpoint.checkpoint(blk, x)
- else:
- x = blk(x)
- x_out = self.norm1(x.permute(0, 2, 3, 1))
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
- x = self.patch_embed2(x)
- for i, blk in enumerate(self.blocks2):
- if self.use_checkpoint and i < self.checkpoint_num[1]:
- x = checkpoint.checkpoint(blk, x)
- else:
- x = blk(x)
- x_out = self.norm2(x.permute(0, 2, 3, 1))
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
- x = self.patch_embed3(x)
- for i, blk in enumerate(self.blocks3):
- if self.use_checkpoint and i < self.checkpoint_num[2]:
- x = checkpoint.checkpoint(blk, x)
- else:
- x = blk(x)
- x_out = self.norm3(x.permute(0, 2, 3, 1))
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
- x = self.patch_embed4(x)
- for i, blk in enumerate(self.blocks4):
- if self.use_checkpoint and i < self.checkpoint_num[3]:
- x = checkpoint.checkpoint(blk, x)
- else:
- x = blk(x)
- x_out = self.norm4(x.permute(0, 2, 3, 1))
- out.append(x_out.permute(0, 3, 1, 2).contiguous())
- return tuple(out)
-
- def forward(self, x):
- x = self.forward_features(x)
- return x
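The `window_partition` / `window_reverse` pair above is easiest to sanity-check with a shape round trip. A self-contained sketch (the two helpers mirror the deleted file; the tensor sizes are arbitrary examples):

```python
import torch


def window_partition(x, window_size):
    # (B, H, W, C) -> (num_windows*B, window_size, window_size, C)
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)


def window_reverse(windows, window_size, H, W):
    # inverse of window_partition
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)


x = torch.randn(2, 28, 28, 64)        # (B, H, W, C), H and W divisible by window_size
windows = window_partition(x, 14)     # 2 images * 2 * 2 windows each -> (8, 14, 14, 64)
assert windows.shape == (8, 14, 14, 64)
assert torch.equal(window_reverse(windows, 14, 28, 28), x)  # exact round trip
```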
diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/concat_dataset.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/concat_dataset.py
deleted file mode 100644
index e5e087c42036f27132ca2c6e1d5252af5fee4a97..0000000000000000000000000000000000000000
--- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/concat_dataset.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-import bisect
-
-from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
-
-
-class ConcatDataset(_ConcatDataset):
- """
- Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
- method for querying the sizes of the image
- """
-
- def get_idxs(self, idx):
- dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
- if dataset_idx == 0:
- sample_idx = idx
- else:
- sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
- return dataset_idx, sample_idx
-
- def get_img_info(self, idx):
- dataset_idx, sample_idx = self.get_idxs(idx)
- return self.datasets[dataset_idx].get_img_info(sample_idx)
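A quick sketch of how `get_idxs` maps a global index to a (dataset, sample) pair via `bisect` over the cumulative sizes; the dataset lengths below are made-up examples.

```python
import bisect

# three datasets of lengths 5, 3 and 7 -> cumulative sizes [5, 8, 15]
cumulative_sizes = [5, 8, 15]


def get_idxs(idx):
    dataset_idx = bisect.bisect_right(cumulative_sizes, idx)
    sample_idx = idx if dataset_idx == 0 else idx - cumulative_sizes[dataset_idx - 1]
    return dataset_idx, sample_idx


print(get_idxs(0))   # (0, 0)  first sample of the first dataset
print(get_idxs(4))   # (0, 4)  last sample of the first dataset
print(get_idxs(5))   # (1, 0)  first sample of the second dataset
print(get_idxs(14))  # (2, 6)  last sample of the third dataset
```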
diff --git a/spaces/PranomVignesh/Detecting-unauthorized-person-with-firearms/app.py b/spaces/PranomVignesh/Detecting-unauthorized-person-with-firearms/app.py
deleted file mode 100644
index edac7a569e91aea34cc71953fb4b515e3f251249..0000000000000000000000000000000000000000
--- a/spaces/PranomVignesh/Detecting-unauthorized-person-with-firearms/app.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import gradio as gr
-import os
-import torch
-from transformers import pipeline
-
-imageClassifier = pipeline(task="image-classification",
- model="PranomVignesh/Police-vs-Public")
-
-
-model = torch.hub.load(
- 'ultralytics/yolov5',
- 'custom',
- path='./best.pt',
- device="cpu",
- force_reload=True
-)
-model.eval()
-
-
-def predict(image):
- results = model([image], size=224)
- print(results)
- predictions = imageClassifier(image)
- classMappings = {
- 'police': "Police / Authorized Personnel",
- 'public': 'Unauthorized Person'
- }
- output = {}
- for item in predictions:
- output[classMappings[item['label']]] = item['score']
-
- return results.render()[0], output
-
-
-title = "Detecting Unauthorized Individuals with Firearms"
-description = """
-    Try the examples at the bottom to get started.
-"""
-examples = [
- [os.path.join(os.path.abspath(''), './examples/sample_1.png')],
- [os.path.join(os.path.abspath(''), './examples/sample_2.png')],
- [os.path.join(os.path.abspath(''), './examples/sample_3.jpg')],
- [os.path.join(os.path.abspath(''), './examples/sample_4.jpg')],
- [os.path.join(os.path.abspath(''), './examples/sample_5.jpg')],
- [os.path.join(os.path.abspath(''), './examples/sample_6.jpg')],
- [os.path.join(os.path.abspath(''), './examples/sample_7.jpg')],
- [os.path.join(os.path.abspath(''), './examples/sample_8.jpg')],
-]
-
-inputs = gr.Image(type="pil", shape=(224, 224),
- label="Upload your image for detection")
-
-outputs = [
- gr.Image(type="pil", label="Gun Detections"),
- gr.Label(label="Class Prediction")
-]
-
-interface = gr.Interface(
- fn=predict,
- inputs=inputs,
- outputs=outputs,
- title=title,
- examples=examples,
- description=description,
- cache_examples=True,
- theme='huggingface'
-)
-interface.launch(debug=True, enable_queue=True)
diff --git a/spaces/Prasanthi123/myaiavatarammu/README.md b/spaces/Prasanthi123/myaiavatarammu/README.md
deleted file mode 100644
index c914a9cbf613aec80a724661b0be48d118bf7b78..0000000000000000000000000000000000000000
--- a/spaces/Prasanthi123/myaiavatarammu/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Myaiavatarammu
-emoji: 🐠
-colorFrom: pink
-colorTo: gray
-sdk: gradio
-sdk_version: 3.39.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/Quickturtle005/profitability_tool/README.md b/spaces/Quickturtle005/profitability_tool/README.md
deleted file mode 100644
index 7773bd3bd36d5aabb297d48b8d30bbfeb3ed5877..0000000000000000000000000000000000000000
--- a/spaces/Quickturtle005/profitability_tool/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Profitability Tool
-emoji: 📈
-colorFrom: purple
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.19.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/ReFenter/DeepDanbooru_string/app.py b/spaces/ReFenter/DeepDanbooru_string/app.py
deleted file mode 100644
index 49019837c9207cc68cb37be0342f3bc44fd0decb..0000000000000000000000000000000000000000
--- a/spaces/ReFenter/DeepDanbooru_string/app.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#!/usr/bin/env python
-
-from __future__ import annotations
-
-import argparse
-import functools
-import os
-import html
-import pathlib
-import tarfile
-
-import deepdanbooru as dd
-import gradio as gr
-import huggingface_hub
-import numpy as np
-import PIL.Image
-import tensorflow as tf
-import piexif
-import piexif.helper
-
-TITLE = 'DeepDanbooru String'
-
-TOKEN = os.environ['TOKEN']
-MODEL_REPO = 'CikeyQI/DeepDanbooru_string'
-MODEL_FILENAME = 'model-resnet_custom_v3.h5'
-LABEL_FILENAME = 'tags.txt'
-
-
-def parse_args() -> argparse.Namespace:
- parser = argparse.ArgumentParser()
- parser.add_argument('--score-slider-step', type=float, default=0.05)
- parser.add_argument('--score-threshold', type=float, default=0.5)
- parser.add_argument('--theme', type=str, default='dark-grass')
- parser.add_argument('--live', action='store_true')
- parser.add_argument('--share', action='store_true')
- parser.add_argument('--port', type=int)
- parser.add_argument('--disable-queue',
- dest='enable_queue',
- action='store_false')
- parser.add_argument('--allow-flagging', type=str, default='never')
- return parser.parse_args()
-
-
-def load_sample_image_paths() -> list[pathlib.Path]:
- image_dir = pathlib.Path('images')
- if not image_dir.exists():
- dataset_repo = 'hysts/sample-images-TADNE'
- path = huggingface_hub.hf_hub_download(dataset_repo,
- 'images.tar.gz',
- repo_type='dataset',
- use_auth_token=TOKEN)
- with tarfile.open(path) as f:
- f.extractall()
- return sorted(image_dir.glob('*'))
-
-
-def load_model() -> tf.keras.Model:
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
- MODEL_FILENAME,
- use_auth_token=TOKEN)
- model = tf.keras.models.load_model(path)
- return model
-
-
-def load_labels() -> list[str]:
- path = huggingface_hub.hf_hub_download(MODEL_REPO,
- LABEL_FILENAME,
- use_auth_token=TOKEN)
- with open(path) as f:
- labels = [line.strip() for line in f.readlines()]
- return labels
-
-def plaintext_to_html(text):
- text = "
" + " \n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "
-"""
- for key, text in items.items():
- info += f"""
-
-
{plaintext_to_html(str(key))}
-
{plaintext_to_html(str(text))}
-
-""".strip()+"\n"
-
- if len(info) == 0:
- message = "Nothing found in the image."
- info = f"
{message}
"
-
- return (a,c,res,info)
-
-
-def main():
- args = parse_args()
- model = load_model()
- labels = load_labels()
-
- func = functools.partial(predict, model=model, labels=labels)
- func = functools.update_wrapper(func, predict)
-
- gr.Interface(
- func,
- [
- gr.inputs.Image(type='pil', label='Input'),
- gr.inputs.Slider(0,
- 1,
- step=args.score_slider_step,
- default=args.score_threshold,
- label='Score Threshold'),
- ],
- [
- gr.outputs.Textbox(label='Output (string)'),
- gr.outputs.Textbox(label='Output (raw string)'),
- gr.outputs.Label(label='Output (label)'),
- gr.outputs.HTML()
- ],
- examples=[
- ['miku.jpg',0.5],
- ['miku2.jpg',0.5]
- ],
- title=TITLE,
- description='''
-Demo for [KichangKim/DeepDanbooru](https://github.com/KichangKim/DeepDanbooru) with "ready to copy" prompt and a prompt analyzer.
-
-Modified from [hysts/DeepDanbooru](https://huggingface.co/spaces/hysts/DeepDanbooru)
-
-PNG Info code forked from [AUTOMATIC1111/stable-diffusion-webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
- ''',
- theme=args.theme,
- allow_flagging=args.allow_flagging,
- live=args.live,
- ).launch(
- enable_queue=args.enable_queue,
- server_port=args.port,
- share=args.share,
- )
-
-
-if __name__ == '__main__':
- main()
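Since the HTML inside this file was partially stripped in the diff above, here is a hedged, standalone sketch of what `plaintext_to_html` is assumed to do: escape each line and join them with `<br>` inside a single `<p>` block.

```python
import html


def plaintext_to_html(text):
    # escape each line, join with <br>, wrap the result in a paragraph
    return "<p>" + "<br>\n".join(html.escape(x) for x in text.split("\n")) + "</p>"


print(plaintext_to_html("score > 0.5\n1girl, solo"))
# <p>score &gt; 0.5<br>
# 1girl, solo</p>
```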
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py
deleted file mode 100644
index f004dd95d97df16167f932587b3ce73b05b04a37..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from .anchor_free_head import AnchorFreeHead
-from .anchor_head import AnchorHead
-from .atss_head import ATSSHead
-from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
-from .centripetal_head import CentripetalHead
-from .corner_head import CornerHead
-from .embedding_rpn_head import EmbeddingRPNHead
-from .fcos_head import FCOSHead
-from .fovea_head import FoveaHead
-from .free_anchor_retina_head import FreeAnchorRetinaHead
-from .fsaf_head import FSAFHead
-from .ga_retina_head import GARetinaHead
-from .ga_rpn_head import GARPNHead
-from .gfl_head import GFLHead
-from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
-from .ld_head import LDHead
-from .nasfcos_head import NASFCOSHead
-from .paa_head import PAAHead
-from .pisa_retinanet_head import PISARetinaHead
-from .pisa_ssd_head import PISASSDHead
-from .reppoints_head import RepPointsHead
-from .retina_head import RetinaHead
-from .retina_sepbn_head import RetinaSepBNHead
-from .rpn_head import RPNHead
-from .sabl_retina_head import SABLRetinaHead
-from .ssd_head import SSDHead
-from .transformer_head import TransformerHead
-from .vfnet_head import VFNetHead
-from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
-from .yolo_head import YOLOV3Head
-
-__all__ = [
- 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
- 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
- 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
- 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
- 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
- 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
- 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead',
- 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead'
-]
diff --git a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/pipelines/__init__.py b/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/pipelines/__init__.py
deleted file mode 100644
index 8b9046b07bb4ddea7a707a392b42e72db7c9df67..0000000000000000000000000000000000000000
--- a/spaces/Robert001/UniControl-Demo/annotator/uniformer/mmseg/datasets/pipelines/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-from .compose import Compose
-from .formating import (Collect, ImageToTensor, ToDataContainer, ToTensor,
- Transpose, to_tensor)
-from .loading import LoadAnnotations, LoadImageFromFile
-from .test_time_aug import MultiScaleFlipAug
-from .transforms import (CLAHE, AdjustGamma, Normalize, Pad,
- PhotoMetricDistortion, RandomCrop, RandomFlip,
- RandomRotate, Rerange, Resize, RGB2Gray, SegRescale)
-
-__all__ = [
- 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
- 'Transpose', 'Collect', 'LoadAnnotations', 'LoadImageFromFile',
- 'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',
- 'Normalize', 'SegRescale', 'PhotoMetricDistortion', 'RandomRotate',
- 'AdjustGamma', 'CLAHE', 'Rerange', 'RGB2Gray'
-]
diff --git a/spaces/Rongjiehuang/GenerSpeech/modules/parallel_wavegan/models/__init__.py b/spaces/Rongjiehuang/GenerSpeech/modules/parallel_wavegan/models/__init__.py
deleted file mode 100644
index 4803ba6b2a0afc8022e756ae5b3f4c7403c3c1bd..0000000000000000000000000000000000000000
--- a/spaces/Rongjiehuang/GenerSpeech/modules/parallel_wavegan/models/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .melgan import * # NOQA
-from .parallel_wavegan import * # NOQA
diff --git a/spaces/SIH/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/utils.py b/spaces/SIH/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/utils.py
deleted file mode 100644
index eb94665cf94ff03a04cf47cd5e54535154963de1..0000000000000000000000000000000000000000
--- a/spaces/SIH/Augmented-Retrieval-qa-ChatGPT/streamlit_langchain_chat/utils.py
+++ /dev/null
@@ -1,52 +0,0 @@
-import math
-import string
-
-
-def maybe_is_text(s, thresh=2.5):
- if len(s) == 0:
- return False
- # Calculate the entropy of the string
- entropy = 0
- for c in string.printable:
- p = s.count(c) / len(s)
- if p > 0:
- entropy += -p * math.log2(p)
-
- # Check if the entropy is within a reasonable range for text
- if entropy > thresh:
- return True
- return False
-
-
-def maybe_is_code(s):
- if len(s) == 0:
- return False
- # Check if the string contains a lot of non-ascii characters
- if len([c for c in s if ord(c) > 128]) / len(s) > 0.1:
- return True
- return False
-
-
-def strings_similarity(s1, s2):
- if len(s1) == 0 or len(s2) == 0:
- return 0
- # break the strings into words
- s1 = set(s1.split())
- s2 = set(s2.split())
- # return the similarity ratio
- return len(s1.intersection(s2)) / len(s1.union(s2))
-
-
-def maybe_is_truncated(s):
- punct = [".", "!", "?", '"']
- if s[-1] in punct:
- return False
- return True
-
-
-def maybe_is_html(s):
- if len(s) == 0:
- return False
- # check for html tags
- if "= -3.) & (x <= 3.)) / 6.
- return grad_output * m
-
-
-class HardSigmoidJitAutoFn(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return hard_sigmoid_jit_fwd(x)
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- return hard_sigmoid_jit_bwd(x, grad_output)
-
-
-def hard_sigmoid_me(x, inplace: bool = False):
- return HardSigmoidJitAutoFn.apply(x)
-
-
-class HardSigmoidMe(nn.Module):
- def __init__(self, inplace: bool = False):
- super(HardSigmoidMe, self).__init__()
-
- def forward(self, x):
- return HardSigmoidJitAutoFn.apply(x)
-
-
-@torch.jit.script
-def hard_swish_jit_fwd(x):
- return x * (x + 3).clamp(min=0, max=6).div(6.)
-
-
-@torch.jit.script
-def hard_swish_jit_bwd(x, grad_output):
- m = torch.ones_like(x) * (x >= 3.)
- m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m)
- return grad_output * m
-
-
-class HardSwishJitAutoFn(torch.autograd.Function):
- """A memory efficient, jit-scripted HardSwish activation"""
- @staticmethod
- def forward(ctx, x):
- ctx.save_for_backward(x)
- return hard_swish_jit_fwd(x)
-
- @staticmethod
- def backward(ctx, grad_output):
- x = ctx.saved_tensors[0]
- return hard_swish_jit_bwd(x, grad_output)
-
-
-def hard_swish_me(x, inplace=False):
- return HardSwishJitAutoFn.apply(x)
-
-
-class HardSwishMe(nn.Module):
- def __init__(self, inplace: bool = False):
- super(HardSwishMe, self).__init__()
-
- def forward(self, x):
- return HardSwishJitAutoFn.apply(x)
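The memory-efficient activations above all follow the same pattern: a `torch.autograd.Function` whose backward recomputes the gradient from the saved input instead of keeping intermediate tensors alive. A self-contained sketch for the hard-sigmoid case, checked against autograd (the class name `HardSigmoidFn` is just for illustration):

```python
import torch


class HardSigmoidFn(torch.autograd.Function):
    """Hard sigmoid with a hand-written backward computed from the saved input."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return (x + 3).clamp(min=0, max=6).div(6.)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6.
        return grad_output * m


x = torch.randn(1000, dtype=torch.double, requires_grad=True)

# gradient from the hand-written backward
g_custom, = torch.autograd.grad(HardSigmoidFn.apply(x).sum(), x)

# gradient from autograd through the same forward expression
g_auto, = torch.autograd.grad(((x + 3).clamp(min=0, max=6) / 6.).sum(), x)

print(torch.allclose(g_custom, g_auto))  # True (away from the kink points at +/-3)
```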
diff --git a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/masks.py b/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/masks.py
deleted file mode 100644
index 995fee72a6d6190c9596a4bf62dc335766b954ee..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/oneformer/detectron2/structures/masks.py
+++ /dev/null
@@ -1,534 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import itertools
-import numpy as np
-from typing import Any, Iterator, List, Union
-import annotator.oneformer.pycocotools.mask as mask_util
-import torch
-from torch import device
-
-from annotator.oneformer.detectron2.layers.roi_align import ROIAlign
-from annotator.oneformer.detectron2.utils.memory import retry_if_cuda_oom
-
-from .boxes import Boxes
-
-
-def polygon_area(x, y):
- # Using the shoelace formula
- # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
- return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
-
-
-def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray:
- """
- Args:
- polygons (list[ndarray]): each array has shape (Nx2,)
- height, width (int)
-
- Returns:
- ndarray: a bool mask of shape (height, width)
- """
- if len(polygons) == 0:
- # COCOAPI does not support empty polygons
- return np.zeros((height, width)).astype(bool)
- rles = mask_util.frPyObjects(polygons, height, width)
- rle = mask_util.merge(rles)
- return mask_util.decode(rle).astype(bool)
-
-
-def rasterize_polygons_within_box(
- polygons: List[np.ndarray], box: np.ndarray, mask_size: int
-) -> torch.Tensor:
- """
- Rasterize the polygons into a mask image and
- crop the mask content in the given box.
- The cropped mask is resized to (mask_size, mask_size).
-
- This function is used when generating training targets for mask head in Mask R-CNN.
- Given original ground-truth masks for an image, new ground-truth mask
- training targets in the size of `mask_size x mask_size`
- must be provided for each predicted box. This function will be called to
- produce such targets.
-
- Args:
- polygons (list[ndarray[float]]): a list of polygons, which represents an instance.
- box: 4-element numpy array
- mask_size (int):
-
- Returns:
- Tensor: BoolTensor of shape (mask_size, mask_size)
- """
- # 1. Shift the polygons w.r.t the boxes
- w, h = box[2] - box[0], box[3] - box[1]
-
- polygons = copy.deepcopy(polygons)
- for p in polygons:
- p[0::2] = p[0::2] - box[0]
- p[1::2] = p[1::2] - box[1]
-
- # 2. Rescale the polygons to the new box size
- # max() to avoid division by small number
- ratio_h = mask_size / max(h, 0.1)
- ratio_w = mask_size / max(w, 0.1)
-
- if ratio_h == ratio_w:
- for p in polygons:
- p *= ratio_h
- else:
- for p in polygons:
- p[0::2] *= ratio_w
- p[1::2] *= ratio_h
-
- # 3. Rasterize the polygons with coco api
- mask = polygons_to_bitmask(polygons, mask_size, mask_size)
- mask = torch.from_numpy(mask)
- return mask
-
-
-class BitMasks:
- """
- This class stores the segmentation masks for all objects in one image, in
- the form of bitmaps.
-
- Attributes:
- tensor: bool Tensor of N,H,W, representing N instances in the image.
- """
-
- def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):
- """
- Args:
- tensor: bool Tensor of N,H,W, representing N instances in the image.
- """
- if isinstance(tensor, torch.Tensor):
- tensor = tensor.to(torch.bool)
- else:
- tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device("cpu"))
- assert tensor.dim() == 3, tensor.size()
- self.image_size = tensor.shape[1:]
- self.tensor = tensor
-
- @torch.jit.unused
- def to(self, *args: Any, **kwargs: Any) -> "BitMasks":
- return BitMasks(self.tensor.to(*args, **kwargs))
-
- @property
- def device(self) -> torch.device:
- return self.tensor.device
-
- @torch.jit.unused
- def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks":
- """
- Returns:
- BitMasks: Create a new :class:`BitMasks` by indexing.
-
- The following usage are allowed:
-
- 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.
- 2. `new_masks = masks[2:10]`: return a slice of masks.
- 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
- with `length = len(masks)`. Nonzero elements in the vector will be selected.
-
- Note that the returned object might share storage with this object,
- subject to Pytorch's indexing semantics.
- """
- if isinstance(item, int):
- return BitMasks(self.tensor[item].unsqueeze(0))
- m = self.tensor[item]
- assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format(
- item, m.shape
- )
- return BitMasks(m)
-
- @torch.jit.unused
- def __iter__(self) -> torch.Tensor:
- yield from self.tensor
-
- @torch.jit.unused
- def __repr__(self) -> str:
- s = self.__class__.__name__ + "("
- s += "num_instances={})".format(len(self.tensor))
- return s
-
- def __len__(self) -> int:
- return self.tensor.shape[0]
-
- def nonempty(self) -> torch.Tensor:
- """
- Find masks that are non-empty.
-
- Returns:
- Tensor: a BoolTensor which represents
- whether each mask is empty (False) or non-empty (True).
- """
- return self.tensor.flatten(1).any(dim=1)
-
- @staticmethod
- def from_polygon_masks(
- polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int
- ) -> "BitMasks":
- """
- Args:
- polygon_masks (list[list[ndarray]] or PolygonMasks)
- height, width (int)
- """
- if isinstance(polygon_masks, PolygonMasks):
- polygon_masks = polygon_masks.polygons
- masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]
- if len(masks):
- return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))
- else:
- return BitMasks(torch.empty(0, height, width, dtype=torch.bool))
-
- @staticmethod
- def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks":
- """
- Args:
- roi_masks:
- height, width (int):
- """
- return roi_masks.to_bitmasks(height, width)
-
- def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
- """
- Crop each bitmask by the given box, and resize results to (mask_size, mask_size).
- This can be used to prepare training targets for Mask R-CNN.
- It has less reconstruction error compared to rasterization with polygons.
-        However, we observe no difference in accuracy,
-        and BitMasks requires more memory to store all the masks.
-
- Args:
- boxes (Tensor): Nx4 tensor storing the boxes for each mask
- mask_size (int): the size of the rasterized mask.
-
- Returns:
- Tensor:
- A bool tensor of shape (N, mask_size, mask_size), where
- N is the number of predicted boxes for this image.
- """
- assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
- device = self.tensor.device
-
- batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
- rois = torch.cat([batch_inds, boxes], dim=1) # Nx5
-
- bit_masks = self.tensor.to(dtype=torch.float32)
- rois = rois.to(device=device)
- output = (
- ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)
- .forward(bit_masks[:, None, :, :], rois)
- .squeeze(1)
- )
- output = output >= 0.5
- return output
-
- def get_bounding_boxes(self) -> Boxes:
- """
- Returns:
- Boxes: tight bounding boxes around bitmasks.
-            If a mask is empty, its bounding box will be all zero.
- """
- boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)
- x_any = torch.any(self.tensor, dim=1)
- y_any = torch.any(self.tensor, dim=2)
- for idx in range(self.tensor.shape[0]):
- x = torch.where(x_any[idx, :])[0]
- y = torch.where(y_any[idx, :])[0]
- if len(x) > 0 and len(y) > 0:
- boxes[idx, :] = torch.as_tensor(
- [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32
- )
- return Boxes(boxes)
-
- @staticmethod
- def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks":
- """
- Concatenates a list of BitMasks into a single BitMasks
-
- Arguments:
- bitmasks_list (list[BitMasks])
-
- Returns:
- BitMasks: the concatenated BitMasks
- """
- assert isinstance(bitmasks_list, (list, tuple))
- assert len(bitmasks_list) > 0
- assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)
-
- cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))
- return cat_bitmasks
-
-
-class PolygonMasks:
- """
- This class stores the segmentation masks for all objects in one image, in the form of polygons.
-
- Attributes:
- polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.
- """
-
- def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):
- """
- Arguments:
- polygons (list[list[np.ndarray]]): The first
- level of the list correspond to individual instances,
- the second level to all the polygons that compose the
- instance, and the third level to the polygon coordinates.
- The third level array should have the format of
- [x0, y0, x1, y1, ..., xn, yn] (n >= 3).
- """
- if not isinstance(polygons, list):
- raise ValueError(
- "Cannot create PolygonMasks: Expect a list of list of polygons per image. "
- "Got '{}' instead.".format(type(polygons))
- )
-
- def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
- # Use float64 for higher precision, because why not?
- # Always put polygons on CPU (self.to is a no-op) since they
- # are supposed to be small tensors.
- # May need to change this assumption if GPU placement becomes useful
- if isinstance(t, torch.Tensor):
- t = t.cpu().numpy()
- return np.asarray(t).astype("float64")
-
- def process_polygons(
- polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]
- ) -> List[np.ndarray]:
- if not isinstance(polygons_per_instance, list):
- raise ValueError(
- "Cannot create polygons: Expect a list of polygons per instance. "
- "Got '{}' instead.".format(type(polygons_per_instance))
- )
- # transform each polygon to a numpy array
- polygons_per_instance = [_make_array(p) for p in polygons_per_instance]
- for polygon in polygons_per_instance:
- if len(polygon) % 2 != 0 or len(polygon) < 6:
- raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.")
- return polygons_per_instance
-
- self.polygons: List[List[np.ndarray]] = [
- process_polygons(polygons_per_instance) for polygons_per_instance in polygons
- ]
-
- def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks":
- return self
-
- @property
- def device(self) -> torch.device:
- return torch.device("cpu")
-
- def get_bounding_boxes(self) -> Boxes:
- """
- Returns:
- Boxes: tight bounding boxes around polygon masks.
- """
- boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)
- for idx, polygons_per_instance in enumerate(self.polygons):
- minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32)
- maxxy = torch.zeros(2, dtype=torch.float32)
- for polygon in polygons_per_instance:
- coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)
- minxy = torch.min(minxy, torch.min(coords, dim=0).values)
- maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)
- boxes[idx, :2] = minxy
- boxes[idx, 2:] = maxxy
- return Boxes(boxes)
-
- def nonempty(self) -> torch.Tensor:
- """
- Find masks that are non-empty.
-
- Returns:
- Tensor:
- a BoolTensor which represents whether each mask is empty (False) or not (True).
- """
- keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]
- return torch.from_numpy(np.asarray(keep, dtype=bool))
-
- def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks":
- """
- Support indexing over the instances and return a `PolygonMasks` object.
- `item` can be:
-
- 1. An integer. It will return an object with only one instance.
- 2. A slice. It will return an object with the selected instances.
- 3. A list[int]. It will return an object with the selected instances,
-           corresponding to the indices in the list.
- 4. A vector mask of type BoolTensor, whose length is num_instances.
- It will return an object with the instances whose mask is nonzero.
- """
- if isinstance(item, int):
- selected_polygons = [self.polygons[item]]
- elif isinstance(item, slice):
- selected_polygons = self.polygons[item]
- elif isinstance(item, list):
- selected_polygons = [self.polygons[i] for i in item]
- elif isinstance(item, torch.Tensor):
- # Polygons is a list, so we have to move the indices back to CPU.
- if item.dtype == torch.bool:
- assert item.dim() == 1, item.shape
- item = item.nonzero().squeeze(1).cpu().numpy().tolist()
- elif item.dtype in [torch.int32, torch.int64]:
- item = item.cpu().numpy().tolist()
- else:
- raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype))
- selected_polygons = [self.polygons[i] for i in item]
- return PolygonMasks(selected_polygons)
-
- def __iter__(self) -> Iterator[List[np.ndarray]]:
- """
- Yields:
- list[ndarray]: the polygons for one instance.
- Each Tensor is a float64 vector representing a polygon.
- """
- return iter(self.polygons)
-
- def __repr__(self) -> str:
- s = self.__class__.__name__ + "("
- s += "num_instances={})".format(len(self.polygons))
- return s
-
- def __len__(self) -> int:
- return len(self.polygons)
-
- def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:
- """
- Crop each mask by the given box, and resize results to (mask_size, mask_size).
- This can be used to prepare training targets for Mask R-CNN.
-
- Args:
- boxes (Tensor): Nx4 tensor storing the boxes for each mask
- mask_size (int): the size of the rasterized mask.
-
- Returns:
- Tensor: A bool tensor of shape (N, mask_size, mask_size), where
- N is the number of predicted boxes for this image.
- """
- assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
-
- device = boxes.device
- # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise
- # (several small tensors for representing a single instance mask)
- boxes = boxes.to(torch.device("cpu"))
-
- results = [
- rasterize_polygons_within_box(poly, box.numpy(), mask_size)
- for poly, box in zip(self.polygons, boxes)
- ]
- """
- poly: list[list[float]], the polygons for one instance
- box: a tensor of shape (4,)
- """
- if len(results) == 0:
- return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)
- return torch.stack(results, dim=0).to(device=device)
-
- def area(self):
- """
- Computes area of the mask.
- Only works with Polygons, using the shoelace formula:
- https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
-
- Returns:
- Tensor: a vector, area for each instance
- """
-
- area = []
- for polygons_per_instance in self.polygons:
- area_per_instance = 0
- for p in polygons_per_instance:
- area_per_instance += polygon_area(p[0::2], p[1::2])
- area.append(area_per_instance)
-
- return torch.tensor(area)
-
- @staticmethod
- def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks":
- """
- Concatenates a list of PolygonMasks into a single PolygonMasks
-
- Arguments:
- polymasks_list (list[PolygonMasks])
-
- Returns:
- PolygonMasks: the concatenated PolygonMasks
- """
- assert isinstance(polymasks_list, (list, tuple))
- assert len(polymasks_list) > 0
- assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)
-
- cat_polymasks = type(polymasks_list[0])(
- list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list))
- )
- return cat_polymasks
-
-
-class ROIMasks:
- """
- Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given,
- full-image bitmask can be obtained by "pasting" the mask on the region defined
- by the corresponding ROI box.
- """
-
- def __init__(self, tensor: torch.Tensor):
- """
- Args:
- tensor: (N, M, M) mask tensor that defines the mask within each ROI.
- """
- if tensor.dim() != 3:
- raise ValueError("ROIMasks must take a masks of 3 dimension.")
- self.tensor = tensor
-
- def to(self, device: torch.device) -> "ROIMasks":
- return ROIMasks(self.tensor.to(device))
-
- @property
- def device(self) -> device:
- return self.tensor.device
-
- def __len__(self):
- return self.tensor.shape[0]
-
- def __getitem__(self, item) -> "ROIMasks":
- """
- Returns:
- ROIMasks: Create a new :class:`ROIMasks` by indexing.
-
- The following usage are allowed:
-
- 1. `new_masks = masks[2:10]`: return a slice of masks.
- 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor
- with `length = len(masks)`. Nonzero elements in the vector will be selected.
-
- Note that the returned object might share storage with this object,
- subject to Pytorch's indexing semantics.
- """
- t = self.tensor[item]
- if t.dim() != 3:
- raise ValueError(
- f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!"
- )
- return ROIMasks(t)
-
- @torch.jit.unused
- def __repr__(self) -> str:
- s = self.__class__.__name__ + "("
- s += "num_instances={})".format(len(self.tensor))
- return s
-
- @torch.jit.unused
- def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5):
- """
- Args: see documentation of :func:`paste_masks_in_image`.
- """
- from annotator.oneformer.detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape
-
- if torch.jit.is_tracing():
- if isinstance(height, torch.Tensor):
- paste_func = _paste_masks_tensor_shape
- else:
- paste_func = paste_masks_in_image
- else:
- paste_func = retry_if_cuda_oom(paste_masks_in_image)
- bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold)
- return BitMasks(bitmasks)
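The `polygon_area` helper above is the shoelace formula; a small standalone check with a unit square and a right triangle (the coordinates are made-up examples):

```python
import numpy as np


def polygon_area(x, y):
    # shoelace formula, as in the PolygonMasks.area() helper above
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))


# unit square (0,0) -> (1,0) -> (1,1) -> (0,1): area 1.0
x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
print(polygon_area(x, y))  # 1.0

# a flat COCO-style polygon [x0, y0, x1, y1, ...] is split the same way
# PolygonMasks does it: p[0::2] are the x's, p[1::2] the y's
p = np.array([0.0, 0.0, 4.0, 0.0, 4.0, 3.0])  # right triangle, area 6.0
print(polygon_area(p[0::2], p[1::2]))  # 6.0
```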
diff --git a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ann_head.py b/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ann_head.py
deleted file mode 100644
index 30aaacc2cafc568d3de71d1477b4de0dc0fea9d3..0000000000000000000000000000000000000000
--- a/spaces/Superlang/ImageProcessor/annotator/uniformer/mmseg/models/decode_heads/ann_head.py
+++ /dev/null
@@ -1,245 +0,0 @@
-import torch
-import torch.nn as nn
-from annotator.uniformer.mmcv.cnn import ConvModule
-
-from ..builder import HEADS
-from ..utils import SelfAttentionBlock as _SelfAttentionBlock
-from .decode_head import BaseDecodeHead
-
-
-class PPMConcat(nn.ModuleList):
- """Pyramid Pooling Module that only concat the features of each layer.
-
- Args:
- pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module.
- """
-
- def __init__(self, pool_scales=(1, 3, 6, 8)):
- super(PPMConcat, self).__init__(
- [nn.AdaptiveAvgPool2d(pool_scale) for pool_scale in pool_scales])
-
- def forward(self, feats):
- """Forward function."""
- ppm_outs = []
- for ppm in self:
- ppm_out = ppm(feats)
- ppm_outs.append(ppm_out.view(*feats.shape[:2], -1))
- concat_outs = torch.cat(ppm_outs, dim=2)
- return concat_outs
-
-
-class SelfAttentionBlock(_SelfAttentionBlock):
- """Make a ANN used SelfAttentionBlock.
-
- Args:
- low_in_channels (int): Input channels of lower level feature,
- which is the key feature for self-attention.
- high_in_channels (int): Input channels of higher level feature,
- which is the query feature for self-attention.
- channels (int): Output channels of key/query transform.
- out_channels (int): Output channels.
- share_key_query (bool): Whether share projection weight between key
- and query projection.
- query_scale (int): The scale of query feature map.
- key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module of key feature.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict|None): Config of activation layers.
- """
-
- def __init__(self, low_in_channels, high_in_channels, channels,
- out_channels, share_key_query, query_scale, key_pool_scales,
- conv_cfg, norm_cfg, act_cfg):
- key_psp = PPMConcat(key_pool_scales)
- if query_scale > 1:
- query_downsample = nn.MaxPool2d(kernel_size=query_scale)
- else:
- query_downsample = None
- super(SelfAttentionBlock, self).__init__(
- key_in_channels=low_in_channels,
- query_in_channels=high_in_channels,
- channels=channels,
- out_channels=out_channels,
- share_key_query=share_key_query,
- query_downsample=query_downsample,
- key_downsample=key_psp,
- key_query_num_convs=1,
- key_query_norm=True,
- value_out_num_convs=1,
- value_out_norm=False,
- matmul_norm=True,
- with_out=True,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
-
-
-class AFNB(nn.Module):
- """Asymmetric Fusion Non-local Block(AFNB)
-
- Args:
- low_in_channels (int): Input channels of lower level feature,
- which is the key feature for self-attention.
- high_in_channels (int): Input channels of higher level feature,
- which is the query feature for self-attention.
- channels (int): Output channels of key/query transform.
-        out_channels (int): Output channels.
- query_scales (tuple[int]): The scales of query feature map.
- Default: (1,)
- key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module of key feature.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict|None): Config of activation layers.
- """
-
- def __init__(self, low_in_channels, high_in_channels, channels,
- out_channels, query_scales, key_pool_scales, conv_cfg,
- norm_cfg, act_cfg):
- super(AFNB, self).__init__()
- self.stages = nn.ModuleList()
- for query_scale in query_scales:
- self.stages.append(
- SelfAttentionBlock(
- low_in_channels=low_in_channels,
- high_in_channels=high_in_channels,
- channels=channels,
- out_channels=out_channels,
- share_key_query=False,
- query_scale=query_scale,
- key_pool_scales=key_pool_scales,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
- self.bottleneck = ConvModule(
- out_channels + high_in_channels,
- out_channels,
- 1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=None)
-
- def forward(self, low_feats, high_feats):
- """Forward function."""
- priors = [stage(high_feats, low_feats) for stage in self.stages]
- context = torch.stack(priors, dim=0).sum(dim=0)
- output = self.bottleneck(torch.cat([context, high_feats], 1))
- return output
-
-
-class APNB(nn.Module):
- """Asymmetric Pyramid Non-local Block (APNB)
-
- Args:
- in_channels (int): Input channels of key/query feature,
- which is the key feature for self-attention.
- channels (int): Output channels of key/query transform.
- out_channels (int): Output channels.
- query_scales (tuple[int]): The scales of query feature map.
- Default: (1,)
- key_pool_scales (tuple[int]): Pooling scales used in Pooling Pyramid
- Module of key feature.
- conv_cfg (dict|None): Config of conv layers.
- norm_cfg (dict|None): Config of norm layers.
- act_cfg (dict|None): Config of activation layers.
- """
-
- def __init__(self, in_channels, channels, out_channels, query_scales,
- key_pool_scales, conv_cfg, norm_cfg, act_cfg):
- super(APNB, self).__init__()
- self.stages = nn.ModuleList()
- for query_scale in query_scales:
- self.stages.append(
- SelfAttentionBlock(
- low_in_channels=in_channels,
- high_in_channels=in_channels,
- channels=channels,
- out_channels=out_channels,
- share_key_query=True,
- query_scale=query_scale,
- key_pool_scales=key_pool_scales,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg))
- self.bottleneck = ConvModule(
- 2 * in_channels,
- out_channels,
- 1,
- conv_cfg=conv_cfg,
- norm_cfg=norm_cfg,
- act_cfg=act_cfg)
-
- def forward(self, feats):
- """Forward function."""
- priors = [stage(feats, feats) for stage in self.stages]
- context = torch.stack(priors, dim=0).sum(dim=0)
- output = self.bottleneck(torch.cat([context, feats], 1))
- return output
-
-
-@HEADS.register_module()
-class ANNHead(BaseDecodeHead):
- """Asymmetric Non-local Neural Networks for Semantic Segmentation.
-
- This head is the implementation of `ANNNet
-    <https://arxiv.org/abs/1908.07678>`_.
-
- Args:
- project_channels (int): Projection channels for Nonlocal.
- query_scales (tuple[int]): The scales of query feature map.
- Default: (1,)
- key_pool_scales (tuple[int]): The pooling scales of key feature map.
- Default: (1, 3, 6, 8).
- """
-
- def __init__(self,
- project_channels,
- query_scales=(1, ),
- key_pool_scales=(1, 3, 6, 8),
- **kwargs):
- super(ANNHead, self).__init__(
- input_transform='multiple_select', **kwargs)
- assert len(self.in_channels) == 2
- low_in_channels, high_in_channels = self.in_channels
- self.project_channels = project_channels
- self.fusion = AFNB(
- low_in_channels=low_in_channels,
- high_in_channels=high_in_channels,
- out_channels=high_in_channels,
- channels=project_channels,
- query_scales=query_scales,
- key_pool_scales=key_pool_scales,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.bottleneck = ConvModule(
- high_in_channels,
- self.channels,
- 3,
- padding=1,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
- self.context = APNB(
- in_channels=self.channels,
- out_channels=self.channels,
- channels=project_channels,
- query_scales=query_scales,
- key_pool_scales=key_pool_scales,
- conv_cfg=self.conv_cfg,
- norm_cfg=self.norm_cfg,
- act_cfg=self.act_cfg)
-
- def forward(self, inputs):
- """Forward function."""
- low_feats, high_feats = self._transform_inputs(inputs)
- output = self.fusion(low_feats, high_feats)
- output = self.dropout(output)
- output = self.bottleneck(output)
- output = self.context(output)
- output = self.cls_seg(output)
-
- return output
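For orientation, here is a minimal sketch of how a decode head like `ANNHead` is typically wired into an mmseg-style model config. The channel counts, stage indices, and norm settings below are illustrative assumptions, not values taken from this repository.

```python
# Illustrative mmseg-style config sketch; all numbers are assumptions for a
# ResNet-like backbone, not values from this file.
ann_head = dict(
    type='ANNHead',
    in_channels=[1024, 2048],        # (low_in_channels, high_in_channels) fed to AFNB
    in_index=[2, 3],                 # which backbone stages to select
    channels=512,                    # bottleneck / APNB width (self.channels)
    project_channels=256,            # key/query projection width
    query_scales=(1, ),
    key_pool_scales=(1, 3, 6, 8),
    dropout_ratio=0.1,
    num_classes=19,
    norm_cfg=dict(type='SyncBN', requires_grad=True),
    align_corners=False,
    loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
)
```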
diff --git a/spaces/THEFIG/AI-chatbot/README.md b/spaces/THEFIG/AI-chatbot/README.md
deleted file mode 100644
index 7b220248d4c67b02335fa1d3fd07187420be7ad8..0000000000000000000000000000000000000000
--- a/spaces/THEFIG/AI-chatbot/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: AI Chatbot
-emoji: 🌍
-colorFrom: gray
-colorTo: pink
-sdk: gradio
-sdk_version: 3.36.1
-app_file: gradioapp.py
-pinned: false
-license: apache-2.0
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
-
diff --git a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py b/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py
deleted file mode 100644
index 1becc5093c5ab8e196bb9fee415e2381e7158fc3..0000000000000000000000000000000000000000
--- a/spaces/TandCAcceptMe/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py
+++ /dev/null
@@ -1,6 +0,0 @@
-__all__ = ["Mapping", "Sequence"]
-
-try:
- from collections.abc import Mapping, Sequence
-except ImportError:
- from collections import Mapping, Sequence
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/common/optim.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/common/optim.py
deleted file mode 100644
index d39d3aaa546c17e831d21d1758b69e8c1609415e..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/configs/common/optim.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import torch
-
-from detectron2.config import LazyCall as L
-from detectron2.solver.build import get_default_optimizer_params
-
-SGD = L(torch.optim.SGD)(
- params=L(get_default_optimizer_params)(
- # params.model is meant to be set to the model object, before instantiating
- # the optimizer.
- weight_decay_norm=0.0
- ),
- lr=0.02,
- momentum=0.9,
- weight_decay=1e-4,
-)
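As a rough guide, the LazyCall node above is usually materialized with detectron2's `instantiate` after filling in the `params.model` placeholder; the toy model and the learning-rate override below are assumptions for illustration.

```python
# Hedged usage sketch: a stand-in linear model instead of a real detector,
# and an illustrative lr override.
import torch
from detectron2.config import instantiate

model = torch.nn.Linear(10, 2)   # placeholder for an actual detection model

SGD.params.model = model         # fill the placeholder mentioned in the comment above
SGD.lr = 0.01                    # overrides are plain attribute assignments
optimizer = instantiate(SGD)     # builds torch.optim.SGD with resolved param groups
```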
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/solver/build.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/solver/build.py
deleted file mode 100644
index 1989dfcd0855d833a75e403f6a5e88725d78022f..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/detectron2/solver/build.py
+++ /dev/null
@@ -1,285 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import copy
-import itertools
-import logging
-from collections import defaultdict
-from enum import Enum
-from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Type, Union
-import torch
-from fvcore.common.param_scheduler import CosineParamScheduler, MultiStepParamScheduler
-
-from detectron2.config import CfgNode
-
-from .lr_scheduler import LRMultiplier, WarmupParamScheduler
-
-_GradientClipperInput = Union[torch.Tensor, Iterable[torch.Tensor]]
-_GradientClipper = Callable[[_GradientClipperInput], None]
-
-
-class GradientClipType(Enum):
- VALUE = "value"
- NORM = "norm"
-
-
-def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
- """
- Creates gradient clipping closure to clip by value or by norm,
- according to the provided config.
- """
- cfg = copy.deepcopy(cfg)
-
- def clip_grad_norm(p: _GradientClipperInput):
- torch.nn.utils.clip_grad_norm_(p, cfg.CLIP_VALUE, cfg.NORM_TYPE)
-
- def clip_grad_value(p: _GradientClipperInput):
- torch.nn.utils.clip_grad_value_(p, cfg.CLIP_VALUE)
-
- _GRADIENT_CLIP_TYPE_TO_CLIPPER = {
- GradientClipType.VALUE: clip_grad_value,
- GradientClipType.NORM: clip_grad_norm,
- }
- return _GRADIENT_CLIP_TYPE_TO_CLIPPER[GradientClipType(cfg.CLIP_TYPE)]
-
-
-def _generate_optimizer_class_with_gradient_clipping(
- optimizer: Type[torch.optim.Optimizer],
- *,
- per_param_clipper: Optional[_GradientClipper] = None,
- global_clipper: Optional[_GradientClipper] = None,
-) -> Type[torch.optim.Optimizer]:
- """
- Dynamically creates a new type that inherits the type of a given instance
- and overrides the `step` method to add gradient clipping
- """
- assert (
- per_param_clipper is None or global_clipper is None
- ), "Not allowed to use both per-parameter clipping and global clipping"
-
- def optimizer_wgc_step(self, closure=None):
- if per_param_clipper is not None:
- for group in self.param_groups:
- for p in group["params"]:
- per_param_clipper(p)
- else:
- # global clipper for future use with detr
- # (https://github.com/facebookresearch/detr/pull/287)
- all_params = itertools.chain(*[g["params"] for g in self.param_groups])
- global_clipper(all_params)
- super(type(self), self).step(closure)
-
- OptimizerWithGradientClip = type(
- optimizer.__name__ + "WithGradientClip",
- (optimizer,),
- {"step": optimizer_wgc_step},
- )
- return OptimizerWithGradientClip
-
-
-def maybe_add_gradient_clipping(
- cfg: CfgNode, optimizer: Type[torch.optim.Optimizer]
-) -> Type[torch.optim.Optimizer]:
- """
- If gradient clipping is enabled through config options, wraps the existing
- optimizer type to become a new dynamically created class OptimizerWithGradientClip
- that inherits the given optimizer and overrides the `step` method to
- include gradient clipping.
-
- Args:
- cfg: CfgNode, configuration options
- optimizer: type. A subclass of torch.optim.Optimizer
-
- Return:
- type: either the input `optimizer` (if gradient clipping is disabled), or
- a subclass of it with gradient clipping included in the `step` method.
- """
- if not cfg.SOLVER.CLIP_GRADIENTS.ENABLED:
- return optimizer
- if isinstance(optimizer, torch.optim.Optimizer):
- optimizer_type = type(optimizer)
- else:
- assert issubclass(optimizer, torch.optim.Optimizer), optimizer
- optimizer_type = optimizer
-
- grad_clipper = _create_gradient_clipper(cfg.SOLVER.CLIP_GRADIENTS)
- OptimizerWithGradientClip = _generate_optimizer_class_with_gradient_clipping(
- optimizer_type, per_param_clipper=grad_clipper
- )
- if isinstance(optimizer, torch.optim.Optimizer):
- optimizer.__class__ = OptimizerWithGradientClip # a bit hacky, not recommended
- return optimizer
- else:
- return OptimizerWithGradientClip
-
-
-def build_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
- """
- Build an optimizer from config.
- """
- params = get_default_optimizer_params(
- model,
- base_lr=cfg.SOLVER.BASE_LR,
- weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
- bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
- weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
- )
- return maybe_add_gradient_clipping(cfg, torch.optim.SGD)(
- params,
- lr=cfg.SOLVER.BASE_LR,
- momentum=cfg.SOLVER.MOMENTUM,
- nesterov=cfg.SOLVER.NESTEROV,
- weight_decay=cfg.SOLVER.WEIGHT_DECAY,
- )
-
-
-def get_default_optimizer_params(
- model: torch.nn.Module,
- base_lr: Optional[float] = None,
- weight_decay: Optional[float] = None,
- weight_decay_norm: Optional[float] = None,
- bias_lr_factor: Optional[float] = 1.0,
- weight_decay_bias: Optional[float] = None,
- overrides: Optional[Dict[str, Dict[str, float]]] = None,
-) -> List[Dict[str, Any]]:
- """
- Get default param list for optimizer, with support for a few types of
- overrides. If no overrides are needed, this is equivalent to `model.parameters()`.
-
- Args:
- base_lr: lr for every group by default. Can be omitted to use the one in optimizer.
- weight_decay: weight decay for every group by default. Can be omitted to use the one
- in optimizer.
- weight_decay_norm: override weight decay for params in normalization layers
- bias_lr_factor: multiplier of lr for bias parameters.
- weight_decay_bias: override weight decay for bias parameters
- overrides: if not `None`, provides values for optimizer hyperparameters
- (LR, weight decay) for module parameters with a given name; e.g.
- ``{"embedding": {"lr": 0.01, "weight_decay": 0.1}}`` will set the LR and
- weight decay values for all module parameters named `embedding`.
-
- For common detection models, ``weight_decay_norm`` is the only option
- that usually needs to be set. ``bias_lr_factor`` and ``weight_decay_bias`` are legacy
- settings from Detectron1 that are generally not found useful.
-
- Example:
- ::
- torch.optim.SGD(get_default_optimizer_params(model, weight_decay_norm=0),
- lr=0.01, weight_decay=1e-4, momentum=0.9)
- """
- if overrides is None:
- overrides = {}
- defaults = {}
- if base_lr is not None:
- defaults["lr"] = base_lr
- if weight_decay is not None:
- defaults["weight_decay"] = weight_decay
- bias_overrides = {}
- if bias_lr_factor is not None and bias_lr_factor != 1.0:
- # NOTE: unlike Detectron v1, we now by default make bias hyperparameters
- # exactly the same as regular weights.
- if base_lr is None:
- raise ValueError("bias_lr_factor requires base_lr")
- bias_overrides["lr"] = base_lr * bias_lr_factor
- if weight_decay_bias is not None:
- bias_overrides["weight_decay"] = weight_decay_bias
- if len(bias_overrides):
- if "bias" in overrides:
- raise ValueError("Conflicting overrides for 'bias'")
- overrides["bias"] = bias_overrides
-
- norm_module_types = (
- torch.nn.BatchNorm1d,
- torch.nn.BatchNorm2d,
- torch.nn.BatchNorm3d,
- torch.nn.SyncBatchNorm,
- # NaiveSyncBatchNorm inherits from BatchNorm2d
- torch.nn.GroupNorm,
- torch.nn.InstanceNorm1d,
- torch.nn.InstanceNorm2d,
- torch.nn.InstanceNorm3d,
- torch.nn.LayerNorm,
- torch.nn.LocalResponseNorm,
- )
- params: List[Dict[str, Any]] = []
- memo: Set[torch.nn.parameter.Parameter] = set()
- for module in model.modules():
- for module_param_name, value in module.named_parameters(recurse=False):
- if not value.requires_grad:
- continue
- # Avoid duplicating parameters
- if value in memo:
- continue
- memo.add(value)
-
- hyperparams = copy.copy(defaults)
- if isinstance(module, norm_module_types) and weight_decay_norm is not None:
- hyperparams["weight_decay"] = weight_decay_norm
- hyperparams.update(overrides.get(module_param_name, {}))
- params.append({"params": [value], **hyperparams})
- return reduce_param_groups(params)
-
-
-def _expand_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- # Transform parameter groups into per-parameter structure.
- # Later items in `params` can overwrite parameters set in previous items.
- ret = defaultdict(dict)
- for item in params:
- assert "params" in item
- cur_params = {x: y for x, y in item.items() if x != "params"}
- for param in item["params"]:
- ret[param].update({"params": [param], **cur_params})
- return list(ret.values())
-
-
-def reduce_param_groups(params: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- # Reorganize the parameter groups and merge duplicated groups.
- # The number of parameter groups needs to be as small as possible in order
- # to efficiently use the PyTorch multi-tensor optimizer. Therefore instead
- # of using a parameter_group per single parameter, we reorganize the
- # parameter groups and merge duplicated groups. This approach speeds
- # up multi-tensor optimizer significantly.
- params = _expand_param_groups(params)
- groups = defaultdict(list) # re-group all parameter groups by their hyperparams
- for item in params:
- cur_params = tuple((x, y) for x, y in item.items() if x != "params")
- groups[cur_params].extend(item["params"])
- ret = []
- for param_keys, param_values in groups.items():
- cur = {kv[0]: kv[1] for kv in param_keys}
- cur["params"] = param_values
- ret.append(cur)
- return ret
-
-
-def build_lr_scheduler(
- cfg: CfgNode, optimizer: torch.optim.Optimizer
-) -> torch.optim.lr_scheduler._LRScheduler:
- """
- Build a LR scheduler from config.
- """
- name = cfg.SOLVER.LR_SCHEDULER_NAME
-
- if name == "WarmupMultiStepLR":
- steps = [x for x in cfg.SOLVER.STEPS if x <= cfg.SOLVER.MAX_ITER]
- if len(steps) != len(cfg.SOLVER.STEPS):
- logger = logging.getLogger(__name__)
- logger.warning(
- "SOLVER.STEPS contains values larger than SOLVER.MAX_ITER. "
- "These values will be ignored."
- )
- sched = MultiStepParamScheduler(
- values=[cfg.SOLVER.GAMMA ** k for k in range(len(steps) + 1)],
- milestones=steps,
- num_updates=cfg.SOLVER.MAX_ITER,
- )
- elif name == "WarmupCosineLR":
- sched = CosineParamScheduler(1, 0)
- else:
- raise ValueError("Unknown LR scheduler: {}".format(name))
-
- sched = WarmupParamScheduler(
- sched,
- cfg.SOLVER.WARMUP_FACTOR,
- min(cfg.SOLVER.WARMUP_ITERS / cfg.SOLVER.MAX_ITER, 1.0),
- cfg.SOLVER.WARMUP_METHOD,
- )
- return LRMultiplier(optimizer, multiplier=sched, max_iter=cfg.SOLVER.MAX_ITER)
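The core trick in `_generate_optimizer_class_with_gradient_clipping` is dynamically deriving an optimizer subclass whose `step` clips gradients before updating. A small standalone sketch of that pattern, with a toy model and clipping threshold that are assumptions for illustration:

```python
# Standalone sketch of the "wrap the optimizer class" idea; not the detectron2
# entry point itself, and the model/threshold are illustrative.
import torch


def clip_by_norm(params, max_norm=1.0):
    torch.nn.utils.clip_grad_norm_(params, max_norm)


def with_grad_clip(optimizer_cls, clipper):
    # Derive a subclass whose step() clips every param group before the update.
    def step(self, closure=None):
        for group in self.param_groups:
            clipper(group["params"])
        return super(type(self), self).step(closure)

    return type(optimizer_cls.__name__ + "WithGradientClip", (optimizer_cls,), {"step": step})


model = torch.nn.Linear(4, 2)
ClippedSGD = with_grad_clip(torch.optim.SGD, clip_by_norm)
opt = ClippedSGD(model.parameters(), lr=0.1, momentum=0.9)

loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
opt.step()  # gradients are clipped to norm <= 1.0 before the SGD update
```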
diff --git a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_augmentation_impl.py b/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_augmentation_impl.py
deleted file mode 100644
index 5a69e178a5ac67f69c2eeab667b9c0740a862eee..0000000000000000000000000000000000000000
--- a/spaces/TencentARC/VLog/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/data/transforms/custom_augmentation_impl.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-# Modified by Xingyi Zhou
-"""
-Implement many useful :class:`Augmentation`.
-"""
-import numpy as np
-import sys
-from fvcore.transforms.transform import (
- BlendTransform,
- CropTransform,
- HFlipTransform,
- NoOpTransform,
- Transform,
- VFlipTransform,
-)
-from PIL import Image
-
-from detectron2.data.transforms.augmentation import Augmentation
-from .custom_transform import EfficientDetResizeCropTransform
-
-__all__ = [
- "EfficientDetResizeCrop",
-]
-
-
-class EfficientDetResizeCrop(Augmentation):
- """
- Randomly scale the target size by a factor sampled from `scale`, resize the image so it
- fits the scaled target, and choose a random crop offset (EfficientDet-style resize-and-crop).
- """
-
- def __init__(
- self, size, scale, interp=Image.BILINEAR
- ):
- """
- Args:
- """
- super().__init__()
- self.target_size = (size, size)
- self.scale = scale
- self.interp = interp
-
- def get_transform(self, img):
- # Select a random scale factor.
- scale_factor = np.random.uniform(*self.scale)
- scaled_target_height = scale_factor * self.target_size[0]
- scaled_target_width = scale_factor * self.target_size[1]
- # Recompute the accurate scale_factor using rounded scaled image size.
- width, height = img.shape[1], img.shape[0]
- img_scale_y = scaled_target_height / height
- img_scale_x = scaled_target_width / width
- img_scale = min(img_scale_y, img_scale_x)
-
- # Select non-zero random offset (x, y) if scaled image is larger than target size
- scaled_h = int(height * img_scale)
- scaled_w = int(width * img_scale)
- offset_y = scaled_h - self.target_size[0]
- offset_x = scaled_w - self.target_size[1]
- offset_y = int(max(0.0, float(offset_y)) * np.random.uniform(0, 1))
- offset_x = int(max(0.0, float(offset_x)) * np.random.uniform(0, 1))
- return EfficientDetResizeCropTransform(
- scaled_h, scaled_w, offset_y, offset_x, img_scale, self.target_size, self.interp)
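A hedged usage sketch of the augmentation above; the size/scale values are arbitrary, and `apply_image` is assumed to come from the companion `EfficientDetResizeCropTransform` class, which is not shown in this diff.

```python
# Usage sketch only; values are illustrative and apply_image lives in the
# companion custom_transform module.
import numpy as np

aug = EfficientDetResizeCrop(size=640, scale=(0.1, 2.0))
image = np.zeros((480, 720, 3), dtype=np.uint8)   # dummy H x W x C image
tfm = aug.get_transform(image)                    # EfficientDetResizeCropTransform
out = tfm.apply_image(image)                      # resized/cropped toward 640 x 640
```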
diff --git a/spaces/Tuana/what-would-mother-say/utils/__init__.py b/spaces/Tuana/what-would-mother-say/utils/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/VideoCrafter/VideoCrafter/lvdm/modules/networks/openaimodel3d.py b/spaces/VideoCrafter/VideoCrafter/lvdm/modules/networks/openaimodel3d.py
deleted file mode 100644
index 328fac71761a69b461f33946d6a1aa08622ecd8f..0000000000000000000000000000000000000000
--- a/spaces/VideoCrafter/VideoCrafter/lvdm/modules/networks/openaimodel3d.py
+++ /dev/null
@@ -1,577 +0,0 @@
-from functools import partial
-from abc import abstractmethod
-import torch
-import torch.nn as nn
-from einops import rearrange
-import torch.nn.functional as F
-from lvdm.models.utils_diffusion import timestep_embedding
-from lvdm.common import checkpoint
-from lvdm.basics import (
- zero_module,
- conv_nd,
- linear,
- avg_pool_nd,
- normalization
-)
-from lvdm.modules.attention import SpatialTransformer, TemporalTransformer
-
-
-class TimestepBlock(nn.Module):
- """
- Any module where forward() takes timestep embeddings as a second argument.
- """
- @abstractmethod
- def forward(self, x, emb):
- """
- Apply the module to `x` given `emb` timestep embeddings.
- """
-
-
-class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
- """
- A sequential module that passes timestep embeddings to the children that
- support it as an extra input.
- """
-
- def forward(self, x, emb, context=None, batch_size=None):
- for layer in self:
- if isinstance(layer, TimestepBlock):
- x = layer(x, emb, batch_size)
- elif isinstance(layer, SpatialTransformer):
- x = layer(x, context)
- elif isinstance(layer, TemporalTransformer):
- x = rearrange(x, '(b f) c h w -> b c f h w', b=batch_size)
- x = layer(x, context)
- x = rearrange(x, 'b c f h w -> (b f) c h w')
- else:
- x = layer(x,)
- return x
-
-
-class Downsample(nn.Module):
- """
- A downsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- downsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- stride = 2 if dims != 3 else (1, 2, 2)
- if use_conv:
- self.op = conv_nd(
- dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
- )
- else:
- assert self.channels == self.out_channels
- self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- return self.op(x)
-
-
-class Upsample(nn.Module):
- """
- An upsampling layer with an optional convolution.
- :param channels: channels in the inputs and outputs.
- :param use_conv: a bool determining if a convolution is applied.
- :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
- upsampling occurs in the inner-two dimensions.
- """
-
- def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
- super().__init__()
- self.channels = channels
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.dims = dims
- if use_conv:
- self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
-
- def forward(self, x):
- assert x.shape[1] == self.channels
- if self.dims == 3:
- x = F.interpolate(x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode='nearest')
- else:
- x = F.interpolate(x, scale_factor=2, mode='nearest')
- if self.use_conv:
- x = self.conv(x)
- return x
-
-
-class ResBlock(TimestepBlock):
- """
- A residual block that can optionally change the number of channels.
- :param channels: the number of input channels.
- :param emb_channels: the number of timestep embedding channels.
- :param dropout: the rate of dropout.
- :param out_channels: if specified, the number of out channels.
- :param use_conv: if True and out_channels is specified, use a spatial
- convolution instead of a smaller 1x1 convolution to change the
- channels in the skip connection.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param up: if True, use this block for upsampling.
- :param down: if True, use this block for downsampling.
- """
-
- def __init__(
- self,
- channels,
- emb_channels,
- dropout,
- out_channels=None,
- use_scale_shift_norm=False,
- dims=2,
- use_checkpoint=False,
- use_conv=False,
- up=False,
- down=False,
- use_temporal_conv=False,
- tempspatial_aware=False
- ):
- super().__init__()
- self.channels = channels
- self.emb_channels = emb_channels
- self.dropout = dropout
- self.out_channels = out_channels or channels
- self.use_conv = use_conv
- self.use_checkpoint = use_checkpoint
- self.use_scale_shift_norm = use_scale_shift_norm
- self.use_temporal_conv = use_temporal_conv
-
- self.in_layers = nn.Sequential(
- normalization(channels),
- nn.SiLU(),
- conv_nd(dims, channels, self.out_channels, 3, padding=1),
- )
-
- self.updown = up or down
-
- if up:
- self.h_upd = Upsample(channels, False, dims)
- self.x_upd = Upsample(channels, False, dims)
- elif down:
- self.h_upd = Downsample(channels, False, dims)
- self.x_upd = Downsample(channels, False, dims)
- else:
- self.h_upd = self.x_upd = nn.Identity()
-
- self.emb_layers = nn.Sequential(
- nn.SiLU(),
- nn.Linear(
- emb_channels,
- 2 * self.out_channels if use_scale_shift_norm else self.out_channels,
- ),
- )
- self.out_layers = nn.Sequential(
- normalization(self.out_channels),
- nn.SiLU(),
- nn.Dropout(p=dropout),
- zero_module(nn.Conv2d(self.out_channels, self.out_channels, 3, padding=1)),
- )
-
- if self.out_channels == channels:
- self.skip_connection = nn.Identity()
- elif use_conv:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 3, padding=1)
- else:
- self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
-
- if self.use_temporal_conv:
- self.temopral_conv = TemporalConvBlock(
- self.out_channels,
- self.out_channels,
- dropout=0.1,
- spatial_aware=tempspatial_aware
- )
-
- def forward(self, x, emb, batch_size=None):
- """
- Apply the block to a Tensor, conditioned on a timestep embedding.
- :param x: an [N x C x ...] Tensor of features.
- :param emb: an [N x emb_channels] Tensor of timestep embeddings.
- :return: an [N x C x ...] Tensor of outputs.
- """
- input_tuple = (x, emb,)
- if batch_size:
- forward_batchsize = partial(self._forward, batch_size=batch_size)
- return checkpoint(forward_batchsize, input_tuple, self.parameters(), self.use_checkpoint)
- return checkpoint(self._forward, input_tuple, self.parameters(), self.use_checkpoint)
-
- def _forward(self, x, emb, batch_size=None,):
- if self.updown:
- in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
- h = in_rest(x)
- h = self.h_upd(h)
- x = self.x_upd(x)
- h = in_conv(h)
- else:
- h = self.in_layers(x)
- emb_out = self.emb_layers(emb).type(h.dtype)
- while len(emb_out.shape) < len(h.shape):
- emb_out = emb_out[..., None]
- if self.use_scale_shift_norm:
- out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
- scale, shift = torch.chunk(emb_out, 2, dim=1)
- h = out_norm(h) * (1 + scale) + shift
- h = out_rest(h)
- else:
- h = h + emb_out
- h = self.out_layers(h)
- h = self.skip_connection(x) + h
-
- if self.use_temporal_conv and batch_size:
- h = rearrange(h, '(b t) c h w -> b c t h w', b=batch_size)
- h = self.temopral_conv(h)
- h = rearrange(h, 'b c t h w -> (b t) c h w')
- return h
-
-
-class TemporalConvBlock(nn.Module):
- """
- Adapted from modelscope: https://github.com/modelscope/modelscope/blob/master/modelscope/models/multi_modal/video_synthesis/unet_sd.py
- """
-
- def __init__(self, in_channels, out_channels=None, dropout=0.0, spatial_aware=False):
- super(TemporalConvBlock, self).__init__()
- if out_channels is None:
- out_channels = in_channels
- self.in_channels = in_channels
- self.out_channels = out_channels
- kernel_shape = (3, 1, 1) if not spatial_aware else (3, 3, 3)
- padding_shape = (1, 0, 0) if not spatial_aware else (1, 1, 1)
-
- # conv layers
- self.conv1 = nn.Sequential(
- nn.GroupNorm(32, in_channels), nn.SiLU(),
- nn.Conv3d(in_channels, out_channels, kernel_shape, padding=padding_shape))
- self.conv2 = nn.Sequential(
- nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
- nn.Conv3d(out_channels, in_channels, kernel_shape, padding=padding_shape))
- self.conv3 = nn.Sequential(
- nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
- nn.Conv3d(out_channels, in_channels, (3, 1, 1), padding=(1, 0, 0)))
- self.conv4 = nn.Sequential(
- nn.GroupNorm(32, out_channels), nn.SiLU(), nn.Dropout(dropout),
- nn.Conv3d(out_channels, in_channels, (3, 1, 1), padding=(1, 0, 0)))
-
- # zero out the last layer params, so the conv block is the identity at initialization
- nn.init.zeros_(self.conv4[-1].weight)
- nn.init.zeros_(self.conv4[-1].bias)
-
- def forward(self, x):
- identity = x
- x = self.conv1(x)
- x = self.conv2(x)
- x = self.conv3(x)
- x = self.conv4(x)
-
- return x + identity
-
-
-class UNetModel(nn.Module):
- """
- The full UNet model with attention and timestep embedding.
- :param in_channels: in_channels in the input Tensor.
- :param model_channels: base channel count for the model.
- :param out_channels: channels in the output Tensor.
- :param num_res_blocks: number of residual blocks per downsample.
- :param attention_resolutions: a collection of downsample rates at which
- attention will take place. May be a set, list, or tuple.
- For example, if this contains 4, then at 4x downsampling, attention
- will be used.
- :param dropout: the dropout probability.
- :param channel_mult: channel multiplier for each level of the UNet.
- :param conv_resample: if True, use learned convolutions for upsampling and
- downsampling.
- :param dims: determines if the signal is 1D, 2D, or 3D.
- :param num_classes: if specified (as an int), then this model will be
- class-conditional with `num_classes` classes.
- :param use_checkpoint: use gradient checkpointing to reduce memory usage.
- :param num_heads: the number of attention heads in each attention layer.
- :param num_heads_channels: if specified, ignore num_heads and instead use
- a fixed channel width per attention head.
- :param num_heads_upsample: works with num_heads to set a different number
- of heads for upsampling. Deprecated.
- :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
- :param resblock_updown: use residual blocks for up/downsampling.
- """
-
- def __init__(self,
- in_channels,
- model_channels,
- out_channels,
- num_res_blocks,
- attention_resolutions,
- dropout=0.0,
- channel_mult=(1, 2, 4, 8),
- conv_resample=True,
- dims=2,
- context_dim=None,
- use_scale_shift_norm=False,
- resblock_updown=False,
- num_heads=-1,
- num_head_channels=-1,
- transformer_depth=1,
- use_linear=False,
- use_checkpoint=False,
- temporal_conv=False,
- tempspatial_aware=False,
- temporal_attention=True,
- temporal_selfatt_only=True,
- use_relative_position=True,
- use_causal_attention=False,
- temporal_length=None,
- use_fp16=False,
- addition_attention=False,
- use_image_attention=False,
- temporal_transformer_depth=1,
- fps_cond=False,
- ):
- super(UNetModel, self).__init__()
- if num_heads == -1:
- assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
- if num_head_channels == -1:
- assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
-
- self.in_channels = in_channels
- self.model_channels = model_channels
- self.out_channels = out_channels
- self.num_res_blocks = num_res_blocks
- self.attention_resolutions = attention_resolutions
- self.dropout = dropout
- self.channel_mult = channel_mult
- self.conv_resample = conv_resample
- self.temporal_attention = temporal_attention
- time_embed_dim = model_channels * 4
- self.use_checkpoint = use_checkpoint
- self.dtype = torch.float16 if use_fp16 else torch.float32
- self.addition_attention = addition_attention
- self.use_image_attention = use_image_attention
- self.fps_cond = fps_cond
-
- self.time_embed = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
- if self.fps_cond:
- self.fps_embedding = nn.Sequential(
- linear(model_channels, time_embed_dim),
- nn.SiLU(),
- linear(time_embed_dim, time_embed_dim),
- )
-
- self.input_blocks = nn.ModuleList(
- [
- TimestepEmbedSequential(conv_nd(dims, in_channels, model_channels, 3, padding=1))
- ]
- )
- if self.addition_attention:
- self.init_attn=TimestepEmbedSequential(
- TemporalTransformer(
- model_channels,
- n_heads=8,
- d_head=num_head_channels,
- depth=transformer_depth,
- context_dim=context_dim,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length))
-
- input_block_chans = [model_channels]
- ch = model_channels
- ds = 1
- for level, mult in enumerate(channel_mult):
- for _ in range(num_res_blocks):
- layers = [
- ResBlock(ch, time_embed_dim, dropout,
- out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- )
- ]
- ch = mult * model_channels
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- layers.append(
- SpatialTransformer(ch, num_heads, dim_head,
- depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, disable_self_attn=False,
- img_cross_attention=self.use_image_attention
- )
- )
- if self.temporal_attention:
- layers.append(
- TemporalTransformer(ch, num_heads, dim_head,
- depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length
- )
- )
- self.input_blocks.append(TimestepEmbedSequential(*layers))
- input_block_chans.append(ch)
- if level != len(channel_mult) - 1:
- out_ch = ch
- self.input_blocks.append(
- TimestepEmbedSequential(
- ResBlock(ch, time_embed_dim, dropout,
- out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- down=True
- )
- if resblock_updown
- else Downsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- )
- ch = out_ch
- input_block_chans.append(ch)
- ds *= 2
-
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- layers = [
- ResBlock(ch, time_embed_dim, dropout,
- dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- ),
- SpatialTransformer(ch, num_heads, dim_head,
- depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, disable_self_attn=False,
- img_cross_attention=self.use_image_attention
- )
- ]
- if self.temporal_attention:
- layers.append(
- TemporalTransformer(ch, num_heads, dim_head,
- depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length
- )
- )
- layers.append(
- ResBlock(ch, time_embed_dim, dropout,
- dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- )
- )
- self.middle_block = TimestepEmbedSequential(*layers)
-
- self.output_blocks = nn.ModuleList([])
- for level, mult in list(enumerate(channel_mult))[::-1]:
- for i in range(num_res_blocks + 1):
- ich = input_block_chans.pop()
- layers = [
- ResBlock(ch + ich, time_embed_dim, dropout,
- out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm, tempspatial_aware=tempspatial_aware,
- use_temporal_conv=temporal_conv
- )
- ]
- ch = model_channels * mult
- if ds in attention_resolutions:
- if num_head_channels == -1:
- dim_head = ch // num_heads
- else:
- num_heads = ch // num_head_channels
- dim_head = num_head_channels
- layers.append(
- SpatialTransformer(ch, num_heads, dim_head,
- depth=transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, disable_self_attn=False,
- img_cross_attention=self.use_image_attention
- )
- )
- if self.temporal_attention:
- layers.append(
- TemporalTransformer(ch, num_heads, dim_head,
- depth=temporal_transformer_depth, context_dim=context_dim, use_linear=use_linear,
- use_checkpoint=use_checkpoint, only_self_att=temporal_selfatt_only,
- causal_attention=use_causal_attention, relative_position=use_relative_position,
- temporal_length=temporal_length
- )
- )
- if level and i == num_res_blocks:
- out_ch = ch
- layers.append(
- ResBlock(ch, time_embed_dim, dropout,
- out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint,
- use_scale_shift_norm=use_scale_shift_norm,
- up=True
- )
- if resblock_updown
- else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
- )
- ds //= 2
- self.output_blocks.append(TimestepEmbedSequential(*layers))
-
- self.out = nn.Sequential(
- normalization(ch),
- nn.SiLU(),
- zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
- )
-
- def forward(self, x, timesteps, context=None, features_adapter=None, fps=16, **kwargs):
- t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
- emb = self.time_embed(t_emb)
-
- if self.fps_cond:
- if type(fps) == int:
- fps = torch.full_like(timesteps, fps)
- fps_emb = timestep_embedding(fps, self.model_channels, repeat_only=False)
- emb += self.fps_embedding(fps_emb)
-
- b,_,t,_,_ = x.shape
- ## repeat t times for context [(b t) 77 768] & time embedding
- context = context.repeat_interleave(repeats=t, dim=0)
- emb = emb.repeat_interleave(repeats=t, dim=0)
-
- ## always in shape (b t) c h w, except for temporal layer
- x = rearrange(x, 'b c t h w -> (b t) c h w')
-
- h = x.type(self.dtype)
- adapter_idx = 0
- hs = []
- for id, module in enumerate(self.input_blocks):
- h = module(h, emb, context=context, batch_size=b)
- if id == 0 and self.addition_attention:
- h = self.init_attn(h, emb, context=context, batch_size=b)
- ## plug-in adapter features
- if ((id+1)%3 == 0) and features_adapter is not None:
- h = h + features_adapter[adapter_idx]
- adapter_idx += 1
- hs.append(h)
- if features_adapter is not None:
- assert len(features_adapter)==adapter_idx, 'Wrong features_adapter'
-
- h = self.middle_block(h, emb, context=context, batch_size=b)
- for module in self.output_blocks:
- h = torch.cat([h, hs.pop()], dim=1)
- h = module(h, emb, context=context, batch_size=b)
- h = h.type(x.dtype)
- y = self.out(h)
-
- # reshape back to (b c t h w)
- y = rearrange(y, '(b t) c h w -> b c t h w', b=b)
- return y
-
\ No newline at end of file
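The UNet above repeatedly flattens the time axis into the batch axis so spatial layers see independent frames, then restores it for temporal layers. A minimal sketch of that convention with toy tensors:

```python
# Toy illustration of the (b t) c h w convention used throughout the model above.
import torch
from einops import rearrange

b, c, t, h, w = 2, 8, 16, 32, 32
video = torch.randn(b, c, t, h, w)

# Spatial layers treat frames as independent images: (b t) c h w
frames = rearrange(video, 'b c t h w -> (b t) c h w')
spatial_out = torch.nn.Conv2d(c, c, 3, padding=1)(frames)

# Temporal layers need the frame axis back, so un-flatten with the known batch size
temporal_in = rearrange(spatial_out, '(b t) c h w -> b c t h w', b=b)
assert temporal_in.shape == video.shape
```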
diff --git a/spaces/Wootang01/keyword_extractor/app.py b/spaces/Wootang01/keyword_extractor/app.py
deleted file mode 100644
index f5ae2df93c5a9a6571a981ad14912f2a6c6acc60..0000000000000000000000000000000000000000
--- a/spaces/Wootang01/keyword_extractor/app.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import pandas as pd
-import streamlit as st
-from keybert import KeyBERT
-
-@st.cache(allow_output_mutation=True, suppress_st_warning=True, show_spinner=True)
-def load_model():
- model = KeyBERT("sentence-transformers/xlm-r-distilroberta-base-paraphrase-v1")
- return model
-
-model = load_model()
-
-placeholder = st.empty()
-text_input = placeholder.text_area("Paste or write text", height=300)
-
-top_n = st.sidebar.slider("Select a number of keywords", 1, 10, 5, 1)
-min_ngram = st.sidebar.number_input("Minimum number of words in each keyword", 1, 5, 1, 1)
-max_ngram = st.sidebar.number_input("Maximum number of words in each keyword", min_ngram, 5, 3, step=1)
-st.sidebar.code(f"ngram_range=({min_ngram}, {max_ngram})")
-
-params = {"docs": text_input, "top_n": top_n, "keyphrase_ngram_range": (min_ngram, max_ngram), "stop_words": 'english'}
-
-add_diversity = st.sidebar.checkbox("Adjust diversity of keywords")
-
-if add_diversity:
- method = st.sidebar.selectbox("Select a method", ("Max Sum Similarity", "Maximal Marginal Relevance"))
- if method == "Max Sum Similarity":
- nr_candidates = st.sidebar.slider("nr_candidates", 20, 50, 20, 2)
- params["use_maxsum"] = True
- params["nr_candidates"] = nr_candidates
-
- elif method == "Maximal Marginal Relevance":
- diversity = st.sidebar.slider("diversity", 0.1, 1.0, 0.6, 0.01)
- params["use_mmr"] = True
- params["diversity"] = diversity
-
-keywords = model.extract_keywords(**params)
-
-if keywords != []:
- st.info("Extracted keywords")
- keywords = pd.DataFrame(keywords, columns=["keyword", "relevance"])
- st.table(keywords)
-
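For reference, the same KeyBERT call without the Streamlit UI; the sample text is made up and the MMR settings mirror one of the sidebar options above.

```python
# Minimal sketch of the extraction call used by the app; sample text is illustrative.
from keybert import KeyBERT

kw_model = KeyBERT("sentence-transformers/xlm-r-distilroberta-base-paraphrase-v1")
doc = "Supervised learning maps labelled examples to a predictive model."
keywords = kw_model.extract_keywords(
    doc,
    keyphrase_ngram_range=(1, 3),
    stop_words="english",
    top_n=5,
    use_mmr=True,     # Maximal Marginal Relevance, as in the sidebar option
    diversity=0.6,
)
print(keywords)       # list of (keyword, relevance) tuples
```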
diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/deprecation_utils.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/deprecation_utils.py
deleted file mode 100644
index 6bdda664e102ea9913503b9e169fa97225d52c78..0000000000000000000000000000000000000000
--- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/deprecation_utils.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import inspect
-import warnings
-from typing import Any, Dict, Optional, Union
-
-from packaging import version
-
-
-def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True):
- from .. import __version__
-
- deprecated_kwargs = take_from
- values = ()
- if not isinstance(args[0], tuple):
- args = (args,)
-
- for attribute, version_name, message in args:
- if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
- raise ValueError(
- f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
- f" version {__version__} is >= {version_name}"
- )
-
- warning = None
- if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
- values += (deprecated_kwargs.pop(attribute),)
- warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
- elif hasattr(deprecated_kwargs, attribute):
- values += (getattr(deprecated_kwargs, attribute),)
- warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
- elif deprecated_kwargs is None:
- warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
-
- if warning is not None:
- warning = warning + " " if standard_warn else ""
- warnings.warn(warning + message, FutureWarning, stacklevel=2)
-
- if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
- call_frame = inspect.getouterframes(inspect.currentframe())[1]
- filename = call_frame.filename
- line_number = call_frame.lineno
- function = call_frame.function
- key, value = next(iter(deprecated_kwargs.items()))
- raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
-
- if len(values) == 0:
- return
- elif len(values) == 1:
- return values[0]
- return values
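A hedged sketch of how `deprecate` is typically called from a function that still accepts a renamed keyword argument; the argument names and version string are illustrative, not taken from the library.

```python
# Usage sketch only; "old_scale" and the far-future version string are made up.
def scheduler_step(sample, scale=1.0, **kwargs):
    old_scale = deprecate(
        "old_scale", "999.0.0", "Use `scale` instead.", take_from=kwargs
    )
    if old_scale is not None:
        scale = old_scale
    return sample * scale


print(scheduler_step(2.0, old_scale=3.0))  # emits a FutureWarning, prints 6.0
print(scheduler_step(2.0, scale=4.0))      # no warning, prints 8.0
```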
diff --git a/spaces/Yudha515/Rvc-Models/README.md b/spaces/Yudha515/Rvc-Models/README.md
deleted file mode 100644
index e36f3c1f8803b85b58ec328405b0195fb7347829..0000000000000000000000000000000000000000
--- a/spaces/Yudha515/Rvc-Models/README.md
+++ /dev/null
@@ -1,141 +0,0 @@
----
-title: MusicGen
-python_version: '3.9'
-tags:
-- music generation
-- language models
-- LLMs
-app_file: app.py
-emoji: 🎵
-colorFrom: white
-colorTo: blue
-sdk: gradio
-sdk_version: 3.34.0
-pinned: true
-license: cc-by-nc-4.0
-duplicated_from: facebook/MusicGen
----
-# Audiocraft
-
-Audiocraft is a PyTorch library for deep learning research on audio generation. At the moment, it contains the code for MusicGen, a state-of-the-art controllable text-to-music model.
-
-## MusicGen
-
-Audiocraft provides the code and models for MusicGen, [a simple and controllable model for music generation][arxiv]. MusicGen is a single stage auto-regressive
-Transformer model trained over a 32kHz EnCodec tokenizer with 4 codebooks sampled at 50 Hz. Unlike existing methods like [MusicLM](https://arxiv.org/abs/2301.11325), MusicGen doesn't require a self-supervised semantic representation, and it generates
-all 4 codebooks in one pass. By introducing a small delay between the codebooks, we show we can predict
-them in parallel, thus having only 50 auto-regressive steps per second of audio.
-Check out our [sample page][musicgen_samples] or test the available demo!
-
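As a quick sanity check on the figures above (50 Hz frame rate, 4 codebooks), the token budget for a clip is just their product with the duration; the 8-second duration matches the API example further down.

```python
# Back-of-the-envelope arithmetic from the stated figures; nothing model-specific.
frame_rate_hz = 50                               # EnCodec frames per second
codebooks = 4
seconds = 8                                      # duration used in the API example below

autoregressive_steps = frame_rate_hz * seconds   # 400 steps for 8 s of audio
total_tokens = autoregressive_steps * codebooks  # 1600 codes, 4 predicted per step
print(autoregressive_steps, total_tokens)
```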
-
-We use 20K hours of licensed music to train MusicGen. Specifically, we rely on an internal dataset of 10K high-quality music tracks, and on the ShutterStock and Pond5 music data.
-
-## Installation
-Audiocraft requires Python 3.9, PyTorch 2.0.0, and a GPU with at least 16 GB of memory (for the medium-sized model). To install Audiocraft, you can run the following:
-
-```shell
-# Best to make sure you have torch installed first, in particular before installing xformers.
-# Don't run this if you already have PyTorch installed.
-pip install 'torch>=2.0'
-# Then proceed to one of the following
-pip install -U audiocraft # stable release
-pip install -U git+https://git@github.com/facebookresearch/audiocraft#egg=audiocraft # bleeding edge
-pip install -e . # or if you cloned the repo locally
-```
-
-## Usage
-We offer a number of ways to interact with MusicGen:
-1. A demo is also available on the [`facebook/MusicGen` HuggingFace Space](https://huggingface.co/spaces/facebook/MusicGen) (huge thanks to all the HF team for their support).
-2. You can run the Gradio demo in Colab: [colab notebook](https://colab.research.google.com/drive/1fxGqfg96RBUvGxZ1XXN07s3DthrKUl4-?usp=sharing).
-3. You can use the gradio demo locally by running `python app.py`.
-4. You can play with MusicGen by running the jupyter notebook at [`demo.ipynb`](./demo.ipynb) locally (if you have a GPU).
-5. Finally, checkout [@camenduru Colab page](https://github.com/camenduru/MusicGen-colab) which is regularly
- updated with contributions from @camenduru and the community.
-
-## API
-
-We provide a simple API and 4 pre-trained models. The pre-trained models are:
-- `small`: 300M model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-small)
-- `medium`: 1.5B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-medium)
-- `melody`: 1.5B model, text to music and text+melody to music - [🤗 Hub](https://huggingface.co/facebook/musicgen-melody)
-- `large`: 3.3B model, text to music only - [🤗 Hub](https://huggingface.co/facebook/musicgen-large)
-
-We observe the best trade-off between quality and compute with the `medium` or `melody` model.
-In order to use MusicGen locally **you must have a GPU**. We recommend 16GB of memory, but smaller
-GPUs will be able to generate short sequences, or longer sequences with the `small` model.
-
-**Note**: Please make sure to have [ffmpeg](https://ffmpeg.org/download.html) installed when using a newer version of `torchaudio`.
-You can install it with:
-```
-apt-get install ffmpeg
-```
-
-See below for a quick example of using the API.
-
-```python
-import torchaudio
-from audiocraft.models import MusicGen
-from audiocraft.data.audio import audio_write
-
-model = MusicGen.get_pretrained('melody')
-model.set_generation_params(duration=8) # generate 8 seconds.
-wav = model.generate_unconditional(4) # generates 4 unconditional audio samples
-descriptions = ['happy rock', 'energetic EDM', 'sad jazz']
-wav = model.generate(descriptions) # generates 3 samples.
-
-melody, sr = torchaudio.load('./assets/bach.mp3')
-# generates using the melody from the given audio and the provided descriptions.
-wav = model.generate_with_chroma(descriptions, melody[None].expand(3, -1, -1), sr)
-
-for idx, one_wav in enumerate(wav):
- # Will save under {idx}.wav, with loudness normalization at -14 db LUFS.
- audio_write(f'{idx}', one_wav.cpu(), model.sample_rate, strategy="loudness", loudness_compressor=True)
-```
-
-
-## Model Card
-
-See [the model card page](./MODEL_CARD.md).
-
-## FAQ
-
-#### Will the training code be released?
-
-Yes. We will soon release the training code for MusicGen and EnCodec.
-
-
-#### I need help on Windows
-
-@FurkanGozukara made a complete tutorial for [Audiocraft/MusicGen on Windows](https://youtu.be/v-YpvPkhdO4)
-
-#### I need help for running the demo on Colab
-
-Check [@camenduru tutorial on Youtube](https://www.youtube.com/watch?v=EGfxuTy9Eeo).
-
-
-## Citation
-```
-@article{copet2023simple,
- title={Simple and Controllable Music Generation},
- author={Jade Copet and Felix Kreuk and Itai Gat and Tal Remez and David Kant and Gabriel Synnaeve and Yossi Adi and Alexandre Défossez},
- year={2023},
- journal={arXiv preprint arXiv:2306.05284},
-}
-```
-
-## License
-* The code in this repository is released under the MIT license as found in the [LICENSE file](LICENSE).
-* The weights in this repository are released under the CC-BY-NC 4.0 license as found in the [LICENSE_weights file](LICENSE_weights).
-
-[arxiv]: https://arxiv.org/abs/2306.05284
-[musicgen_samples]: https://ai.honu.io/papers/musicgen/
diff --git a/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.h b/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.h
deleted file mode 100644
index c9e2032bcac9d2abde7a75eea4d812da348afadd..0000000000000000000000000000000000000000
--- a/spaces/Yuliang/ECON/lib/torch_utils/ops/upfirdn2d.h
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-//
-// NVIDIA CORPORATION and its licensors retain all intellectual property
-// and proprietary rights in and to this software, related documentation
-// and any modifications thereto. Any use, reproduction, disclosure or
-// distribution of this software and related documentation without an express
-// license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-#include <cuda_runtime.h>
-
-//------------------------------------------------------------------------
-// CUDA kernel parameters.
-
-struct upfirdn2d_kernel_params
-{
- const void* x;
- const float* f;
- void* y;
-
- int2 up;
- int2 down;
- int2 pad0;
- int flip;
- float gain;
-
- int4 inSize; // [width, height, channel, batch]
- int4 inStride;
- int2 filterSize; // [width, height]
- int2 filterStride;
- int4 outSize; // [width, height, channel, batch]
- int4 outStride;
- int sizeMinor;
- int sizeMajor;
-
- int loopMinor;
- int loopMajor;
- int loopX;
- int launchMinor;
- int launchMajor;
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel specialization.
-
-struct upfirdn2d_kernel_spec
-{
- void* kernel;
- int tileOutW;
- int tileOutH;
- int loopMinor;
- int loopX;
-};
-
-//------------------------------------------------------------------------
-// CUDA kernel selection.
-
-template <class T> upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params& p);
-
-//------------------------------------------------------------------------
diff --git a/spaces/Zannriell/hakurei-waifu-diffusion/app.py b/spaces/Zannriell/hakurei-waifu-diffusion/app.py
deleted file mode 100644
index ccef706bf3035fe470bf6a4f5bd701b18bf59133..0000000000000000000000000000000000000000
--- a/spaces/Zannriell/hakurei-waifu-diffusion/app.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/hakurei/waifu-diffusion").launch()
\ No newline at end of file
diff --git a/spaces/abdvl/datahub_qa_bot/docs/advanced/backfilling.md b/spaces/abdvl/datahub_qa_bot/docs/advanced/backfilling.md
deleted file mode 100644
index 91e2a31256c7ef22e4e3ab8bac0243792bc1d1b7..0000000000000000000000000000000000000000
--- a/spaces/abdvl/datahub_qa_bot/docs/advanced/backfilling.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Backfilling Search Index & Graph DB
-
-WIP
diff --git a/spaces/abhishek-kumar/ChatGPT4/README.md b/spaces/abhishek-kumar/ChatGPT4/README.md
deleted file mode 100644
index 683432fd73f0df0790cb2225b881e5900c237d91..0000000000000000000000000000000000000000
--- a/spaces/abhishek-kumar/ChatGPT4/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: ChatGPT4
-emoji: 👀
-colorFrom: green
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.21.0
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/danet_r50-d8.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
deleted file mode 100644
index 2c934939fac48525f22ad86f489a041dd7db7d09..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# model settings
-norm_cfg = dict(type='SyncBN', requires_grad=True)
-model = dict(
- type='EncoderDecoder',
- pretrained='open-mmlab://resnet50_v1c',
- backbone=dict(
- type='ResNetV1c',
- depth=50,
- num_stages=4,
- out_indices=(0, 1, 2, 3),
- dilations=(1, 1, 2, 4),
- strides=(1, 2, 1, 1),
- norm_cfg=norm_cfg,
- norm_eval=False,
- style='pytorch',
- contract_dilation=True),
- decode_head=dict(
- type='DAHead',
- in_channels=2048,
- in_index=3,
- channels=512,
- pam_channels=64,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
- auxiliary_head=dict(
- type='FCNHead',
- in_channels=1024,
- in_index=2,
- channels=256,
- num_convs=1,
- concat_input=False,
- dropout_ratio=0.1,
- num_classes=19,
- norm_cfg=norm_cfg,
- align_corners=False,
- loss_decode=dict(
- type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
- # model training and testing settings
- train_cfg=dict(),
- test_cfg=dict(mode='whole'))
diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py b/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py
deleted file mode 100644
index b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa..0000000000000000000000000000000000000000
--- a/spaces/abhishek/sketch-to-image/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# Copyright (c) OpenMMLab. All rights reserved.
-import pickle
-
-from .base import BaseFileHandler
-
-
-class PickleHandler(BaseFileHandler):
-
- str_like = False
-
- def load_from_fileobj(self, file, **kwargs):
- return pickle.load(file, **kwargs)
-
- def load_from_path(self, filepath, **kwargs):
- return super(PickleHandler, self).load_from_path(
- filepath, mode='rb', **kwargs)
-
- def dump_to_str(self, obj, **kwargs):
- kwargs.setdefault('protocol', 2)
- return pickle.dumps(obj, **kwargs)
-
- def dump_to_fileobj(self, obj, file, **kwargs):
- kwargs.setdefault('protocol', 2)
- pickle.dump(obj, file, **kwargs)
-
- def dump_to_path(self, obj, filepath, **kwargs):
- super(PickleHandler, self).dump_to_path(
- obj, filepath, mode='wb', **kwargs)
diff --git a/spaces/abidlabs/Draw/README.md b/spaces/abidlabs/Draw/README.md
deleted file mode 100644
index 02bd5a05da58c8ba7f81964f66572b5f2f39a09f..0000000000000000000000000000000000000000
--- a/spaces/abidlabs/Draw/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-title: Quickdraw
-emoji: 💻
-colorFrom: blue
-colorTo: green
-sdk: gradio
-app_file: app.py
-pinned: false
----
-
-# Configuration
-
-`title`: _string_
-Display title for the Space
-
-`emoji`: _string_
-Space emoji (emoji-only character allowed)
-
-`colorFrom`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`colorTo`: _string_
-Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
-`sdk`: _string_
-Can be either `gradio` or `streamlit`
-
-`sdk_version`: _string_
-Only applicable for `streamlit` SDK.
-See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
-`app_file`: _string_
-Path to your main application file (which contains either `gradio` or `streamlit` Python code).
-Path is relative to the root of the repository.
-
-`pinned`: _boolean_
-Whether the Space stays on top of your list.
diff --git a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/gdkpixbuf2.py b/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/gdkpixbuf2.py
deleted file mode 100644
index 8b88a2c24d063e544bf4858e1dab277a9bbbb4e0..0000000000000000000000000000000000000000
--- a/spaces/abrar-lohia/text-2-character-anim/pyrender/.eggs/pyglet-2.0.5-py3.10.egg/pyglet/image/codecs/gdkpixbuf2.py
+++ /dev/null
@@ -1,297 +0,0 @@
-from ctypes import *
-
-from pyglet.gl import *
-from pyglet.image import *
-from pyglet.image.codecs import *
-from pyglet.image.codecs import gif
-
-import pyglet.lib
-import pyglet.window
-
-gdk = pyglet.lib.load_library('gdk-x11-2.0')
-gdkpixbuf = pyglet.lib.load_library('gdk_pixbuf-2.0')
-
-GdkPixbufLoader = c_void_p
-GdkPixbuf = c_void_p
-guchar = c_char
-gdkpixbuf.gdk_pixbuf_loader_new.restype = POINTER(GdkPixbufLoader)
-gdkpixbuf.gdk_pixbuf_loader_get_pixbuf.restype = POINTER(GdkPixbuf)
-gdkpixbuf.gdk_pixbuf_get_pixels.restype = POINTER(guchar)
-gdkpixbuf.gdk_pixbuf_loader_get_animation.restype = POINTER(c_void_p)
-gdkpixbuf.gdk_pixbuf_animation_get_iter.restype = POINTER(c_void_p)
-gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf.restype = POINTER(GdkPixbuf)
-
-
-class GTimeVal(Structure):
- _fields_ = [
- ('tv_sec', c_long),
- ('tv_usec', c_long)
- ]
-
-
-GQuark = c_uint32
-gint = c_int
-gchar = c_char
-
-
-class GError(Structure):
- _fields_ = [
- ('domain', GQuark),
- ('code', gint),
- ('message', POINTER(gchar))
- ]
-
-gerror_ptr = POINTER(GError)
-
-def _gerror_to_string(error):
- """
- Convert a GError to a string.
- `error` should be a valid pointer to a GError struct.
- """
- return 'GdkPixBuf Error: domain[{}], code[{}]: {}'.format(error.contents.domain,
- error.contents.code,
- error.contents.message)
-
-
-class GdkPixBufLoader:
- """
- Wrapper around GdkPixBufLoader object.
- """
- def __init__(self, filename, file):
- self.closed = False
- self._file = file
- self._filename = filename
- self._loader = gdkpixbuf.gdk_pixbuf_loader_new()
- if self._loader is None:
- raise ImageDecodeException('Unable to instantiate gdk pixbuf loader')
- self._load_file()
-
- def __del__(self):
- if self._loader is not None:
- if not self.closed:
- self._cancel_load()
- gdk.g_object_unref(self._loader)
-
- def _load_file(self):
- self._file.seek(0)
- data = self._file.read()
- self.write(data)
-
- def _finish_load(self):
- assert not self.closed
- error = gerror_ptr()
- all_data_passed = gdkpixbuf.gdk_pixbuf_loader_close(self._loader, byref(error))
- self.closed = True
- if not all_data_passed:
- raise ImageDecodeException(_gerror_to_string(error))
-
- def _cancel_load(self):
- assert not self.closed
- gdkpixbuf.gdk_pixbuf_loader_close(self._loader, None)
- self.closed = True
-
- def write(self, data):
- assert not self.closed, 'Cannot write after closing loader'
- error = gerror_ptr()
- if not gdkpixbuf.gdk_pixbuf_loader_write(self._loader, data, len(data), byref(error)):
- raise ImageDecodeException(_gerror_to_string(error))
-
- def get_pixbuf(self):
- self._finish_load()
- pixbuf = gdkpixbuf.gdk_pixbuf_loader_get_pixbuf(self._loader)
- if pixbuf is None:
- raise ImageDecodeException('Failed to get pixbuf from loader')
- return GdkPixBuf(self, pixbuf)
-
- def get_animation(self):
- self._finish_load()
- anim = gdkpixbuf.gdk_pixbuf_loader_get_animation(self._loader)
- if anim is None:
- raise ImageDecodeException('Failed to get animation from loader')
- gif_delays = self._get_gif_delays()
- return GdkPixBufAnimation(self, anim, gif_delays)
-
- def _get_gif_delays(self):
- # GDK pixbuf animations will loop indefinitely if looping is enabled for the
- # gif, so get number of frames and delays from gif metadata
- assert self._file is not None
- self._file.seek(0)
- gif_stream = gif.read(self._file)
- return [image.delay for image in gif_stream.images]
-
-
-class GdkPixBuf:
- """
- Wrapper around GdkPixBuf object.
- """
- def __init__(self, loader, pixbuf):
- # Keep reference to loader alive
- self._loader = loader
- self._pixbuf = pixbuf
- gdk.g_object_ref(pixbuf)
-
- def __del__(self):
- if self._pixbuf is not None:
- gdk.g_object_unref(self._pixbuf)
-
- def load_next(self):
- return self._pixbuf is not None
-
- @property
- def width(self):
- assert self._pixbuf is not None
- return gdkpixbuf.gdk_pixbuf_get_width(self._pixbuf)
-
- @property
- def height(self):
- assert self._pixbuf is not None
- return gdkpixbuf.gdk_pixbuf_get_height(self._pixbuf)
-
- @property
- def channels(self):
- assert self._pixbuf is not None
- return gdkpixbuf.gdk_pixbuf_get_n_channels(self._pixbuf)
-
- @property
- def rowstride(self):
- assert self._pixbuf is not None
- return gdkpixbuf.gdk_pixbuf_get_rowstride(self._pixbuf)
-
- @property
- def has_alpha(self):
- assert self._pixbuf is not None
- return gdkpixbuf.gdk_pixbuf_get_has_alpha(self._pixbuf) == 1
-
- def get_pixels(self):
- pixels = gdkpixbuf.gdk_pixbuf_get_pixels(self._pixbuf)
- assert pixels is not None
- buf = (c_ubyte * (self.rowstride * self.height))()
- memmove(buf, pixels, self.rowstride * (self.height - 1) + self.width * self.channels)
- return buf
-
- def to_image(self):
- if self.width < 1 or self.height < 1 or self.channels < 1 or self.rowstride < 1:
- return None
-
- pixels = self.get_pixels()
-
- # Determine appropriate GL type
- if self.channels == 3:
- format = 'RGB'
- else:
- format = 'RGBA'
-
- return ImageData(self.width, self.height, format, pixels, -self.rowstride)
-
-
-class GdkPixBufAnimation:
- """
- Wrapper for a GdkPixBufIter for an animation.
- """
- def __init__(self, loader, anim, gif_delays):
- self._loader = loader
- self._anim = anim
- self._gif_delays = gif_delays
- gdk.g_object_ref(anim)
-
- def __del__(self):
- if self._anim is not None:
- gdk.g_object_unref(self._anim)
-
- def __iter__(self):
- time = GTimeVal(0, 0)
- anim_iter = gdkpixbuf.gdk_pixbuf_animation_get_iter(self._anim, byref(time))
- return GdkPixBufAnimationIterator(self._loader, anim_iter, time, self._gif_delays)
-
- def to_animation(self):
- return Animation(list(self))
-
-
-class GdkPixBufAnimationIterator:
- def __init__(self, loader, anim_iter, start_time, gif_delays):
- self._iter = anim_iter
- self._first = True
- self._time = start_time
- self._loader = loader
- self._gif_delays = gif_delays
- self.delay_time = None
-
- def __del__(self):
- if self._iter is not None:
- gdk.g_object_unref(self._iter)
- # The pixbuf returned by the iter is owned by the iter, so no need to destroy that one
-
- def __iter__(self):
- return self
-
- def __next__(self):
- self._advance()
- frame = self.get_frame()
- if frame is None:
- raise StopIteration
- return frame
-
- def _advance(self):
- if not self._gif_delays:
- raise StopIteration
- self.delay_time = self._gif_delays.pop(0)
-
- if self._first:
- self._first = False
- else:
- if self.gdk_delay_time == -1:
- raise StopIteration
- else:
- gdk_delay = self.gdk_delay_time * 1000 # milliseconds to microseconds
- us = self._time.tv_usec + gdk_delay
- self._time.tv_sec += us // 1000000
- self._time.tv_usec = us % 1000000
- gdkpixbuf.gdk_pixbuf_animation_iter_advance(self._iter, byref(self._time))
-
- def get_frame(self):
- pixbuf = gdkpixbuf.gdk_pixbuf_animation_iter_get_pixbuf(self._iter)
- if pixbuf is None:
- return None
- image = GdkPixBuf(self._loader, pixbuf).to_image()
- return AnimationFrame(image, self.delay_time)
-
- @property
- def gdk_delay_time(self):
- assert self._iter is not None
- return gdkpixbuf.gdk_pixbuf_animation_iter_get_delay_time(self._iter)
-
-
-class GdkPixbuf2ImageDecoder(ImageDecoder):
- def get_file_extensions(self):
- return ['.png', '.xpm', '.jpg', '.jpeg', '.tif', '.tiff', '.pnm',
- '.ras', '.bmp', '.gif']
-
- def get_animation_file_extensions(self):
- return ['.gif', '.ani']
-
- def decode(self, filename, file):
- if not file:
- file = open(filename, 'rb')
- loader = GdkPixBufLoader(filename, file)
- return loader.get_pixbuf().to_image()
-
- def decode_animation(self, filename, file):
- if not file:
- file = open(filename, 'rb')
- loader = GdkPixBufLoader(filename, file)
- return loader.get_animation().to_animation()
-
-
-def get_decoders():
- return [GdkPixbuf2ImageDecoder()]
-
-
-def get_encoders():
- return []
-
-
-def init():
- gdk.g_type_init()
-
-
-init()
diff --git a/spaces/akhaliq/Mask2Former/mask2former/utils/__init__.py b/spaces/akhaliq/Mask2Former/mask2former/utils/__init__.py
deleted file mode 100644
index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/Mask2Former/mask2former/utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
diff --git a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/parallel/__init__.py b/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/parallel/__init__.py
deleted file mode 100644
index 9b52f49cc0755562218a460483cbf02514ddd773..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/lama/models/ade20k/segm_lib/nn/parallel/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .data_parallel import UserScatteredDataParallel, user_scattered_collate, async_copy_to
diff --git a/spaces/akhaliq/neural-waveshaping-synthesis/neural_waveshaping_synthesis/models/modules/shaping.py b/spaces/akhaliq/neural-waveshaping-synthesis/neural_waveshaping_synthesis/models/modules/shaping.py
deleted file mode 100644
index de04c9e25c029f40f8d976d1af0571e243eca00a..0000000000000000000000000000000000000000
--- a/spaces/akhaliq/neural-waveshaping-synthesis/neural_waveshaping_synthesis/models/modules/shaping.py
+++ /dev/null
@@ -1,173 +0,0 @@
-import gin
-import torch
-import torch.fft
-import torch.nn as nn
-import torch.nn.functional as F
-
-from .dynamic import FiLM, TimeDistributedMLP
-
-
-class Sine(nn.Module):
- def forward(self, x: torch.Tensor):
- return torch.sin(x)
-
-
-@gin.configurable
-class TrainableNonlinearity(nn.Module):
- def __init__(
- self, channels, width, nonlinearity=nn.ReLU, final_nonlinearity=Sine, depth=3
- ):
- super().__init__()
- self.input_scale = nn.Parameter(torch.randn(1, channels, 1) * 10)
- layers = []
- for i in range(depth):
- layers.append(
- nn.Conv1d(
- channels if i == 0 else channels * width,
- channels * width if i < depth - 1 else channels,
- 1,
- groups=channels,
- )
- )
- layers.append(nonlinearity() if i < depth - 1 else final_nonlinearity())
-
- self.net = nn.Sequential(*layers)
-
- def forward(self, x):
- return self.net(self.input_scale * x)
-
-
-@gin.configurable
-class NEWT(nn.Module):
- def __init__(
- self,
- n_waveshapers: int,
- control_embedding_size: int,
- shaping_fn_size: int = 16,
- out_channels: int = 1,
- ):
- super().__init__()
-
- self.n_waveshapers = n_waveshapers
-
- self.mlp = TimeDistributedMLP(
- control_embedding_size, control_embedding_size, n_waveshapers * 4, depth=4
- )
-
- self.waveshaping_index = FiLM()
- self.shaping_fn = TrainableNonlinearity(
- n_waveshapers, shaping_fn_size, nonlinearity=Sine
- )
- self.normalising_coeff = FiLM()
-
- self.mixer = nn.Sequential(
- nn.Conv1d(n_waveshapers, out_channels, 1),
- )
-
- def forward(self, exciter, control_embedding):
- film_params = self.mlp(control_embedding)
- film_params = F.upsample(film_params, exciter.shape[-1], mode="linear")
- gamma_index, beta_index, gamma_norm, beta_norm = torch.split(
- film_params, self.n_waveshapers, 1
- )
-
- x = self.waveshaping_index(exciter, gamma_index, beta_index)
- x = self.shaping_fn(x)
- x = self.normalising_coeff(x, gamma_norm, beta_norm)
-
- # return x
- return self.mixer(x)
-
-
-class FastNEWT(NEWT):
- def __init__(
- self,
- newt: NEWT,
- table_size: int = 4096,
- table_min: float = -3.0,
- table_max: float = 3.0,
- ):
- super().__init__()
- self.table_size = table_size
- self.table_min = table_min
- self.table_max = table_max
-
- self.n_waveshapers = newt.n_waveshapers
- self.mlp = newt.mlp
-
- self.waveshaping_index = newt.waveshaping_index
- self.normalising_coeff = newt.normalising_coeff
- self.mixer = newt.mixer
-
- self.lookup_table = self._init_lookup_table(
- newt, table_size, self.n_waveshapers, table_min, table_max
- )
- self.to(next(iter(newt.parameters())).device)
-
- def _init_lookup_table(
- self,
- newt: NEWT,
- table_size: int,
- n_waveshapers: int,
- table_min: float,
- table_max: float,
- ):
- sample_values = torch.linspace(table_min, table_max, table_size, device=next(iter(newt.parameters())).device).expand(
- 1, n_waveshapers, table_size
- )
- lookup_table = newt.shaping_fn(sample_values)[0]
- return nn.Parameter(lookup_table)
-
- def _lookup(self, idx):
- return torch.stack(
- [
- torch.stack(
- [
- self.lookup_table[shaper, idx[batch, shaper]]
- for shaper in range(idx.shape[1])
- ],
- dim=0,
- )
- for batch in range(idx.shape[0])
- ],
- dim=0,
- )
-
- def shaping_fn(self, x):
- idx = self.table_size * (x - self.table_min) / (self.table_max - self.table_min)
-
- lower = torch.floor(idx).long()
- lower[lower < 0] = 0
- lower[lower >= self.table_size] = self.table_size - 1
-
- upper = lower + 1
- upper[upper >= self.table_size] = self.table_size - 1
-
- fract = idx - lower
- lower_v = self._lookup(lower)
- upper_v = self._lookup(upper)
-
- output = (upper_v - lower_v) * fract + lower_v
- return output
-
-
-@gin.configurable
-class Reverb(nn.Module):
- def __init__(self, length_in_seconds, sr):
- super().__init__()
- self.ir = nn.Parameter(torch.randn(1, sr * length_in_seconds - 1) * 1e-6)
- self.register_buffer("initial_zero", torch.zeros(1, 1))
-
- def forward(self, x):
- ir_ = torch.cat((self.initial_zero, self.ir), dim=-1)
- if x.shape[-1] > ir_.shape[-1]:
- ir_ = F.pad(ir_, (0, x.shape[-1] - ir_.shape[-1]))
- x_ = x
- else:
- x_ = F.pad(x, (0, ir_.shape[-1] - x.shape[-1]))
- return (
- x
- + torch.fft.irfft(torch.fft.rfft(x_) * torch.fft.rfft(ir_))[
- ..., : x.shape[-1]
- ]
- )
diff --git a/spaces/alex-mindspace/gpt-agents/swarmai/utils/task_queue/TaskQueueBase.py b/spaces/alex-mindspace/gpt-agents/swarmai/utils/task_queue/TaskQueueBase.py
deleted file mode 100644
index 5d9e80945f4631a8923c71734c1a8519e39271e3..0000000000000000000000000000000000000000
--- a/spaces/alex-mindspace/gpt-agents/swarmai/utils/task_queue/TaskQueueBase.py
+++ /dev/null
@@ -1,58 +0,0 @@
-import threading
-from abc import ABC, abstractmethod
-
-from swarmai.utils.task_queue.Task import Task
-from swarmai.agents.AgentBase import AgentBase
-
-def synchronized_queue(method):
-    timeout_sec = 5
-    def wrapper(self, *args, **kwargs):
-        # acquire the lock directly with a timeout; nesting `with self.lock:` around a second
-        # acquire() would block on the non-reentrant lock and then release it twice
-        acquired = self.lock.acquire(timeout=timeout_sec)
-        try:
-            return method(self, *args, **kwargs)
-        except Exception as e:
-            print(f"Failed to execute {method.__name__}: {e}")
-        finally:
-            if acquired:
-                self.lock.release()
-    return wrapper
-
-
-class TaskQueueBase(ABC):
- """Abstract class for the Task Queue object.
-    We can have different implementations of the task queue: from a simple FIFO queue to a custom priority queue.
-    Not every implementation is inherently thread safe, so we also put the locks here.
-
-    This is a pull-based queue, chosen for ease of implementation.
- """
- def __init__(self):
- self.lock = threading.Lock()
-
- @synchronized_queue
- @abstractmethod
-    def add_task(self, task: Task) -> bool:
- """Adds a task to the queue.
- """
- raise NotImplementedError
-
- @synchronized_queue
- @abstractmethod
- def get_task(self, agent: AgentBase) -> Task:
- """Gets the next task from the queue.
- """
- raise NotImplementedError
-
- @synchronized_queue
- @abstractmethod
- def complete_task(self, task_id: str):
- """Sets the task as completed.
- """
- raise NotImplementedError
-
- @synchronized_queue
- @abstractmethod
- def reset_task(self, task_id: str):
- """Resets the task if the agent failed to complete it.
- """
- raise NotImplementedError
-
diff --git a/spaces/allknowingroger/Image-Models-Test52/app.py b/spaces/allknowingroger/Image-Models-Test52/app.py
deleted file mode 100644
index 75f847839f9aed9cbbeb6489e001c18b48182c40..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test52/app.py
+++ /dev/null
@@ -1,144 +0,0 @@
-import gradio as gr
-# import os
-# import sys
-# from pathlib import Path
-import time
-
-models =[
- "RadonDong/LUSC_white_formalin",
- "aff1/pichanabooth",
- "Yntec/QToriReloaded",
- "sbrandeis/stable-diffusion-laplagne-2",
- "digiplay/LuckyStrikeMix1.05_Lovelylady",
- "digiplay/fantasticmix_v30_test",
- "oljike/nurtas_db_lora",
- "LinoyTsaban/web_y2k_v1",
- "juliajoanna/lora-trained-xl-fred-155",
-]
-
-
-model_functions = {}
-model_idx = 1
-for model_path in models:
- try:
- model_functions[model_idx] = gr.Interface.load(f"models/{model_path}", live=False, preprocess=True, postprocess=False)
- except Exception as error:
- def the_fn(txt):
- return None
- model_functions[model_idx] = gr.Interface(fn=the_fn, inputs=["text"], outputs=["image"])
- model_idx+=1
-
-
-def send_it_idx(idx):
- def send_it_fn(prompt):
- output = (model_functions.get(str(idx)) or model_functions.get(str(1)))(prompt)
- return output
- return send_it_fn
-
-def get_prompts(prompt_text):
- return prompt_text
-
-def clear_it(val):
- if int(val) != 0:
- val = 0
- else:
- val = 0
- pass
- return val
-
-def all_task_end(cnt,t_stamp):
- to = t_stamp + 60
- et = time.time()
- if et > to and t_stamp != 0:
- d = gr.update(value=0)
- tog = gr.update(value=1)
- #print(f'to: {to} et: {et}')
- else:
- if cnt != 0:
- d = gr.update(value=et)
- else:
- d = gr.update(value=0)
- tog = gr.update(value=0)
- #print (f'passing: to: {to} et: {et}')
- pass
- return d, tog
-
-def all_task_start():
- print("\n\n\n\n\n\n\n")
- t = time.gmtime()
- t_stamp = time.time()
- current_time = time.strftime("%H:%M:%S", t)
- return gr.update(value=t_stamp), gr.update(value=t_stamp), gr.update(value=0)
-
-def clear_fn():
- nn = len(models)
- return tuple([None, *[None for _ in range(nn)]])
-
-
-
-with gr.Blocks(title="SD Models") as my_interface:
- with gr.Column(scale=12):
- # with gr.Row():
-        # gr.Markdown("""- Primary prompt: what you want to draw (English words such as "a cat"; adding commas between terms helps; click the Improve button to refine it)\n- Real prompt: the refined prompt; once it appears, click the Run button on the right to start""")
- with gr.Row():
- with gr.Row(scale=6):
- primary_prompt=gr.Textbox(label="Prompt", value="")
- # real_prompt=gr.Textbox(label="Real prompt")
- with gr.Row(scale=6):
- # improve_prompts_btn=gr.Button("Improve")
- with gr.Row():
- run=gr.Button("Run",variant="primary")
- clear_btn=gr.Button("Clear")
- with gr.Row():
- sd_outputs = {}
- model_idx = 1
- for model_path in models:
- with gr.Column(scale=3, min_width=320):
- with gr.Box():
- sd_outputs[model_idx] = gr.Image(label=model_path)
- pass
- model_idx += 1
- pass
- pass
-
- with gr.Row(visible=False):
- start_box=gr.Number(interactive=False)
- end_box=gr.Number(interactive=False)
- tog_box=gr.Textbox(value=0,interactive=False)
-
- start_box.change(
- all_task_end,
- [start_box, end_box],
- [start_box, tog_box],
- every=1,
- show_progress=False)
-
- primary_prompt.submit(all_task_start, None, [start_box, end_box, tog_box])
- run.click(all_task_start, None, [start_box, end_box, tog_box])
- runs_dict = {}
- model_idx = 1
- for model_path in models:
- runs_dict[model_idx] = run.click(model_functions[model_idx], inputs=[primary_prompt], outputs=[sd_outputs[model_idx]])
- model_idx += 1
- pass
- pass
-
- # improve_prompts_btn_clicked=improve_prompts_btn.click(
- # get_prompts,
- # inputs=[primary_prompt],
- # outputs=[primary_prompt],
- # cancels=list(runs_dict.values()))
- clear_btn.click(
- clear_fn,
- None,
- [primary_prompt, *list(sd_outputs.values())],
- cancels=[*list(runs_dict.values())])
- tog_box.change(
- clear_it,
- tog_box,
- tog_box,
- cancels=[*list(runs_dict.values())])
-
-my_interface.queue(concurrency_count=600, status_update_rate=1)
-my_interface.launch(inline=True, show_api=False)
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/Image-Models-Test70/README.md b/spaces/allknowingroger/Image-Models-Test70/README.md
deleted file mode 100644
index dd6d153c1b4300376a1ad0030f2318f2369781df..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/Image-Models-Test70/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: More Image Models
-emoji: 😻
-colorFrom: red
-colorTo: gray
-sdk: gradio
-sdk_version: 3.23.0
-app_file: app.py
-pinned: true
-duplicated_from: allknowingroger/Image-Models-Test69
----
-
-
\ No newline at end of file
diff --git a/spaces/allknowingroger/text-generation-webui-space-1/modules/ui.py b/spaces/allknowingroger/text-generation-webui-space-1/modules/ui.py
deleted file mode 100644
index bb193e35c11b2a3d474ea89e7567206a3343395a..0000000000000000000000000000000000000000
--- a/spaces/allknowingroger/text-generation-webui-space-1/modules/ui.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import gradio as gr
-
-refresh_symbol = '\U0001f504' # 🔄
-
-css = """
-.tabs.svelte-710i53 {
- margin-top: 0
-}
-.py-6 {
- padding-top: 2.5rem
-}
-.dark #refresh-button {
- background-color: #ffffff1f;
-}
-#refresh-button {
- flex: none;
- margin: 0;
- padding: 0;
- min-width: 50px;
- border: none;
- box-shadow: none;
- border-radius: 10px;
- background-color: #0000000d;
-}
-#download-label, #upload-label {
- min-height: 0
-}
-#accordion {
-}
-.dark svg {
- fill: white;
-}
-svg {
- display: unset !important;
- vertical-align: middle !important;
- margin: 5px;
-}
-ol li p, ul li p {
- display: inline-block;
-}
-"""
-
-chat_css = """
-.h-\[40vh\], .wrap.svelte-byatnx.svelte-byatnx.svelte-byatnx {
- height: 66.67vh
-}
-.gradio-container {
- max-width: 800px !important;
- margin-left: auto !important;
- margin-right: auto !important;
-}
-.w-screen {
- width: unset
-}
-div.svelte-362y77>*, div.svelte-362y77>.form>* {
- flex-wrap: nowrap
-}
-/* fixes the API documentation in chat mode */
-.api-docs.svelte-1iguv9h.svelte-1iguv9h.svelte-1iguv9h {
- display: grid;
-}
-.pending.svelte-1ed2p3z {
- opacity: 1;
-}
-"""
-
-class ToolButton(gr.Button, gr.components.FormComponent):
- """Small button with single emoji as text, fits inside gradio forms"""
-
- def __init__(self, **kwargs):
- super().__init__(variant="tool", **kwargs)
-
- def get_block_name(self):
- return "button"
-
-def create_refresh_button(refresh_component, refresh_method, refreshed_args, elem_id):
- def refresh():
- refresh_method()
- args = refreshed_args() if callable(refreshed_args) else refreshed_args
-
- for k, v in args.items():
- setattr(refresh_component, k, v)
-
- return gr.update(**(args or {}))
-
- refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id)
- refresh_button.click(
- fn=refresh,
- inputs=[],
- outputs=[refresh_component]
- )
- return refresh_button
diff --git a/spaces/aodianyun/stable-diffusion-webui/run.py b/spaces/aodianyun/stable-diffusion-webui/run.py
deleted file mode 100644
index f7788fb90a3dfa7bcb50d0e8bab83f3a6eaa65fc..0000000000000000000000000000000000000000
--- a/spaces/aodianyun/stable-diffusion-webui/run.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# import os
-
-# if not os.path.exists("extensions/deforum"):
-# exec(open("deforum.sh").read())
-
-#exec(open("run.sh").read())
diff --git a/spaces/arbml/Ashaar/poetry_diacritizer/models/baseline.py b/spaces/arbml/Ashaar/poetry_diacritizer/models/baseline.py
deleted file mode 100644
index 1b1e2c6ccb2160e394ecde108020689d7cf30290..0000000000000000000000000000000000000000
--- a/spaces/arbml/Ashaar/poetry_diacritizer/models/baseline.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from typing import List
-from torch import nn
-import torch
-
-
-class BaseLineModel(nn.Module):
- def __init__(
- self,
- inp_vocab_size: int,
- targ_vocab_size: int,
- embedding_dim: int = 512,
- layers_units: List[int] = [256, 256, 256],
- use_batch_norm: bool = False,
- ):
- super().__init__()
- self.targ_vocab_size = targ_vocab_size
- self.embedding = nn.Embedding(inp_vocab_size, embedding_dim)
-
- layers_units = [embedding_dim // 2] + layers_units
-
- layers = []
-
- for i in range(1, len(layers_units)):
- layers.append(
- nn.LSTM(
- layers_units[i - 1] * 2,
- layers_units[i],
- bidirectional=True,
- batch_first=True,
- )
- )
- if use_batch_norm:
- layers.append(nn.BatchNorm1d(layers_units[i] * 2))
-
- self.layers = nn.ModuleList(layers)
- self.projections = nn.Linear(layers_units[-1] * 2, targ_vocab_size)
- self.layers_units = layers_units
- self.use_batch_norm = use_batch_norm
-
- def forward(self, src: torch.Tensor, lengths: torch.Tensor, target=None):
-
- outputs = self.embedding(src)
-
- # embedded_inputs = [batch_size, src_len, embedding_dim]
-
- for i, layer in enumerate(self.layers):
- if isinstance(layer, nn.BatchNorm1d):
- outputs = layer(outputs.permute(0, 2, 1))
- outputs = outputs.permute(0, 2, 1)
- continue
- if i > 0:
- outputs, (hn, cn) = layer(outputs, (hn, cn))
- else:
- outputs, (hn, cn) = layer(outputs)
-
- predictions = self.projections(outputs)
-
- output = {"diacritics": predictions}
-
- return output
diff --git a/spaces/arbml/Ashaar/poetry_diacritizer/models/seq2seq.py b/spaces/arbml/Ashaar/poetry_diacritizer/models/seq2seq.py
deleted file mode 100644
index 5fcf05c5cfb6087d90601b246a1235c47ded6903..0000000000000000000000000000000000000000
--- a/spaces/arbml/Ashaar/poetry_diacritizer/models/seq2seq.py
+++ /dev/null
@@ -1,277 +0,0 @@
-from typing import List, Optional
-
-import torch
-from torch import nn
-from torch.autograd import Variable
-
-from poetry_diacritizer.modules.attention import AttentionWrapper
-from poetry_diacritizer.modules.layers import ConvNorm
-from poetry_diacritizer.modules.tacotron_modules import CBHG, Prenet
-from poetry_diacritizer.options import AttentionType
-from poetry_diacritizer.util.utils import get_mask_from_lengths
-
-
-class Seq2Seq(nn.Module):
- def __init__(self, encoder: nn.Module, decoder: nn.Module):
- super().__init__()
- # Trying smaller std
- self.encoder = encoder
- self.decoder = decoder
-
- def forward(
- self,
- src: torch.Tensor,
- lengths: torch.Tensor,
- target: Optional[torch.Tensor] = None,
- ):
-
- encoder_outputs = self.encoder(src, lengths)
- mask = get_mask_from_lengths(encoder_outputs, lengths)
- outputs, alignments = self.decoder(encoder_outputs, target, mask)
-
- output = {"diacritics": outputs, "attention": alignments}
-
- return output
-
-
-class Encoder(nn.Module):
- def __init__(
- self,
- inp_vocab_size: int,
- embedding_dim: int = 512,
- layers_units: List[int] = [256, 256, 256],
- use_batch_norm: bool = False,
- ):
- super().__init__()
- self.embedding = nn.Embedding(inp_vocab_size, embedding_dim)
-
- layers_units = [embedding_dim // 2] + layers_units
-
- layers = []
-
- for i in range(1, len(layers_units)):
- layers.append(
- nn.LSTM(
- layers_units[i - 1] * 2,
- layers_units[i],
- bidirectional=True,
- batch_first=True,
- )
- )
- if use_batch_norm:
- layers.append(nn.BatchNorm1d(layers_units[i] * 2))
-
- self.layers = nn.ModuleList(layers)
- self.layers_units = layers_units
- self.use_batch_norm = use_batch_norm
-
- def forward(self, inputs: torch.Tensor, inputs_lengths: torch.Tensor):
-
- outputs = self.embedding(inputs)
-
- # embedded_inputs = [batch_size, src_len, embedding_dim]
-
- for i, layer in enumerate(self.layers):
- if isinstance(layer, nn.BatchNorm1d):
- outputs = layer(outputs.permute(0, 2, 1))
- outputs = outputs.permute(0, 2, 1)
- continue
- if i > 0:
- outputs, (hn, cn) = layer(outputs, (hn, cn))
- else:
- outputs, (hn, cn) = layer(outputs)
-
- return outputs
-
-class Decoder(nn.Module):
-    """A seq2seq decoder that decodes one diacritic at a time.
- Args:
- encoder_dim (int): the encoder output dim
- decoder_units (int): the number of neurons for each decoder layer
- decoder_layers (int): number of decoder layers
- """
-
- def __init__(
- self,
- trg_vocab_size: int,
- start_symbol_id: int,
- encoder_dim: int = 256,
- embedding_dim: int = 256,
- decoder_units: int = 256,
- decoder_layers: int = 2,
- attention_units: int = 256,
- attention_type: AttentionType = AttentionType.LocationSensitive,
- is_attention_accumulative: bool = False,
- prenet_depth: List[int] = [256, 128],
- use_prenet: bool = True,
- teacher_forcing_probability: float = 0.0,
- ):
- super().__init__()
-
- self.output_dim: int = trg_vocab_size
- self.start_symbol_id = start_symbol_id
- self.attention_units = attention_units
- self.decoder_units = decoder_units
- self.encoder_dim = encoder_dim
- self.use_prenet = use_prenet
- self.teacher_forcing_probability = teacher_forcing_probability
- self.is_attention_accumulative = is_attention_accumulative
- self.embbeding = nn.Embedding(trg_vocab_size, embedding_dim, padding_idx=0)
- attention_in = embedding_dim
- if use_prenet:
- self.prenet = Prenet(embedding_dim, prenet_depth)
- attention_in = prenet_depth[-1]
-
- self.attention_layer = nn.GRUCell(encoder_dim + attention_in, attention_units)
- self.attention_wrapper = AttentionWrapper(attention_type, attention_units)
- self.keys_layer = nn.Linear(encoder_dim, attention_units, bias=False)
- self.project_to_decoder_in = nn.Linear(
- attention_units + encoder_dim,
- decoder_units,
- )
-
- self.decoder_rnns = nn.ModuleList(
- [nn.GRUCell(decoder_units, decoder_units) for _ in range(decoder_layers)]
- )
-
- self.diacritics_layer = nn.Linear(decoder_units, trg_vocab_size)
- self.device = "cuda"
-
- def decode(
- self,
- diacritic: torch.Tensor,
- ):
- """
- Decode one time-step
- Args:
- diacritic (Tensor): (batch_size, 1)
- Returns:
- """
-
- diacritic = self.embbeding(diacritic)
- if self.use_prenet:
- prenet_out = self.prenet(diacritic)
- else:
- prenet_out = diacritic
-
- cell_input = torch.cat((prenet_out, self.prev_attention), -1)
-
- self.attention_hidden = self.attention_layer(cell_input, self.attention_hidden)
- output = self.attention_hidden
-
- # The queries are the hidden state of the RNN layer
- attention, alignment = self.attention_wrapper(
- query=self.attention_hidden,
- values=self.encoder_outputs,
- keys=self.keys,
- mask=self.mask,
- prev_alignment=self.prev_alignment,
- )
-
- decoder_input = torch.cat((output, attention), -1)
-
- decoder_input = self.project_to_decoder_in(decoder_input)
-
- for idx in range(len(self.decoder_rnns)):
- self.decoder_hiddens[idx] = self.decoder_rnns[idx](
- decoder_input, self.decoder_hiddens[idx]
- )
- decoder_input = self.decoder_hiddens[idx] + decoder_input
-
- output = decoder_input
-
- output = self.diacritics_layer(output)
-
- if self.is_attention_accumulative:
- self.prev_alignment = self.prev_alignment + alignment
- else:
- self.prev_alignment = alignment
-
- self.prev_attention = attention
-
- return output, alignment
-
- def inference(self):
- """Generate diacritics one at a time"""
- batch_size = self.encoder_outputs.size(0)
- trg_len = self.encoder_outputs.size(1)
- diacritic = (
- torch.full((batch_size,), self.start_symbol_id).to(self.device).long()
- )
- outputs, alignments = [], []
- self.initialize()
-
- for _ in range(trg_len):
- output, alignment = self.decode(diacritic=diacritic)
-
- outputs.append(output)
- alignments.append(alignment)
- diacritic = torch.max(output, 1).indices
-
- alignments = torch.stack(alignments).transpose(0, 1)
- outputs = torch.stack(outputs).transpose(0, 1).contiguous()
- return outputs, alignments
-
- def forward(
- self,
- encoder_outputs: torch.Tensor,
- diacritics: Optional[torch.Tensor] = None,
- input_mask: Optional[torch.Tensor] = None,
- ):
- """calculate forward propagation
- Args:
- encoder_outputs (Tensor): the output of the encoder
- (batch_size, Tx, encoder_units * 2)
- diacritics(Tensor): target sequence
- input_mask (Tensor): the inputs mask (batch_size, Tx)
- """
- self.mask = input_mask
- self.encoder_outputs = encoder_outputs
- self.keys = self.keys_layer(encoder_outputs)
-
- if diacritics is None:
- return self.inference()
-
- batch_size = diacritics.size(0)
- trg_len = diacritics.size(1)
-
- # Init decoder states
- outputs = []
- alignments = []
-
- self.initialize()
-
- diacritic = (
- torch.full((batch_size,), self.start_symbol_id).to(self.device).long()
- )
-
- for time in range(trg_len):
- output, alignment = self.decode(diacritic=diacritic)
- outputs += [output]
- alignments += [alignment]
- #if random.random() > self.teacher_forcing_probability:
- diacritic = diacritics[:, time] # use training input
- #else:
- #diacritic = torch.max(output, 1).indices # use last output
-
- alignments = torch.stack(alignments).transpose(0, 1)
- outputs = torch.stack(outputs).transpose(0, 1).contiguous()
-
- return outputs, alignments
-
- def initialize(self):
- """Initialize the first step variables"""
- batch_size = self.encoder_outputs.size(0)
- src_len = self.encoder_outputs.size(1)
- self.attention_hidden = Variable(
- torch.zeros(batch_size, self.attention_units)
- ).to(self.device)
- self.decoder_hiddens = [
- Variable(torch.zeros(batch_size, self.decoder_units)).to(self.device)
- for _ in range(len(self.decoder_rnns))
- ]
- self.prev_attention = Variable(torch.zeros(batch_size, self.encoder_dim)).to(
- self.device
- )
- self.prev_alignment = Variable(torch.zeros(batch_size, src_len)).to(self.device)
diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/english/__init__.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/utils/text/english/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ashercn97/AsherTesting/extensions/whisper_stt/readme.md b/spaces/ashercn97/AsherTesting/extensions/whisper_stt/readme.md
deleted file mode 100644
index cd9abbf68cb4f7adf1172fdd57e9e26466e47778..0000000000000000000000000000000000000000
--- a/spaces/ashercn97/AsherTesting/extensions/whisper_stt/readme.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# whisper_stt
-
-Allows you to enter your inputs in chat mode using your microphone.
-
-## Settings
-
-To adjust your default settings, you can add the following to your settings.yaml file.
-
-```
-whisper_stt-whipser_language: chinese
-whisper_stt-whipser_model: tiny
-whisper_stt-auto_submit: False
-```
-
-See source documentation for [model names](https://github.com/openai/whisper#available-models-and-languages) and [languages](https://github.com/openai/whisper/blob/main/whisper/tokenizer.py) you can use.
\ No newline at end of file
diff --git a/spaces/awacke1/HEDIS.Roster.Dash.Component.Service/app.py b/spaces/awacke1/HEDIS.Roster.Dash.Component.Service/app.py
deleted file mode 100644
index fdc56957b6ec799dd26894211577fe278076de43..0000000000000000000000000000000000000000
--- a/spaces/awacke1/HEDIS.Roster.Dash.Component.Service/app.py
+++ /dev/null
@@ -1,318 +0,0 @@
-import streamlit as st
-
-data = [
- {
- "Condition": "Pain",
- "Question": "Do you have any pain or discomfort?",
- "CT Code": "SNOMED CT: 22253000",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=22253000&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Depression",
- "Question": "Have you been feeling down, depressed, or hopeless?",
- "CT Code": "SNOMED CT: 35489007",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=35489007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Anxiety",
- "Question": "Have you been feeling nervous, anxious, or on edge?",
- "CT Code": "SNOMED CT: 197480006",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=197480006&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Sleep problems",
- "Question": "Have you been having trouble sleeping?",
- "CT Code": "SNOMED CT: 309087008",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=309087008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Fatigue",
- "Question": "Have you been feeling tired or worn out?",
- "CT Code": "SNOMED CT: 84229001",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=84229001&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Mobility problems",
- "Question": "Do you have any difficulty walking or moving around?",
- "CT Code": "SNOMED CT: 288939007",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=288939007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Incontinence",
- "Question": "Do you have any problems with bladder or bowel control?",
- "CT Code": "SNOMED CT: 48694002",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=48694002&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
- },
- {
- "Condition": "Memory problems",
- "Question": "Have you been having trouble remembering things?",
- "CT Code": "SNOMED CT: 386807006",
- "URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=386807006&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Vision problems",
-"Question": "Do you have any problems with your vision?",
-"CT Code": "SNOMED CT: 246636008",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=246636008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Hearing problems",
-"Question": "Do you have any problems with your hearing?",
-"CT Code": "SNOMED CT: 405729008",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=405729008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Breathing problems",
-"Question": "Have you been having trouble breathing?",
-"CT Code": "SNOMED CT: 267036007",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=267036007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Cognitive impairment",
-"Question": "Have you been having difficulty thinking or making decisions?",
-"CT Code": "SNOMED CT: 373930000",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=373930000&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Social isolation",
-"Question": "Do you feel lonely or isolated from others?",
-"CT Code": "SNOMED CT: 160303001",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=160303001&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Nutrition problems",
-"Question": "Have you been having problems with your appetite or eating?",
-"CT Code": "SNOMED CT: 248490000",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=248490000&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Substance use",
-"Question": "Have you been using alcohol or drugs?",
-"CT Code": "SNOMED CT: 228280008",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=228280008&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Safety concerns",
-"Question": "Do you have any concerns about your safety or the safety of others?",
-"CT Code": "SNOMED CT: 409596002",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=409596002&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Financial concerns",
-"Question": "Do you have any concerns about your finances or ability to pay for care?",
-"CT Code": "SNOMED CT: 721991003",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=721991003&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Transportation problems",
-"Question": "Do you have any problems getting to appointments or running errands?",
-"CT Code": "SNOMED CT: 405609001",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=405609001&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Family/caregiver stress",
-"Question": "Do you feel overwhelmed or stressed by your family or caregiving responsibilities?",
-"CT Code": "SNOMED CT: 308292007",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=308292007&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-},
-{
-"Condition": "Activities of daily living (ADLs)",
-"Question": "Do you have any difficulty with bathing, dressing, grooming, or other basic activities?",
-"CT Code": "SNOMED CT: 410518003",
-"URL": "https://browser.ihtsdotools.org/?perspective=full&conceptId1=410518003&edition=MAIN/SNOMEDCT/2021-09-30&release=&languages=en"
-}
-]
-
-st.write("Top 20 Conditions used in Surveys and Assessments as Questions about Care Needs:")
-for item in data:
- st.write(f"Condition: {item['Condition']}") # gpt fail - improper indent, continue problem
- st.write(f"Question: {item['Question']}")
- st.write(f"CT Code: {item['CT Code']}")
- st.markdown(f"VerificationURL : {item['URL']}")
- st.write()
- st.write("---")
-
-
-st.markdown("""
-
-Prompt:
-Using the 20 point outline below, write a streamlit program that displays a table grid of the top 20 FHIR based observations. For each come up with a representative emoji and reducce reading level to grade school level. Create a CT code which is one of the following types for each: ICD10, CPT, HCPCS, SNOMED, LOINC, OMS, RxNorm. Generate all code in streamlit python as a Hedis Pro Guide.
-
-
-Patient Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient)
-Patient Date of Birth (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthdate)
-Patient Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address)
-Patient Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone)
-Patient Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email)
-Patient Gender (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient)
-Encounter Date/Time (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter)
-Encounter Type (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter)
-Encounter Location (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter)
-Provider Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner)
-Provider NPI (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner)
-Provider Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address)
-Provider Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone)
-Provider Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email)
-Reason for Visit (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab)
-Lab Test Result (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab)
-Medication Order (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest)
-Medication Administration (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest)
-Allergy List (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-allergyintolerance)
-Immunization Record (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-immunization)
-
-""")
-
-
-
-st.markdown("""
-
-# Prompt:
-## According to https://www.healthit.gov/ Clinical Notes are aligned to different clinical terminology codes including Applicable Vocabulary Standards to HEDIS. Name top twenty USCDI Data Classes and include the code URI, the code type, and the code value. Create a python dictionary list for streamlit python which includes the markdown for the data. use st.markdown function to display the data.
-
-Clinical Notes are documents created by healthcare professionals that contain information about a patient's medical history, current condition, and treatment plan. They can include progress notes, discharge summaries, and consultation reports.
-
-The United States Core Data for Interoperability (USCDI) is a standardized set of health data classes that are required to be exchanged between different healthcare providers and systems. The top twenty USCDI data classes are:
-
-Patient Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient)
-Patient Date of Birth (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthdate)
-Patient Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address)
-Patient Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone)
-Patient Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email)
-Patient Gender (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient)
-Encounter Date/Time (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter)
-Encounter Type (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter)
-Encounter Location (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter)
-Provider Name (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner)
-Provider NPI (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner)
-Provider Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-address)
-Provider Phone Number (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone)
-Provider Email Address (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-email)
-Reason for Visit (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab)
-Lab Test Result (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab)
-Medication Order (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest)
-Medication Administration (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-medicationrequest)
-Allergy List (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-allergyintolerance)
-Immunization Record (URI: http://hl7.org/fhir/us/core/StructureDefinition/us-core-immunization)
-
-# Prompt 2: Fail: Show as markdown code in streamlit python
-
-# Prompt 3: Fail: continue and show full code listing including markdown so it displays as a numbered list in st.markdown() function
-
-# Prompt 4: Fail:
-
-import streamlit as st
-
-data = {
- 'Patient Name': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Name'},
- 'Patient Date of Birth': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-birthdate', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Date of Birth'},
- 'Patient Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-address', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Address'},
- 'Patient Phone Number': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Phone Number'},
- 'Patient Email Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-email', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Email Address'},
- 'Patient Gender': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-patient', 'Code Type': 'USCDI Data Class', 'Code Value': 'Patient Gender'},
- 'Encounter Date/Time': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter', 'Code Type': 'USCDI Data Class', 'Code Value': 'Encounter Date/Time'},
- 'Encounter Type': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter', 'Code Type': 'USCDI Data Class', 'Code Value': 'Encounter Type'},
- 'Encounter Location': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-encounter', 'Code Type': 'USCDI Data Class', 'Code Value': 'Encounter Location'},
- 'Provider Name': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Name'},
- 'Provider NPI': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-practitioner', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider NPI'},
- 'Provider Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-address', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Address'},
- 'Provider Phone Number': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-phone', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Phone Number'},
- 'Provider Email Address': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-email', 'Code Type': 'USCDI Data Class', 'Code Value': 'Provider Email Address'},
- 'Reason for Visit': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core-diagnosticreport-lab', 'Code Type': 'USCDI Data Class', 'Code Value': 'Reason for Visit'},
- 'Lab Test Result': {'URI': 'http://hl7.org/fhir/us/core/StructureDefinition/us-core
-...
-
-Failure reason - cannot continue. 3.5 could but in subsequent continue - Eval submitted
-
-
-""")
-
-st.markdown("""
-
-# Component UI Tile Sizes for Optimal Display
-
-Prompt on GPT-4:
-```
-Display a list of top ten table size proportions dividing a typical computer screen into 2,3,4,5,6 tiles supporting the tiles in a table with each iframe sized so total width and height add to 2560 x 1440 for HD computer displays.
-
-# RLHF Feedback:
-##👎GPT-4 Difference: Failed first time, submitted eval on fail. Tried again, different better outcome.
-
-Right Answer:
-
-Here is a list of top ten table size proportions for dividing a typical computer screen into 2, 3, 4, 5, and 6 tiles. Each iframe is sized so that the total width and height add up to 2560 x 1440 for HD computer displays:
-
-...
-These table size proportions provide various arrangements for dividing a typical HD computer display into multiple tiles. You can choose the most suitable layout based on your requirements and preferences.
-
-Aaron Wacker
-Show markdown code listing
-
-1. 2 tiles (1x2):
- - Tile 1: 1280 x 1440
- - Tile 2: 1280 x 1440
-
-2. 2 tiles (2x1):
- - Tile 1: 2560 x 720
- - Tile 2: 2560 x 720
-
-3. 3 tiles (1x3):
- - Tile 1: 853 x 1440
- - Tile 2: 853 x 1440
- - Tile 3: 854 x 1440
-
-4. 3 tiles (3x1):
- - Tile 1: 2560 x 480
- - Tile 2: 2560 x 480
- - Tile 3: 2560 x 480
-
-5. 4 tiles (2x2):
- - Tile 1: 1280 x 720
- - Tile 2: 1280 x 720
- - Tile 3: 1280 x 720
- - Tile 4: 1280 x 720
-
-6. 5 tiles (1x5):
- - Tile 1: 512 x 1440
- - Tile 2: 512 x 1440
- - Tile 3: 512 x 1440
- - Tile 4: 512 x 1440
- - Tile 5: 512 x 1440
-
-7. 5 tiles (5x1):
- - Tile 1: 2560 x 288
- - Tile 2: 2560 x 288
- - Tile 3: 2560 x 288
- - Tile 4: 2560 x 288
- - Tile 5: 2560 x 288
-
-8. 6 tiles (2x3):
- - Tile 1: 1280 x 480
- - Tile 2: 1280 x 480
- - Tile 3: 1280 x 480
- - Tile 4: 1280 x 480
- - Tile 5: 1280 x 480
- - Tile 6: 1280 x 480
-
-9. 6 tiles (3x2):
- - Tile 1: 853 x 720
- - Tile 2: 853 x 720
- - Tile 3: 854 x 720
- - Tile 4: 853 x 720
- - Tile 5: 853 x 720
- - Tile 6: 854 x 720
-
-10. 6 tiles (1x6):
- - Tile 1: 426 x 1440
- - Tile 2: 426 x 1440
- - Tile 3: 426 x 1440
- - Tile 4: 426 x 1440
- - Tile 5: 426 x 1440
- - Tile 6: 426 x 1440
-
-#👍RLHF:
-## Provide Additional Feedback : GPT-4 Got this right and did it differently second time after an eval about an error. Well done - very useful
-
-
-```
-
-""")
\ No newline at end of file
diff --git a/spaces/azusarang/so-vits-svc-models-ba_P/cluster/kmeans.py b/spaces/azusarang/so-vits-svc-models-ba_P/cluster/kmeans.py
deleted file mode 100644
index 6111ea45e66a15d41b5b904be6f75affd3c4369f..0000000000000000000000000000000000000000
--- a/spaces/azusarang/so-vits-svc-models-ba_P/cluster/kmeans.py
+++ /dev/null
@@ -1,201 +0,0 @@
-import math,pdb
-import torch,pynvml
-from torch.nn.functional import normalize
-from time import time
-import numpy as np
-# device=torch.device("cuda:0")
-def _kpp(data: torch.Tensor, k: int, sample_size: int = -1):
- """ Picks k points in the data based on the kmeans++ method.
-
- Parameters
- ----------
- data : torch.Tensor
- Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
- data, rank 2 multidimensional data, in which case one
- row is one observation.
- k : int
- Number of samples to generate.
- sample_size : int
- sample data to avoid memory overflow during calculation
-
- Returns
- -------
-    init : torch.Tensor
-        A 'k' by 'N' tensor containing the initial centroids.
-
- References
- ----------
- .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
- careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
- on Discrete Algorithms, 2007.
- .. [2] scipy/cluster/vq.py: _kpp
- """
- batch_size=data.shape[0]
- if batch_size>sample_size:
- data = data[torch.randint(0, batch_size,[sample_size], device=data.device)]
- dims = data.shape[1] if len(data.shape) > 1 else 1
- init = torch.zeros((k, dims)).to(data.device)
- r = torch.distributions.uniform.Uniform(0, 1)
- for i in range(k):
- if i == 0:
- init[i, :] = data[torch.randint(data.shape[0], [1])]
- else:
- D2 = torch.cdist(init[:i, :][None, :], data[None, :], p=2)[0].amin(dim=0)
- probs = D2 / torch.sum(D2)
- cumprobs = torch.cumsum(probs, dim=0)
- init[i, :] = data[torch.searchsorted(cumprobs, r.sample([1]).to(data.device))]
- return init
-class KMeansGPU:
- '''
- Kmeans clustering algorithm implemented with PyTorch
-
- Parameters:
- n_clusters: int,
- Number of clusters
-
-    max_iter: int, default: 200
-        Maximum number of iterations
-
- tol: float, default: 0.0001
- Tolerance
-
- verbose: int, default: 0
- Verbosity
-
- mode: {'euclidean', 'cosine'}, default: 'euclidean'
- Type of distance measure
-
- init_method: {'random', 'point', '++'}
- Type of initialization
-
- minibatch: {None, int}, default: None
- Batch size of MinibatchKmeans algorithm
- if None perform full KMeans algorithm
-
- Attributes:
- centroids: torch.Tensor, shape: [n_clusters, n_features]
- cluster centroids
- '''
- def __init__(self, n_clusters, max_iter=200, tol=1e-4, verbose=0, mode="euclidean",device=torch.device("cuda:0")):
- self.n_clusters = n_clusters
- self.max_iter = max_iter
- self.tol = tol
- self.verbose = verbose
- self.mode = mode
- self.device=device
- pynvml.nvmlInit()
- gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(device.index)
- info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
- self.minibatch=int(33e6/self.n_clusters*info.free/ 1024 / 1024 / 1024)
- print("free_mem/GB:",info.free/ 1024 / 1024 / 1024,"minibatch:",self.minibatch)
-
- @staticmethod
- def cos_sim(a, b):
- """
- Compute cosine similarity of 2 sets of vectors
-
- Parameters:
- a: torch.Tensor, shape: [m, n_features]
-
- b: torch.Tensor, shape: [n, n_features]
- """
- return normalize(a, dim=-1) @ normalize(b, dim=-1).transpose(-2, -1)
-
- @staticmethod
- def euc_sim(a, b):
- """
- Compute euclidean similarity of 2 sets of vectors
- Parameters:
- a: torch.Tensor, shape: [m, n_features]
- b: torch.Tensor, shape: [n, n_features]
- """
- return 2 * a @ b.transpose(-2, -1) -(a**2).sum(dim=1)[..., :, None] - (b**2).sum(dim=1)[..., None, :]
-
- def max_sim(self, a, b):
- """
- Compute maximum similarity (or minimum distance) of each vector
- in a with all of the vectors in b
- Parameters:
- a: torch.Tensor, shape: [m, n_features]
- b: torch.Tensor, shape: [n, n_features]
- """
- if self.mode == 'cosine':
- sim_func = self.cos_sim
- elif self.mode == 'euclidean':
- sim_func = self.euc_sim
- sim = sim_func(a, b)
- max_sim_v, max_sim_i = sim.max(dim=-1)
- return max_sim_v, max_sim_i
-
- def fit_predict(self, X):
- """
- Combination of fit() and predict() methods.
-        This is faster than calling fit() and predict() separately.
- Parameters:
- X: torch.Tensor, shape: [n_samples, n_features]
- centroids: {torch.Tensor, None}, default: None
- if given, centroids will be initialized with given tensor
- if None, centroids will be randomly chosen from X
- Return:
- labels: torch.Tensor, shape: [n_samples]
-
- mini_=33kk/k*remain
- mini=min(mini_,fea_shape)
- offset=log2(k/1000)*1.5
- kpp_all=min(mini_*10/offset,fea_shape)
- kpp_sample=min(mini_/12/offset,fea_shape)
- """
- assert isinstance(X, torch.Tensor), "input must be torch.Tensor"
- assert X.dtype in [torch.half, torch.float, torch.double], "input must be floating point"
- assert X.ndim == 2, "input must be a 2d tensor with shape: [n_samples, n_features] "
- # print("verbose:%s"%self.verbose)
-
- offset = np.power(1.5,np.log(self.n_clusters / 1000))/np.log(2)
- with torch.no_grad():
- batch_size= X.shape[0]
- # print(self.minibatch, int(self.minibatch * 10 / offset), batch_size)
- start_time = time()
- if (self.minibatch*10//offset< batch_size):
- x = X[torch.randint(0, batch_size,[int(self.minibatch*10/offset)])].to(self.device)
- else:
- x = X.to(self.device)
- # print(x.device)
- self.centroids = _kpp(x, self.n_clusters, min(int(self.minibatch/12/offset),batch_size))
- del x
- torch.cuda.empty_cache()
- # self.centroids = self.centroids.to(self.device)
-            num_points_in_clusters = torch.ones(self.n_clusters, device=self.device, dtype=X.dtype)  # all ones
-            closest = None  # per-sample cluster indices, e.g. shape [3098036], dtype int64
-            if (self.minibatch >= batch_size // 2) and (self.minibatch < batch_size):
-                X = X[torch.randint(0, batch_size, [self.minibatch], device=self.device)].to(self.device)
-            elif self.minibatch >= batch_size:
-                X = X.to(self.device)
-            for i in range(self.max_iter):
-                iter_time = time()
-                if self.minibatch < batch_size // 2:
-                    x = X[torch.randint(0, batch_size, [self.minibatch], device=self.device)].to(self.device)
-                else:
-                    x = X
-                # assign each sample to its nearest centroid, then recompute centroids (minibatch update)
-                closest = self.max_sim(a=x, b=self.centroids)[1]
-                matched_clusters, counts = closest.unique(return_counts=True)
-                expanded_closest = closest[None].expand(self.n_clusters, -1)
-                mask = (expanded_closest == torch.arange(self.n_clusters, device=self.device)[:, None]).to(X.dtype)
-                c_grad = mask @ x / mask.sum(-1)[..., :, None]
-                c_grad[c_grad != c_grad] = 0  # empty clusters yield NaNs; zero them out
-                error = (c_grad - self.centroids).pow(2).sum()
-                lr = 1 / num_points_in_clusters[:, None] * 0.9 + 0.1
-                num_points_in_clusters[matched_clusters] += counts
-                self.centroids = self.centroids * (1 - lr) + c_grad * lr
-                if self.verbose >= 2:
-                    print('iter:', i, 'error:', error.item(), 'time spent:', round(time() - iter_time, 4))
-                if error <= self.tol:
-                    break
-
- if self.verbose >= 1:
- print(f'used {i+1} iterations ({round(time()-start_time, 4)}s) to cluster {batch_size} items into {self.n_clusters} clusters')
- return closest
diff --git a/spaces/badayvedat/LLaVA/llava/eval/model_vqa_science.py b/spaces/badayvedat/LLaVA/llava/eval/model_vqa_science.py
deleted file mode 100644
index aa77b39c0df7bcf0c8200f1282b165dee493ad73..0000000000000000000000000000000000000000
--- a/spaces/badayvedat/LLaVA/llava/eval/model_vqa_science.py
+++ /dev/null
@@ -1,141 +0,0 @@
-import argparse
-import torch
-import os
-import json
-from tqdm import tqdm
-import shortuuid
-
-from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
-from llava.conversation import conv_templates, SeparatorStyle
-from llava.model.builder import load_pretrained_model
-from llava.utils import disable_torch_init
-from llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria
-
-from PIL import Image
-import math
-
-
-def split_list(lst, n):
- """Split a list into n (roughly) equal-sized chunks"""
-    chunk_size = math.ceil(len(lst) / n)  # ceiling division
- return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
-
-
-def get_chunk(lst, n, k):
- chunks = split_list(lst, n)
- return chunks[k]
-
-
-def eval_model(args):
- # Model
- disable_torch_init()
- model_path = os.path.expanduser(args.model_path)
- model_name = get_model_name_from_path(model_path)
- tokenizer, model, image_processor, context_len = load_pretrained_model(model_path, args.model_base, model_name)
-
- questions = json.load(open(os.path.expanduser(args.question_file), "r"))
- questions = get_chunk(questions, args.num_chunks, args.chunk_idx)
- answers_file = os.path.expanduser(args.answers_file)
- os.makedirs(os.path.dirname(answers_file), exist_ok=True)
- ans_file = open(answers_file, "w")
- for i, line in enumerate(tqdm(questions)):
- idx = line["id"]
- question = line['conversations'][0]
- qs = question['value'].replace('', '').strip()
- cur_prompt = qs
-
- if 'image' in line:
- image_file = line["image"]
- image = Image.open(os.path.join(args.image_folder, image_file))
- image_tensor = image_processor.preprocess(image, return_tensors='pt')['pixel_values'][0]
- images = image_tensor.unsqueeze(0).half().cuda()
- if getattr(model.config, 'mm_use_im_start_end', False):
- qs = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN + '\n' + qs
- else:
- qs = DEFAULT_IMAGE_TOKEN + '\n' + qs
- cur_prompt = '' + '\n' + cur_prompt
- else:
- images = None
-
- conv = conv_templates[args.conv_mode].copy()
- conv.append_message(conv.roles[0], qs)
- conv.append_message(conv.roles[1], None)
- prompt = conv.get_prompt()
-
- input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
-
- stop_str = conv.sep if conv.sep_style != SeparatorStyle.TWO else conv.sep2
- keywords = [stop_str]
- stopping_criteria = [KeywordsStoppingCriteria(keywords, tokenizer, input_ids)] if conv.version == "v0" else None
-
- with torch.inference_mode():
- output_ids = model.generate(
- input_ids,
- images=images,
- do_sample=True,
- temperature=0.2,
- max_new_tokens=1024,
- use_cache=True,
- stopping_criteria=stopping_criteria,
- )
-
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
-
- # prompt for answer
- if args.answer_prompter:
- outputs_reasoning = outputs
- input_ids = tokenizer_image_token(prompt + outputs_reasoning + ' ###\nANSWER:', tokenizer, IMAGE_TOKEN_INDEX, return_tensors='pt').unsqueeze(0).cuda()
-
- with torch.inference_mode():
- output_ids = model.generate(
- input_ids,
- images=images,
- do_sample=True,
- temperature=0.2,
- max_new_tokens=64,
- use_cache=True,
- stopping_criteria=stopping_criteria)
-
- input_token_len = input_ids.shape[1]
- n_diff_input_output = (input_ids != output_ids[:, :input_token_len]).sum().item()
- if n_diff_input_output > 0:
- print(f'[Warning] {n_diff_input_output} output_ids are not the same as the input_ids')
- outputs = tokenizer.batch_decode(output_ids[:, input_token_len:], skip_special_tokens=True)[0]
- outputs = outputs.strip()
- if outputs.endswith(stop_str):
- outputs = outputs[:-len(stop_str)]
- outputs = outputs.strip()
- outputs = outputs_reasoning + '\n The answer is ' + outputs
-
- ans_id = shortuuid.uuid()
- ans_file.write(json.dumps({"question_id": idx,
- "prompt": cur_prompt,
- "text": outputs,
- "answer_id": ans_id,
- "model_id": model_name,
- "metadata": {}}) + "\n")
- ans_file.flush()
- ans_file.close()
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser()
- parser.add_argument("--model-path", type=str, default="facebook/opt-350m")
- parser.add_argument("--model-base", type=str, default=None)
- parser.add_argument("--image-folder", type=str, default="")
- parser.add_argument("--question-file", type=str, default="tables/question.json")
- parser.add_argument("--answers-file", type=str, default="answer.jsonl")
- parser.add_argument("--conv-mode", type=str, default="llava_v0")
- parser.add_argument("--num-chunks", type=int, default=1)
- parser.add_argument("--chunk-idx", type=int, default=0)
- parser.add_argument("--answer-prompter", action="store_true")
- args = parser.parse_args()
-
- eval_model(args)
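For reference, the chunking helpers in the deleted script above slice the question file into ceiling-sized chunks so several workers can each answer one slice. A minimal standalone sketch of that behavior (the sample data is illustrative only):

```python
# Standalone sketch of the ceiling-division chunking used by the deleted
# eval script; the sample list below is made up for illustration.
import math

def split_list(lst, n):
    """Split lst into at most n roughly equal chunks; the last may be shorter."""
    chunk_size = math.ceil(len(lst) / n)
    return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]

questions = list(range(10))
print(split_list(questions, 3))     # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
print(split_list(questions, 3)[1])  # the slice a worker with --chunk-idx 1 would get
```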
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/core/DirectGeometry.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/core/DirectGeometry.d.ts
deleted file mode 100644
index f1651790695f4a64cec9c3b62e1485715f6fd0f5..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/core/DirectGeometry.d.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-import { Vector3 } from './../math/Vector3';
-import { Color } from './../math/Color';
-import { Vector2 } from './../math/Vector2';
-import { Vector4 } from './../math/Vector4';
-import { Box3 } from './../math/Box3';
-import { Sphere } from './../math/Sphere';
-import { Geometry } from './Geometry';
-import { Event } from './Face3';
-import { EventDispatcher } from './EventDispatcher';
-import { MorphTarget } from './Geometry';
-/**
- * @see src/core/DirectGeometry.js
- */
-export class DirectGeometry extends EventDispatcher {
- constructor();
-
- id: number;
- uuid: string;
- name: string;
- type: string;
- indices: number[];
- vertices: Vector3[];
- normals: Vector3[];
- colors: Color[];
- uvs: Vector2[];
- uvs2: Vector2[];
- groups: { start: number; materialIndex: number }[];
- morphTargets: MorphTarget[];
- skinWeights: Vector4[];
- skinIndices: Vector4[];
- boundingBox: Box3;
- boundingSphere: Sphere;
- verticesNeedUpdate: boolean;
- normalsNeedUpdate: boolean;
- colorsNeedUpdate: boolean;
- uvsNeedUpdate: boolean;
- groupsNeedUpdate: boolean;
-
- computeBoundingBox(): void;
- computeBoundingSphere(): void;
- computeGroups(geometry: Geometry): void;
- fromGeometry(geometry: Geometry): DirectGeometry;
- dispose(): void;
-
- // EventDispatcher mixins
- addEventListener(type: string, listener: (event: Event) => void): void;
- hasEventListener(type: string, listener: (event: Event) => void): boolean;
- removeEventListener(type: string, listener: (event: Event) => void): void;
- dispatchEvent(event: { type: string; [attachment: string]: any }): void;
-}
diff --git a/spaces/banana-projects/web3d/node_modules/three/src/loaders/BufferGeometryLoader.d.ts b/spaces/banana-projects/web3d/node_modules/three/src/loaders/BufferGeometryLoader.d.ts
deleted file mode 100644
index fe3148f4ddf88a2cede3a12db5a9454a465ef865..0000000000000000000000000000000000000000
--- a/spaces/banana-projects/web3d/node_modules/three/src/loaders/BufferGeometryLoader.d.ts
+++ /dev/null
@@ -1,16 +0,0 @@
-import { LoadingManager } from './LoadingManager';
-import { BufferGeometry } from './../core/BufferGeometry';
-
-export class BufferGeometryLoader {
- constructor(manager?: LoadingManager);
-
- manager: LoadingManager;
-
- load(
- url: string,
- onLoad: (bufferGeometry: BufferGeometry) => void,
- onProgress?: (event: any) => void,
- onError?: (event: any) => void
- ): void;
- parse(json: any): BufferGeometry;
-}
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/__init__.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/__init__.py
deleted file mode 100644
index cfb1e4d7bb221c429082bd389d9140e5b1cc07b0..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/archs/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import importlib
-from copy import deepcopy
-from os import path as osp
-
-from basicsr.utils import get_root_logger, scandir
-from basicsr.utils.registry import ARCH_REGISTRY
-
-__all__ = ['build_network']
-
-# automatically scan and import arch modules for registry
-# scan all the files under the 'archs' folder and collect files ending with
-# '_arch.py'
-arch_folder = osp.dirname(osp.abspath(__file__))
-arch_filenames = [osp.splitext(osp.basename(v))[0] for v in scandir(arch_folder) if v.endswith('_arch.py')]
-# import all the arch modules
-_arch_modules = [importlib.import_module(f'basicsr.archs.{file_name}') for file_name in arch_filenames]
-
-
-def build_network(opt):
- opt = deepcopy(opt)
- network_type = opt.pop('type')
- net = ARCH_REGISTRY.get(network_type)(**opt)
- logger = get_root_logger()
- logger.info(f'Network [{net.__class__.__name__}] is created.')
- return net
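The deleted registry module above builds a network from a plain option dict whose `type` key selects the registered class. A hedged usage sketch, assuming basicsr is installed and reusing the RRDBNet options that appear later in this diff:

```python
# Hedged usage sketch for the deleted build_network helper; assumes basicsr
# is installed and borrows the RRDBNet options used elsewhere in this diff.
from basicsr.archs import build_network

opt = {
    'type': 'RRDBNet',   # registry key; build_network pops it before instantiation
    'num_in_ch': 3,
    'num_out_ch': 3,
    'num_feat': 64,
    'num_block': 23,
    'num_grow_ch': 32,
    'scale': 2,
}
net = build_network(opt)  # roughly ARCH_REGISTRY.get('RRDBNet')(**opt)
print(net.__class__.__name__)  # RRDBNet
```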
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/gfpgan/weights/README.md b/spaces/beihai/GFPGAN-V1.3-whole-image/gfpgan/weights/README.md
deleted file mode 100644
index 4d7b7e642591ef88575d9e6c360a4d29e0cc1a4f..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/gfpgan/weights/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Weights
-
-Put the downloaded weights to this folder.
diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/inference_gfpgan.py b/spaces/beihai/GFPGAN-V1.3-whole-image/inference_gfpgan.py
deleted file mode 100644
index ac814a50ead170ab69f32c1714bd445c6c5baf17..0000000000000000000000000000000000000000
--- a/spaces/beihai/GFPGAN-V1.3-whole-image/inference_gfpgan.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import argparse
-import cv2
-import glob
-import numpy as np
-import os
-import torch
-from basicsr.utils import imwrite
-
-from gfpgan import GFPGANer
-
-
-def main():
- """Inference demo for GFPGAN.
- """
- parser = argparse.ArgumentParser()
- parser.add_argument('--upscale', type=int, default=2, help='The final upsampling scale of the image')
- parser.add_argument('--arch', type=str, default='clean', help='The GFPGAN architecture. Option: clean | original')
- parser.add_argument('--channel', type=int, default=2, help='Channel multiplier for large networks of StyleGAN2')
- parser.add_argument('--model_path', type=str, default='experiments/pretrained_models/GFPGANv1.3.pth')
- parser.add_argument('--bg_upsampler', type=str, default='realesrgan', help='background upsampler')
- parser.add_argument(
- '--bg_tile', type=int, default=400, help='Tile size for background sampler, 0 for no tile during testing')
- parser.add_argument('--test_path', type=str, default='inputs/whole_imgs', help='Input folder')
- parser.add_argument('--suffix', type=str, default=None, help='Suffix of the restored faces')
- parser.add_argument('--only_center_face', action='store_true', help='Only restore the center face')
- parser.add_argument('--aligned', action='store_true', help='Input are aligned faces')
- parser.add_argument('--paste_back', action='store_false', help='Paste the restored faces back to images')
- parser.add_argument('--save_root', type=str, default='results', help='Path to save root')
- parser.add_argument(
- '--ext',
- type=str,
- default='auto',
- help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
- args = parser.parse_args()
-
- if args.test_path.endswith('/'):
- args.test_path = args.test_path[:-1]
- os.makedirs(args.save_root, exist_ok=True)
-
- # background upsampler
- if args.bg_upsampler == 'realesrgan':
- if not torch.cuda.is_available(): # CPU
- import warnings
- warnings.warn('The unoptimized RealESRGAN is very slow on CPU. We do not use it. '
- 'If you really want to use it, please modify the corresponding codes.')
- bg_upsampler = None
- else:
- from basicsr.archs.rrdbnet_arch import RRDBNet
- from realesrgan import RealESRGANer
- model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
- bg_upsampler = RealESRGANer(
- scale=2,
- model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth',
- model=model,
- tile=args.bg_tile,
- tile_pad=10,
- pre_pad=0,
- half=True) # need to set False in CPU mode
- else:
- bg_upsampler = None
- # set up GFPGAN restorer
- restorer = GFPGANer(
- model_path=args.model_path,
- upscale=args.upscale,
- arch=args.arch,
- channel_multiplier=args.channel,
- bg_upsampler=bg_upsampler)
-
- img_list = sorted(glob.glob(os.path.join(args.test_path, '*')))
- for img_path in img_list:
- # read image
- img_name = os.path.basename(img_path)
- print(f'Processing {img_name} ...')
- basename, ext = os.path.splitext(img_name)
- input_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
-
- # restore faces and background if necessary
- cropped_faces, restored_faces, restored_img = restorer.enhance(
- input_img, has_aligned=args.aligned, only_center_face=args.only_center_face, paste_back=args.paste_back)
-
- # save faces
- for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_faces)):
- # save cropped face
- save_crop_path = os.path.join(args.save_root, 'cropped_faces', f'{basename}_{idx:02d}.png')
- imwrite(cropped_face, save_crop_path)
- # save restored face
- if args.suffix is not None:
- save_face_name = f'{basename}_{idx:02d}_{args.suffix}.png'
- else:
- save_face_name = f'{basename}_{idx:02d}.png'
- save_restore_path = os.path.join(args.save_root, 'restored_faces', save_face_name)
- imwrite(restored_face, save_restore_path)
- # save comparison image
- cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
- imwrite(cmp_img, os.path.join(args.save_root, 'cmp', f'{basename}_{idx:02d}.png'))
-
- # save restored img
- if restored_img is not None:
- if args.ext == 'auto':
- extension = ext[1:]
- else:
- extension = args.ext
-
- if args.suffix is not None:
- save_restore_path = os.path.join(args.save_root, 'restored_imgs',
- f'{basename}_{args.suffix}.{extension}')
- else:
- save_restore_path = os.path.join(args.save_root, 'restored_imgs', f'{basename}.{extension}')
- imwrite(restored_img, save_restore_path)
-
- print(f'Results are in the [{args.save_root}] folder.')
-
-
-if __name__ == '__main__':
- main()
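The deleted CLI above mostly wires argument parsing around a single `GFPGANer.enhance` call. A hedged single-image sketch, assuming the gfpgan package and OpenCV are installed, the v1.3 weights sit at the path used above, and an illustrative input file name:

```python
# Hedged single-image sketch of the restorer configured above; the input and
# output file names are illustrative only.
import cv2
from gfpgan import GFPGANer

restorer = GFPGANer(
    model_path='experiments/pretrained_models/GFPGANv1.3.pth',
    upscale=2,
    arch='clean',
    channel_multiplier=2,
    bg_upsampler=None,  # skip Real-ESRGAN background upsampling in this sketch
)

img = cv2.imread('inputs/whole_imgs/example.jpg', cv2.IMREAD_COLOR)
cropped_faces, restored_faces, restored_img = restorer.enhance(
    img, has_aligned=False, only_center_face=False, paste_back=True)
if restored_img is not None:
    cv2.imwrite('results/example_restored.png', restored_img)
```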
diff --git a/spaces/bigjoker/stable-diffusion-webui/scripts/postprocessing_upscale.py b/spaces/bigjoker/stable-diffusion-webui/scripts/postprocessing_upscale.py
deleted file mode 100644
index ccec72fcbc72eeffbe24a659bf53ecba71162391..0000000000000000000000000000000000000000
--- a/spaces/bigjoker/stable-diffusion-webui/scripts/postprocessing_upscale.py
+++ /dev/null
@@ -1,131 +0,0 @@
-from PIL import Image
-import numpy as np
-
-from modules import scripts_postprocessing, shared
-import gradio as gr
-
-from modules.ui_components import FormRow
-
-
-upscale_cache = {}
-
-
-class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
- name = "Upscale"
- order = 1000
-
- def ui(self):
- selected_tab = gr.State(value=0)
-
- with gr.Tabs(elem_id="extras_resize_mode"):
- with gr.TabItem('Scale by', elem_id="extras_scale_by_tab") as tab_scale_by:
- upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize")
-
- with gr.TabItem('Scale to', elem_id="extras_scale_to_tab") as tab_scale_to:
- with FormRow():
- upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w")
- upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h")
- upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")
-
- with FormRow():
- extras_upscaler_1 = gr.Dropdown(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
-
- with FormRow():
- extras_upscaler_2 = gr.Dropdown(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
- extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=0.0, elem_id="extras_upscaler_2_visibility")
-
- tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
- tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])
-
- return {
- "upscale_mode": selected_tab,
- "upscale_by": upscaling_resize,
- "upscale_to_width": upscaling_resize_w,
- "upscale_to_height": upscaling_resize_h,
- "upscale_crop": upscaling_crop,
- "upscaler_1_name": extras_upscaler_1,
- "upscaler_2_name": extras_upscaler_2,
- "upscaler_2_visibility": extras_upscaler_2_visibility,
- }
-
- def upscale(self, image, info, upscaler, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop):
- if upscale_mode == 1:
- upscale_by = max(upscale_to_width/image.width, upscale_to_height/image.height)
- info["Postprocess upscale to"] = f"{upscale_to_width}x{upscale_to_height}"
- else:
- info["Postprocess upscale by"] = upscale_by
-
- cache_key = (hash(np.array(image.getdata()).tobytes()), upscaler.name, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- cached_image = upscale_cache.pop(cache_key, None)
-
- if cached_image is not None:
- image = cached_image
- else:
- image = upscaler.scaler.upscale(image, upscale_by, upscaler.data_path)
-
- upscale_cache[cache_key] = image
- if len(upscale_cache) > shared.opts.upscaling_max_images_in_cache:
- upscale_cache.pop(next(iter(upscale_cache), None), None)
-
- if upscale_mode == 1 and upscale_crop:
- cropped = Image.new("RGB", (upscale_to_width, upscale_to_height))
- cropped.paste(image, box=(upscale_to_width // 2 - image.width // 2, upscale_to_height // 2 - image.height // 2))
- image = cropped
- info["Postprocess crop to"] = f"{image.width}x{image.height}"
-
- return image
-
- def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_mode=1, upscale_by=2.0, upscale_to_width=None, upscale_to_height=None, upscale_crop=False, upscaler_1_name=None, upscaler_2_name=None, upscaler_2_visibility=0.0):
- if upscaler_1_name == "None":
- upscaler_1_name = None
-
- upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_1_name]), None)
- assert upscaler1 or (upscaler_1_name is None), f'could not find upscaler named {upscaler_1_name}'
-
- if not upscaler1:
- return
-
- if upscaler_2_name == "None":
- upscaler_2_name = None
-
- upscaler2 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_2_name and x.name != "None"]), None)
- assert upscaler2 or (upscaler_2_name is None), f'could not find upscaler named {upscaler_2_name}'
-
- upscaled_image = self.upscale(pp.image, pp.info, upscaler1, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- pp.info["Postprocess upscaler"] = upscaler1.name
-
- if upscaler2 and upscaler_2_visibility > 0:
- second_upscale = self.upscale(pp.image, pp.info, upscaler2, upscale_mode, upscale_by, upscale_to_width, upscale_to_height, upscale_crop)
- upscaled_image = Image.blend(upscaled_image, second_upscale, upscaler_2_visibility)
-
- pp.info["Postprocess upscaler 2"] = upscaler2.name
-
- pp.image = upscaled_image
-
- def image_changed(self):
- upscale_cache.clear()
-
-
-class ScriptPostprocessingUpscaleSimple(ScriptPostprocessingUpscale):
- name = "Simple Upscale"
- order = 900
-
- def ui(self):
- with FormRow():
- upscaler_name = gr.Dropdown(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name)
- upscale_by = gr.Slider(minimum=0.05, maximum=8.0, step=0.05, label="Upscale by", value=2)
-
- return {
- "upscale_by": upscale_by,
- "upscaler_name": upscaler_name,
- }
-
- def process(self, pp: scripts_postprocessing.PostprocessedImage, upscale_by=2.0, upscaler_name=None):
- if upscaler_name is None or upscaler_name == "None":
- return
-
- upscaler1 = next(iter([x for x in shared.sd_upscalers if x.name == upscaler_name]), None)
- assert upscaler1, f'could not find upscaler named {upscaler_name}'
-
- pp.image = self.upscale(pp.image, pp.info, upscaler1, 0, upscale_by, 0, 0, False)
- pp.info["Postprocess upscaler"] = upscaler1.name
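The Upscale script above memoizes each result under a key built from the raw image bytes plus the upscale settings, so repeated runs with the same image and settings reuse the previous output. A simplified standalone sketch of that caching idea, where `upscale_fn` is a stand-in for `upscaler.scaler.upscale`:

```python
# Simplified sketch of the result-caching idea above; upscale_fn stands in
# for upscaler.scaler.upscale and is supplied by the caller.
import numpy as np
from PIL import Image

upscale_cache = {}

def cached_upscale(image: Image.Image, scale: float, upscale_fn):
    # Key on the raw pixel bytes and the requested scale.
    key = (hash(np.array(image.getdata()).tobytes()), scale)
    result = upscale_cache.pop(key, None)
    if result is None:
        result = upscale_fn(image, scale)
    upscale_cache[key] = result  # re-insert so the entry stays most recent
    return result

img = Image.new("RGB", (64, 64))
up = cached_upscale(img, 2.0, lambda im, s: im.resize((int(im.width * s), int(im.height * s))))
print(up.size)  # (128, 128)
```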
diff --git a/spaces/bigscience/promptsource/promptsource/utils.py b/spaces/bigscience/promptsource/promptsource/utils.py
deleted file mode 100644
index ce57a2ac159508bdfb39a61188bd1031e28e6f6f..0000000000000000000000000000000000000000
--- a/spaces/bigscience/promptsource/promptsource/utils.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# coding=utf-8
-import os
-
-import datasets
-import requests
-
-from promptsource import DEFAULT_PROMPTSOURCE_CACHE_HOME
-from promptsource.templates import INCLUDED_USERS
-
-
-def removeHyphen(example):
- example_clean = {}
- for key in example.keys():
- if "-" in key:
- new_key = key.replace("-", "_")
- example_clean[new_key] = example[key]
- else:
- example_clean[key] = example[key]
- example = example_clean
- return example
-
-
-def renameDatasetColumn(dataset):
- col_names = dataset.column_names
- for cols in col_names:
- if "-" in cols:
- dataset = dataset.rename_column(cols, cols.replace("-", "_"))
- return dataset
-
-
-#
-# Helper functions for datasets library
-#
-
-
-def get_dataset_builder(path, conf=None):
- "Get a dataset builder from name and conf."
- module_path = datasets.load.dataset_module_factory(path)
- builder_cls = datasets.load.import_main_class(module_path.module_path, dataset=True)
- if conf:
- builder_instance = builder_cls(name=conf, cache_dir=None, hash=module_path.hash)
- else:
- builder_instance = builder_cls(cache_dir=None, hash=module_path.hash)
- return builder_instance
-
-
-def get_dataset(path, conf=None):
- "Get a dataset from name and conf."
- builder_instance = get_dataset_builder(path, conf)
- if builder_instance.manual_download_instructions is None and builder_instance.info.size_in_bytes is not None:
- builder_instance.download_and_prepare()
- return builder_instance.as_dataset()
- else:
- return load_dataset(path, conf)
-
-
-def load_dataset(dataset_name, subset_name):
- try:
- return datasets.load_dataset(dataset_name, subset_name)
- except datasets.builder.ManualDownloadError:
- cache_root_dir = (
- os.environ["PROMPTSOURCE_MANUAL_DATASET_DIR"]
- if "PROMPTSOURCE_MANUAL_DATASET_DIR" in os.environ
- else DEFAULT_PROMPTSOURCE_CACHE_HOME
- )
- data_dir = (
- f"{cache_root_dir}/{dataset_name}"
- if subset_name is None
- else f"{cache_root_dir}/{dataset_name}/{subset_name}"
- )
- return datasets.load_dataset(
- dataset_name,
- subset_name,
- data_dir=data_dir,
- )
-
-
-def get_dataset_confs(path):
- "Get the list of confs for a dataset."
- module_path = datasets.load.dataset_module_factory(path).module_path
- # Get dataset builder class from the processing script
- builder_cls = datasets.load.import_main_class(module_path, dataset=True)
- # Instantiate the dataset builder
- confs = builder_cls.BUILDER_CONFIGS
- if confs and len(confs) > 1:
- return confs
- return []
-
-
-def render_features(features):
- """Recursively render the dataset schema (i.e. the fields)."""
- if isinstance(features, dict):
- return {k: render_features(v) for k, v in features.items()}
- if isinstance(features, datasets.features.ClassLabel):
- return features.names
-
- if isinstance(features, datasets.features.Value):
- return features.dtype
-
- if isinstance(features, datasets.features.Sequence):
- return {"[]": render_features(features.feature)}
- return features
-
-
-#
-# Loads dataset information
-#
-
-
-def filter_english_datasets():
- """
- Filter English datasets based on language tags in metadata.
-
- Also includes the datasets of any users listed in INCLUDED_USERS
- """
- english_datasets = []
-
- response = requests.get("https://huggingface.co/api/datasets?full=true")
- tags = response.json()
-
- for dataset in tags:
- dataset_name = dataset["id"]
-
- is_community_dataset = "/" in dataset_name
- if is_community_dataset:
- user = dataset_name.split("/")[0]
- if user in INCLUDED_USERS:
- english_datasets.append(dataset_name)
- continue
-
- if "cardData" not in dataset:
- continue
- metadata = dataset["cardData"]
-
- if "language" not in metadata:
- continue
- languages = metadata["language"]
-
- if "en" in languages or "en-US" in languages:
- english_datasets.append(dataset_name)
-
- return sorted(english_datasets)
-
-
-def list_datasets():
- """Get all the datasets to work with."""
- dataset_list = filter_english_datasets()
- dataset_list.sort(key=lambda x: x.lower())
- return dataset_list
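`render_features` above flattens a datasets schema into plain Python values for display. A hedged sketch of what it returns on a small hand-built schema, assuming promptsource is installed together with a datasets version contemporary with it:

```python
# Hedged sketch of render_features on a hand-built schema; the expected output
# in the comment follows the recursion in the module above.
import datasets
from promptsource.utils import render_features

features = datasets.Features({
    "text": datasets.Value("string"),
    "label": datasets.ClassLabel(names=["neg", "pos"]),
    "tokens": datasets.Sequence(datasets.Value("string")),
})
print(render_features(features))
# {'text': 'string', 'label': ['neg', 'pos'], 'tokens': {'[]': 'string'}}
```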
diff --git a/spaces/bioriAsaeru/text-to-voice/Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser REPACK.md b/spaces/bioriAsaeru/text-to-voice/Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser REPACK.md
deleted file mode 100644
index 7748f788587eded67e8e535826c49833a50653cc..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser REPACK.md
+++ /dev/null
@@ -1,140 +0,0 @@
-
-
Dragon City Hack Tool: How to Hack Dragon City in Five Minutes or Lesser
-
-
Dragon City is a fun and addictive game where you can breed, hatch and collect dragons of different elements and rarities. You can also build your own dragon city, complete quests, join alliances and battle other players. However, the game can also be challenging and time-consuming, especially if you want to get the best dragons and resources. That's why many players are looking for a way to hack Dragon City and get unlimited gems, gold, food and other in-game items.
-
-
If you are one of them, then you are in luck. In this article, we will show you how to use a Dragon City hack tool that can help you hack Dragon City in five minutes or lesser. This hack tool is easy to use, safe and effective. You don't need to jailbreak or root your device, or download any software. All you need is a web browser and an internet connection. With this hack tool, you can get access to all the features of the game without spending a dime.
-
Dragon City Hack Tool Dragon City Hack In Five Minutes Or Lesser
Dragon City hack tool is an online generator that can generate unlimited gems, gold, food and other in-game items for your Dragon City account. You can use it on any device, whether it's PC, Android or iOS. You don't need to download anything or install anything on your device. The hack tool works by connecting to the game server and injecting the resources into your account. It's fast, easy and secure.
-
-
How to Use Dragon City Hack Tool?
-
-
Using Dragon City hack tool is very simple. Just follow these steps:
-
-
-
Go to the Dragon City hack tool website.
-
Enter your game ID or username.
-
Select your platform (PC, Android or iOS).
-
Choose the amount of gems, gold, food and other items you want to generate.
-
Click on the "Hack" button.
-
Wait for a few seconds while the hack tool does its magic.
-
Verify that you are not a robot by completing a short survey or offer.
-
Enjoy your free resources!
-
-
-
Why Use Dragon City Hack Tool?
-
-
There are many reasons why you should use Dragon City hack tool. Here are some of them:
-
-
-
You can get unlimited gems, gold, food and other items for free.
-
You can save time and money by not having to buy or earn them in the game.
-
You can unlock all the dragons and buildings in the game.
-
You can breed rare and legendary dragons with ease.
-
You can level up faster and complete quests quicker.
-
You can dominate the battles and tournaments against other players.
-
You can have more fun and enjoyment playing the game.
-
-
-
Is Dragon City Hack Tool Safe?
-
-
Yes, Dragon City hack tool is safe to use. It has been tested by thousands of users and has never caused any problems. The hack tool uses advanced encryption technology to protect your account and data from being detected or banned by the game server. It also has a proxy system that hides your IP address and location from being traced. You don't have to worry about anything when using this hack tool.
-
-
Conclusion
-
-
Dragon City is a great game that can keep you entertained for hours. However, if you want to get the most out of it, you may need some help from a Dragon City hack tool. This hack tool can help you hack Dragon City in five minutes or lesser and get unlimited gems, gold, food and other items for free. You don't need to jailbreak or root your device, or download any software. All you need is a web browser and an internet connection. With this hack tool, you can enjoy the game without any limitations or restrictions.
-
-
-
If you are interested in using this hack tool, just go to the website below and follow the instructions. You will be amazed by how easy and fast it is to hack Dragon City with this tool. Don't miss this opportunity and try it now!
Dragon City hack tool is not just a simple generator that can give you unlimited gems and gold. It also has many other features that can enhance your gaming experience and make you the best dragon master. Here are some of the features of Dragon City hack tool:
-
-
-
Instant breeding: You can breed any two dragons instantly without waiting for hours or days.
-
Instant leveling: You can level up any dragon instantly without feeding them or spending gems.
-
Unlock all islands: You can unlock all the islands in the game without completing any requirements or spending gems.
-
Unlock all dragons: You can unlock all the dragons in the game, including the rare and legendary ones.
-
Anti-ban protection: You can use the hack tool without worrying about getting banned by the game server. The hack tool has a built-in anti-ban system that prevents detection.
-
No root or jailbreak required: You don't need to root or jailbreak your device to use the hack tool. It works on any device, whether it's PC, Android or iOS.
-
-
-
How to Get Dragon City Hack Tool?
-
-
If you want to get Dragon City hack tool and enjoy all its features, you don't need to look any further. You can get it right here on this website. All you need to do is follow these simple steps:
-
-
-
Click on the "Download" button below.
-
You will be redirected to a verification page. Complete a short survey or offer to prove that you are human.
-
After verification, you will get access to the download link.
-
Download the hack tool and install it on your device.
-
Run the hack tool and enter your game ID or username.
-
Select your platform (PC, Android or iOS).
-
Choose the amount of gems, gold, food and other items you want to generate.
-
Click on the "Hack" button and wait for a few seconds.
-
Enjoy your free resources!
-
-
-
Don't miss this chance and get Dragon City hack tool now! You will be amazed by how easy and fast it is to hack Dragon City with this tool. You will be able to breed, hatch and collect all the dragons you want, build your own dragon city, complete quests, join alliances and battle other players. You will have more fun and enjoyment playing the game. So what are you waiting for? Download Dragon City hack tool now and become the best dragon master!
How to Hack Dragon City in Five Minutes or Lesser?
-
-
If you are wondering how to hack Dragon City in five minutes or lesser, then you have come to the right place. In this article, we will show you a simple and effective method that can help you get unlimited gems and gold in Dragon City without spending any money or wasting any time. This method is based on using a Dragon City hack tool that can generate resources for your account in a matter of minutes.
-
-
A Dragon City hack tool is an online generator that can connect to the game server and inject resources into your account. You don't need to download anything or install anything on your device. All you need is a web browser and an internet connection. With this hack tool, you can get access to all the features of the game without any limitations or restrictions.
-
-
What are the Steps to Hack Dragon City in Five Minutes or Lesser?
-
-
To hack Dragon City in five minutes or lesser, you just need to follow these simple steps:
-
-
-
Go to the Dragon City hack tool website by clicking on the link below.
-
Enter your game ID or username.
-
Select your platform (PC, Android or iOS).
-
Choose the amount of gems and gold you want to generate.
-
Click on the "Generate" button.
-
Wait for a few seconds while the hack tool does its work.
-
Verify that you are not a robot by completing a short survey or offer.
-
Check your game account and enjoy your free resources!
-
-
-
That's it! You have successfully hacked Dragon City in five minutes or lesser. You can now breed, hatch and collect all the dragons you want, build your own dragon city, complete quests, join alliances and battle other players. You can also repeat the process as many times as you want to get more resources whenever you need them.
-
-
Why Should You Hack Dragon City in Five Minutes or Lesser?
-
-
There are many reasons why you should hack Dragon City in five minutes or lesser. Here are some of them:
-
-
-
You can get unlimited gems and gold for free.
-
You can save time and money by not having to buy or earn them in the game.
-
You can unlock all the dragons and buildings in the game.
-
You can breed rare and legendary dragons with ease.
-
You can level up faster and complete quests quicker.
-
You can dominate the battles and tournaments against other players.
-
You can have more fun and enjoyment playing the game.
-
-
-
Is It Safe to Hack Dragon City in Five Minutes or Lesser?
-
-
Yes, it is safe to hack Dragon City in five minutes or lesser. The hack tool is tested by thousands of users and has never caused any problems. The hack tool uses advanced encryption technology to protect your account and data from being detected or banned by the game server. It also has a proxy system that hides your IP address and location from being traced. You don't have to worry about anything when using this hack tool.
-
-
Conclusion
-
-
Hacking Dragon City in five minutes or lesser is possible and easy with the help of a Dragon City hack tool. This hack tool can help you get unlimited gems and gold in Dragon City without spending any money or wasting any time. You don't need to download anything or install anything on your device. All you need is a web browser and an internet connection. With this hack tool, you can enjoy the game without any limitations or restrictions.
-
-
If you are interested in hacking Dragon City in five minutes or lesser, then don't hesitate and click on the link below. You will be amazed by how easy and fast it is to hack Dragon City with this tool. Don't miss this opportunity and try it now!
Dragon City is a fun and addictive game that can keep you entertained for hours. However, if you want to get the most out of it, you may need some help from a Dragon City hack tool. This hack tool can help you hack Dragon City in five minutes or lesser and get unlimited gems and gold for free. You don't need to jailbreak or root your device, or download any software. All you need is a web browser and an internet connection. With this hack tool, you can enjoy the game without any limitations or restrictions.
-
-
If you are interested in using this hack tool, just go to the website below and follow the instructions. You will be amazed by how easy and fast it is to hack Dragon City with this tool. Don't miss this opportunity and try it now!
3cee63e6c2
-
-
\ No newline at end of file
diff --git a/spaces/bioriAsaeru/text-to-voice/For King And Country Run Wild Mp3 Download dbpower corazon cant The Lyrics and Chords of the Song.md b/spaces/bioriAsaeru/text-to-voice/For King And Country Run Wild Mp3 Download dbpower corazon cant The Lyrics and Chords of the Song.md
deleted file mode 100644
index c1f4d99e761a6252b4a12f227442b9710b30660e..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/For King And Country Run Wild Mp3 Download dbpower corazon cant The Lyrics and Chords of the Song.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
For King And Country Run Wild Mp3 Download dbpower corazon cant
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/bioriAsaeru/text-to-voice/Get Mastery Robert Greene Pdf Download The Ultimate Book on Power Skill and Creativity.md b/spaces/bioriAsaeru/text-to-voice/Get Mastery Robert Greene Pdf Download The Ultimate Book on Power Skill and Creativity.md
deleted file mode 100644
index b799d1ad9ada25ab51e076b427c10bcadd2bd0e2..0000000000000000000000000000000000000000
--- a/spaces/bioriAsaeru/text-to-voice/Get Mastery Robert Greene Pdf Download The Ultimate Book on Power Skill and Creativity.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-16.- Written in Red: A Cyberpunk Anthology (2013) by Peter David 17.- Final Flight (2013) by Tom Jolly
-
-10.- These number by Shawn and Corina together make it possible for Corina to play Noughts and Crosses without any computer. She usually gets her results about four games out of ten
-
-11.- A day to remember (an anniversary and a final day) by Shannon Messenger. It's the day they had fallen in love (twenty-seven years ago). They are now married, have two children and live in a small town in Michigan.
-
-12.- Last Leaf by Donna Gaines. A sequel to her bestseller, Blossom, and the complete story.
-
-13.- Dolcis by Heather Duffelink and Mark Sefton. It's the tale of an adopted boy from Vancouver who is shipped off to Nova Scotia in his youth and who is adopted by a family in the town of Dolcis. They will change their name. The story is going to be published in May 2014.
-
-14.- My Perfect Husband by William C. Dietz. In this story the main character, William has just broken up with his girlfriend. He is sitting in his trailer home, in Seattle, when suddenly a gorgeous blonde woman walks up and knocks on his door. This is the start of the story.
-
-15.- Shadow and Bone by Leigh Bardugo. This book has lots of characters but the main one is the heroine, Alina Starkov. She's a girl with no money, no love and no family. But she has a talent and that is: she can kill with her feet. She must try to survive in a country where half of the people want to kill her and the other half wish they were her. It's the story of Alina's rise to power and her quest to find out why she is so special and why she must kill people for a living.
-
-16.- It's not often that I like science fiction. It's normally that kind of books that I find really dull. But somehow, I just found this story great. The story, "Don't put it down!" is about a couple of scientists working together on the finest robot on the planet. The robot is called Nomula and is built as a weapon. It was designed as a robot to work for others and to become an assassin, but the people at the top of the organisation don't want that. But the robot is programmed 4fefd39f24
-
-
-
diff --git a/spaces/bradarrML/Diffusion_Space/README.md b/spaces/bradarrML/Diffusion_Space/README.md
deleted file mode 100644
index 9dc74f62b98e9081f685e4f3f0535b37f8bf9daa..0000000000000000000000000000000000000000
--- a/spaces/bradarrML/Diffusion_Space/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-title: Diffusion Space
-emoji: 💽
-colorFrom: blue
-colorTo: pink
-sdk: gradio
-sdk_version: 3.6
-app_file: app.py
-pinned: true
-license: creativeml-openrail-m
-duplicated_from: nitrosocke/Diffusion_Space
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/breadlicker45/Muse-gen/README.md b/spaces/breadlicker45/Muse-gen/README.md
deleted file mode 100644
index 4e2e20bd9ae161c409002a091bbc020066cc5a8d..0000000000000000000000000000000000000000
--- a/spaces/breadlicker45/Muse-gen/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: Muse text gen
-emoji: ⚡
-colorFrom: blue
-colorTo: white
-sdk: streamlit
-sdk_version: 1.9.0
-app_file: app.py
-pinned: true
-duplicated_from: Pippoz/Hugging_Space
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
\ No newline at end of file
diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/postprocessing.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/postprocessing.py
deleted file mode 100644
index 84512606a43d6991df0ae1f046164eb3c70d751a..0000000000000000000000000000000000000000
--- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/modeling/postprocessing.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import torch
-from torch.nn import functional as F
-
-from detectron2.structures import Instances, ROIMasks
-
-
-# perhaps should rename to "resize_instance"
-def detector_postprocess(
- results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
-):
- """
- Resize the output instances.
- The input images are often resized when entering an object detector.
- As a result, we often need the outputs of the detector in a different
- resolution from its inputs.
-
- This function will resize the raw outputs of an R-CNN detector
- to produce outputs according to the desired output resolution.
-
- Args:
- results (Instances): the raw outputs from the detector.
- `results.image_size` contains the input image resolution the detector sees.
- This object might be modified in-place.
- output_height, output_width: the desired output resolution.
- Returns:
- Instances: the resized output from the model, based on the output resolution
- """
- if isinstance(output_width, torch.Tensor):
- # This shape might (but not necessarily) be tensors during tracing.
- # Converts integer tensors to float temporaries to ensure true
- # division is performed when computing scale_x and scale_y.
- output_width_tmp = output_width.float()
- output_height_tmp = output_height.float()
- new_size = torch.stack([output_height, output_width])
- else:
- new_size = (output_height, output_width)
- output_width_tmp = output_width
- output_height_tmp = output_height
-
- scale_x, scale_y = (
- output_width_tmp / results.image_size[1],
- output_height_tmp / results.image_size[0],
- )
- results = Instances(new_size, **results.get_fields())
-
- if results.has("pred_boxes"):
- output_boxes = results.pred_boxes
- elif results.has("proposal_boxes"):
- output_boxes = results.proposal_boxes
- else:
- output_boxes = None
- assert output_boxes is not None, "Predictions must contain boxes!"
-
- output_boxes.scale(scale_x, scale_y)
- output_boxes.clip(results.image_size)
-
- results = results[output_boxes.nonempty()]
-
- if results.has("pred_masks"):
- if isinstance(results.pred_masks, ROIMasks):
- roi_masks = results.pred_masks
- else:
- # pred_masks is a tensor of shape (N, 1, M, M)
- roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
- results.pred_masks = roi_masks.to_bitmasks(
- results.pred_boxes, output_height, output_width, mask_threshold
- ).tensor # TODO return ROIMasks/BitMask object in the future
-
- if results.has("pred_keypoints"):
- results.pred_keypoints[:, :, 0] *= scale_x
- results.pred_keypoints[:, :, 1] *= scale_y
-
- return results
-
-
-def sem_seg_postprocess(result, img_size, output_height, output_width):
- """
- Return semantic segmentation predictions in the original resolution.
-
- The input images are often resized when entering the semantic segmentor. Moreover, in some
- cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
- As a result, we often need the predictions of the segmentor in a different
- resolution from its inputs.
-
- Args:
- result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
- where C is the number of classes, and H, W are the height and width of the prediction.
- img_size (tuple): image size that segmentor is taking as input.
- output_height, output_width: the desired output resolution.
-
- Returns:
- semantic segmentation prediction (Tensor): A tensor of the shape
- (C, output_height, output_width) that contains per-pixel soft predictions.
- """
- result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
- result = F.interpolate(
- result, size=(output_height, output_width), mode="bilinear", align_corners=False
- )[0]
- return result
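`detector_postprocess` above only rescales an `Instances` object from the resolution the detector saw to the requested output resolution. A hedged sketch with made-up boxes, assuming detectron2 is installed:

```python
# Hedged sketch of detector_postprocess: rescale boxes predicted on a resized
# 400x600 image back to an 800x1200 original. The box and score are made up.
import torch
from detectron2.structures import Boxes, Instances
from detectron2.modeling.postprocessing import detector_postprocess

results = Instances((400, 600))  # resolution the detector saw
results.pred_boxes = Boxes(torch.tensor([[10., 20., 110., 220.]]))
results.scores = torch.tensor([0.9])
results.pred_classes = torch.tensor([0])

resized = detector_postprocess(results, output_height=800, output_width=1200)
print(resized.pred_boxes.tensor)  # the box scaled by 2x in both dimensions
```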
diff --git a/spaces/bugbugbug/vits-uma-genshin-honkai/Docker/Dockerfile b/spaces/bugbugbug/vits-uma-genshin-honkai/Docker/Dockerfile
deleted file mode 100644
index 4d39cdf02a2ec151686cc1d61234bf723068fed8..0000000000000000000000000000000000000000
--- a/spaces/bugbugbug/vits-uma-genshin-honkai/Docker/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM python:3.9-bullseye
-VOLUME ["/app"]
-WORKDIR /app
-# Set apt to Chinese mirror
-RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
-RUN apt-get update && apt-get -y install cmake git
-RUN git clone https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai
-WORKDIR /app/vits-uma-genshin-honkai
-RUN sed -i "s/\.launch()/\.launch(server_name=\"0.0.0.0\")/" /app/vits-uma-genshin-honkai/app.py
-ADD vits.sh /app/vits.sh
-EXPOSE 7860
-ENTRYPOINT [ "/app/vits.sh" ]
\ No newline at end of file
diff --git a/spaces/captainChan/CaptainChan/utils.py b/spaces/captainChan/CaptainChan/utils.py
deleted file mode 100644
index 1b7b5db1bc1dd191191c31b3e72228ccd1c4f7a1..0000000000000000000000000000000000000000
--- a/spaces/captainChan/CaptainChan/utils.py
+++ /dev/null
@@ -1,304 +0,0 @@
-import logging
-import os
-import time
-
-import cv2
-import numpy as np
-import torch
-import yaml
-from matplotlib import colors
-from matplotlib import pyplot as plt
-from torch import Tensor, nn
-from torch.utils.data import ConcatDataset
-
-class CharsetMapper(object):
- """A simple class to map ids into strings.
-
- It works only when the character set is a 1:1 mapping between individual
- characters and individual ids.
- """
-
- def __init__(self,
- filename='',
- max_length=30,
- null_char=u'\u2591'):
- """Creates a lookup table.
-
- Args:
- filename: Path to charset file which maps characters to ids.
- max_sequence_length: The max length of ids and string.
- max_length: The max length of ids and string.
- null_char: A unicode character used to replace the '<null>' character.
- """
- self.null_char = null_char
- self.max_length = max_length
-
- self.label_to_char = self._read_charset(filename)
- self.char_to_label = dict(map(reversed, self.label_to_char.items()))
- self.num_classes = len(self.label_to_char)
-
- def _read_charset(self, filename):
- """Reads a charset definition from a tab separated text file.
-
- Args:
- filename: a path to the charset file.
-
- Returns:
- a dictionary with keys equal to character codes and values - unicode
- characters.
- """
- import re
- pattern = re.compile(r'(\d+)\t(.+)')
- charset = {}
- self.null_label = 0
- charset[self.null_label] = self.null_char
- with open(filename, 'r') as f:
- for i, line in enumerate(f):
- m = pattern.match(line)
- assert m, f'Incorrect charset file. line #{i}: {line}'
- label = int(m.group(1)) + 1
- char = m.group(2)
- charset[label] = char
- return charset
-
- def trim(self, text):
- assert isinstance(text, str)
- return text.replace(self.null_char, '')
-
- def get_text(self, labels, length=None, padding=True, trim=False):
- """ Returns a string corresponding to a sequence of character ids.
- """
- length = length if length else self.max_length
- labels = [l.item() if isinstance(l, Tensor) else int(l) for l in labels]
- if padding:
- labels = labels + [self.null_label] * (length-len(labels))
- text = ''.join([self.label_to_char[label] for label in labels])
- if trim: text = self.trim(text)
- return text
-
- def get_labels(self, text, length=None, padding=True, case_sensitive=False):
- """ Returns the labels of the corresponding text.
- """
- length = length if length else self.max_length
- if padding:
- text = text + self.null_char * (length - len(text))
- if not case_sensitive:
- text = text.lower()
- labels = [self.char_to_label[char] for char in text]
- return labels
-
- def pad_labels(self, labels, length=None):
- length = length if length else self.max_length
-
- return labels + [self.null_label] * (length - len(labels))
-
- @property
- def digits(self):
- return '0123456789'
-
- @property
- def digit_labels(self):
- return self.get_labels(self.digits, padding=False)
-
- @property
- def alphabets(self):
- all_chars = list(self.char_to_label.keys())
- valid_chars = []
- for c in all_chars:
- if c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
- valid_chars.append(c)
- return ''.join(valid_chars)
-
- @property
- def alphabet_labels(self):
- return self.get_labels(self.alphabets, padding=False)
-
-
-class Timer(object):
- """A simple timer."""
- def __init__(self):
- self.data_time = 0.
- self.data_diff = 0.
- self.data_total_time = 0.
- self.data_call = 0
- self.running_time = 0.
- self.running_diff = 0.
- self.running_total_time = 0.
- self.running_call = 0
-
- def tic(self):
- self.start_time = time.time()
- self.running_time = self.start_time
-
- def toc_data(self):
- self.data_time = time.time()
- self.data_diff = self.data_time - self.running_time
- self.data_total_time += self.data_diff
- self.data_call += 1
-
- def toc_running(self):
- self.running_time = time.time()
- self.running_diff = self.running_time - self.data_time
- self.running_total_time += self.running_diff
- self.running_call += 1
-
- def total_time(self):
- return self.data_total_time + self.running_total_time
-
- def average_time(self):
- return self.average_data_time() + self.average_running_time()
-
- def average_data_time(self):
- return self.data_total_time / (self.data_call or 1)
-
- def average_running_time(self):
- return self.running_total_time / (self.running_call or 1)
-
-
-class Logger(object):
- _handle = None
- _root = None
-
- @staticmethod
- def init(output_dir, name, phase):
- format = '[%(asctime)s %(filename)s:%(lineno)d %(levelname)s {}] ' \
- '%(message)s'.format(name)
- logging.basicConfig(level=logging.INFO, format=format)
-
- try: os.makedirs(output_dir)
- except: pass
- config_path = os.path.join(output_dir, f'{phase}.txt')
- Logger._handle = logging.FileHandler(config_path)
- Logger._root = logging.getLogger()
-
- @staticmethod
- def enable_file():
- if Logger._handle is None or Logger._root is None:
- raise Exception('Invoke Logger.init() first!')
- Logger._root.addHandler(Logger._handle)
-
- @staticmethod
- def disable_file():
- if Logger._handle is None or Logger._root is None:
- raise Exception('Invoke Logger.init() first!')
- Logger._root.removeHandler(Logger._handle)
-
-
-class Config(object):
-
- def __init__(self, config_path, host=True):
- def __dict2attr(d, prefix=''):
- for k, v in d.items():
- if isinstance(v, dict):
- __dict2attr(v, f'{prefix}{k}_')
- else:
- if k == 'phase':
- assert v in ['train', 'test']
- if k == 'stage':
- assert v in ['pretrain-vision', 'pretrain-language',
- 'train-semi-super', 'train-super']
- self.__setattr__(f'{prefix}{k}', v)
-
- assert os.path.exists(config_path), '%s does not exists!' % config_path
- with open(config_path) as file:
- config_dict = yaml.load(file, Loader=yaml.FullLoader)
- with open('configs/template.yaml') as file:
- default_config_dict = yaml.load(file, Loader=yaml.FullLoader)
- __dict2attr(default_config_dict)
- __dict2attr(config_dict)
- self.global_workdir = os.path.join(self.global_workdir, self.global_name)
-
- def __getattr__(self, item):
- attr = self.__dict__.get(item)
- if attr is None:
- attr = dict()
- prefix = f'{item}_'
- for k, v in self.__dict__.items():
- if k.startswith(prefix):
- n = k.replace(prefix, '')
- attr[n] = v
- return attr if len(attr) > 0 else None
- else:
- return attr
-
- def __repr__(self):
- str = 'ModelConfig(\n'
- for i, (k, v) in enumerate(sorted(vars(self).items())):
- str += f'\t({i}): {k} = {v}\n'
- str += ')'
- return str
-
-def blend_mask(image, mask, alpha=0.5, cmap='jet', color='b', color_alpha=1.0):
- # normalize mask
- mask = (mask-mask.min()) / (mask.max() - mask.min() + np.finfo(float).eps)
- if mask.shape != image.shape:
- mask = cv2.resize(mask,(image.shape[1], image.shape[0]))
- # get color map
- color_map = plt.get_cmap(cmap)
- mask = color_map(mask)[:,:,:3]
- # convert float to uint8
- mask = (mask * 255).astype(dtype=np.uint8)
-
- # set the basic color
- basic_color = np.array(colors.to_rgb(color)) * 255
- basic_color = np.tile(basic_color, [image.shape[0], image.shape[1], 1])
- basic_color = basic_color.astype(dtype=np.uint8)
- # blend with basic color
- blended_img = cv2.addWeighted(image, color_alpha, basic_color, 1-color_alpha, 0)
- # blend with mask
- blended_img = cv2.addWeighted(blended_img, alpha, mask, 1-alpha, 0)
-
- return blended_img
-
-def onehot(label, depth, device=None):
- """
- Args:
- label: shape (n1, n2, ..., )
- depth: a scalar
-
- Returns:
- onehot: (n1, n2, ..., depth)
- """
- if not isinstance(label, torch.Tensor):
- label = torch.tensor(label, device=device)
- onehot = torch.zeros(label.size() + torch.Size([depth]), device=device)
- onehot = onehot.scatter_(-1, label.unsqueeze(-1), 1)
-
- return onehot
-
-class MyDataParallel(nn.DataParallel):
-
- def gather(self, outputs, target_device):
- r"""
- Gathers tensors from different GPUs on a specified device
- (-1 means the CPU).
- """
- def gather_map(outputs):
- out = outputs[0]
- if isinstance(out, (str, int, float)):
- return out
- if isinstance(out, list) and isinstance(out[0], str):
- return [o for out in outputs for o in out]
- if isinstance(out, torch.Tensor):
- return torch.nn.parallel._functions.Gather.apply(target_device, self.dim, *outputs)
- if out is None:
- return None
- if isinstance(out, dict):
- if not all((len(out) == len(d) for d in outputs)):
- raise ValueError('All dicts must have the same number of keys')
- return type(out)(((k, gather_map([d[k] for d in outputs]))
- for k in out))
- return type(out)(map(gather_map, zip(*outputs)))
-
- # Recursive function calls like this create reference cycles.
- # Setting the function to None clears the refcycle.
- try:
- res = gather_map(outputs)
- finally:
- gather_map = None
- return res
-
-
-class MyConcatDataset(ConcatDataset):
- def __getattr__(self, k):
- return getattr(self.datasets[0], k)
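`CharsetMapper` above reads a tab-separated "id<TAB>char" file and reserves label 0 for the null character. A hedged round-trip sketch, assuming the class above is in scope; the charset file name and its three entries are illustrative only:

```python
# Hedged round-trip sketch for CharsetMapper; assumes the class defined above
# is importable. The file and entries below are made up for illustration.
with open('tiny_charset.txt', 'w') as f:
    f.write('0\ta\n1\tb\n2\tc\n')        # label 0 stays reserved for the null char

charset = CharsetMapper('tiny_charset.txt', max_length=8)
labels = charset.get_labels('cab')        # [3, 1, 2, 0, 0, 0, 0, 0]
print(charset.get_text(labels, trim=True))  # 'cab'
```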
diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/tracking/__init__.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/tracking/__init__.py
deleted file mode 100644
index 21078ae822b04b71dbd8b056b5993d173eaf6bff..0000000000000000000000000000000000000000
--- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/detectron2/tracking/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-from .base_tracker import ( # noqa
- BaseTracker,
- build_tracker_head,
- TRACKER_HEADS_REGISTRY,
-)
-from .bbox_iou_tracker import BBoxIOUTracker # noqa
-from .hungarian_tracker import BaseHungarianTracker # noqa
-from .iou_weighted_hungarian_bbox_iou_tracker import ( # noqa
- IOUWeightedHungarianBBoxIOUTracker,
-)
-from .utils import create_prediction_pairs # noqa
-from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker # noqa
-
-__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/spaces/ccolas/TastyPiano/src/cocktails/representation_learning/run.py b/spaces/ccolas/TastyPiano/src/cocktails/representation_learning/run.py
deleted file mode 100644
index a1278ac80039d25130b4c05bc5670bcfe197d13a..0000000000000000000000000000000000000000
--- a/spaces/ccolas/TastyPiano/src/cocktails/representation_learning/run.py
+++ /dev/null
@@ -1,557 +0,0 @@
-import torch; torch.manual_seed(0)
-import torch.utils
-from torch.utils.data import DataLoader
-import torch.distributions
-import torch.nn as nn
-import matplotlib.pyplot as plt; plt.rcParams['figure.dpi'] = 200
-from src.cocktails.representation_learning.dataset import MyDataset, get_representation_from_ingredient, get_max_n_ingredients
-import json
-import pandas as pd
-import numpy as np
-import os
-from src.cocktails.representation_learning.vae_model import get_vae_model
-from src.cocktails.config import COCKTAILS_CSV_DATA, FULL_COCKTAIL_REP_PATH, EXPERIMENT_PATH
-from src.cocktails.utilities.cocktail_utilities import get_bunch_of_rep_keys
-from src.cocktails.utilities.ingredients_utilities import ingredient_profiles
-from resource import getrusage
-from resource import RUSAGE_SELF
-import gc
-gc.collect(2)
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-def get_params():
- data = pd.read_csv(COCKTAILS_CSV_DATA)
- max_ingredients, ingredient_set, liquor_set, liqueur_set = get_max_n_ingredients(data)
- num_ingredients = len(ingredient_set)
- rep_keys = get_bunch_of_rep_keys()['custom']
- ing_keys = [k.split(' ')[1] for k in rep_keys]
- ing_keys.remove('volume')
- nb_ing_categories = len(set(ingredient_profiles['type']))
- category_encodings = dict(zip(sorted(set(ingredient_profiles['type'])), np.eye(nb_ing_categories)))
-
- params = dict(trial_id='test',
- save_path=EXPERIMENT_PATH + "/deepset_vae/",
- nb_epochs=2000,
- print_every=50,
- plot_every=100,
- batch_size=64,
- lr=0.001,
- dropout=0.,
- nb_epoch_switch_beta=600,
- latent_dim=10,
- beta_vae=0.2,
- ing_keys=ing_keys,
- nb_ingredients=len(ingredient_set),
- hidden_dims_ingredients=[128],
- hidden_dims_cocktail=[32],
- hidden_dims_decoder=[32],
- agg='mean',
- activation='relu',
- auxiliaries_dict=dict(categories=dict(weight=0, type='classif', final_activ=None, dim_output=len(set(data['subcategory']))),
- glasses=dict(weight=0, type='classif', final_activ=None, dim_output=len(set(data['glass']))),
- prep_type=dict(weight=0, type='classif', final_activ=None, dim_output=len(set(data['category']))),
- cocktail_reps=dict(weight=0, type='regression', final_activ=None, dim_output=13),
- volume=dict(weight=0, type='regression', final_activ='relu', dim_output=1),
- taste_reps=dict(weight=0, type='regression', final_activ='relu', dim_output=2),
- ingredients_presence=dict(weight=0, type='multiclassif', final_activ=None, dim_output=num_ingredients)),
- category_encodings=category_encodings
- )
- # params = dict(trial_id='test',
- # save_path=EXPERIMENT_PATH + "/deepset_vae/",
- # nb_epochs=1000,
- # print_every=50,
- # plot_every=100,
- # batch_size=64,
- # lr=0.001,
- # dropout=0.,
- # nb_epoch_switch_beta=500,
- # latent_dim=64,
- # beta_vae=0.3,
- # ing_keys=ing_keys,
- # nb_ingredients=len(ingredient_set),
- # hidden_dims_ingredients=[128],
- # hidden_dims_cocktail=[128, 128],
- # hidden_dims_decoder=[128, 128],
- # agg='mean',
- # activation='mish',
- # auxiliaries_dict=dict(categories=dict(weight=0.5, type='classif', final_activ=None, dim_output=len(set(data['subcategory']))),
- # glasses=dict(weight=0.03, type='classif', final_activ=None, dim_output=len(set(data['glass']))),
- # prep_type=dict(weight=0.02, type='classif', final_activ=None, dim_output=len(set(data['category']))),
- # cocktail_reps=dict(weight=1, type='regression', final_activ=None, dim_output=13),
- # volume=dict(weight=1, type='regression', final_activ='relu', dim_output=1),
- # taste_reps=dict(weight=1, type='regression', final_activ='relu', dim_output=2),
- # ingredients_presence=dict(weight=1.5, type='multiclassif', final_activ=None, dim_output=num_ingredients)),
- # category_encodings=category_encodings
- # )
- water_rep, indexes_to_normalize = get_representation_from_ingredient(ingredients=['water'], quantities=[1],
- max_q_per_ing=dict(zip(ingredient_set, [1] * num_ingredients)), index=0,
- params=params)
- dim_rep_ingredient = water_rep.size
- params['indexes_ing_to_normalize'] = indexes_to_normalize
- params['deepset_latent_dim'] = dim_rep_ingredient * max_ingredients
- params['input_dim'] = dim_rep_ingredient
- params['dim_rep_ingredient'] = dim_rep_ingredient
- params = compute_expe_name_and_save_path(params)
- del params['category_encodings'] # to dump
- with open(params['save_path'] + 'params.json', 'w') as f:
- json.dump(params, f)
-
- params = complete_params(params)
- return params
-
-def complete_params(params):
- data = pd.read_csv(COCKTAILS_CSV_DATA)
- cocktail_reps = np.loadtxt(FULL_COCKTAIL_REP_PATH)
- nb_ing_categories = len(set(ingredient_profiles['type']))
- category_encodings = dict(zip(sorted(set(ingredient_profiles['type'])), np.eye(nb_ing_categories)))
- params['cocktail_reps'] = cocktail_reps
- params['raw_data'] = data
- params['category_encodings'] = category_encodings
- return params
-
-def compute_losses_and_accuracies(loss_functions, auxiliaries, auxiliaries_str, outputs, data):
- losses = dict()
- accuracies = dict()
- other_metrics = dict()
- for i_k, k in enumerate(auxiliaries_str):
- # get ground truth
- # compute loss
- if k == 'volume':
- outputs[i_k] = outputs[i_k].flatten()
- ground_truth = auxiliaries[k]
- if ground_truth.dtype == torch.float64:
- losses[k] = loss_functions[k](outputs[i_k], ground_truth.float()).float()
- elif ground_truth.dtype == torch.int64:
- if str(loss_functions[k]) != "BCEWithLogitsLoss()":
- losses[k] = loss_functions[k](outputs[i_k].float(), ground_truth.long()).float()
- else:
- losses[k] = loss_functions[k](outputs[i_k].float(), ground_truth.float()).float()
- else:
- losses[k] = loss_functions[k](outputs[i_k], ground_truth).float()
- # compute accuracies
- if str(loss_functions[k]) == 'CrossEntropyLoss()':
- bs, n_options = outputs[i_k].shape
- predicted = outputs[i_k].argmax(dim=1).detach().numpy()
- true = ground_truth.int().detach().numpy()
- confusion_matrix = np.zeros([n_options, n_options])
- for i in range(bs):
- confusion_matrix[true[i], predicted[i]] += 1
- acc = confusion_matrix.diagonal().sum() / bs
- for i in range(n_options):
- if confusion_matrix[i].sum() != 0:
- confusion_matrix[i] /= confusion_matrix[i].sum()
- other_metrics[k + '_confusion'] = confusion_matrix
- accuracies[k] = np.mean(outputs[i_k].argmax(dim=1).detach().numpy() == ground_truth.int().detach().numpy())
- assert abs(acc - accuracies[k]) < 1e-5
-
- elif str(loss_functions[k]) == 'BCEWithLogitsLoss()':
- assert k == 'ingredients_presence'
- outputs_rescaled = outputs[i_k].detach().numpy() * data.dataset.std_ing_quantities + data.dataset.mean_ing_quantities
- predicted_presence = (outputs_rescaled > 0).astype(bool)
- presence = ground_truth.detach().numpy().astype(bool)
- other_metrics[k + '_false_positive'] = np.mean(np.logical_and(predicted_presence.astype(bool), ~presence.astype(bool)))
- other_metrics[k + '_false_negative'] = np.mean(np.logical_and(~predicted_presence.astype(bool), presence.astype(bool)))
- accuracies[k] = np.mean(predicted_presence == presence) # accuracy for multi-label presence prediction
- elif str(loss_functions[k]) == 'MSELoss()':
- accuracies[k] = np.nan
- else:
- raise ValueError
- return losses, accuracies, other_metrics
-
-def compute_metric_output(aux_other_metrics, data, ingredient_quantities, x_hat):
- ing_q = ingredient_quantities.detach().numpy() * data.dataset.std_ing_quantities + data.dataset.mean_ing_quantities
- ing_presence = (ing_q > 0)
- x_hat = x_hat.detach().numpy() * data.dataset.std_ing_quantities + data.dataset.mean_ing_quantities
- # abs_diff = np.abs(ing_q - x_hat) * data.dataset.max_ing_quantities
- abs_diff = np.abs(ing_q - x_hat)
- ing_q_abs_loss_when_present, ing_q_abs_loss_when_absent = [], []
- for i in range(ingredient_quantities.shape[0]):
- ing_q_abs_loss_when_present.append(np.mean(abs_diff[i, np.where(ing_presence[i])]))
- ing_q_abs_loss_when_absent.append(np.mean(abs_diff[i, np.where(~ing_presence[i])]))
- aux_other_metrics['ing_q_abs_loss_when_present'] = np.mean(ing_q_abs_loss_when_present)
- aux_other_metrics['ing_q_abs_loss_when_absent'] = np.mean(ing_q_abs_loss_when_absent)
- return aux_other_metrics
-
-def run_epoch(opt, train, model, data, loss_functions, weights, params):
- if train:
- model.train()
- else:
- model.eval()
-
- # prepare logging of losses
- losses = dict(kld_loss=[],
- mse_loss=[],
- vae_loss=[],
- volume_loss=[],
- global_loss=[])
- accuracies = dict()
- other_metrics = dict()
- for aux in params['auxiliaries_dict'].keys():
- losses[aux] = []
- accuracies[aux] = []
- if train: opt.zero_grad()
-
- for d in data:
- nb_ingredients = d[0]
- batch_size = nb_ingredients.shape[0]
- x_ingredients = d[1].float()
- ingredient_quantities = d[2]
- cocktail_reps = d[3]
- auxiliaries = d[4]
- for k in auxiliaries.keys():
- if auxiliaries[k].dtype == torch.float64: auxiliaries[k] = auxiliaries[k].float()
- taste_valid = d[-1]
- x = x_ingredients.to(device)
- x_hat, z, mean, log_var, outputs, auxiliaries_str = model.forward_direct(ingredient_quantities.float())
- # get auxiliary losses and accuracies
- aux_losses, aux_accuracies, aux_other_metrics = compute_losses_and_accuracies(loss_functions, auxiliaries, auxiliaries_str, outputs, data)
-
- # compute vae loss
- mse_loss = ((ingredient_quantities - x_hat) ** 2).mean().float()
- kld_loss = torch.mean(-0.5 * torch.sum(1 + log_var - mean ** 2 - log_var.exp(), dim=1)).float()
- vae_loss = mse_loss + params['beta_vae'] * (params['latent_dim'] / params['nb_ingredients']) * kld_loss
- # compute total volume loss to train decoder
- # volume_loss = ((ingredient_quantities.sum(dim=1) - x_hat.sum(dim=1)) ** 2).mean().float()
- volume_loss = torch.FloatTensor([0])
-
- aux_other_metrics = compute_metric_output(aux_other_metrics, data, ingredient_quantities, x_hat)
-
- indexes_taste_valid = np.argwhere(taste_valid.detach().numpy()).flatten()
- if indexes_taste_valid.size > 0:
- outputs_taste = model.get_auxiliary(z[indexes_taste_valid], aux_str='taste_reps')
- gt = auxiliaries['taste_reps'][indexes_taste_valid]
- factor_loss = indexes_taste_valid.size / (0.3 * batch_size)  # loss scaling factor: equals 1 when the batch holds the dataset's ratio of taste-labeled samples (~30%); it shrinks with fewer labeled samples and grows with more
- aux_losses['taste_reps'] = (loss_functions['taste_reps'](outputs_taste, gt) * factor_loss).float()
- else:
- aux_losses['taste_reps'] = torch.FloatTensor([0]).reshape([])
- aux_accuracies['taste_reps'] = 0
-
- # aggregate losses
- global_loss = torch.sum(torch.cat([torch.atleast_1d(vae_loss), torch.atleast_1d(volume_loss)] + [torch.atleast_1d(aux_losses[k] * weights[k]) for k in params['auxiliaries_dict'].keys()]))
- # for k in params['auxiliaries_dict'].keys():
- # global_loss += aux_losses[k] * weights[k]
-
- if train:
- global_loss.backward()
- opt.step()
- opt.zero_grad()
-
- # logging
- losses['global_loss'].append(float(global_loss))
- losses['mse_loss'].append(float(mse_loss))
- losses['vae_loss'].append(float(vae_loss))
- losses['volume_loss'].append(float(volume_loss))
- losses['kld_loss'].append(float(kld_loss))
- for k in params['auxiliaries_dict'].keys():
- losses[k].append(float(aux_losses[k]))
- accuracies[k].append(float(aux_accuracies[k]))
- for k in aux_other_metrics.keys():
- if k not in other_metrics.keys():
- other_metrics[k] = [aux_other_metrics[k]]
- else:
- other_metrics[k].append(aux_other_metrics[k])
-
- for k in losses.keys():
- losses[k] = np.mean(losses[k])
- for k in accuracies.keys():
- accuracies[k] = np.mean(accuracies[k])
- for k in other_metrics.keys():
- other_metrics[k] = np.mean(other_metrics[k], axis=0)
- return model, losses, accuracies, other_metrics
-
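The VAE objective assembled in `run_epoch` above pairs a reconstruction MSE with the closed-form KL divergence between a diagonal Gaussian posterior and a standard normal prior, with the KL term scaled by `beta_vae * latent_dim / nb_ingredients`. A minimal standalone sketch of that computation (tensor shapes are made up for illustration):

```python
import torch

def beta_vae_loss(x, x_hat, mean, log_var, beta, latent_dim, nb_ingredients):
    # reconstruction term: mean squared error over the whole batch
    mse = ((x - x_hat) ** 2).mean()
    # KL(q(z|x) || N(0, I)) in closed form for a diagonal Gaussian posterior
    kld = torch.mean(-0.5 * torch.sum(1 + log_var - mean ** 2 - log_var.exp(), dim=1))
    # same rescaling of the KL weight as in run_epoch
    return mse + beta * (latent_dim / nb_ingredients) * kld

# toy usage with hypothetical dimensions (8 cocktails, 40 ingredients, latent size 10)
x, x_hat = torch.randn(8, 40), torch.randn(8, 40)
mean, log_var = torch.zeros(8, 10), torch.zeros(8, 10)
loss = beta_vae_loss(x, x_hat, mean, log_var, beta=0.2, latent_dim=10, nb_ingredients=40)
```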
-def prepare_data_and_loss(params):
- train_data = MyDataset(split='train', params=params)
- test_data = MyDataset(split='test', params=params)
-
- train_data_loader = DataLoader(train_data, batch_size=params['batch_size'], shuffle=True)
- test_data_loader = DataLoader(test_data, batch_size=params['batch_size'], shuffle=True)
-
- loss_functions = dict()
- weights = dict()
- for k in sorted(params['auxiliaries_dict'].keys()):
- if params['auxiliaries_dict'][k]['type'] == 'classif':
- if k == 'glasses':
- classif_weights = train_data.glasses_weights
- elif k == 'prep_type':
- classif_weights = train_data.prep_types_weights
- elif k == 'categories':
- classif_weights = train_data.categories_weights
- else:
- raise ValueError
- loss_functions[k] = nn.CrossEntropyLoss(torch.FloatTensor(classif_weights))
- elif params['auxiliaries_dict'][k]['type'] == 'multiclassif':
- loss_functions[k] = nn.BCEWithLogitsLoss()
- elif params['auxiliaries_dict'][k]['type'] == 'regression':
- loss_functions[k] = nn.MSELoss()
- else:
- raise ValueError
- weights[k] = params['auxiliaries_dict'][k]['weight']
-
-
- return loss_functions, train_data_loader, test_data_loader, weights
-
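`prepare_data_and_loss` relies on class weights exposed by the dataset (`glasses_weights`, `prep_types_weights`, `categories_weights`) to counter class imbalance in the auxiliary classifiers. A small sketch of one common way to derive such weights from label counts and hand them to `nn.CrossEntropyLoss` (the label array and the inverse-frequency scheme are illustrative assumptions, not necessarily what `MyDataset` does):

```python
import numpy as np
import torch
from torch import nn

labels = np.array([0, 0, 0, 1, 2, 2])            # hypothetical class labels
counts = np.bincount(labels)
weights = counts.sum() / (len(counts) * counts)  # inverse-frequency class weights

criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(weights))
logits = torch.randn(len(labels), len(counts))   # dummy model outputs
loss = criterion(logits, torch.LongTensor(labels))
```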
-def print_losses(train, losses, accuracies, other_metrics):
- keyword = 'Train' if train else 'Eval'
- print(f'\t{keyword} logs:')
- keys = ['global_loss', 'vae_loss', 'mse_loss', 'kld_loss', 'volume_loss']
- for k in keys:
- print(f'\t\t{k} - Loss: {losses[k]:.2f}')
- for k in sorted(accuracies.keys()):
- print(f'\t\t{k} (aux) - Loss: {losses[k]:.2f}, Acc: {accuracies[k]:.2f}')
- for k in sorted(other_metrics.keys()):
- if 'confusion' not in k:
- print(f'\t\t{k} - {other_metrics[k]:.2f}')
-
-
-def run_experiment(params, verbose=True):
- loss_functions, train_data_loader, test_data_loader, weights = prepare_data_and_loss(params)
- params['filter_decoder_output'] = train_data_loader.dataset.filter_decoder_output
-
- model_params = [params[k] for k in ["input_dim", "deepset_latent_dim", "hidden_dims_ingredients", "activation",
- "hidden_dims_cocktail", "hidden_dims_decoder", "nb_ingredients", "latent_dim", "agg", "dropout", "auxiliaries_dict",
- "filter_decoder_output"]]
- model = get_vae_model(*model_params)
- opt = torch.optim.AdamW(model.parameters(), lr=params['lr'])
-
-
- all_train_losses = []
- all_eval_losses = []
- all_train_accuracies = []
- all_eval_accuracies = []
- all_eval_other_metrics = []
- all_train_other_metrics = []
- best_loss = np.inf
- model, eval_losses, eval_accuracies, eval_other_metrics = run_epoch(opt=opt, train=False, model=model, data=test_data_loader, loss_functions=loss_functions,
- weights=weights, params=params)
- all_eval_losses.append(eval_losses)
- all_eval_accuracies.append(eval_accuracies)
- all_eval_other_metrics.append(eval_other_metrics)
- if verbose: print(f'\n--------\nEpoch #0')
- if verbose: print_losses(train=False, accuracies=eval_accuracies, losses=eval_losses, other_metrics=eval_other_metrics)
- for epoch in range(params['nb_epochs']):
- if verbose and (epoch + 1) % params['print_every'] == 0: print(f'\n--------\nEpoch #{epoch+1}')
- model, train_losses, train_accuracies, train_other_metrics = run_epoch(opt=opt, train=True, model=model, data=train_data_loader, loss_functions=loss_functions,
- weights=weights, params=params)
- if verbose and (epoch + 1) % params['print_every'] == 0: print_losses(train=True, accuracies=train_accuracies, losses=train_losses, other_metrics=train_other_metrics)
- model, eval_losses, eval_accuracies, eval_other_metrics = run_epoch(opt=opt, train=False, model=model, data=test_data_loader, loss_functions=loss_functions,
- weights=weights, params=params)
- if verbose and (epoch + 1) % params['print_every'] == 0: print_losses(train=False, accuracies=eval_accuracies, losses=eval_losses, other_metrics=eval_other_metrics)
- if eval_losses['global_loss'] < best_loss:
- best_loss = eval_losses['global_loss']
- if verbose: print(f'Saving new best model with loss {best_loss:.2f}')
- torch.save(model.state_dict(), params['save_path'] + f'checkpoint_best.save')
-
- # log
- all_train_losses.append(train_losses)
- all_train_accuracies.append(train_accuracies)
- all_eval_losses.append(eval_losses)
- all_eval_accuracies.append(eval_accuracies)
- all_eval_other_metrics.append(eval_other_metrics)
- all_train_other_metrics.append(train_other_metrics)
-
- # if epoch == params['nb_epoch_switch_beta']:
- # params['beta_vae'] = 2.5
- # params['auxiliaries_dict']['prep_type']['weight'] /= 10
- # params['auxiliaries_dict']['glasses']['weight'] /= 10
-
- if (epoch + 1) % params['plot_every'] == 0:
-
- plot_results(all_train_losses, all_train_accuracies, all_train_other_metrics,
- all_eval_losses, all_eval_accuracies, all_eval_other_metrics, params['plot_path'], weights)
-
- return model
-
-def plot_results(all_train_losses, all_train_accuracies, all_train_other_metrics,
- all_eval_losses, all_eval_accuracies, all_eval_other_metrics, plot_path, weights):
-
- steps = np.arange(len(all_eval_accuracies))
-
- loss_keys = sorted(all_train_losses[0].keys())
- acc_keys = sorted(all_train_accuracies[0].keys())
- metrics_keys = sorted(all_train_other_metrics[0].keys())
-
- plt.figure()
- plt.title('Train losses')
- for k in loss_keys:
- factor = 1 if k == 'mse_loss' else 1
- if k not in weights.keys():
- plt.plot(steps[1:], [train_loss[k] * factor for train_loss in all_train_losses], label=k)
- else:
- if weights[k] != 0:
- plt.plot(steps[1:], [train_loss[k] * factor for train_loss in all_train_losses], label=k)
-
- plt.legend()
- plt.ylim([0, 4])
- plt.savefig(plot_path + 'train_losses.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Train accuracies')
- for k in acc_keys:
- if weights[k] != 0:
- plt.plot(steps[1:], [train_acc[k] for train_acc in all_train_accuracies], label=k)
- plt.legend()
- plt.ylim([0, 1])
- plt.savefig(plot_path + 'train_acc.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Train other metrics')
- for k in metrics_keys:
- if 'confusion' not in k and 'presence' in k:
- plt.plot(steps[1:], [train_metric[k] for train_metric in all_train_other_metrics], label=k)
- plt.legend()
- plt.ylim([0, 1])
- plt.savefig(plot_path + 'train_ing_presence_errors.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Train other metrics')
- for k in metrics_keys:
- if 'confusion' not in k and 'presence' not in k:
- plt.plot(steps[1:], [train_metric[k] for train_metric in all_train_other_metrics], label=k)
- plt.legend()
- plt.savefig(plot_path + 'train_ing_q_error.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Eval losses')
- for k in loss_keys:
- factor = 1 if k == 'mse_loss' else 1
- if k not in weights.keys():
- plt.plot(steps, [eval_loss[k] * factor for eval_loss in all_eval_losses], label=k)
- else:
- if weights[k] != 0:
- plt.plot(steps, [eval_loss[k] * factor for eval_loss in all_eval_losses], label=k)
- plt.legend()
- plt.ylim([0, 4])
- plt.savefig(plot_path + 'eval_losses.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Eval accuracies')
- for k in acc_keys:
- if weights[k] != 0:
- plt.plot(steps, [eval_acc[k] for eval_acc in all_eval_accuracies], label=k)
- plt.legend()
- plt.ylim([0, 1])
- plt.savefig(plot_path + 'eval_acc.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Eval other metrics')
- for k in metrics_keys:
- if 'confusion' not in k and 'presence' in k:
- plt.plot(steps, [eval_metric[k] for eval_metric in all_eval_other_metrics], label=k)
- plt.legend()
- plt.ylim([0, 1])
- plt.savefig(plot_path + 'eval_ing_presence_errors.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.figure()
- plt.title('Eval other metrics')
- for k in metrics_keys:
- if 'confusion' not in k and 'presence' not in k:
- plt.plot(steps, [eval_metric[k] for eval_metric in all_eval_other_metrics], label=k)
- plt.legend()
- plt.savefig(plot_path + 'eval_ing_q_error.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
-
- for k in metrics_keys:
- if 'confusion' in k:
- plt.figure()
- plt.title(k)
- plt.ylabel('True')
- plt.xlabel('Predicted')
- plt.imshow(all_eval_other_metrics[-1][k], vmin=0, vmax=1)
- plt.colorbar()
- plt.savefig(plot_path + f'eval_{k}.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- for k in metrics_keys:
- if 'confusion' in k:
- plt.figure()
- plt.title(k)
- plt.ylabel('True')
- plt.xlabel('Predicted')
- plt.imshow(all_train_other_metrics[-1][k], vmin=0, vmax=1)
- plt.colorbar()
- plt.savefig(plot_path + f'train_{k}.png', dpi=200)
- fig = plt.gcf()
- plt.close(fig)
-
- plt.close('all')
-
-
-def get_model(model_path):
-
- with open(model_path + 'params.json', 'r') as f:
- params = json.load(f)
- params['save_path'] = model_path
- max_ing_quantities = np.loadtxt(params['save_path'] + 'max_ing_quantities.txt')
- mean_ing_quantities = np.loadtxt(params['save_path'] + 'mean_ing_quantities.txt')
- std_ing_quantities = np.loadtxt(params['save_path'] + 'std_ing_quantities.txt')
- min_when_present_ing_quantities = np.loadtxt(params['save_path'] + 'min_when_present_ing_quantities.txt')
- def filter_decoder_output(output):
- output = output.detach().numpy()
- output_unnormalized = output * std_ing_quantities + mean_ing_quantities
- if output.ndim == 1:
- output_unnormalized[np.where(output_unnormalized < min_when_present_ing_quantities)] = 0
- else:
- for i in range(output.shape[0]):
- output_unnormalized[i, np.where(output_unnormalized[i] < min_when_present_ing_quantities)] = 0
- return output_unnormalized.copy()
- params['filter_decoder_output'] = filter_decoder_output
- model_chkpt = model_path + "checkpoint_best.save"
- model_params = [params[k] for k in ["input_dim", "deepset_latent_dim", "hidden_dims_ingredients", "activation",
- "hidden_dims_cocktail", "hidden_dims_decoder", "nb_ingredients", "latent_dim", "agg", "dropout", "auxiliaries_dict",
- "filter_decoder_output"]]
- model = get_vae_model(*model_params)
- model.load_state_dict(torch.load(model_chkpt))
- model.eval()
- return model, filter_decoder_output, params
-
-
-def compute_expe_name_and_save_path(params):
- weights_str = '['
- for aux in params['auxiliaries_dict'].keys():
- weights_str += f'{params["auxiliaries_dict"][aux]["weight"]}, '
- weights_str = weights_str[:-2] + ']'
- save_path = params['save_path'] + params["trial_id"]
- save_path += f'_lr{params["lr"]}'
- save_path += f'_betavae{params["beta_vae"]}'
- save_path += f'_bs{params["batch_size"]}'
- save_path += f'_latentdim{params["latent_dim"]}'
- save_path += f'_hding{params["hidden_dims_ingredients"]}'
- save_path += f'_hdcocktail{params["hidden_dims_cocktail"]}'
- save_path += f'_hddecoder{params["hidden_dims_decoder"]}'
- save_path += f'_agg{params["agg"]}'
- save_path += f'_activ{params["activation"]}'
- save_path += f'_w{weights_str}'
- counter = 0
- while os.path.exists(save_path + f"_{counter}"):
- counter += 1
- save_path = save_path + f"_{counter}" + '/'
- params["save_path"] = save_path
- os.makedirs(save_path)
- os.makedirs(save_path + 'plots/')
- params['plot_path'] = save_path + 'plots/'
- print(f'logging to {save_path}')
- return params
-
-
-
-if __name__ == '__main__':
- params = get_params()
- run_experiment(params)
-
diff --git a/spaces/ccolas/TastyPiano/src/music/pipeline/music_pipeline.py b/spaces/ccolas/TastyPiano/src/music/pipeline/music_pipeline.py
deleted file mode 100644
index 056dd3050eb827723b86d8b90b4a2e31fd701c7f..0000000000000000000000000000000000000000
--- a/spaces/ccolas/TastyPiano/src/music/pipeline/music_pipeline.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from src.music.pipeline.url2audio import url2audio
-from src.music.pipeline.audio2midi import audio2midi
-from src.music.pipeline.midi2processed import midi2processed
-from src.music.pipeline.processed2encoded import processed2encoded
-from src.music.pipeline.encoded2rep import encoded2rep
-from src.music.config import RANDOM_CROP, NB_AUG, FROM_URL_PATH
-# from src.music.pipeline.synth2audio import AudioRecorder
-# from src.music.pipeline.processed2handcodedrep import processed2handcodedrep
-import time
-import hashlib
-
-VERBOSE = True
-AUGMENTATION, NOISE_INJECTED = False, False
- CROP = 10  # crop length (in seconds) applied to the audio before transcription
-
-# AUDIO_RECORDER = AudioRecorder(place='home')
-
-def encode_music(url=None,
- audio_path=None,
- midi_path=None,
- processed_path=None,
- record=False,
- crop=CROP,
- random_crop=RANDOM_CROP,
- augmentation=AUGMENTATION,
- noise_injection=NOISE_INJECTED,
- apply_filtering=True,
- nb_aug=NB_AUG,
- level=0,
- verbose=VERBOSE):
- if not record: assert url is not None or audio_path is not None or midi_path is not None or processed_path is not None
- init_time = time.time()
- error = ''
- try:
- if record:
- assert audio_path is None and midi_path is None
- if verbose: print(' ' * level + 'Processing music, recorded from mic.')
- audio_path = AUDIO_RECORDER.record_one()
- error = ''
- if processed_path is None:
- if midi_path is None:
- if audio_path is None:
- if verbose and not record: print(' ' * level + 'Processing music, from audio source.')
- init_t = time.time()
- audio_path, _, error = url2audio(playlist_path=FROM_URL_PATH, video_url=url, verbose=verbose, level=level+2)
- if verbose: print(' ' * (level + 4) + f'Audio downloaded in {int(time.time() - init_t)} seconds.')
- else:
- if verbose and not record: print(' ' * level + 'Processing music, from midi source.')
- init_t = time.time()
- midi_path, error = audio2midi(audio_path, crop=crop, random_crop=random_crop, verbose=verbose, level=level+2)
- if verbose: print(' ' * (level + 4) + f'Audio transcribed to midi in {int(time.time() - init_t)} seconds.')
- init_t = time.time()
- processed_path, error = midi2processed(midi_path, apply_filtering=apply_filtering, verbose=verbose, level=level+2)
- if verbose: print(' ' * (level + 4) + f'Midi preprocessed in {int(time.time() - init_t)} seconds.')
- init_t = time.time()
- encoded_path, error = processed2encoded(processed_path, augmentation=augmentation, nb_aug=nb_aug, noise_injection=noise_injection, verbose=verbose, level=level+2)
- if verbose: print(' ' * (level + 4) + f'Midi encoded in {int(time.time() - init_t)} seconds.')
- init_t = time.time()
- representation_path, representation, error = encoded2rep(encoded_path, return_rep=True, level=level+2, verbose=verbose)
- if verbose: print(' ' * (level + 4) + f'Music representation computed in {int(time.time() - init_t)} seconds.')
- init_t = time.time()
- handcoded_rep_path, handcoded_rep, error = None, None, ''
- # handcoded_rep_path, handcoded_rep, error = processed2handcodedrep(processed_path, return_rep=True, level=level+2, verbose=verbose)
- if verbose: print(' ' * (level + 4) + f'Music handcoded representation computed in {int(time.time() - init_t)} seconds.')
- # assert handcoded_rep_path is not None and representation_path is not None
- all_paths = dict(url=url, audio_path=audio_path, midi_path=midi_path, processed_path=processed_path, encoded_path=encoded_path,
- representation_path=representation_path, handcoded_rep_path=handcoded_rep_path)
- if audio_path is not None:
- print('audio hash: ', hashlib.md5(open(audio_path, 'rb').read()).hexdigest())
- if midi_path is not None:
- print('midi hash: ', hashlib.md5(open(midi_path, 'rb').read()).hexdigest())
- print('processed hash: ', hashlib.md5(open(processed_path, 'rb').read()).hexdigest())
- print('encoded hash: ', hashlib.md5(open(encoded_path, 'rb').read()).hexdigest())
- print('rep hash: ', hashlib.md5(open(representation_path, 'rb').read()).hexdigest())
- print("rep:", representation[:10])
- if verbose: print(' ' * (level + 2) + f'Music processed in {int(time.time() - init_time)} seconds.')
- except Exception as err:
- print(err, error)
- if verbose: print(' ' * (level + 2) + f'Music FAILED to process in {int(time.time() - init_time)} seconds.')
- representation = None
- handcoded_rep = None
- all_paths = dict()
-
- return representation, handcoded_rep, all_paths, error
-
-if __name__ == '__main__':
- representation = encode_music(url="https://www.youtube.com/watch?v=a2LFVWBmoiw")[0]
- # representation = encode_music(record=True)[0]
\ No newline at end of file
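The debug prints in `encode_music` fingerprint each intermediate artifact with an MD5 of the raw file bytes, which makes it easy to check that the pipeline is deterministic across runs. A small sketch of the same idea that streams the file in chunks, so large audio files need not be read into memory at once (the path is a placeholder):

```python
import hashlib

def file_md5(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the MD5 hex digest of a file, read in 1 MiB chunks."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# print(file_md5("some_audio.mp3"))  # hypothetical file
```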
diff --git a/spaces/ccolas/TastyPiano/src/music/utilities/__init__.py b/spaces/ccolas/TastyPiano/src/music/utilities/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/defaults.py b/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/defaults.py
deleted file mode 100644
index e0d678337432bbcced7d525d0cfa9be43b712f59..0000000000000000000000000000000000000000
--- a/spaces/ccolas/TastyPiano/src/music/utilities/handcoded_rep_utilities/tht/defaults.py
+++ /dev/null
@@ -1,21 +0,0 @@
-'''Default configuration for THT'''
-
-from src.music.utilities.handcoded_rep_utilities.tht import similarity, confidence, correction
-
-eval_f = confidence.windowed_conf
-corr_f = correction.windowed_corr
-sim_f = similarity.min_dist_sim
-similarity_epsilon = 0.005
-max_delta = (60000.0 / 40) # 40 bpm
-min_delta = (60000.0 / 320) # 320 bpm
-max_hypotheses = 30
-
-config = {
- 'eval_f': eval_f,
- 'corr_f': corr_f,
- 'sim_f': sim_f,
- 'similarity_epsilon': similarity_epsilon,
- 'max_delta': max_delta,
- 'min_delta': min_delta,
- 'max_hypotheses': max_hypotheses
-}
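The tempo bounds above are inter-beat intervals in milliseconds, computed as `60000 / bpm`, so a faster tempo gives a smaller delta. A quick sketch of the conversion and the range it implies for the tracker's hypotheses:

```python
def bpm_to_delta_ms(bpm: float) -> float:
    """Convert beats per minute to an inter-beat interval in milliseconds."""
    return 60000.0 / bpm

max_delta = bpm_to_delta_ms(40)    # 1500.0 ms, slowest tempo considered (40 bpm)
min_delta = bpm_to_delta_ms(320)   # 187.5 ms, fastest tempo considered (320 bpm)
print(min_delta, max_delta)
```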
diff --git a/spaces/cfwef/gpt/show_math.py b/spaces/cfwef/gpt/show_math.py
deleted file mode 100644
index 80fa881d1c2ace5813f75b5d8a19ca056a8bfa4f..0000000000000000000000000000000000000000
--- a/spaces/cfwef/gpt/show_math.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# This program is written by: https://github.com/polarwinkel/mdtex2html
-
-from latex2mathml.converter import convert as tex2mathml
-import re
-
-incomplete = '⚠formula incomplete'
-convError = '⚠LaTeX-convert-error'
-
-def convert(mdtex, extensions=[], splitParagraphs=True):
- ''' converts recursively the Markdown-LaTeX-mixture to HTML with MathML '''
- found = False
- # handle all paragraphs separately (prevents aftereffects)
- if splitParagraphs:
- parts = re.split("\n\n", mdtex)
- result = ''
- for part in parts:
- result += convert(part, extensions, splitParagraphs=False)
- return result
- # find first $$-formula:
- parts = re.split(r'\${2}', mdtex, 2)
- if len(parts)>1:
- found = True
- result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
- try:
- result += '<div class="math">'+tex2mathml(parts[1])+'</div>\n'
- except:
- result += '<div class="math">'+convError+'</div>'
- if len(parts)==3:
- result += convert(parts[2], extensions, splitParagraphs=False)
- else:
- result += '<div class="math">'+incomplete+'</div>'
- # else find first $-formulas:
- else:
- parts = re.split(r'\${1}', mdtex, 2)
- if len(parts)>1 and not found:
- found = True
- try:
- mathml = tex2mathml(parts[1])
- except:
- mathml = convError
- if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
- parts[0]=parts[0]+''
- if len(parts)==3:
- result = convert(parts[0]+mathml+parts[2], extensions, splitParagraphs=False)
- else:
- result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
- # else find first \[..\]-equation:
- else:
- parts = re.split(r'\\\[', mdtex, 1)
- if len(parts)>1 and not found:
- found = True
- result = convert(parts[0], extensions, splitParagraphs=False)+'\n'
- parts = re.split(r'\\\]', parts[1], 1)
- try:
- result += '<div class="math">'+tex2mathml(parts[0])+'</div>\n'
- except:
- result += '<div class="math">'+convError+'</div>'
- if len(parts)==2:
- result += convert(parts[1], extensions, splitParagraphs=False)
- else:
- result += '<div class="math">'+incomplete+'</div>'
- # else find first \(..\)-equation:
- else:
- parts = re.split(r'\\\(', mdtex, 1)
- if len(parts)>1 and not found:
- found = True
- subp = re.split(r'\\\)', parts[1], 1)
- try:
- mathml = tex2mathml(subp[0])
- except:
- mathml = convError
- if parts[0].endswith('\n\n') or parts[0]=='': # make sure textblock starts before formula!
- parts[0]=parts[0]+''
- if len(subp)==2:
- result = convert(parts[0]+mathml+subp[1], extensions, splitParagraphs=False)
- else:
- result = convert(parts[0]+mathml+incomplete, extensions, splitParagraphs=False)
- if not found:
- result = mdtex
- return result
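For reference, a minimal usage sketch of `convert`: it splits the input on `$$`, `$`, `\[...\]` and `\(...\)` delimiters, turns each LaTeX fragment into MathML via `latex2mathml`, and leaves the surrounding Markdown text untouched (the module name and the input string are assumptions for illustration):

```python
from show_math import convert  # assuming the module above is importable under this name

mixed = "Pythagoras: $a^2 + b^2 = c^2$\n\nDisplay form:\n\n$$\\int_0^1 x\\,dx$$"
print(convert(mixed))  # Markdown text with MathML <math> fragments replacing the formulas
```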
diff --git a/spaces/cha0smagick/RPG_Character_generator/README.md b/spaces/cha0smagick/RPG_Character_generator/README.md
deleted file mode 100644
index 91743c598df9801fefcf02ed37f5946ff0df5309..0000000000000000000000000000000000000000
--- a/spaces/cha0smagick/RPG_Character_generator/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: RPG Character Generator
-emoji: 📉
-colorFrom: yellow
-colorTo: pink
-sdk: streamlit
-sdk_version: 1.27.2
-app_file: app.py
-pinned: false
-license: mit
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
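The front matter above tells Hugging Face Spaces to launch the file named in `app_file` with the Streamlit SDK. A hypothetical skeleton of such an `app.py` (not the Space's actual code), only to show the entry point the configuration expects:

```python
import streamlit as st

st.title("RPG Character Generator")

name = st.text_input("Character name")
char_class = st.selectbox("Class", ["Warrior", "Mage", "Rogue"])

if st.button("Generate"):
    # placeholder logic; the real Space would generate a full character sheet here
    st.write(f"{name or 'Unnamed hero'} the {char_class} enters the tavern.")
```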
diff --git a/spaces/chendl/compositional_test/transformers/src/transformers/generation/flax_utils.py b/spaces/chendl/compositional_test/transformers/src/transformers/generation/flax_utils.py
deleted file mode 100644
index 4ff1164c88e91a4c37c864756d75e6ce74033c48..0000000000000000000000000000000000000000
--- a/spaces/chendl/compositional_test/transformers/src/transformers/generation/flax_utils.py
+++ /dev/null
@@ -1,1004 +0,0 @@
-# coding=utf-8
-# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-import copy
-import inspect
-import warnings
-from functools import partial
-from typing import Any, Dict, Optional, Union
-
-import flax
-import jax
-import jax.numpy as jnp
-import numpy as np
-from jax import lax
-
-from ..models.auto import (
- FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
- FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
- FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
-)
-from ..utils import ModelOutput, logging
-from .configuration_utils import GenerationConfig
-from .flax_logits_process import (
- FlaxForcedBOSTokenLogitsProcessor,
- FlaxForcedEOSTokenLogitsProcessor,
- FlaxForceTokensLogitsProcessor,
- FlaxLogitsProcessorList,
- FlaxMinLengthLogitsProcessor,
- FlaxSuppressTokensAtBeginLogitsProcessor,
- FlaxSuppressTokensLogitsProcessor,
- FlaxTemperatureLogitsWarper,
- FlaxTopKLogitsWarper,
- FlaxTopPLogitsWarper,
-)
-
-
-logger = logging.get_logger(__name__)
-
-
-@flax.struct.dataclass
-class FlaxGreedySearchOutput(ModelOutput):
- """
- Flax Base class for outputs of decoder-only generation models using greedy search.
-
-
- Args:
- sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
- The generated sequences.
- """
-
- sequences: jnp.ndarray = None
-
-
-@flax.struct.dataclass
-class FlaxSampleOutput(ModelOutput):
- """
- Flax Base class for outputs of decoder-only generation models using sampling.
-
-
- Args:
- sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
- The generated sequences.
- """
-
- sequences: jnp.ndarray = None
-
-
-@flax.struct.dataclass
-class FlaxBeamSearchOutput(ModelOutput):
- """
- Flax Base class for outputs of decoder-only generation models using beam search.
-
-
- Args:
- sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
- The generated sequences.
- scores (`jnp.ndarray` of shape `(batch_size,)`):
- The scores (log probabilities) of the generated sequences.
- """
-
- sequences: jnp.ndarray = None
- scores: jnp.ndarray = None
-
-
-@flax.struct.dataclass
-class GreedyState:
- cur_len: jnp.ndarray
- sequences: jnp.ndarray
- running_token: jnp.ndarray
- is_sent_finished: jnp.ndarray
- model_kwargs: Dict[str, jnp.ndarray]
-
-
-@flax.struct.dataclass
-class SampleState:
- cur_len: jnp.ndarray
- sequences: jnp.ndarray
- running_token: jnp.ndarray
- is_sent_finished: jnp.ndarray
- prng_key: jnp.ndarray
- model_kwargs: Dict[str, jnp.ndarray]
-
-
-@flax.struct.dataclass
-class BeamSearchState:
- cur_len: jnp.ndarray
- running_sequences: jnp.ndarray
- running_scores: jnp.ndarray
- sequences: jnp.ndarray
- scores: jnp.ndarray
- is_sent_finished: jnp.ndarray
- model_kwargs: Dict[str, jnp.ndarray]
-
-
-class FlaxGenerationMixin:
- """
- A class containing all functions for auto-regressive text generation, to be used as a mixin in
- [`FlaxPreTrainedModel`].
-
- The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for:
- - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and
- `do_sample=False`
- - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and
- `do_sample=True`
- - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and
- `do_sample=False`
-
- You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To
- learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
- """
-
- def prepare_inputs_for_generation(self, *args, **kwargs):
- raise NotImplementedError(
- "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
- )
-
- @staticmethod
- def _run_loop_in_debug(cond_fn, body_fn, init_state):
- """
- Run generation in untraced mode. This should only be used for debugging purposes.
- """
- state = init_state
- while cond_fn(state):
- state = body_fn(state)
- return state
-
- def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs):
- encoder_kwargs = {
- argument: value
- for argument, value in model_kwargs.items()
- if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
- }
- model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs)
- return model_kwargs
-
- def _prepare_decoder_input_ids_for_generation(
- self,
- batch_size: int,
- decoder_start_token_id: int = None,
- bos_token_id: int = None,
- model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
- ) -> jnp.ndarray:
- if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
- # Only use this arg if not None, otherwise just remove from model_kwargs
- decoder_input_ids = model_kwargs.pop("decoder_input_ids")
- if decoder_input_ids is not None:
- return decoder_input_ids
- decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
- return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0)
-
- def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
- # retrieve decoder_start_token_id for encoder-decoder models
- # fall back to bos_token_id if necessary
- decoder_start_token_id = (
- decoder_start_token_id
- if decoder_start_token_id is not None
- else self.generation_config.decoder_start_token_id
- )
- bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
- if decoder_start_token_id is not None:
- return decoder_start_token_id
- elif (
- hasattr(self.config, "decoder")
- and hasattr(self.config.decoder, "decoder_start_token_id")
- and self.config.decoder.decoder_start_token_id is not None
- ):
- return self.config.decoder.decoder_start_token_id
- elif bos_token_id is not None:
- return bos_token_id
- elif (
- hasattr(self.config, "decoder")
- and hasattr(self.config.decoder, "bos_token_id")
- and self.config.decoder.bos_token_id is not None
- ):
- return self.config.decoder.bos_token_id
- raise ValueError(
- "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
- )
-
- @staticmethod
- def _expand_to_num_beams(tensor, num_beams):
- return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
-
- def _adapt_logits_for_beam_search(self, logits):
- """
- This function can be overwritten in the specific modeling_flax_<model_name>.py classes to allow for custom beam
- search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
- """
- return logits
-
- def _validate_model_class(self):
- """
- Confirms that the model class is compatible with generation. If not, raises an exception that points to the
- right class to use.
- """
- if not self.can_generate():
- generate_compatible_mappings = [
- FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
- FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
- FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
- ]
- generate_compatible_classes = set()
- for model_mapping in generate_compatible_mappings:
- supported_models = model_mapping.get(type(self.config), default=None)
- if supported_models is not None:
- generate_compatible_classes.add(supported_models.__name__)
- exception_message = (
- f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
- "it doesn't have a language model head."
- )
- if generate_compatible_classes:
- exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
- raise TypeError(exception_message)
-
- def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
- """Validates model kwargs for generation. Generate argument typos will also be caught here."""
- unused_model_args = []
- model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
- # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
- # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
- if "kwargs" in model_args or "model_kwargs" in model_args:
- model_args |= set(inspect.signature(self.__call__).parameters)
- for key, value in model_kwargs.items():
- if value is not None and key not in model_args:
- unused_model_args.append(key)
-
- if unused_model_args:
- raise ValueError(
- f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
- " generate arguments will also show up in this list)"
- )
-
- def generate(
- self,
- input_ids: jnp.ndarray,
- generation_config: Optional[GenerationConfig] = None,
- prng_key: Optional[jnp.ndarray] = None,
- trace: bool = True,
- params: Optional[Dict[str, jnp.ndarray]] = None,
- logits_processor: Optional[FlaxLogitsProcessorList] = None,
- **kwargs,
- ):
- r"""
- Generates sequences of token ids for models with a language modeling head.
-
- Parameters:
- input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
- The sequence used as a prompt for the generation.
- generation_config (`~generation.GenerationConfig`, *optional*):
- The generation configuration to be used as base parametrization for the generation call. `**kwargs`
- passed to generate matching the attributes of `generation_config` will override them. If
- `generation_config` is not provided, the default will be used, which has the following loading
- priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
- configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
- default values, whose documentation should be checked to parameterize generation.
- trace (`bool`, *optional*, defaults to `True`):
- Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a
- considerably slower runtime.
- params (`Dict[str, jnp.ndarray]`, *optional*):
- Optionally the model parameters can be passed. Can be useful for parallelized generation.
- logits_processor (`FlaxLogitsProcessorList `, *optional*):
- Custom logits processors that complement the default logits processors built from arguments and
- generation config. If a logit processor is passed that is already created with the arguments or a
- generation config an error is thrown. This feature is intended for advanced users.
- kwargs:
- Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
- forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
- specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
-
- Return:
- [`~utils.ModelOutput`].
-
- """
- # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
- self._validate_model_class()
-
- # priority: `generation_config` argument > `model.generation_config` (the default generation config)
- if generation_config is None:
- # legacy: users may modify the model configuration to control generation -- update the generation config
- # model attribute accordingly, if it was created from the model config
- if self.generation_config._from_model_config:
- new_generation_config = GenerationConfig.from_model_config(self.config)
- if new_generation_config != self.generation_config:
- warnings.warn(
- "You have modified the pretrained model configuration to control generation. This is a"
- " deprecated strategy to control generation and will be removed soon, in a future version."
- " Please use a generation configuration file (see"
- " https://huggingface.co/docs/transformers/main_classes/text_generation)"
- )
- self.generation_config = new_generation_config
- generation_config = self.generation_config
-
- generation_config = copy.deepcopy(generation_config)
- model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
- generation_config.validate()
- self._validate_model_kwargs(model_kwargs.copy())
-
- logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList()
-
- # set init values
- prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
-
- if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
- if model_kwargs.get("attention_mask") is None:
- logger.warning(
- "The attention mask and the pad token id were not set. As a consequence, you may observe "
- "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
- )
- eos_token_id = generation_config.eos_token_id
- if isinstance(eos_token_id, list):
- eos_token_id = eos_token_id[0]
- logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
- generation_config.pad_token_id = eos_token_id
-
- if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
- raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
-
- # decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
- if not self.config.is_encoder_decoder and not trace:
- if (
- generation_config.pad_token_id is not None
- and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
- ):
- logger.warning(
- "A decoder-only architecture is being used, but right-padding was detected! For correct "
- "generation results, please set `padding_side='left'` when initializing the tokenizer."
- )
-
- batch_size = input_ids.shape[0]
-
- if self.config.is_encoder_decoder:
- # add encoder_outputs to model_kwargs
- if model_kwargs.get("encoder_outputs") is None:
- model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
- # prepare decoder_input_ids for generation
- input_ids = self._prepare_decoder_input_ids_for_generation(
- batch_size,
- decoder_start_token_id=generation_config.decoder_start_token_id,
- bos_token_id=generation_config.bos_token_id,
- model_kwargs=model_kwargs,
- )
-
- # Prepare `max_length` depending on other stopping criteria.
- input_ids_seq_length = input_ids.shape[-1]
- has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
- if has_default_max_length and generation_config.max_new_tokens is None:
- warnings.warn(
- f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
- "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
- " recommend using `max_new_tokens` to control the maximum length of the generation.",
- UserWarning,
- )
- elif generation_config.max_new_tokens is not None:
- generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
- if not has_default_max_length:
- logger.warn(
- f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
- f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
- "Please refer to the documentation for more information. "
- "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
- UserWarning,
- )
-
- if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
- raise ValueError(
- f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than"
- f" the maximum length ({generation_config.max_length})"
- )
- if input_ids_seq_length >= generation_config.max_length:
- input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
- logger.warning(
- f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
- f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
- " increasing`max_new_tokens`."
- )
-
- logits_processor = self._get_logits_processor(
- generation_config=generation_config,
- input_ids_seq_length=input_ids_seq_length,
- logits_processor=logits_processor,
- )
-
- if not generation_config.do_sample and generation_config.num_beams == 1:
- return self._greedy_search(
- input_ids,
- generation_config.max_length,
- generation_config.pad_token_id,
- generation_config.eos_token_id,
- logits_processor=logits_processor,
- trace=trace,
- params=params,
- model_kwargs=model_kwargs,
- )
- elif generation_config.do_sample and generation_config.num_beams == 1:
- logits_warper = self._get_logits_warper(generation_config=generation_config)
- return self._sample(
- input_ids,
- generation_config.max_length,
- generation_config.pad_token_id,
- generation_config.eos_token_id,
- prng_key,
- logits_warper=logits_warper,
- logits_processor=logits_processor,
- trace=trace,
- params=params,
- model_kwargs=model_kwargs,
- )
- elif not generation_config.do_sample and generation_config.num_beams > 1:
- # broadcast input_ids & encoder_outputs
- input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)
-
- if "encoder_outputs" in model_kwargs:
- model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
- model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
- )
-
- for kwarg in ["attention_mask", "decoder_attention_mask"]:
- if kwarg in model_kwargs:
- model_kwargs[kwarg] = self._expand_to_num_beams(
- model_kwargs[kwarg], num_beams=generation_config.num_beams
- )
-
- return self._beam_search(
- input_ids,
- generation_config.max_length,
- generation_config.pad_token_id,
- generation_config.eos_token_id,
- length_penalty=generation_config.length_penalty,
- early_stopping=generation_config.early_stopping,
- logits_processor=logits_processor,
- trace=trace,
- params=params,
- model_kwargs=model_kwargs,
- )
- else:
- raise NotImplementedError("`Beam sampling is currently not implemented.")
-
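As the class docstring above explains, `generate` dispatches to greedy search, sampling, or beam search based on `do_sample` and `num_beams`. A minimal sketch of driving the three branches (the checkpoint name and decoding values are illustrative, not prescribed by this file):

```python
import jax
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")            # any Flax causal LM checkpoint
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
input_ids = tokenizer("The secret ingredient is", return_tensors="np").input_ids

# greedy decoding: num_beams=1, do_sample=False (the defaults)
greedy = model.generate(input_ids, max_new_tokens=20)

# multinomial sampling: num_beams=1, do_sample=True
sampled = model.generate(input_ids, do_sample=True, top_k=50, max_new_tokens=20,
                         prng_key=jax.random.PRNGKey(0))

# beam search: num_beams>1, do_sample=False
beams = model.generate(input_ids, num_beams=4, max_new_tokens=20)

print(tokenizer.batch_decode(greedy.sequences, skip_special_tokens=True))
```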
- def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
- """
- This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`]
- instances used for multinomial sampling.
- """
- warpers = FlaxLogitsProcessorList()
-
- if generation_config.temperature is not None and generation_config.temperature != 1.0:
- warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
- if generation_config.top_k is not None and generation_config.top_k != 0:
- warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
- if generation_config.top_p is not None and generation_config.top_p < 1.0:
- warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
-
- return warpers
-
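The warpers assembled by `_get_logits_warper` are applied to the last-step logits during sampling (see `sample_search_body_fn` below, which calls `logits_warper(logits, logits, cur_len)`). A small sketch of composing and applying them directly on dummy scores (batch and vocabulary sizes are made up):

```python
import jax.numpy as jnp
from transformers.generation.flax_logits_process import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

warpers = FlaxLogitsProcessorList()
warpers.append(FlaxTemperatureLogitsWarper(0.7))
warpers.append(FlaxTopKLogitsWarper(top_k=50, min_tokens_to_keep=1))
warpers.append(FlaxTopPLogitsWarper(top_p=0.9, min_tokens_to_keep=1))

scores = jnp.zeros((2, 100))                 # batch of 2, vocabulary of 100 (dummy logits)
input_ids = jnp.zeros((2, 5), dtype="i4")    # ignored by these particular warpers
warped = warpers(input_ids, scores, cur_len=5)
```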
- def _get_logits_processor(
- self,
- generation_config: GenerationConfig,
- input_ids_seq_length: int,
- logits_processor: Optional[FlaxLogitsProcessorList],
- ) -> FlaxLogitsProcessorList:
- """
- This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`]
- instances used to modify the scores of the language model head.
- """
- processors = FlaxLogitsProcessorList()
-
- if (
- generation_config.min_length is not None
- and generation_config.eos_token_id is not None
- and generation_config.min_length > -1
- ):
- processors.append(
- FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)
- )
- if generation_config.forced_bos_token_id is not None:
- processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
- if generation_config.forced_eos_token_id is not None:
- processors.append(
- FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
- )
- if generation_config.suppress_tokens is not None:
- processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
- if generation_config.begin_suppress_tokens is not None:
- begin_index = input_ids_seq_length
- begin_index = (
- begin_index
- if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
- else begin_index + 1
- )
- if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0:
- # generation starts after the last token that is forced
- begin_index += generation_config.forced_decoder_ids[-1][0]
- processors.append(
- FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
- )
- if generation_config.forced_decoder_ids is not None:
- forced_decoder_ids = [
- [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids
- ]
- processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids))
- processors = self._merge_criteria_processor_list(processors, logits_processor)
-
- return processors
-
- def _merge_criteria_processor_list(
- self,
- default_list: FlaxLogitsProcessorList,
- custom_list: FlaxLogitsProcessorList,
- ) -> FlaxLogitsProcessorList:
- if len(custom_list) == 0:
- return default_list
- for default in default_list:
- for custom in custom_list:
- if type(custom) is type(default):
- object_type = "logits processor"
- raise ValueError(
- f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
- f" `generate`, but it has already been created with the values {default}. {default} has been"
- " created by passing the corresponding arguments to generate or by the model's config default"
- f" values. If you just want to change the default values of {object_type} consider passing"
- f" them as arguments to `generate` instead of using a custom {object_type}."
- )
- default_list.extend(custom_list)
- return default_list
-
- def _greedy_search(
- self,
- input_ids: None,
- max_length: Optional[int] = None,
- pad_token_id: Optional[int] = None,
- eos_token_id: Optional[int] = None,
- logits_processor: Optional[FlaxLogitsProcessorList] = None,
- trace: bool = True,
- params: Optional[Dict[str, jnp.ndarray]] = None,
- model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
- ):
- # init values
- max_length = max_length if max_length is not None else self.generation_config.max_length
- pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
- eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
-
- batch_size, cur_len = input_ids.shape
-
- eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
- pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
- cur_len = jnp.array(cur_len)
-
- # per batch-item holding current token in loop.
- sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
- sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
-
- # per batch-item state bit indicating if sentence has finished.
- is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
-
- # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
- # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
- model = self.decode if self.config.is_encoder_decoder else self
- # initialize model specific kwargs
- model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
-
- # initialize state
- state = GreedyState(
- cur_len=cur_len,
- sequences=sequences,
- running_token=input_ids,
- is_sent_finished=is_sent_finished,
- model_kwargs=model_kwargs,
- )
-
- def greedy_search_cond_fn(state):
- """state termination condition fn."""
- has_reached_max_length = state.cur_len == max_length
- all_sequence_finished = jnp.all(state.is_sent_finished)
- finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
- return ~finish_generation
-
- def greedy_search_body_fn(state):
- """state update fn."""
- model_outputs = model(state.running_token, params=params, **state.model_kwargs)
- logits = model_outputs.logits[:, -1]
-
- # apply min_length, ...
- logits = logits_processor(state.sequences, logits, state.cur_len)
-
- next_token = jnp.argmax(logits, axis=-1)
-
- next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
- next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
- next_token = next_token[:, None]
-
- next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
- next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
- return GreedyState(
- cur_len=state.cur_len + 1,
- sequences=next_sequences,
- running_token=next_token,
- is_sent_finished=next_is_sent_finished,
- model_kwargs=next_model_kwargs,
- )
-
- # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
- if input_ids.shape[1] > 1:
- state = greedy_search_body_fn(state)
-
- if not trace:
- state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
- else:
- state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
-
- return FlaxGreedySearchOutput(sequences=state.sequences)
-
- def _sample(
- self,
- input_ids: None,
- max_length: Optional[int] = None,
- pad_token_id: Optional[int] = None,
- eos_token_id: Optional[int] = None,
- prng_key: Optional[jnp.ndarray] = None,
- logits_processor: Optional[FlaxLogitsProcessorList] = None,
- logits_warper: Optional[FlaxLogitsProcessorList] = None,
- trace: bool = True,
- params: Optional[Dict[str, jnp.ndarray]] = None,
- model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
- ):
- # init values
- max_length = max_length if max_length is not None else self.generation_config.max_length
- pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
- eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
- prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
-
- batch_size, cur_len = input_ids.shape
-
- eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
- pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
- cur_len = jnp.array(cur_len)
-
- # per batch-item holding current token in loop.
- sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
- sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
-
- # per batch-item state bit indicating if sentence has finished.
- is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
-
- # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
- # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
- model = self.decode if self.config.is_encoder_decoder else self
-
- # initialize model specific kwargs
- model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
-
- # initialize state
- state = SampleState(
- cur_len=cur_len,
- sequences=sequences,
- running_token=input_ids,
- is_sent_finished=is_sent_finished,
- prng_key=prng_key,
- model_kwargs=model_kwargs,
- )
-
- def sample_search_cond_fn(state):
- """state termination condition fn."""
- has_reached_max_length = state.cur_len == max_length
- all_sequence_finished = jnp.all(state.is_sent_finished)
- finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
- return ~finish_generation
-
- def sample_search_body_fn(state):
- """state update fn."""
- prng_key, prng_key_next = jax.random.split(state.prng_key)
- model_outputs = model(state.running_token, params=params, **state.model_kwargs)
-
- logits = model_outputs.logits[:, -1]
-
- # apply min_length, ...
- logits = logits_processor(state.sequences, logits, state.cur_len)
- # apply top_p, top_k, temperature
- logits = logits_warper(logits, logits, state.cur_len)
-
- next_token = jax.random.categorical(prng_key, logits, axis=-1)
-
- next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
- next_token = next_token * ~next_is_sent_finished + pad_token_id * next_is_sent_finished
- next_token = next_token[:, None]
-
- next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
- next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
-
- return SampleState(
- cur_len=state.cur_len + 1,
- sequences=next_sequences,
- running_token=next_token,
- is_sent_finished=next_is_sent_finished,
- model_kwargs=next_model_kwargs,
- prng_key=prng_key_next,
- )
-
- # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
- if input_ids.shape[1] > 1:
- state = sample_search_body_fn(state)
-
- if not trace:
- state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
- else:
- state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
-
- return FlaxSampleOutput(sequences=state.sequences)
-
- def _beam_search(
- self,
- input_ids: jnp.ndarray,
- max_length: Optional[int] = None,
- pad_token_id: Optional[int] = None,
- eos_token_id: Optional[int] = None,
- length_penalty: Optional[float] = None,
- early_stopping: Optional[Union[bool, str]] = None,
- logits_processor: Optional[FlaxLogitsProcessorList] = None,
- trace: bool = True,
- params: Optional[Dict[str, jnp.ndarray]] = None,
- model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
- ):
- """
- This beam search function is heavily inspired by Flax's official example:
- https://github.com/google/flax/blob/main/examples/wmt/decode.py
- """
-
- def flatten_beam_dim(tensor):
- """Flattens the first two dimensions of a non-scalar array."""
- # ignore scalars (e.g. cache index)
- if tensor.ndim == 0:
- return tensor
- return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
-
- def unflatten_beam_dim(tensor, batch_size, num_beams):
- """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
- # ignore scalars (e.g. cache index)
- if tensor.ndim == 0:
- return tensor
- return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
-
- def gather_beams(nested, beam_indices, batch_size, new_num_beams):
- """
- Gathers the beam slices indexed by beam_indices into new beam array.
- """
- batch_indices = jnp.reshape(
- jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
- )
-
- def gather_fn(tensor):
- # ignore scalars (e.g. cache index)
- if tensor.ndim == 0:
- return tensor
- else:
- return tensor[batch_indices, beam_indices]
-
- return jax.tree_util.tree_map(gather_fn, nested)
-
- # init values
- max_length = max_length if max_length is not None else self.generation_config.max_length
- pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
- eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
- length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
- early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
-
- batch_size, num_beams, cur_len = input_ids.shape
-
- eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
- pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
- cur_len = jnp.array(cur_len)
-
- # per batch,beam-item holding current token in loop.
- sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
- running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
- running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
-
- # per batch,beam-item state bit indicating if sentence has finished.
- is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
-
- # per batch,beam-item score, logprobs
- running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
- scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
-
- # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
- # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
- model = self.decode if self.config.is_encoder_decoder else self
-
- # flatten beam dim
- if "encoder_outputs" in model_kwargs:
- model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
- model_kwargs["encoder_outputs"]["last_hidden_state"]
- )
- for kwarg in ["attention_mask", "decoder_attention_mask"]:
- if kwarg in model_kwargs:
- model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg])
-
- # initialize model specific kwargs
- model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
-
- # initialize state
- state = BeamSearchState(
- cur_len=cur_len,
- running_sequences=running_sequences,
- running_scores=running_scores,
- sequences=sequences,
- scores=scores,
- is_sent_finished=is_sent_finished,
- model_kwargs=model_kwargs,
- )
-
- def beam_search_cond_fn(state):
- """beam search state termination condition fn."""
-
- # 1. is less than max length?
- not_max_length_yet = state.cur_len < max_length
-
- # 2. can the new beams still improve?
- # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion
- # below for more details.
- # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
- # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
- # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
- if early_stopping == "never" and length_penalty > 0.0:
- best_running_score = state.running_scores[:, :1] / (max_length**length_penalty)
- else:
- best_running_score = state.running_scores[:, :1] / (state.cur_len**length_penalty)
- worst_finished_score = jnp.where(
- state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
- )
- improvement_still_possible = jnp.any(best_running_score > worst_finished_score)
-
- # 3. is there still a beam that has not finished?
- still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True))
-
- return not_max_length_yet & still_open_beam & improvement_still_possible
-
- def beam_search_body_fn(state, input_ids_length=1):
- """beam search state update fn."""
- # 1. Forward current tokens
- # Collect the current position slice along length to feed the fast
- # autoregressive decoder model. Flatten the beam dimension into batch
- # dimension for feeding into the model.
- input_token = flatten_beam_dim(
- lax.dynamic_slice(
- state.running_sequences,
- (0, 0, state.cur_len - input_ids_length),
- (batch_size, num_beams, input_ids_length),
- )
- )
- model_outputs = model(input_token, params=params, **state.model_kwargs)
-
- # unflatten beam dimension
- logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
- # Unflatten beam dimension in attention cache arrays
- cache = jax.tree_util.tree_map(
- lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
- )
-
- # adapt logits for FlaxMarianMTModel
- logits = self._adapt_logits_for_beam_search(logits)
-
- # 2. Compute log probs
- # get log probabilities from logits,
- # process logits with processors (*e.g.* min_length, ...), and
- # add new logprobs to existing running logprobs scores.
- log_probs = jax.nn.log_softmax(logits)
- log_probs = logits_processor(
- flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len
- )
- log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
- log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
- vocab_size = log_probs.shape[2]
- log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
-
- # 3. Retrieve top-K
- # Each item in batch has num_beams * vocab_size candidate sequences.
- # For each item, get the top 2*k candidates with the highest log-
- # probabilities. We gather the top 2*K beams here so that even if the best
- # K sequences reach EOS simultaneously, we have another K sequences
- # remaining to continue the live beam search.
- # Gather the top 2*K scores from _all_ beams.
- # Gather 2*k top beams.
- # Recover the beam index by floor division.
- # Recover token id by modulo division and expand Id array for broadcasting.
- # Update sequences for the 2*K top-k new sequences.
- beams_to_keep = 2 * num_beams
- topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
- topk_beam_indices = topk_indices // vocab_size
- topk_running_sequences = gather_beams(
- state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
- )
- topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
- topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
-
- # 4. Check which sequences have ended
- # Update current sequences:
- # Did any of these sequences reach an end marker?
- # To prevent these just finished sequences from being added to the current sequences
- # set of active beam search sequences, set their log probs to a very large
- # negative value.
- did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
- running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
- # 5. Get running sequences scores for next
- # Determine the top k beam indices (from top 2*k beams) from log probs
- # and gather top k beams (from top 2*k beams).
- next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1]
- next_running_sequences, next_running_scores = gather_beams(
- [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
- )
-
- # 6. Process topk logits
- # Further process log probs:
- # - add length penalty
- # - make sure no scores can be added anymore if beam is full
- # - make sure still running sequences cannot be chosen as finalized beam
- topk_log_probs = topk_log_probs / (state.cur_len**length_penalty)
- beams_in_batch_are_full = jnp.broadcast_to(
- state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape
- ) & (early_stopping is True)
- add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
- topk_log_probs += add_penalty * np.array(-1.0e7)
-
- # 7. Get scores, sequences, is sentence finished for next.
- # Combine sequences, scores, and flags along the beam dimension and compare
- # new finished sequence scores to existing finished scores and select the
- # best from the new set of beams
- merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
- merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
- merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
- topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1]
- next_sequences, next_scores, next_is_sent_finished = gather_beams(
- [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
- )
-
- # 8. Update model kwargs.
- # Determine the top k beam indices from the original set of all beams.
- # With these, gather the top k beam-associated caches.
- next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
- next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
- model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache)
- next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
-
- return BeamSearchState(
- cur_len=state.cur_len + 1,
- running_scores=next_running_scores,
- running_sequences=next_running_sequences,
- scores=next_scores,
- sequences=next_sequences,
- is_sent_finished=next_is_sent_finished,
- model_kwargs=next_model_kwargs,
- )
-
- # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
- if input_ids.shape[-1] > 1:
- state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
-
- if not trace:
- state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
- else:
- state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
-
- # Account for the edge-case where there are no finished sequences for a
- # particular batch item. If so, return running sequences for that batch item.
- none_finished = jnp.any(state.is_sent_finished, axis=1)
- sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
- scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
-
- # take best beam for each batch
- sequences = sequences[:, 0]
- scores = scores[:, 0]
-
- return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
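The three generation methods above (_greedy_search, _sample and _beam_search) share one pattern: thread a fixed-shape state through lax.while_loop and stop once every sequence is finished or max_length is reached. The following is a minimal, self-contained sketch of that pattern, not code from the module above; logits_fn is a hypothetical stand-in for a model forward pass that returns next-token scores of shape (batch, vocab).

import jax.numpy as jnp
from jax import lax


def toy_greedy_decode(logits_fn, prompt, max_length, pad_token_id=0, eos_token_id=2):
    # prompt: int32 array of shape (batch, prompt_len); logits_fn(sequences, cur_len) -> (batch, vocab)
    batch_size, prompt_len = prompt.shape
    sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
    sequences = lax.dynamic_update_slice(sequences, prompt, (0, 0))
    init_state = (jnp.array(prompt_len), sequences, jnp.zeros((batch_size,), dtype=jnp.bool_))

    def cond_fn(state):
        cur_len, _, is_finished = state
        # keep looping while some sequence is unfinished and max_length is not reached
        return ~(jnp.all(is_finished) | (cur_len >= max_length))

    def body_fn(state):
        cur_len, sequences, is_finished = state
        next_token = jnp.argmax(logits_fn(sequences, cur_len), axis=-1).astype(jnp.int32)
        # finished sequences keep emitting the pad token
        next_token = jnp.where(is_finished, pad_token_id, next_token)
        is_finished = is_finished | (next_token == eos_token_id)
        sequences = lax.dynamic_update_slice(sequences, next_token[:, None], (0, cur_len))
        return (cur_len + 1, sequences, is_finished)

    _, sequences, _ = lax.while_loop(cond_fn, body_fn, init_state)
    return sequences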
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImImagePlugin.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImImagePlugin.py
deleted file mode 100644
index 746743f658cf3fa2e0022ae049808eb68d3d1221..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/ImImagePlugin.py
+++ /dev/null
@@ -1,371 +0,0 @@
-#
-# The Python Imaging Library.
-# $Id$
-#
-# IFUNC IM file handling for PIL
-#
-# history:
-# 1995-09-01 fl Created.
-# 1997-01-03 fl Save palette images
-# 1997-01-08 fl Added sequence support
-# 1997-01-23 fl Added P and RGB save support
-# 1997-05-31 fl Read floating point images
-# 1997-06-22 fl Save floating point images
-# 1997-08-27 fl Read and save 1-bit images
-# 1998-06-25 fl Added support for RGB+LUT images
-# 1998-07-02 fl Added support for YCC images
-# 1998-07-15 fl Renamed offset attribute to avoid name clash
-# 1998-12-29 fl Added I;16 support
-# 2001-02-17 fl Use 're' instead of 'regex' (Python 2.1) (0.7)
-# 2003-09-26 fl Added LA/PA support
-#
-# Copyright (c) 1997-2003 by Secret Labs AB.
-# Copyright (c) 1995-2001 by Fredrik Lundh.
-#
-# See the README file for information on usage and redistribution.
-#
-
-
-import os
-import re
-
-from . import Image, ImageFile, ImagePalette
-
-# --------------------------------------------------------------------
-# Standard tags
-
-COMMENT = "Comment"
-DATE = "Date"
-EQUIPMENT = "Digitalization equipment"
-FRAMES = "File size (no of images)"
-LUT = "Lut"
-NAME = "Name"
-SCALE = "Scale (x,y)"
-SIZE = "Image size (x*y)"
-MODE = "Image type"
-
-TAGS = {
- COMMENT: 0,
- DATE: 0,
- EQUIPMENT: 0,
- FRAMES: 0,
- LUT: 0,
- NAME: 0,
- SCALE: 0,
- SIZE: 0,
- MODE: 0,
-}
-
-OPEN = {
- # ifunc93/p3cfunc formats
- "0 1 image": ("1", "1"),
- "L 1 image": ("1", "1"),
- "Greyscale image": ("L", "L"),
- "Grayscale image": ("L", "L"),
- "RGB image": ("RGB", "RGB;L"),
- "RLB image": ("RGB", "RLB"),
- "RYB image": ("RGB", "RLB"),
- "B1 image": ("1", "1"),
- "B2 image": ("P", "P;2"),
- "B4 image": ("P", "P;4"),
- "X 24 image": ("RGB", "RGB"),
- "L 32 S image": ("I", "I;32"),
- "L 32 F image": ("F", "F;32"),
- # old p3cfunc formats
- "RGB3 image": ("RGB", "RGB;T"),
- "RYB3 image": ("RGB", "RYB;T"),
- # extensions
- "LA image": ("LA", "LA;L"),
- "PA image": ("LA", "PA;L"),
- "RGBA image": ("RGBA", "RGBA;L"),
- "RGBX image": ("RGBX", "RGBX;L"),
- "CMYK image": ("CMYK", "CMYK;L"),
- "YCC image": ("YCbCr", "YCbCr;L"),
-}
-
-# ifunc95 extensions
-for i in ["8", "8S", "16", "16S", "32", "32F"]:
- OPEN[f"L {i} image"] = ("F", f"F;{i}")
- OPEN[f"L*{i} image"] = ("F", f"F;{i}")
-for i in ["16", "16L", "16B"]:
- OPEN[f"L {i} image"] = (f"I;{i}", f"I;{i}")
- OPEN[f"L*{i} image"] = (f"I;{i}", f"I;{i}")
-for i in ["32S"]:
- OPEN[f"L {i} image"] = ("I", f"I;{i}")
- OPEN[f"L*{i} image"] = ("I", f"I;{i}")
-for i in range(2, 33):
- OPEN[f"L*{i} image"] = ("F", f"F;{i}")
-
-
-# --------------------------------------------------------------------
-# Read IM directory
-
-split = re.compile(rb"^([A-Za-z][^:]*):[ \t]*(.*)[ \t]*$")
-
-
-def number(s):
- try:
- return int(s)
- except ValueError:
- return float(s)
-
-
-##
-# Image plugin for the IFUNC IM file format.
-
-
-class ImImageFile(ImageFile.ImageFile):
- format = "IM"
- format_description = "IFUNC Image Memory"
- _close_exclusive_fp_after_loading = False
-
- def _open(self):
- # Quick rejection: if there's not an LF among the first
- # 100 bytes, this is (probably) not a text header.
-
- if b"\n" not in self.fp.read(100):
- msg = "not an IM file"
- raise SyntaxError(msg)
- self.fp.seek(0)
-
- n = 0
-
- # Default values
- self.info[MODE] = "L"
- self.info[SIZE] = (512, 512)
- self.info[FRAMES] = 1
-
- self.rawmode = "L"
-
- while True:
- s = self.fp.read(1)
-
- # Some versions of IFUNC use \n\r instead of \r\n...
- if s == b"\r":
- continue
-
- if not s or s == b"\0" or s == b"\x1A":
- break
-
- # FIXME: this may read whole file if not a text file
- s = s + self.fp.readline()
-
- if len(s) > 100:
- msg = "not an IM file"
- raise SyntaxError(msg)
-
- if s[-2:] == b"\r\n":
- s = s[:-2]
- elif s[-1:] == b"\n":
- s = s[:-1]
-
- try:
- m = split.match(s)
- except re.error as e:
- msg = "not an IM file"
- raise SyntaxError(msg) from e
-
- if m:
- k, v = m.group(1, 2)
-
- # Don't know if this is the correct encoding,
- # but a decent guess (I guess)
- k = k.decode("latin-1", "replace")
- v = v.decode("latin-1", "replace")
-
- # Convert value as appropriate
- if k in [FRAMES, SCALE, SIZE]:
- v = v.replace("*", ",")
- v = tuple(map(number, v.split(",")))
- if len(v) == 1:
- v = v[0]
- elif k == MODE and v in OPEN:
- v, self.rawmode = OPEN[v]
-
- # Add to dictionary. Note that COMMENT tags are
- # combined into a list of strings.
- if k == COMMENT:
- if k in self.info:
- self.info[k].append(v)
- else:
- self.info[k] = [v]
- else:
- self.info[k] = v
-
- if k in TAGS:
- n += 1
-
- else:
- msg = "Syntax error in IM header: " + s.decode("ascii", "replace")
- raise SyntaxError(msg)
-
- if not n:
- msg = "Not an IM file"
- raise SyntaxError(msg)
-
- # Basic attributes
- self._size = self.info[SIZE]
- self.mode = self.info[MODE]
-
- # Skip forward to start of image data
- while s and s[:1] != b"\x1A":
- s = self.fp.read(1)
- if not s:
- msg = "File truncated"
- raise SyntaxError(msg)
-
- if LUT in self.info:
- # convert lookup table to palette or lut attribute
- palette = self.fp.read(768)
- greyscale = 1 # greyscale palette
- linear = 1 # linear greyscale palette
- for i in range(256):
- if palette[i] == palette[i + 256] == palette[i + 512]:
- if palette[i] != i:
- linear = 0
- else:
- greyscale = 0
- if self.mode in ["L", "LA", "P", "PA"]:
- if greyscale:
- if not linear:
- self.lut = list(palette[:256])
- else:
- if self.mode in ["L", "P"]:
- self.mode = self.rawmode = "P"
- elif self.mode in ["LA", "PA"]:
- self.mode = "PA"
- self.rawmode = "PA;L"
- self.palette = ImagePalette.raw("RGB;L", palette)
- elif self.mode == "RGB":
- if not greyscale or not linear:
- self.lut = list(palette)
-
- self.frame = 0
-
- self.__offset = offs = self.fp.tell()
-
- self._fp = self.fp # FIXME: hack
-
- if self.rawmode[:2] == "F;":
- # ifunc95 formats
- try:
- # use bit decoder (if necessary)
- bits = int(self.rawmode[2:])
- if bits not in [8, 16, 32]:
- self.tile = [("bit", (0, 0) + self.size, offs, (bits, 8, 3, 0, -1))]
- return
- except ValueError:
- pass
-
- if self.rawmode in ["RGB;T", "RYB;T"]:
- # Old LabEye/3PC files. Would be very surprised if anyone
- # ever stumbled upon such a file ;-)
- size = self.size[0] * self.size[1]
- self.tile = [
- ("raw", (0, 0) + self.size, offs, ("G", 0, -1)),
- ("raw", (0, 0) + self.size, offs + size, ("R", 0, -1)),
- ("raw", (0, 0) + self.size, offs + 2 * size, ("B", 0, -1)),
- ]
- else:
- # LabEye/IFUNC files
- self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))]
-
- @property
- def n_frames(self):
- return self.info[FRAMES]
-
- @property
- def is_animated(self):
- return self.info[FRAMES] > 1
-
- def seek(self, frame):
- if not self._seek_check(frame):
- return
-
- self.frame = frame
-
- if self.mode == "1":
- bits = 1
- else:
- bits = 8 * len(self.mode)
-
- size = ((self.size[0] * bits + 7) // 8) * self.size[1]
- offs = self.__offset + frame * size
-
- self.fp = self._fp
-
- self.tile = [("raw", (0, 0) + self.size, offs, (self.rawmode, 0, -1))]
-
- def tell(self):
- return self.frame
-
-
-#
-# --------------------------------------------------------------------
-# Save IM files
-
-
-SAVE = {
- # mode: (im type, raw mode)
- "1": ("0 1", "1"),
- "L": ("Greyscale", "L"),
- "LA": ("LA", "LA;L"),
- "P": ("Greyscale", "P"),
- "PA": ("LA", "PA;L"),
- "I": ("L 32S", "I;32S"),
- "I;16": ("L 16", "I;16"),
- "I;16L": ("L 16L", "I;16L"),
- "I;16B": ("L 16B", "I;16B"),
- "F": ("L 32F", "F;32F"),
- "RGB": ("RGB", "RGB;L"),
- "RGBA": ("RGBA", "RGBA;L"),
- "RGBX": ("RGBX", "RGBX;L"),
- "CMYK": ("CMYK", "CMYK;L"),
- "YCbCr": ("YCC", "YCbCr;L"),
-}
-
-
-def _save(im, fp, filename):
- try:
- image_type, rawmode = SAVE[im.mode]
- except KeyError as e:
- msg = f"Cannot save {im.mode} images as IM"
- raise ValueError(msg) from e
-
- frames = im.encoderinfo.get("frames", 1)
-
- fp.write(f"Image type: {image_type} image\r\n".encode("ascii"))
- if filename:
- # Each line must be 100 characters or less,
- # or: SyntaxError("not an IM file")
- # 8 characters are used for "Name: " and "\r\n"
- # Keep just the filename, ditch the potentially overlong path
- name, ext = os.path.splitext(os.path.basename(filename))
- name = "".join([name[: 92 - len(ext)], ext])
-
- fp.write(f"Name: {name}\r\n".encode("ascii"))
- fp.write(("Image size (x*y): %d*%d\r\n" % im.size).encode("ascii"))
- fp.write(f"File size (no of images): {frames}\r\n".encode("ascii"))
- if im.mode in ["P", "PA"]:
- fp.write(b"Lut: 1\r\n")
- fp.write(b"\000" * (511 - fp.tell()) + b"\032")
- if im.mode in ["P", "PA"]:
- im_palette = im.im.getpalette("RGB", "RGB;L")
- colors = len(im_palette) // 3
- palette = b""
- for i in range(3):
- palette += im_palette[colors * i : colors * (i + 1)]
- palette += b"\x00" * (256 - colors)
- fp.write(palette) # 768 bytes
- ImageFile._save(im, fp, [("raw", (0, 0) + im.size, 0, (rawmode, 0, -1))])
-
-
-#
-# --------------------------------------------------------------------
-# Registry
-
-
-Image.register_open(ImImageFile.format, ImImageFile)
-Image.register_save(ImImageFile.format, _save)
-
-Image.register_extension(ImImageFile.format, ".im")
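The register_open/register_save/register_extension calls above are what let Pillow route .im files through ImImageFile and _save. Below is a small, hedged usage sketch; the file names are placeholders and it assumes a multi-frame IM file is actually available.

from PIL import Image  # the registrations above make Image.open()/save() IM-aware

with Image.open("example.im") as im:       # parsed by ImImageFile._open()
    print(im.format, im.mode, im.size)     # e.g. "IM", "L", (512, 512)
    if im.n_frames > 1:                    # n_frames comes from the FRAMES header tag
        im.seek(1)                         # frame access goes through ImImageFile.seek()
    im.convert("RGB").save("copy.im")      # written by _save() using the SAVE table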
diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py
deleted file mode 100644
index dba637117a0ac148af65c75853dd3bffbbbd1154..0000000000000000000000000000000000000000
--- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py
+++ /dev/null
@@ -1,308 +0,0 @@
-from fontTools import ttLib
-from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
-from fontTools.misc import sstruct
-from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
-from . import DefaultTable
-import sys
-import struct
-import array
-import logging
-
-log = logging.getLogger(__name__)
-
-postFormat = """
- >
- formatType: 16.16F
- italicAngle: 16.16F # italic angle in degrees
- underlinePosition: h
- underlineThickness: h
- isFixedPitch: L
- minMemType42: L # minimum memory if TrueType font is downloaded
- maxMemType42: L # maximum memory if TrueType font is downloaded
- minMemType1: L # minimum memory if Type1 font is downloaded
- maxMemType1: L # maximum memory if Type1 font is downloaded
-"""
-
-postFormatSize = sstruct.calcsize(postFormat)
-
-
-class table__p_o_s_t(DefaultTable.DefaultTable):
- def decompile(self, data, ttFont):
- sstruct.unpack(postFormat, data[:postFormatSize], self)
- data = data[postFormatSize:]
- if self.formatType == 1.0:
- self.decode_format_1_0(data, ttFont)
- elif self.formatType == 2.0:
- self.decode_format_2_0(data, ttFont)
- elif self.formatType == 3.0:
- self.decode_format_3_0(data, ttFont)
- elif self.formatType == 4.0:
- self.decode_format_4_0(data, ttFont)
- else:
- # unsupported format
- raise ttLib.TTLibError(
- "'post' table format %f not supported" % self.formatType
- )
-
- def compile(self, ttFont):
- data = sstruct.pack(postFormat, self)
- if self.formatType == 1.0:
- pass # we're done
- elif self.formatType == 2.0:
- data = data + self.encode_format_2_0(ttFont)
- elif self.formatType == 3.0:
- pass # we're done
- elif self.formatType == 4.0:
- data = data + self.encode_format_4_0(ttFont)
- else:
- # unsupported format
- raise ttLib.TTLibError(
- "'post' table format %f not supported" % self.formatType
- )
- return data
-
- def getGlyphOrder(self):
- """This function will get called by a ttLib.TTFont instance.
- Do not call this function yourself, use TTFont().getGlyphOrder()
- or its relatives instead!
- """
- if not hasattr(self, "glyphOrder"):
- raise ttLib.TTLibError("illegal use of getGlyphOrder()")
- glyphOrder = self.glyphOrder
- del self.glyphOrder
- return glyphOrder
-
- def decode_format_1_0(self, data, ttFont):
- self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]
-
- def decode_format_2_0(self, data, ttFont):
- (numGlyphs,) = struct.unpack(">H", data[:2])
- numGlyphs = int(numGlyphs)
- if numGlyphs > ttFont["maxp"].numGlyphs:
- # Assume the numGlyphs field is bogus, so sync with maxp.
- # I've seen this in one font, and if the assumption is
- # wrong elsewhere, well, so be it: it's hard enough to
- # work around _one_ non-conforming post format...
- numGlyphs = ttFont["maxp"].numGlyphs
- data = data[2:]
- indices = array.array("H")
- indices.frombytes(data[: 2 * numGlyphs])
- if sys.byteorder != "big":
- indices.byteswap()
- data = data[2 * numGlyphs :]
- maxIndex = max(indices)
- self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
- self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
- for glyphID in range(numGlyphs):
- index = indices[glyphID]
- if index > 257:
- try:
- name = extraNames[index - 258]
- except IndexError:
- name = ""
- else:
- # fetch names from standard list
- name = standardGlyphOrder[index]
- glyphOrder[glyphID] = name
- self.build_psNameMapping(ttFont)
-
- def build_psNameMapping(self, ttFont):
- mapping = {}
- allNames = {}
- for i in range(ttFont["maxp"].numGlyphs):
- glyphName = psName = self.glyphOrder[i]
- if glyphName == "":
- glyphName = "glyph%.5d" % i
- if glyphName in allNames:
- # make up a new glyphName that's unique
- n = allNames[glyphName]
- while (glyphName + "#" + str(n)) in allNames:
- n += 1
- allNames[glyphName] = n + 1
- glyphName = glyphName + "#" + str(n)
-
- self.glyphOrder[i] = glyphName
- allNames[glyphName] = 1
- if glyphName != psName:
- mapping[glyphName] = psName
-
- self.mapping = mapping
-
- def decode_format_3_0(self, data, ttFont):
- # Setting self.glyphOrder to None will cause the TTFont object
- # to try to construct glyph names from a Unicode cmap table.
- self.glyphOrder = None
-
- def decode_format_4_0(self, data, ttFont):
- from fontTools import agl
-
- numGlyphs = ttFont["maxp"].numGlyphs
- indices = array.array("H")
- indices.frombytes(data)
- if sys.byteorder != "big":
- indices.byteswap()
- # In some older fonts, the size of the post table doesn't match
- # the number of glyphs. Sometimes it's bigger, sometimes smaller.
- self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
- for i in range(min(len(indices), numGlyphs)):
- if indices[i] == 0xFFFF:
- self.glyphOrder[i] = ""
- elif indices[i] in agl.UV2AGL:
- self.glyphOrder[i] = agl.UV2AGL[indices[i]]
- else:
- self.glyphOrder[i] = "uni%04X" % indices[i]
- self.build_psNameMapping(ttFont)
-
- def encode_format_2_0(self, ttFont):
- numGlyphs = ttFont["maxp"].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- assert len(glyphOrder) == numGlyphs
- indices = array.array("H")
- extraDict = {}
- extraNames = self.extraNames = [
- n for n in self.extraNames if n not in standardGlyphOrder
- ]
- for i in range(len(extraNames)):
- extraDict[extraNames[i]] = i
- for glyphID in range(numGlyphs):
- glyphName = glyphOrder[glyphID]
- if glyphName in self.mapping:
- psName = self.mapping[glyphName]
- else:
- psName = glyphName
- if psName in extraDict:
- index = 258 + extraDict[psName]
- elif psName in standardGlyphOrder:
- index = standardGlyphOrder.index(psName)
- else:
- index = 258 + len(extraNames)
- extraDict[psName] = len(extraNames)
- extraNames.append(psName)
- indices.append(index)
- if sys.byteorder != "big":
- indices.byteswap()
- return (
- struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
- )
-
- def encode_format_4_0(self, ttFont):
- from fontTools import agl
-
- numGlyphs = ttFont["maxp"].numGlyphs
- glyphOrder = ttFont.getGlyphOrder()
- assert len(glyphOrder) == numGlyphs
- indices = array.array("H")
- for glyphID in glyphOrder:
- glyphID = glyphID.split("#")[0]
- if glyphID in agl.AGL2UV:
- indices.append(agl.AGL2UV[glyphID])
- elif len(glyphID) == 7 and glyphID[:3] == "uni":
- indices.append(int(glyphID[3:], 16))
- else:
- indices.append(0xFFFF)
- if sys.byteorder != "big":
- indices.byteswap()
- return indices.tobytes()
-
- def toXML(self, writer, ttFont):
- formatstring, names, fixes = sstruct.getformat(postFormat)
- for name in names:
- value = getattr(self, name)
- writer.simpletag(name, value=value)
- writer.newline()
- if hasattr(self, "mapping"):
- writer.begintag("psNames")
- writer.newline()
- writer.comment(
- "This file uses unique glyph names based on the information\n"
- "found in the 'post' table. Since these names might not be unique,\n"
- "we have to invent artificial names in case of clashes. In order to\n"
- "be able to retain the original information, we need a name to\n"
- "ps name mapping for those cases where they differ. That's what\n"
- "you see below.\n"
- )
- writer.newline()
- items = sorted(self.mapping.items())
- for name, psName in items:
- writer.simpletag("psName", name=name, psName=psName)
- writer.newline()
- writer.endtag("psNames")
- writer.newline()
- if hasattr(self, "extraNames"):
- writer.begintag("extraNames")
- writer.newline()
- writer.comment(
- "following are the name that are not taken from the standard Mac glyph order"
- )
- writer.newline()
- for name in self.extraNames:
- writer.simpletag("psName", name=name)
- writer.newline()
- writer.endtag("extraNames")
- writer.newline()
- if hasattr(self, "data"):
- writer.begintag("hexdata")
- writer.newline()
- writer.dumphex(self.data)
- writer.endtag("hexdata")
- writer.newline()
-
- def fromXML(self, name, attrs, content, ttFont):
- if name not in ("psNames", "extraNames", "hexdata"):
- setattr(self, name, safeEval(attrs["value"]))
- elif name == "psNames":
- self.mapping = {}
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "psName":
- self.mapping[attrs["name"]] = attrs["psName"]
- elif name == "extraNames":
- self.extraNames = []
- for element in content:
- if not isinstance(element, tuple):
- continue
- name, attrs, content = element
- if name == "psName":
- self.extraNames.append(attrs["name"])
- else:
- self.data = readHex(content)
-
-
-def unpackPStrings(data, n):
- # extract n Pascal strings from data.
- # if there is not enough data, use ""
-
- strings = []
- index = 0
- dataLen = len(data)
-
- for _ in range(n):
- if dataLen <= index:
- length = 0
- else:
- length = byteord(data[index])
- index += 1
-
- if dataLen <= index + length - 1:
- name = ""
- else:
- name = tostr(data[index : index + length], encoding="latin1")
- strings.append(name)
- index += length
-
- if index < dataLen:
- log.warning("%d extra bytes in post.stringData array", dataLen - index)
-
- elif dataLen < index:
- log.warning("not enough data in post.stringData array")
-
- return strings
-
-
-def packPStrings(strings):
- data = b""
- for s in strings:
- data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
- return data
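In normal use the table class above is reached through TTFont rather than instantiated directly. The following is a small, hedged sketch of that path; the font path is a placeholder and it assumes a TrueType font whose post table is format 2.0.

from fontTools.ttLib import TTFont

font = TTFont("example.ttf")
post = font["post"]                     # lazily decompiles via table__p_o_s_t.decompile()
print(post.formatType, post.italicAngle, post.isFixedPitch)
print(font.getGlyphOrder()[:10])        # format 2.0 names: standardGlyphOrder plus extraNames
font.save("example-copy.ttf")           # compile() re-encodes the name indices and Pascal strings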
diff --git a/spaces/cihyFjudo/fairness-paper-search/Aashiqui 2 movie download blu-ray movies for free Enjoy the romantic musical drama in HD quality.md b/spaces/cihyFjudo/fairness-paper-search/Aashiqui 2 movie download blu-ray movies for free Enjoy the romantic musical drama in HD quality.md
deleted file mode 100644
index 1e13eb5819af49717b3e8f41ed12e5d0ef54b00a..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Aashiqui 2 movie download blu-ray movies for free Enjoy the romantic musical drama in HD quality.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Achaneyanenikkishtam Malayalam Movie Songs Downloadl Watch Mohanlals Guest Appearance and Sing Along.md b/spaces/cihyFjudo/fairness-paper-search/Achaneyanenikkishtam Malayalam Movie Songs Downloadl Watch Mohanlals Guest Appearance and Sing Along.md
deleted file mode 100644
index b6fe602d0ef8abe33968e01541bf12b2f5f8f3b3..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Achaneyanenikkishtam Malayalam Movie Songs Downloadl Watch Mohanlals Guest Appearance and Sing Along.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Malayalam song download: here you can download Malayalam songs and listen to them for free in high quality. All the superhit Malayalam songs of new and legendary artists are available here, and you can also explore Malayalam songs from A to Z for free, along with the latest new releases.
-
Achaneyanenikkishtam Malayalam Movie Songs Downloadl
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/Myob accountright premier v19 crack rar travaillez en ligne ou hors ligne collaborez avec votre quipe et votre comptable.md b/spaces/cihyFjudo/fairness-paper-search/Myob accountright premier v19 crack rar travaillez en ligne ou hors ligne collaborez avec votre quipe et votre comptable.md
deleted file mode 100644
index 26ec6a27792dcf4ed23fcd4b1df4b5fe7f3064f5..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Myob accountright premier v19 crack rar travaillez en ligne ou hors ligne collaborez avec votre quipe et votre comptable.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/cihyFjudo/fairness-paper-search/Nie opuszczaj mnie. Kazuo Ishiguro. Mobi. Co kryje si za fasad idyllicznego miejsca?.md b/spaces/cihyFjudo/fairness-paper-search/Nie opuszczaj mnie. Kazuo Ishiguro. Mobi. Co kryje si za fasad idyllicznego miejsca?.md
deleted file mode 100644
index 02a7b63389503262bfa5457e46929f2f7608f62b..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/Nie opuszczaj mnie. Kazuo Ishiguro. Mobi. Co kryje si za fasad idyllicznego miejsca?.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-Kazuo Ishiguro was born in 1954 in Nagasaki. He is a British writer of Japanese descent, the author of seven novels and one collection of short stories; he also writes screenplays, short pieces for the press, and song lyrics. His most important books include "Malarz świata ułudy" (An Artist of the Floating World), "Okruchy dnia" (The Remains of the Day) and "Pejzaż w kolorze sepii" (A Pale View of Hills). In 2017 the Swedish Academy once again surprised readers who follow the Nobel verdicts: the Nobel Prize in Literature went to Kazuo Ishiguro, although many expected that if the award went to an author tied to the Japanese cultural sphere, it would be Haruki Murakami. Ishiguro received the Nobel as a writer "who, in novels of great emotional force, has uncovered the abyss beneath our illusory sense of connection with the world".
-Ishiguro's family moved to Great Britain when he was five. Although he spent his earliest years in Japan, growing up in a British environment shaped his youth most strongly. He has often spoken about how emigrant life influenced his writing: on the one hand he saw life in Britain from a different perspective than his peers, on the other an imagined picture of Japan formed in his mind, one he had to revise after visiting his homeland. He still lives and writes in London.
-He attended Stoughton Primary School and then Woking County Grammar School in Surrey, in the town where his father worked. After finishing school he spent a gap year in 1973 travelling around the United States and Canada, recording songs and sending them to record labels. In 1974 he began studying at the University of Kent in Canterbury, graduating in 1978 with a Bachelor of Arts in English and philosophy. He later studied at the University of East Anglia, where his creative-writing dissertation became the basis of his first book.
-The works of Kazuo Ishiguro
-Ishiguro made his debut in 1982 with the novel "Pejzaż w kolorze sepii". Its heroine, Etsuko, is a Japanese widow living in Britain who carries a deep sense of guilt over her daughter's suicide. Unable to cope with the trauma of losing her child, she is also troubled by dreams in which memories of the tragic war in Japan return. In the novel Ishiguro draws readers into a kind of game in which the past begins to blur into the present.
-His second novel, "Malarz świata ułudy", appeared in 1986 and is one of the more interesting books in his output. It portrays an eccentric painter who leads an entirely ordinary life until, one day, he hits on an unusual idea: he puts himself on trial, playing prosecutor, defendant and judge at once. The reader uncovers the story out of chronological order, through fragments of the life of the painter Masuji Ono, with the Second World War and the devastating American bombings once again in the background.
-One of Ishiguro's best-known books, "Okruchy dnia", premiered in 1989. His most celebrated novel to date, it made the BBC's list of "100 books you must read" and won him the Booker Prize, and the film adaptation starring Anthony Hopkins added to its popularity. Its hero is Stevens, a butler who has spent most of his life serving Lord Darlington and who keeps running the estate even after his employer's death. One day he sets out for France to persuade the former housekeeper to return, and on the journey he realises that most of his life is already behind him and that time cannot be turned back.
-Six years later came "Niepocieszony" (The Unconsoled), in which Ishiguro follows a famous musician who arrives in an unnamed city in the middle of Europe to give a concert. The plot narrows to three days during which the pianist is constantly asked for favours and quickly realises he cannot meet everyone's expectations.
-"Kiedy byliśmy sierotami" (When We Were Orphans), styled as a noir crime story, was published in 2000. Set in the 1930s, it follows a famous British detective who travels to Shanghai to find his parents, who disappeared in unknown circumstances when he was a small boy. Christopher Banks sinks ever deeper into a city that hides more from him than he suspected.
-Five years later came "Nie opuszczaj mnie" (Never Let Me Go), this time a book inspired by science fiction. It presents the pupils of an elite school whose lives do not differ much from anyone else's; they experience ordinary emotions without realising that they are merely organ carriers, clones created for transplantation.
-After "Nie opuszczaj mnie" Ishiguro took the longest break of his career: almost ten years passed before "Pogrzebany olbrzym" (The Buried Giant), another turn, this time towards fantasy. The book depicts a medieval world in the years just after King Arthur's reign, overshadowed by the she-dragon Querig. Two protagonists set out across the land of the Britons to find their son; along the way they reach a village inhabited by ogres, meet the last knight of the Round Table and fight strange creatures, in a blend of the classic chivalric romance with fantasy.
-Kazuo Ishiguro's newest novel after the Nobel Prize
-In March 2021 Ishiguro published his first novel since receiving the Nobel Prize, once again taking readers into science fiction. "Klara i Słońce" (Klara and the Sun) shows a world seen through the eyes of an unusual heroine, a reality only a step away from our own. Klara is an Artificial Friend with highly developed cognitive abilities and empathy, waiting on a shop shelf for the customer who will take her home. Ishiguro asks what makes a human being unique and what a world in which machines felt emotions would look like.
-Ishiguro has also published the short-story collection "Nokturny" (Nocturnes, 2009). "Nokturny" and his other books, including "Nie opuszczaj mnie", "Pogrzebany olbrzym", "Kiedy byliśmy sierotami", "Niepocieszony", "Okruchy dnia", "Malarz świata ułudy", "Pejzaż w kolorze sepii" and "Klara i Słońce", are available as ebooks and in print from the Woblink online bookstore, and selected titles are also available as audiobooks.
aaccfb2cb3
-
-
\ No newline at end of file
diff --git a/spaces/cihyFjudo/fairness-paper-search/The Dark Knight 2008 Brrip 720p Experience the Thrilling Action and Drama of the Batman Saga.md b/spaces/cihyFjudo/fairness-paper-search/The Dark Knight 2008 Brrip 720p Experience the Thrilling Action and Drama of the Batman Saga.md
deleted file mode 100644
index a4a34762892bc84be27fb9d929a9594e1068d8f9..0000000000000000000000000000000000000000
--- a/spaces/cihyFjudo/fairness-paper-search/The Dark Knight 2008 Brrip 720p Experience the Thrilling Action and Drama of the Batman Saga.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
If you are a fan of Indian music, you must have heard of AR Rahman, the legendary composer, singer, and musician who has won numerous awards and accolades for his work. He is widely regarded as one of the most influential and innovative artists in the world, who has created music for various genres, languages, and cultures. He is especially known for his hit songs in Telugu, one of the major languages spoken in India.
In this article, we will tell you how to download AR Rahman hit songs in Telugu, what some of the benefits of listening to his songs are, and why he is considered the Mozart of Madras.
-
Who is AR Rahman and why is he famous?
-
AR Rahman, whose full name is Allah Rakha Rahman, was born as AS Dileep Kumar on January 6, 1967, in Chennai, Tamil Nadu. He started learning piano at the age of four and assisted his father, who was a music composer for Malayalam films. After his father's death, he dropped out of school and started working as a professional keyboardist to support his family. He later received a scholarship to study Western classical music at Trinity College of Music in London.
-
In 1988, he converted to Islam after a sister's recovery from a serious illness. He changed his name to Allah Rakha Rahman and began composing music for advertisements and documentaries. In 1991, he met film director Mani Ratnam, who asked him to compose the music for his film Roja. The film's soundtrack became a huge success and won him his first National Film Award for Best Music Direction.
-
Since then, he has composed music for over 100 films in various languages, including Tamil, Hindi, Telugu, Malayalam, English, Arabic, Chinese, and Japanese. He has also composed music for stage shows, albums, television shows, video games, and documentaries. He has won six National Film Awards, two Academy Awards, two Grammy Awards, a BAFTA Award, a Golden Globe Award, fifteen Filmfare Awards, seventeen Filmfare Awards South, and many other honors. He is also a recipient of the Padma Shri and Padma Bhushan awards from the Indian government.
-
Some of his most famous works include Bombay (1995), Dil Se (1998), Taal (1999), Lagaan (2001), Rang De Basanti (2006), Slumdog Millionaire (2008), Delhi-6 (2009), Rockstar (2011), Highway (2014), OK Kanmani (2015), Mersal (2017), 99 Songs (2021), etc.
-
What are some of his hit songs in the Telugu language?
-
AR Rahman has composed music for many Telugu films and albums over the years. Some of his hit songs in Telugu language are:
-
-
Poovullo Daagunna from Jeans (1998)
-
Pedave Palikina from Nani (2004)
-
Munbe Vaa from Sillunu Oru Kaadhal (2006)
-
Nuvvu Nenu Prema from Nuvvu Nenu Prema (2006)
-
Jai Ho from Slumdog Millionaire (2008)
-
Nenjae Yezhu from Maryan (2013)
-
Mental Manadhil from OK Bangaram (2015)
-
Cheliyaa from Cheliyaa (2017)
-
Naan Varuven from Raavan (2010)
-
Yeh Haseen Vadiyan from Roja (1992)
-
Chinna Chinna Aasai from Roja (1992)
-
Sakhiya from Sakhi (2000)
-
Yedho Ondru from Paiyaa (2010)
-
Vellipomaakey from Saahasam Swaasaga Saagipo (2016)
-
-
How to download AR Rahman hit songs in Telugu?
-
If you want to download AR Rahman hit songs in Telugu, you have many options to choose from. There are many websites and apps that offer legal and safe downloads of his songs. However, you should be careful of some sites that may contain viruses, malware, or pirated content. Here are some of the best sites to download his songs in Telugu:
-
-
Gaana
-
Gaana is one of the most popular music streaming and downloading platforms in India. It has a huge collection of AR Rahman songs in various languages, including Telugu. You can download his songs for free or subscribe to Gaana Plus for unlimited downloads and offline listening. Here are the steps to download his songs from Gaana:
-
-
Go to Gaana.com or download the Gaana app on your device.
-
Search for AR Rahman or his songs in the search bar.
-
Select the song you want to download and click on the download icon.
-
If you are a Gaana Plus subscriber, you can download the song directly. If not, you can sign up for a free trial or pay for a subscription plan.
-
Enjoy listening to his songs offline.
-
-
JioSaavn
-
JioSaavn is another popular music streaming and downloading platform in India. It has a large collection of AR Rahman songs in various languages, including Telugu. You can download his songs for free or subscribe to JioSaavn Pro for unlimited downloads and offline listening. Here are the steps to download his songs from JioSaavn:
-
-
Go to JioSaavn.com or download the JioSaavn app on your device.
-
Search for AR Rahman or his songs in the search bar.
-
Select the song you want to download and click on the download icon.
-
If you are a JioSaavn Pro subscriber, you can download the song directly. If not, you can sign up for a free trial or pay for a subscription plan.
-
Enjoy listening to his songs offline.
-
-
Hungama Music
-
Hungama Music is another popular music streaming and downloading platform in India. It has a good collection of AR Rahman songs in various languages, including Telugu. You can download his songs for free or subscribe to Hungama Music Pro for unlimited downloads and offline listening. Here are the steps to download his songs from Hungama Music:
-
-
Go to Hungama.com or download the Hungama Music app on your device.
-
Search for AR Rahman or his songs in the search bar.
-
Select the song you want to download and click on the download icon.
-
If you are a Hungama Music Pro subscriber, you can download the song directly. If not, you can sign up for a free trial or pay for a subscription plan.
-
Enjoy listening to his songs offline.
-
-
Tips and tricks to enhance the downloading experience
-
Here are some tips and tricks to enhance your downloading experience of AR Rahman hit songs in Telugu:
-
-
Make sure you have a stable and fast internet connection before downloading.
-
Check the file size and format of the song before downloading. Choose the one that suits your device and storage space.
-
Create playlists of your favorite AR Rahman songs and download them in bulk.
-
Delete any unwanted or duplicate files from your device to free up space and avoid confusion.
-
Rename and organize your downloaded files according to your preference.
-
-
What are some of the benefits of listening to AR Rahman hit songs in Telugu?
-
Listening to AR Rahman hit songs in Telugu has many benefits for your mind, body, and soul. Here are some of them:
Improving your mood and mental health
-
Music has a powerful effect on your mood and mental health. It can make you feel happy, sad, calm, excited, or anything in between. Listening to AR Rahman hit songs in Telugu can help you improve your mood and mental health by:
-
-
Reducing stress and anxiety. His songs have soothing melodies, harmonies, and rhythms that can relax your mind and body.
-
Boosting your self-esteem and confidence. His songs have inspiring lyrics, messages, and themes that can motivate you to achieve your goals and dreams.
-
Enhancing your creativity and imagination. His songs have innovative and original musical elements that can stimulate your brain and spark your imagination.
-
Healing your emotions and feelings. His songs have expressive and emotional vocals and instruments that can resonate with your feelings and help you cope with them.
-
-
Learning the Telugu language and culture
-
Music is a great way to learn a new language and culture. It can help you improve your vocabulary, grammar, pronunciation, listening, and speaking skills. Listening to AR Rahman hit songs in Telugu can help you learn the Telugu language and culture by:
-
-
Exposing you to a rich and diverse vocabulary. His songs have words from various fields, such as nature, love, spirituality, philosophy, etc.
-
Teaching you the grammar and syntax of the language. His songs have sentences that follow the rules and structures of the language.
-
Helping you with the pronunciation and accent of the language. His songs have clear and crisp vocals that can help you with the sounds and tones of the language.
-
Familiarizing you with the culture and traditions of the Telugu people. His songs have references to the history, geography, literature, art, festivals, customs, etc. of the Telugu people.
-
-
Appreciating the music and talent of AR Rahman
-
Music is an art form that requires skill, talent, passion, and dedication. It can help you appreciate the beauty and complexity of music and the talent of the musicians. Listening to AR Rahman hit songs in Telugu can help you appreciate the music and talent of AR Rahman by:
-
-
Admiring his musical genius and versatility. His songs have a variety of musical genres, styles, influences, techniques, instruments, etc.
-
Acknowledging his musical achievements and awards. His songs have won him many national and international awards and recognition.
-
Supporting his musical vision and mission. His songs have a positive impact on society and humanity.
-
Loving his musical personality and charisma. His songs have a personal touch and charm that can connect with his listeners.
-
-
Conclusion
-
In conclusion, AR Rahman is one of the most celebrated and respected musicians in the world. He has composed many hit songs in Telugu that are loved by millions of people. You can download his songs from various websites and apps that offer legal and safe downloads. You can also enjoy many benefits from listening to his songs, such as improving your mood, learning the language, appreciating the culture, and admiring his talent. So what are you waiting for? Download his songs today and enjoy the magic of his music!
-
Frequently Asked Questions
-
Here are some frequently asked questions about AR Rahman hit songs in Telugu:
-
Q: What is AR Rahman's real name?
-
A: AR Rahman's real name is Allah Rakha Rahman. He was born as AS Dileep Kumar on January 6, 1967.
-
Q: How many languages does AR Rahman compose music in?
-
A: AR Rahman composes music in various languages, including Tamil, Hindi, Telugu, Malayalam, English, Arabic, Chinese, Japanese, etc.
-
Q: Which film gave AR Rahman his first Oscar?
-
A: AR Rahman won his first Oscar for his music in Slumdog Millionaire (2008), directed by Danny Boyle.
-
Q: Which song made AR Rahman famous in Telugu?
-
A: One of the songs that made AR Rahman famous in Telugu was Yeh Haseen Vadiyan from Roja (1992), directed by Mani Ratnam.
-
Q: Where can I find more information about AR Rahman?
A: You can learn more about AR Rahman on his official website and social media pages, or through online encyclopedias such as Wikipedia, which cover his life, discography, and awards in detail.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Boost Your Brain and Your IQ with Countries Flags Game - Download for Free.md b/spaces/congsaPfin/Manga-OCR/logs/Boost Your Brain and Your IQ with Countries Flags Game - Download for Free.md
deleted file mode 100644
index a126629e3bdf1cd8ba3bceb43594e07126b3f845..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Boost Your Brain and Your IQ with Countries Flags Game - Download for Free.md
+++ /dev/null
@@ -1,101 +0,0 @@
-
-
Countries Flags Game Download: A Fun and Educational Way to Learn About the World
-
Do you love trivia games? Do you want to improve your geography skills? Do you enjoy learning about different cultures and histories? If you answered yes to any of these questions, then you might be interested in countries flags games. These are games that involve identifying, matching, or solving puzzles with flags and maps of various countries and regions around the world. They are not only entertaining, but also useful for enhancing your knowledge and awareness of the world.
-
Types of Countries Flags Games
-
There are many types of countries flags games available to download for free on different platforms and devices. Here are some of the most common ones:
Flag Quiz Games
These are games that test your knowledge of flags and maps with multiple choice questions, hints, and challenges. You can choose from different levels of difficulty, categories, and modes. You can also compete with your friends or other players online and see who has the best score. Some examples of flag quiz games are Flags 2: Multiplayer and Flags Quiz!.
-
Flag Matching Games
-
These are games that require you to match the flags with the countries or regions they belong to. You can either drag and drop the flags to their corresponding places on a map or select them from a list. You can also learn more details about each country or region, such as their capital city, population, area, or currency. Some examples of flag matching games are World Flags Quiz and Flag Master.
-
Flag Puzzle Games
-
These are games that challenge you to solve puzzles by arranging the pieces of flags or maps. You can either rotate, swap, or slide the pieces until they form a complete image. You can also adjust the number and shape of the pieces according to your preference. Some examples of flag puzzle games are Flag Jigsaw Puzzles and World Map Puzzle.
-
Benefits of Countries Flags Games
-
Countries flags games are not only fun, but also beneficial for your brain and your culture. Here are some of the benefits they offer:
-
Improve your memory and cognitive skills
-
By playing countries flags games, you can improve your memory and cognitive skills by recognizing patterns, shapes, and colors. You can also enhance your spatial awareness and visual perception by locating the flags and maps on a globe. These skills are essential for learning, problem-solving, and creativity.
-
Enhance your cultural awareness and curiosity
-
By playing countries flags games, you can enhance your cultural awareness and curiosity by learning about different countries and regions. You can discover their history, culture, geography, politics, and economy. You can also appreciate their diversity and uniqueness. These games can inspire you to travel, explore, and connect with other people around the world.
-
Have fun and compete with your friends or other players online
-
By playing countries flags games, you can have fun and compete with your friends or other players online. You can challenge yourself to beat your own records or to rank higher on the leaderboards. You can also share your achievements and progress on social media or chat with other players. These games can make learning more enjoyable and rewarding.
-
-
How to Download Countries Flags Games for Free
-
If you want to download countries flags games for free, you need to follow these steps:
-
Use a reliable and safe source such as Google Play Store, Microsoft Store, or Flagpedia.net
-
The first step is to use a reliable and safe source that offers free downloads of countries flags games. You can use the Google Play Store for Android devices, the Microsoft Store for Windows devices, or the Flagpedia.net website for any device. These sources have a wide selection of games that are verified and secure.
-
Choose a game that suits your preferences and device compatibility
-
The second step is to choose a game that suits your preferences and device compatibility. You can browse through the categories, ratings, reviews, screenshots, and descriptions of the games to find the one that interests you. You can also check the requirements, permissions, and updates of the games to make sure they are compatible with your device.
-
Follow the instructions to install and launch the game
-
The third step is to follow the instructions to install and launch the game. You can either click on the download button or scan the QR code to start the download process. You can then follow the prompts to accept the terms and conditions, grant the permissions, and complete the installation. You can then open the game and start playing.
-
Examples of Countries Flags Games to Download
-
Here are some examples of countries flags games that you can download for free:
-
Flags 2: Multiplayer - A multiplayer flag quiz game that improves your brain and challenges your IQ
-
This is a multiplayer flag quiz game that allows you to play with up to four players online or offline. You can choose from over 200 flags and 20 maps from all continents. You can also customize your avatar, name, color, and language. This game improves your brain and challenges your IQ by testing your knowledge of flags and maps.
-
Flags Quiz! - A free game full of fun that consists on guessing the names of hundreds of countries flags from around the world
-
This is a free game full of fun that consists on guessing the names of hundreds of countries flags from around the world. You can choose from four different game modes: Classic, Time Attack, Hard Mode, and Custom Mode. You can also use hints, skip questions, or ask for help from your friends. This game is suitable for all ages and levels.
-
Download all country flags of the world for free - A single package or embed service that allows you to use country flags in your news magazines, websites, software, mobile apps and master's thesis
-
This is a single package or embed service that allows you to use country flags in your news magazines, websites, software, mobile apps and master's thesis. You can download all country flags of the world for free in various formats (PNG, SVG) and sizes (16x16 px to 2500x2500 px). You can also use an API or a widget to embed country flags in your projects.
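If you want to pull these flag images into your own project programmatically, the snippet below is a minimal Python sketch of how that could look. The CDN host and URL pattern used here (flagcdn.com with a lowercase ISO 3166-1 alpha-2 country code and a width prefix) are assumptions based on Flagpedia's publicly documented embed service, so verify them against the site's own instructions before relying on them.

```python
import requests  # third-party HTTP client: pip install requests

# Assumed Flagpedia CDN pattern; confirm the exact format on the site itself.
FLAG_URL = "https://flagcdn.com/w320/{code}.png"  # 320 px wide PNG

def download_flag(country_code: str, filename: str) -> None:
    """Save one country's flag PNG, addressed by its ISO 3166-1 alpha-2 code."""
    response = requests.get(FLAG_URL.format(code=country_code.lower()), timeout=10)
    response.raise_for_status()  # stop on 404/5xx instead of writing an error page
    with open(filename, "wb") as f:
        f.write(response.content)

if __name__ == "__main__":
    download_flag("in", "india.png")  # for example, the flag of India
```

For a one-off use you could just as easily hotlink the same URL in an img tag on your page; a script like this is only worthwhile when you need many flags downloaded and renamed in bulk.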
-
Conclusion
-
Countries flags games are a fun and educational way to learn about the world. They come in several types: quizzes that test your knowledge of flags and maps, matching games that pair flags with the countries or regions they belong to, and puzzles that ask you to arrange pieces of flags or maps. They also offer benefits such as improving your memory and cognitive skills, enhancing your cultural awareness and curiosity, and letting you have fun and compete with your friends or other players online. You can download countries flags games for free from reliable and safe sources such as Google Play Store, Microsoft Store, or Flagpedia.net, choose a game that suits your preferences and device compatibility, and follow the instructions to install and launch it. Some examples of countries flags games that you can download for free are Flags 2: Multiplayer, Flags Quiz!, and Download all country flags of the world for free.
-
FAQs
-
Here are some frequently asked questions about countries flags games:
-
What are the best countries flags games to download?
-
The best countries flags games to download depend on your personal preference, device compatibility, and source reliability. However, some of the most popular and highly rated ones are Flags 2: Multiplayer, Flags Quiz!, and Download all country flags of the world for free.
-
How can I learn more about the countries and regions that I see in the games?
-
You can learn more about the countries and regions that you see in the games by clicking on them or reading their details. You can also use other sources such as Wikipedia, Google Maps, or online encyclopedias to find more information.
-
How can I improve my score and rank in the games?
-
You can improve your score and rank in the games by playing more often, choosing harder levels or modes, using fewer hints or skips, and answering faster. You can also review your mistakes and learn from them.
-
Are countries flags games suitable for children?
-
Yes, countries flags games are suitable for children. They are fun, educational, and easy to play. They can help children develop their memory, cognitive, and cultural skills. They can also spark their interest and curiosity in the world.
-
Can I use countries flags in my own projects?
-
Yes, you can use countries flags in your own projects. You can download all country flags of the world for free from Flagpedia.net in various formats and sizes. You can also use an API or a widget to embed country flags in your projects. However, you should respect the intellectual property rights and licenses of the flag images and sources.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/Dream League Soccer 2023 Mod Apk The Ultimate Guide to Unlock Everything - AN1.md b/spaces/congsaPfin/Manga-OCR/logs/Dream League Soccer 2023 Mod Apk The Ultimate Guide to Unlock Everything - AN1.md
deleted file mode 100644
index 0654f27fa553e2a5f4054a9d8bdf32db191b9f8d..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/Dream League Soccer 2023 Mod Apk The Ultimate Guide to Unlock Everything - AN1.md
+++ /dev/null
@@ -1,90 +0,0 @@
-
-
Dream League Soccer 2023 Mod APK: Unlimited Coins and Diamonds
-
If you are a fan of soccer games, you might have heard of Dream League Soccer 2023, one of the most anticipated football games at the end of this year. With dozens of major updates waiting for you to discover, this game promises to bring you an immersive and realistic soccer experience on your mobile device. But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited coins and diamonds to build your dream team and dominate the league? Well, that's where a mod apk comes in handy. In this article, we will tell you everything you need to know about Dream League Soccer 2023 mod apk, including its features, how to download and install it, and some frequently asked questions.
-
Introduction
-
What is Dream League Soccer 2023?
-
Dream League Soccer 2023, or DLS 23 for short, is a soccer simulation game developed by First Touch Games. It is the latest installment in the popular Dream League Soccer series, which has been downloaded over 500 million times on Google Play Store. In this game, you can create your own soccer club, recruit players from around the world, customize your stadium, kits, and logos, and compete in various leagues and tournaments. You can also play online with other players or challenge your friends in multiplayer mode. The game features stunning graphics, realistic animations, dynamic sound effects, and intuitive controls that will make you feel like you are on the pitch.
-
What is a mod apk?
A mod apk is a modified version of an original application that has been altered by third-party developers to add or remove certain features. A mod apk can enhance the functionality, performance, or appearance of an app, or unlock some premium or paid features for free. For example, a mod apk for a game can give you unlimited resources, access to all levels or modes, or remove ads or in-app purchases.
-
Why use a mod apk for Dream League Soccer 2023?
-
While Dream League Soccer 2023 is a free-to-play game, it also has some in-game currency and items that require real money to purchase. These include coins and diamonds, which are used to buy players, upgrade your stadium, or unlock other features. However, earning coins and diamonds in the game can be time-consuming and tedious, especially if you want to have the best players and facilities. That's why some players prefer to use a mod apk for Dream League Soccer 2023, which can give them unlimited coins and diamonds without spending any money. This way, they can enjoy the game without any limitations or frustrations.
-
Features of Dream League Soccer 2023 Mod APK
-
Unlimited coins and diamonds
-
The main feature of Dream League Soccer 2023 mod apk is that it gives you unlimited coins and diamonds. Coins are the basic currency in the game, which are used to buy players from the transfer market or improve your stadium. Diamonds are the premium currency in the game, which are used to buy special players or items from the store. With unlimited coins and diamonds, you can build your dream team with ease and have access to all the features in the game.
-
Mega menu
-
Another feature of Dream League Soccer 2023 mod apk is that it has a mega menu that allows you to customize various aspects of the game. For example, you can change the difficulty level, enable or disable cheats, adjust the camera angle, or modify the player attributes. You can also activate some cheats, such as unlimited stamina, no offside, or no fouls. The mega menu gives you more control and flexibility over the game.
-
Unlocked logos, kits, and coaches
-
Another feature of Dream League Soccer 2023 mod apk is that it unlocks all the logos, kits, and coaches in the game. Logos and kits are the symbols and uniforms of your club, which you can customize to your liking. Coaches are the staff members who help you improve your team's performance and skills. With the mod apk, you can choose from hundreds of logos and kits from different clubs and countries, or create your own. You can also hire any coach you want, regardless of their level or price.
-
How to download and install Dream League Soccer 2023 Mod APK
-
Step 1: Download the mod apk file from a trusted source
-
The first step to use Dream League Soccer 2023 mod apk is to download the mod apk file from a trusted source. There are many websites that offer mod apk files for various games and apps, but not all of them are safe and reliable. Some of them may contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you should be careful and do some research before downloading any mod apk file. You can check the reviews, ratings, comments, or feedback from other users to see if the website is trustworthy or not. You can also use an antivirus or anti-malware software to scan the file before installing it.
-
-
Step 2: Enable unknown sources on your device
-
The second step to use Dream League Soccer 2023 mod apk is to enable unknown sources on your device. Unknown sources are the settings that allow you to install applications from sources other than the official Google Play Store. By default, unknown sources are disabled on most Android devices for security reasons. However, since mod apk files are not available on the Google Play Store, you need to enable unknown sources to install them. To do this, you need to go to your device's settings, then security or privacy, then find and toggle on the option that says unknown sources or allow installation from unknown sources.
-
Step 3: Install the mod apk file and launch the game
-
The third and final step to use Dream League Soccer 2023 mod apk is to install the mod apk file and launch the game. To install the mod apk file, you need to locate it in your device's storage, then tap on it and follow the instructions on the screen. It may take a few seconds or minutes depending on the size of the file and your device's performance. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You should see a confirmation message that says "Dream League Soccer 2023 Mod APK Installed Successfully". Now you can enjoy the game with unlimited coins and diamonds and other features.
-
Conclusion
-
Dream League Soccer 2023 is a fantastic soccer game that lets you create your own club and compete in various leagues and tournaments. However, if you want to have more fun and freedom in the game, you can use a mod apk that gives you unlimited coins and diamonds and other features. In this article, we have explained what a mod apk is, why you should use it for Dream League Soccer 2023, what features it offers, and how to download and install it on your device. We hope this article was helpful and informative for you. If you have any questions or suggestions, feel free to leave them in the comments section below.
-
FAQs
-
Here are some frequently asked questions about Dream League Soccer 2023 mod apk:
-
Q: Is Dream League Soccer 2023 mod apk safe to use?
-
A: Yes, as long as you download it from a trusted source and scan it with an antivirus or anti-malware software before installing it. However, you should be aware that using a mod apk may violate the terms of service of the game or Google Play Store, and may result in your account being banned or suspended. Therefore, you should use it at your own risk and discretion.
-
Q: Do I need to root my device to use Dream League Soccer 2023 mod apk?
-
A: No, you do not need to root your device to use Dream League Soccer 2023 mod apk. Rooting is a process that gives you full access and control over your device's system settings and files, which may be required for some mod apks. However, Dream League Soccer 2023 mod apk does not require rooting, so you can use it without any problems.
-
Q: Can I update Dream League Soccer 2023 mod apk?
-
A: Yes, you can update Dream League Soccer 2023 mod apk whenever there is a new version available. However, you should not update it from the Google Play Store, as this may overwrite the mod apk and remove its features. Instead, you should download the latest version of the mod apk from the same source you downloaded it before, and install it over the existing one. This way, you can keep the mod apk and its features intact.
-
Q: Can I play online with Dream League Soccer 2023 mod apk?
-
A: Yes, you can play online with Dream League Soccer 2023 mod apk, but only with other players who are using the same mod apk. If you try to play online with players who are using the original version of the game, you may encounter some errors or compatibility issues. Therefore, it is recommended that you play online with your friends who are also using the mod apk, or join a community of mod apk users.
-
Q: Can I use Dream League Soccer 2023 mod apk on iOS devices?
-
A: No, you cannot use Dream League Soccer 2023 mod apk on iOS devices, as it is only compatible with Android devices. iOS devices have a different operating system and file format than Android devices, which makes it impossible to install or run mod apks on them. If you want to use a mod apk on iOS devices, you need to find a similar app that is designed for iOS devices, or use an emulator that can simulate an Android device on your iOS device.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/How to Get I am a Rider Song in High Quality MP3 320kbps Format.md b/spaces/congsaPfin/Manga-OCR/logs/How to Get I am a Rider Song in High Quality MP3 320kbps Format.md
deleted file mode 100644
index 8d516e7da439551078e6b02e9a093cd8ba53ae4e..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/How to Get I am a Rider Song in High Quality MP3 320kbps Format.md
+++ /dev/null
@@ -1,103 +0,0 @@
-
-
How to Download "I am a Rider" Song in MP3 320kbps
-
"I am a Rider" is a catchy and energetic song that has become a viral sensation on social media platforms like TikTok, Instagram, and YouTube. If you are a fan of this song and want to download it in high-quality MP3 format, you are in the right place. In this article, we will show you what "I am a Rider" song is, why you should download it in MP3 320kbps, and how to do it easily and safely.
"I am a Rider" is a song by Pakistani-British rapper Imran Khan, also known as IK. The song was originally released in 2013 as part of his second album, Satisfya. The song is also known as "Satisfya" or "Gaddi Lamborghini", as these are some of the prominent words in the chorus.
-
The origin and popularity of the song
-
Imran Khan is a popular rapper and singer who rose to fame with his debut album, Unforgettable, in 2009. He is known for his unique style of blending Punjabi and English lyrics with urban beats. His songs have been featured in Bollywood movies and have won several awards.
-
"I am a Rider" is one of his most successful songs, as it has over 500 million views on YouTube and over 100 million streams on Spotify. The song has also become a viral hit on social media platforms like TikTok, Instagram, and YouTube, where users create videos using the song as a background music. Some of the popular themes of these videos are car racing, bike stunts, action scenes, and dance moves.
-
The meaning and lyrics of the song
-
"I am a Rider" is a song that expresses Imran Khan's confidence and attitude as a rapper and a performer. He claims that he is a rider, a provider, and a fire-bringer who can satisfy his fans with his music. He also boasts about his wealth, fame, and skills as he compares himself to famous figures like Harry Houdini, Mr. Makaveli (Tupac Shakur), and diamond chain wearer.
-
The song has a catchy chorus that goes like this:
-
-
I'm a rider
-Provider
-Bring the heat girl
-I will bring the fire
-And my name keeps
-Scoring worldwider
-So my job is to satisfy yaaa (aaa, ay, satisfy aaa, ai)
Why Download "I am a Rider" Song in MP3 320kbps?
If you love listening to "I am a Rider", you might want to download it in MP3 320kbps format. Why? Because MP3 320kbps is the best quality option for MP3 files, offering clear sound, rich bass, and minimal distortion.
-
-
The benefits of MP3 320kbps format
-
MP3 is one of the most popular audio formats that compresses sound data to reduce file size without losing much quality. However, not all MP3 files are created equal. The quality of an MP P3 file depends on its bit rate, which is the number of bits per second that are used to encode the sound. The higher the bit rate, the better the quality and the larger the file size. MP3 files can have different bit rates, ranging from 32 kbps to 320 kbps. MP3 320kbps is the highest and best bit rate for MP3 files. Some of the benefits of MP3 320kbps format are: - It offers the closest sound quality to the original source, such as a CD or a studio recording. - It preserves the details and nuances of the sound, such as the vocals, instruments, and effects. - It delivers a rich and balanced sound, with clear highs, mids, and lows. - It reduces the noise and distortion that can occur in lower bit rates. - It is compatible with most devices and players that support MP3 format.
The best sources to download MP3 320kbps songs
-
There are many websites and apps that allow you to download MP3 320kbps songs for free or for a fee. However, not all of them are safe, legal, or reliable. Some of them may contain viruses, malware, or spyware that can harm your device or compromise your privacy. Some of them may also violate the copyright laws and infringe on the rights of the artists and producers.
-
Therefore, you should be careful and selective when choosing a source to download MP3 320kbps songs. Here are some tips to help you find the best sources:
Look for reputable and trusted websites and apps that have positive reviews and ratings from users and experts.
Check the terms and conditions and the privacy policy of the website or app before downloading anything.
Make sure the website or app has a secure connection (HTTPS) and a valid certificate.
Avoid clicking on pop-ups, ads, or links that look suspicious or irrelevant.
Use antivirus software and firewall to protect your device from potential threats.
Some of the best sources to download MP3 320kbps songs are:
Spotify Premium: Spotify is one of the most popular music streaming services, offering millions of songs in various genres and languages. Spotify Premium is a paid subscription that allows you to download up to 10,000 songs in high quality (up to 320 kbps) on up to five devices. You can also enjoy ad-free music, offline listening, and unlimited skips. Spotify Premium costs $9.99 per month for individual users, $14.99 per month for family users (up to six accounts), and $4.99 per month for student users.
YouTube Music Premium: YouTube Music is another popular music streaming service that offers a huge library of songs, videos, playlists, and live performances. YouTube Music Premium is a paid subscription that allows you to download songs in high quality (up to 256 kbps) on your device. You can also enjoy ad-free music, background play, and offline access. YouTube Music Premium costs $9.99 per month for individual users, $14.99 per month for family users (up to six accounts), and $4.99 per month for student users.
Amazon Music Unlimited: Amazon Music is a music streaming service that offers over 70 million songs in various genres and languages. Amazon Music Unlimited is a paid subscription that allows you to download songs in high quality on your device. You can also enjoy ad-free music, offline listening, and unlimited skips. Amazon Music Unlimited costs $9.99 per month for individual users, $14.99 per month for family users (up to six accounts), $7.99 per month for Prime members, and $3.99 per month for Echo device owners.
-
How to Download "I am a Rider" Song in MP3 320kbps?
-
If you want to download "I am a Rider" song in MP3 320kbps for free, you can use a YouTube to MP3 converter tool. This is a tool that allows you to convert any YouTube video into an MP3 file with your desired quality. However, you should be aware that this method may not be legal or ethical in some countries or regions, as it may violate the copyright laws and infringe on the rights of the artists and producers. Therefore, you should use this method at your own risk and discretion. Here are the steps to download "I am a Rider" song in MP3 320kbps using a YouTube to MP3 converter tool:
Step 1: Find a reliable YouTube to MP3 converter tool
-
There are many YouTube to MP3 converter tools available online, but not all of them are reliable, safe, or fast. Some of them may have low-quality conversions, limited options, annoying ads, or hidden fees. Therefore, you should find a reliable YouTube to MP3 converter tool that has the following features:
It supports MP3 320kbps quality option
It has a simple and user-friendly interface
It has a fast and smooth conversion process
It does not require any registration or installation
It does not have any malware, spyware, or viruses
One of the YouTube to MP3 converter tools that meets these criteria is 4K YouTube to MP3. This is a free and easy-to-use tool that allows you to download any YouTube video in MP3 320kbps quality with just a few clicks. You can also use this tool to download videos from other platforms like Vimeo, SoundCloud, Facebook, and Instagram.
-
Step 2: Copy and paste the YouTube link of the song
-
Once you have found a reliable YouTube to MP3 converter tool, the next step is to copy and paste the YouTube link of the song that you want to download. To do this, you need to:
Go to YouTube and search for "I am a Rider" song by Imran Khan
Click on the video that has the official audio of the song
Copy the URL of the video from the address bar of your browser
Go to the YouTube to MP3 converter tool and paste the URL in the input box
Alternatively, you can also use the browser extension or the desktop application of the YouTube to MP3 converter tool if they are available.
-
Step 3: Choose the MP3 320kbps quality option
-
After you have pasted the YouTube link of the song, the next step is to choose the MP3 320kbps quality option. To do this, you need to:
Click on the drop-down menu next to the input box
Select the MP3 320kbps option from the list of available formats and qualities
You can also choose other options like MP3 256kbps, MP3 128kbps, or M4A if you prefer.
-
Step 4: Download and enjoy the song
-
The final step is to download and enjoy the song. To do this, you need to:
Click on the "Convert" or "Download" button
Wait for a few seconds until the conversion is completed
Click on the "Download" or "Save" button
Choose a destination folder on your device where you want to save the file
Open the file and play it with your favorite music player
You can also share the file with your friends or family via email, Bluetooth, or social media.
-
Conclusion
-
"I am a Rider" is a popular and catchy song by Imran Khan that has become a viral sensation on social media platforms. If you want to download this song in high-quality MP3 320kbps format, you can use a YouTube to MP3 converter tool like 4K YouTube to MP3. This is a free and easy-to-use tool that allows you to convert any YouTube video into an MP3 file with your desired quality. However, you should be aware that this method may not be legal or ethical in some countries or regions, as it may violate the copyright laws and infringe on the rights of the artists and producers. Therefore, you should use this method at your own risk and discretion. Alternatively, you can also download "I am a Rider" song in MP3 320kbps format from legal and ethical sources like Spotify Premium, YouTube Music Premium, or Amazon Music Unlimited. These are paid subscriptions that allow you to download millions of songs in high-quality MP3 format on your device. You can also enjoy other benefits like ad-free music, offline listening, and unlimited skips. We hope this article has helped you learn how to download "I am a Rider" song in MP3 320kbps. If you have any questions or feedback, please feel free to leave a comment below. Happy listening!
FAQs
-
Here are some of the frequently asked questions about downloading "I am a Rider" song in MP3 320kbps:
-
-
Is it legal to download "I am a Rider" song in MP3 320kbps from YouTube?
-
It depends on the country or region where you live and the source where you download the song from. Some countries or regions may have strict copyright laws that prohibit downloading or distributing copyrighted content without the permission of the owners. Some sources may also have terms and conditions that forbid downloading or converting their content into other formats. Therefore, you should check the laws and the policies of the source before downloading anything from YouTube.
-
Is it safe to download "I am a Rider" song in MP3 320kbps from YouTube?
-
It depends on the tool that you use to download the song from YouTube. Some tools may be safe, reliable, and fast, while others may be unsafe, unreliable, or slow. Some tools may also contain viruses, malware, or spyware that can harm your device or compromise your privacy. Therefore, you should use a trusted and reputable tool like 4K YouTube to MP3 that has positive reviews and ratings from users and experts.
-
How long does it take to download "I am a Rider" song in MP3 320kbps from YouTube?
-
It depends on the speed of your internet connection, the size of the file, and the performance of the tool that you use to download the song from YouTube. Generally, it should not take more than a few minutes to download "I am a Rider" song in MP3 320kbps from YouTube. However, if you encounter any issues or errors, you can try again later or contact the support team of the tool.
-
How much space does "I am a Rider" song in MP3 320kbps take on my device?
-
It depends on the length and the quality of the song that you download from YouTube. Generally, an MP3 320kbps file takes about 2.4 MB per minute of audio. Therefore, if "I am a Rider" song is about 3 minutes long, it should take about 7.2 MB of space on your device.
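If you want to estimate the size for a different song or bit rate, the figure above comes from a simple calculation (bit rate times duration, ignoring the small overhead of ID3 tags and frame headers). A minimal Python sketch of that arithmetic:

```python
def estimated_mp3_size_mb(bitrate_kbps: float, duration_minutes: float) -> float:
    """Rough MP3 size in decimal megabytes: kbps -> bytes per second -> total MB."""
    bytes_per_second = bitrate_kbps * 1000 / 8
    return bytes_per_second * duration_minutes * 60 / 1_000_000

# A 3-minute track at 320 kbps works out to about 7.2 MB, matching the answer above.
print(estimated_mp3_size_mb(320, 3))  # 7.2
```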
-
Can I download other songs in MP3 320kbps from YouTube?
-
Yes, you can download other songs in MP3 320kbps from YouTube using the same method and tool that we have described in this article. However, you should be aware of the legal and ethical implications of doing so, as we have mentioned above.
-
-
\ No newline at end of file
diff --git a/spaces/congsaPfin/Manga-OCR/logs/What is Robi Red Cube APK 2022 and Why You Need It.md b/spaces/congsaPfin/Manga-OCR/logs/What is Robi Red Cube APK 2022 and Why You Need It.md
deleted file mode 100644
index 8475693569be2adff19a069ff4d9e8180e1a3f1b..0000000000000000000000000000000000000000
--- a/spaces/congsaPfin/Manga-OCR/logs/What is Robi Red Cube APK 2022 and Why You Need It.md
+++ /dev/null
@@ -1,228 +0,0 @@
-
-
Robi Red Cube APK 2022 Download: A Complete Guide for Retailers
-
If you are a retailer who sells Robi and Airtel SIMs and recharge services, you might have heard of the Robi Red Cube app. This app is a one-stop solution for all your easy load recharge and product requisition needs. It also offers many other features and benefits that can help you grow your business and earn more commissions.
-
In this article, we will explain what is Robi Red Cube app, how to download and install it, how to use it, and what are some alternatives to it. By the end of this article, you will have a clear idea of how to use this app to boost your sales and customer satisfaction.
What is Robi Red Cube App?
Robi Red Cube app is a mobile application developed by Robi Axiata Limited for its retailers. It allows retailers to perform easy load recharge for Robi and Airtel SIMs, check the offers according to the customer, request products, place complaints, view commissions, and manage their accounts. The app is not available on the Google Play Store, but it can be downloaded from other sources (see below).
-
Features of Robi Red Cube App
-
The app has many features that make it a useful tool for retailers. Some of the main features are:
-
-
Easy Load Recharge System: You can recharge any Robi or Airtel SIM with any amount or package using this app. You can also view the balance and validity of the SIMs.
-
Tong & GStore Recharge Available: You can also recharge Tong and GStore SIMs using this app. These are special SIMs that offer low-cost voice and data services.
-
Check the Offer According to the Customer: You can check the best offers for any Robi or Airtel SIM using this app. You can also send the offer details to the customer via SMS.
-
SIM Sales: You can sell new Robi or Airtel SIMs using this app. You can also activate the SIMs and register them with biometric verification.
-
Campaign: You can participate in various campaigns run by Robi or Airtel and earn rewards and incentives.
-
Commission: You can view your commission details and history using this app. You can also request for commission payout and check the status of your request.
-
Retailer Information: You can update your personal and business information using this app. You can also change your password and PIN.
-
Recharge History: You can view your recharge history and details using this app. You can also filter the history by date, amount, or SIM type.
-
Paint Reset: You can reset your paint balance using this app. Paint balance is a credit limit that allows you to recharge without having enough money in your account.
-
Device Control: You can control which devices can access your account using this app. You can also block or unblock any device.
-
-
Benefits of Robi Red Cube App
-
The app also has many benefits that make it a valuable asset for retailers. Some of the main benefits are:
-
-
Convenience: You can perform all your recharge and product requisition tasks using this app. You don't need to visit any Robi or Airtel outlet or use any other device. You can also access the app anytime and anywhere.
-
Security: You can secure your account and transactions using this app. You can use a password and a PIN to log in and verify your actions. You can also block any unauthorized device from accessing your account.
-
Efficiency: You can save time and money using this app. You can recharge any SIM with any amount or package in seconds. You can also check the offers and commissions in real-time. You can also avoid errors and frauds by using biometric verification and SMS confirmation.
-
Customer Satisfaction: You can increase your customer satisfaction and loyalty using this app. You can offer them the best deals and services according to their needs. You can also communicate with them via SMS and resolve their issues quickly.
-
Growth: You can grow your business and income using this app. You can sell more SIMs and recharge services to more customers. You can also participate in campaigns and earn rewards and incentives.
-
-
How to Download and Install Robi Red Cube App?
-
If you want to download and install Robi Red Cube app, you need to follow these steps:
-
Download Links for Robi Red Cube App
-
The app is not available on the Google Play Store, but you can download it from other sources. Here are some of the download links for Robi Red Cube app:
Installation Steps for Robi Red Cube App
After downloading the app from any of the above sources, you need to install it on your device. Here are the installation steps for Robi Red Cube app:
-
-
Go to the download folder on your device and locate the Robi Red Cube APK file.
-
Tap on the file and allow the installation from unknown sources if prompted.
-
Wait for the installation to complete and then open the app.
-
Login with your retailer ID and password. If you don't have an account, you can register with your mobile number and NID number.
-
Create a PIN for your account and verify it with an OTP sent to your mobile number.
-
You are now ready to use the app.
-
-
How to Use Robi Red Cube App?
-
The app is very easy to use and has a user-friendly interface. Here are some of the main functions that you can perform using the app:
-
-
Easy Load Recharge System
-
This is the main function of the app that allows you to recharge any Robi or Airtel SIM with any amount or package. To do this, you need to follow these steps:
-
-
Select the Easy Load option from the home screen of the app.
-
Enter the mobile number of the SIM that you want to recharge.
-
Select the amount or package that you want to recharge from the list of options.
-
Enter your PIN and confirm the transaction.
-
You will receive an SMS confirmation of the recharge along with the balance and validity details.
-
You can also view the recharge history and details by selecting the Recharge History option from the menu.
-
-
Tong & GStore Recharge Available
-
This is another function of the app that allows you to recharge Tong and GStore SIMs. These are special SIMs that offer low-cost voice and data services. To do this, you need to follow these steps:
-
-
Select the Tong or GStore option from the home screen of the app.
-
Enter the mobile number of the SIM that you want to recharge.
-
Select the amount or package that you want to recharge from the list of options.
-
Enter your PIN and confirm the transaction.
-
You will receive an SMS confirmation of the recharge along with the balance and validity details.
-
You can also view the recharge history and details by selecting the Recharge History option from the menu.
-
-
Check the Offer According to the Customer
-
This is a useful function of the app that allows you to check the best offers for any Robi or Airtel SIM. You can also send the offer details to the customer via SMS. To do this, you need to follow these steps:
-
-
Select the Offer option from the home screen of the app.
-
Enter the mobile number of the SIM that you want to check the offer for.
-
The app will show you the best offers for that SIM according to its usage and preference.
-
You can select any offer and see its details and benefits.
-
You can also send the offer details to the customer by selecting the Send SMS option.
-
The customer will receive an SMS with the offer details and instructions on how to activate it.
-
-
SIM Sales
-
This is a function of the app that allows you to sell new Robi or Airtel SIMs. You can also activate the SIMs and register them with biometric verification. To do this, you need to follow these steps:
-
-
Select the SIM Sales option from the home screen of the app.
-
Scan or enter the barcode of the SIM that you want to sell.
-
The app will show you the SIM details and price.
-
Enter your PIN and confirm the transaction.
-
The app will activate the SIM and send an SMS confirmation to it.
-
You can also register the SIM with biometric verification by selecting the Register option.
-
The app will ask you to scan or enter the NID number of the customer and capture their fingerprint and photo.
-
The app will verify the biometric data and register the SIM with it.
-
The app will send an SMS confirmation to both you and the customer.
-
-
Campaign
-
This is a function of the app that allows you to participate in various campaigns run by Robi or Airtel and earn rewards and incentives. To do this, you need to follow these steps:
-
-
Select the Campaign option from the home screen of the app.
-
The app will show you the list of ongoing and upcoming campaigns that you can join.
-
You can select any campaign and see its details, objectives, criteria, rewards, and terms and conditions.
-
You can also join any campaign by selecting the Join option.
-
The app will register you for that campaign and send you an SMS confirmation.
-
You can also view your progress and performance in any campaign by selecting the View option.
-
Commission
-
This is a function of the app that allows you to view your commission details and history. You can also request for commission payout and check the status of your request. To do this, you need to follow these steps:
-
-
Select the Commission option from the home screen of the app.
-
The app will show you your current commission balance and history.
-
You can filter the history by date, amount, or transaction type.
-
You can also request for commission payout by selecting the Request option.
-
The app will ask you to enter your bank account details and the amount that you want to withdraw.
-
Enter your PIN and confirm the request.
-
The app will process your request and send you an SMS confirmation.
-
You can also check the status of your request by selecting the Status option.
-
-
Retailer Information
-
This is a function of the app that allows you to update your personal and business information. You can also change your password and PIN. To do this, you need to follow these steps:
-
-
Select the Retailer Information option from the home screen of the app.
-
The app will show you your profile details and settings.
-
You can edit any information by selecting the Edit option.
-
The app will ask you to enter the new information and verify it with an OTP sent to your mobile number.
-
Enter your PIN and confirm the changes.
-
The app will update your information and send you an SMS confirmation.
-
You can also change your password or PIN by selecting the Change Password or Change PIN option.
-
The app will ask you to enter your current password or PIN and then enter the new one.
-
Enter your PIN and confirm the changes.
-
The app will change your password or PIN and send you an SMS confirmation.
-
-
Recharge History
-
This is a function of the app that allows you to view your recharge history and details. You can also filter the history by date, amount, or SIM type. To do this, you need to follow these steps:
-
-
Select the Recharge History option from the home screen of the app.
-
The app will show you your recharge history and details.
-
You can filter the history by date, amount, or SIM type by selecting the Filter option.
-
The app will show you the filtered history and details.
-
-
Paint Reset
-
This is a function of the app that allows you to reset your paint balance. Paint balance is a credit limit that allows you to recharge without having enough money in your account. To do this, you need to follow these steps:
-
-
Select the Paint Reset option from the home screen of the app.
-
The app will show you your current paint balance and limit.
-
You can reset your paint balance by selecting the Reset option.
-
The app will ask you to enter the amount that you want to reset.
-
Enter your PIN and confirm the reset.
-
The app will reset your paint balance and send you an SMS confirmation.
-
Device Control
-
This is a function of the app that allows you to control which devices can access your account. You can also block or unblock any device. To do this, you need to follow these steps:
-
-
Select the Device Control option from the home screen of the app.
-
The app will show you the list of devices that are linked to your account.
-
You can block any device by selecting the Block option.
-
The app will ask you to enter your PIN and confirm the block.
-
The app will block the device and send you an SMS confirmation.
-
You can also unblock any device by selecting the Unblock option.
-
The app will ask you to enter your PIN and confirm the unblock.
-
The app will unblock the device and send you an SMS confirmation.
-
-
Alternatives to Robi Red Cube App
-
If you are looking for some alternatives to Robi Red Cube app, you can try these apps:
-
My Robi App
-
This is an app that allows you to manage your Robi SIM and account. You can check your balance, validity, offers, packages, bills, and transactions. You can also recharge, buy bundles, pay bills, and transfer balance. You can also access various digital services and entertainment content. You can download this app from the Google Play Store or from https://www.robi.com.bd/my-robi/.
-
My Airtel App
-
This is an app that allows you to manage your Airtel SIM and account. You can check your balance, validity, offers, packages, bills, and transactions. You can also recharge, buy bundles, pay bills, and transfer balance. You can also access various digital services and entertainment content. You can download this app from the Google Play Store or from https://www.bd.airtel.com/my-airtel/.
-
Conclusion
-
Robi Red Cube app is a mobile application developed by Robi Axiata Limited for its retailers. It allows retailers to perform easy load recharge for Robi and Airtel SIMs, check the offers according to the customer, request products, place complaints, view commissions, and manage their accounts. The app is not available on the Google Play Store, but it can be downloaded from other sources. The app has many features and benefits that make it a useful tool for retailers. The app is also easy to use and has a user-friendly interface. However, if you are looking for some alternatives to Robi Red Cube app, you can try My Robi app or My Airtel app.
-
FAQs
-
Here are some of the frequently asked questions about Robi Red Cube app:
-
-
Q: How can I contact Robi customer care using Robi Red Cube app?
-
A: You can contact Robi customer care using Robi Red Cube app by selecting the Complaint option from the home screen of the app. You can then select the category and sub-category of your complaint and write a description of your issue. You can also attach a screenshot or a photo of your issue if needed. You can then submit your complaint and wait for a response from Robi customer care.
-
Q: How can I update Robi Red Cube app to the latest version?
-
A: You can update Robi Red Cube app to the latest version by downloading it from any of the sources mentioned above. You can then install it over the existing version of the app. You don't need to uninstall the previous version of the app.
-
Q: How can I delete my account from Robi Red Cube app?
-
A: You can delete your account from Robi Red Cube app by contacting Robi customer care and requesting them to deactivate your account. You will need to provide your retailer ID and mobile number for verification. Once your account is deactivated, you will not be able to use Robi Red Cube app anymore.
-
Q: How can I get more customers using Robi Red Cube app?
-
A: You can get more customers using Robi Red Cube app by offering them the best deals and services according to their needs. You can also communicate with them via SMS and resolve their issues quickly. You can also participate in campaigns and earn rewards and incentives.
-
Q: How can I get more commissions using Robi Red Cube app?
-
A: You can get more commissions using Robi Red Cube app by selling more SIMs and recharge services to more customers. You can also request a commission payout and check the status of your request, and you can view your commission details and history in the app.
- 197e85843d
-
-
\ No newline at end of file
diff --git a/spaces/contluForse/HuggingGPT/assets/Copal Dpb 1500 Drivers For Mac Best Practices and Tips.md b/spaces/contluForse/HuggingGPT/assets/Copal Dpb 1500 Drivers For Mac Best Practices and Tips.md
deleted file mode 100644
index 038332545fd6814c5c3b32eb3ea6d3b16957017a..0000000000000000000000000000000000000000
--- a/spaces/contluForse/HuggingGPT/assets/Copal Dpb 1500 Drivers For Mac Best Practices and Tips.md
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
Please note that many of these drivers are currently under development, and we do not necessarily have full specifications on all of them. We will fill in this list as we verify successful operation of these printers. You can help by testing this with your own printer and reporting the results!
-
-## Todo:
-
-- (Top Priority) Call the web API of the other open-source project text-generation-webui so that other LLM models can be used
-- When summarizing the source code of a large project, the text is too long and tokens overflow (the current approach simply bisects and discards the overflow, which is too crude and loses a lot of useful information)
-- The UI is not polished enough
-
diff --git a/spaces/dexrm/Weewee/README.md b/spaces/dexrm/Weewee/README.md
deleted file mode 100644
index f2233389d9aed5fac3aaca6eec0576c8e6f30f87..0000000000000000000000000000000000000000
--- a/spaces/dexrm/Weewee/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: Weewee
-emoji: 📚
-colorFrom: red
-colorTo: yellow
-sdk: docker
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/diacanFperku/AutoGPT/Amd A68m Chipset Driver Windows 7.md b/spaces/diacanFperku/AutoGPT/Amd A68m Chipset Driver Windows 7.md
deleted file mode 100644
index d76d9b685ebcd6179819d39c37a860589b318c37..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Amd A68m Chipset Driver Windows 7.md
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
Most of the popular drivers already available on the web will work fine for most parts of your PC. Some, however, only work with specific components or specific versions of Windows, so you will want to download the driver that matches your hardware. Several sites sell individually tailored drivers, but as with most drivers, it is best to download them directly from the manufacturer.
The following table lists the motherboards from the table above that are supported by drivers available on the Internet. Once you find your motherboard's name in this table, you can safely download the corresponding drivers.
-
In most cases, the simple process of installing or upgrading the AMD Ryzen chipset drivers can be completed without issues. However, if there have been recent changes to your system hardware configuration or the currently installed chipset drivers are extremely outdated, then it is recommended to uninstall the existing chipset drivers before installing the new package. Performing the uninstall ensures a clean system and can help reduce issues or conflicts which may occur with the new installation.
-
Key Features:
- Supports AMD Ryzen 1st and 2nd Generation / Ryzen with Radeon...
- Game experience: TURBO M.2 with M.2 Shield, StoreMI, AMD Turbo USB 3.1 GEN2
- Audio Boost 4...
- MULTI-GPU: with STEEL ARMOR PCI-E slots; supports 2-Way AMD Crossfire
- BIOS FLASHBACK+: simply use a USB...
The package contains the files needed for installing the chipset driver. If it has already been installed, updating (overwrite-installing)...
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/diacanFperku/AutoGPT/Crack No Cable Rocksmith 2014 Torrent.md b/spaces/diacanFperku/AutoGPT/Crack No Cable Rocksmith 2014 Torrent.md
deleted file mode 100644
index 7c003437d916254963556c93ce44e2a30e5c9df2..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Crack No Cable Rocksmith 2014 Torrent.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
- 3cee63e6c2
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats REPACK.md b/spaces/diacanFperku/AutoGPT/FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats REPACK.md
deleted file mode 100644
index 3a40773df733312c5546d7b69c332760753b7589..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats REPACK.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
your house prices at a time of your pandemic in the united states have appeared to drop by an average of around $1,000-$2, 000. a faucet dimension sized by idaho is put on in the beginning of the pandemic and also the condition portion is reduced to 7.2%, the smallest reported at this time.6% represents one of the worst situations in 10 years, immediately after indicating a 42.3% decrease having its starting point the 1918 influenza pandemic. boston is deemed to be the worst metropolis below in which there are numerous region locations which have already been quarantined.
-
FS2004 Active Camera 2.0 With Cracked DLL For FS9.1 Tournament Cheats
-
-Students Book 1,200 Pages. Chris Redston Face2face Elementary Students Book.pdf 4fefd39f24
-
-
-
diff --git a/spaces/diacanFperku/AutoGPT/Loaris Trojan Remover 3.0.76.211 Patch.md b/spaces/diacanFperku/AutoGPT/Loaris Trojan Remover 3.0.76.211 Patch.md
deleted file mode 100644
index d6a9bce94f5b7718ed64099d3f6d354f8efcd0c3..0000000000000000000000000000000000000000
--- a/spaces/diacanFperku/AutoGPT/Loaris Trojan Remover 3.0.76.211 Patch.md
+++ /dev/null
@@ -1,141 +0,0 @@
-
-
Loaris Trojan Remover 3.0.76.211 Patch: A Comprehensive Review
-
If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer, you might want to consider Loaris Trojan Remover 3.0.76.211 Patch. This is a powerful program that can scan your system, detect and eliminate all kinds of threats, and optimize your PC performance.
In this article, we will review the main features, benefits, and drawbacks of Loaris Trojan Remover 3.0.76.211 Patch, and help you decide if it is worth downloading and installing.
-
-
What is Loaris Trojan Remover 3.0.76.211 Patch?
-
Loaris Trojan Remover 3.0.76.211 Patch is a program that can help you get rid of various types of threats that often try to penetrate your system while you are online. It can prevent trojans, spyware, hacking programs, and other unwanted software from invading your PC.
-
The program works on a simple principle: it checks system files, scans the registry, and examines the applications that are activated when Windows starts. It can also scan your hard disk and reveal vulnerabilities and hidden programs in your system.
-
The program's actions are aimed at blocking and removing threats. After starting, you can select the scan mode, after which all threats will be found and deleted.
-
-
-
What are the main features of Loaris Trojan Remover 3.0.76.211 Patch?
-
Loaris Trojan Remover 3.0.76.211 Patch has many features that make it a powerful and versatile tool for malware removal and PC optimization.
-
-
It can detect and remove all kinds of threats, including trojans, worms, rootkits, ransomware, keyloggers, adware, spyware, etc.
-
It can block advertising banners and any other type of aggressive advertising that can interfere with your browsing experience.
-
It can close access to potentially dangerous files that can harm your system or compromise your privacy.
-
It can use new technologies that will not allow the threat to harm the operating system, as they will be destroyed before they can penetrate.
-
It can delete files that do not want to be deleted by simple methods, such as stubborn viruses that are deeply embedded in the system.
-
It can navigate in the network and remain invisible, which will protect you from hacker attacks.
-
It can conduct a full analysis of all emerging threats and provide you with detailed reports.
-
It can clean and optimize your PC by removing unnecessary files, fixing registry errors, defragmenting disks, etc.
-
It has a user-friendly interface with a large number of settings that allow you to customize the program according to your needs.
-
It has a multilingual support that includes English and Russian languages.
-
-
-
What are the benefits of Loaris Trojan Remover 3.0.76.211 Patch?
-
Loaris Trojan Remover 3.0.76.211 Patch has many benefits that make it a worthwhile choice for anyone who wants to keep their PC safe and fast.
-
-
It can provide you with comprehensive protection against various types of threats that can damage your data or steal your identity.
-
It can improve your browsing experience by blocking annoying ads and pop-ups that can slow down your internet connection or redirect you to malicious sites.
-
It can enhance your PC performance by removing junk files, fixing errors, and optimizing settings that can speed up your system.
-
It can save you time and money by preventing you from buying a new PC or paying for expensive repairs or antivirus subscriptions.
-
It can give you peace of mind by ensuring that your PC is clean and secure at all times.
-
-
-
What are the drawbacks of Loaris Trojan Remover 3.0.76.211 Patch?
-
Loaris Trojan Remover 3.0.76.211 Patch is not a perfect software and it has some drawbacks that you should be aware of before downloading and installing it.
-
-
It is not a full-fledged antivirus and it cannot replace your existing antivirus software. It is designed to complement your antivirus by removing the threats that it might miss or cannot handle.
-
It is not free and it requires a license key to activate its full functionality. You can download a trial version that will allow you to scan your PC but not remove any threats.
-
It might cause some false positives or conflicts with some legitimate programs or files that it might mistake for threats.
-
It might require some technical knowledge or skills to use some of its advanced features or settings.
-
-
-
How to download and install Loaris Trojan Remover 3.0.76.211 Patch?
-
If you want to download and install Loaris Trojan Remover 3.0.76.211 Patch on your PC, you need to follow these steps:
-
-
Go to the official website of Loaris Trojan Remover (https://loaris.app/) and click on the "Download" button.
-
Select the version that suits your operating system (Windows XP/Vista/7/8/10) and click on the "Download Now" button.
-
Save the file (loarist_setup.exe) on your computer and run it as an administrator.
-
Follow the instructions on the screen to complete the installation process.
-
To activate the full functionality of the program, you need to purchase a license key from the official website or from other authorized sources.
-
Enter the license key in the program's interface and click on the "Activate" button.
-
-
-
Conclusion
-
Loaris Trojan Remover 3.0.76.211 Patch is a powerful and effective tool that can help you remove trojans, malware, adware, and other unwanted software from your computer and optimize its performance.
-
The program has many features that make it versatile and user-friendly.
-
The program has many benefits that make it worthwhile for anyone who wants to keep their PC safe and fast.
-
The program has some drawbacks that you should be aware of before downloading and installing it.
-
The program is not free and it requires a license key to activate its full functionality.
-
If you want to download and install Loaris Trojan Remover 3.0.76.211 Patch on your PC, you need to follow some simple steps.
-
If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer, download Loaris Trojan Remover 3
-
How to use Loaris Trojan Remover 3.0.76.211 Patch?
-
Using Loaris Trojan Remover 3.0.76.211 Patch is very easy and straightforward. You just need to follow these steps:
-
-
Launch the program and click on the "Scan" button.
-
Choose the scan mode that suits your needs: standard, full, custom, or removable.
-
Wait for the scan to finish and review the results.
-
Select the items that you want to remove and click on the "Apply" button.
-
Restart your computer if necessary.
-
-
You can also access other features of the program from the main interface, such as:
-
-
The "Tools" tab, where you can manage your startup items, browser settings, host files, Windows updates, etc.
-
The "Settings" tab, where you can adjust the program's options, such as scan parameters, update frequency, language, etc.
-
The "Log files" tab, where you can view the history of your scans and actions.
-
The "Help" tab, where you can get support from the developers or contact them for feedback.
-
-
-
What are the alternatives to Loaris Trojan Remover 3.0.76.211 Patch?
-
Loaris Trojan Remover 3.0.76.211 Patch is not the only software that can help you remove trojans, malware, adware, and other unwanted software from your computer. There are some alternatives that you can try if you are not satisfied with Loaris or if you want to compare different options.
-
Some of the most popular alternatives are:
-
-
Malwarebytes: This is a well-known and trusted software that can detect and remove various types of malware, including trojans, ransomware, spyware, adware, etc. It also offers real-time protection, web protection, exploit protection, etc.
-
HitmanPro: This is a lightweight and fast software that can scan your computer for malware and remove it in minutes. It uses cloud-based technology and behavioral analysis to detect and eliminate threats that other programs might miss.
-
Zemana AntiMalware: This is a simple and effective software that can scan your computer for malware and remove it with one click. It also offers real-time protection, ransomware protection, browser cleanup, etc.
-
-
-
Is Loaris Trojan Remover 3.0.76.211 Patch worth it?
-
Loaris Trojan Remover 3.0.76.211 Patch is a powerful and effective tool that can help you remove trojans, malware, adware, and other unwanted software from your computer and optimize its performance.
-
The program has many features that make it versatile and user-friendly.
-
The program has many benefits that make it worthwhile for anyone who wants to keep their PC safe and fast.
-
The program has some drawbacks that you should be aware of before downloading and installing it.
-
The program is not free and it requires a license key to activate its full functionality.
-
If you are looking for a reliable and effective tool to remove trojans, malware, adware, and other unwanted software from your computer, download Loaris Trojan Remover 3
-
- """, unsafe_allow_html=True)
- html(f"""
-
-
- """, width=None, height=42, scrolling=False)
-
-
-st.title("Scientific Question Answering with Citations")
-
-st.write("""
-Ask a scientific question and get an answer drawn from [scite.ai](https://scite.ai) corpus of over 1.1bn citation statements.
-Answers are linked to source documents containing citations where users can explore further evidence from scientific literature for the answer.
-
-For example try: Do tanning beds cause cancer?
-""")
-
-st.markdown("""
-
-""", unsafe_allow_html=True)
-
-# with st.expander("Settings (strictness, context limit, top hits)"):
-# concat_passages = st.radio(
-# "Concatenate passages as one long context?",
-# ('yes', 'no'))
-# present_impossible = st.radio(
-# "Present impossible answers? (if the model thinks its impossible to answer should it still try?)",
-# ('yes', 'no'))
-# support_all = st.radio(
-# "Use abstracts and titles as a ranking signal (if the words are matched in the abstract then the document is more relevant)?",
-# ('no', 'yes'))
-# support_abstracts = st.radio(
-# "Use abstracts as a source document?",
-# ('yes', 'no', 'abstract only'))
-# strict_lenient_mix = st.radio(
-# "Type of strict+lenient combination: Fallback or Mix? If fallback, strict is run first then if the results are less than context_lim we also search lenient. Mix will search them both and let reranking sort em out",
-# ('mix', 'fallback'))
-# confidence_threshold = st.slider('Confidence threshold for answering questions? This number represents how confident the model should be in the answers it gives. The number is out of 100%', 0, 100, 1)
-# use_reranking = st.radio(
-# "Use Reranking? Reranking will rerank the top hits using semantic similarity of document and query.",
-# ('yes', 'no'))
-# top_hits_limit = st.slider('Top hits? How many documents to use for reranking. Larger is slower but higher quality', 10, 300, 100)
-# context_lim = st.slider('Context limit? How many documents to use for answering from. Larger is slower but higher quality', 10, 300, 25)
-
-# def paraphrase(text, max_length=128):
-# input_ids = queryexp_tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
-# generated_ids = queryexp_model.generate(input_ids=input_ids, num_return_sequences=suggested_queries or 5, num_beams=suggested_queries or 5, max_length=max_length)
-# queries = set([queryexp_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in generated_ids])
-# preds = '\n * '.join(queries)
-# return preds
-
-
-def group_results_by_context(results):
- result_groups = {}
- for result in results:
- if result['context'] not in result_groups:
- result_groups[result['context']] = result
- result_groups[result['context']]['texts'] = []
-
- result_groups[result['context']]['texts'].append(
- result['answer']
- )
- if result['score'] > result_groups[result['context']]['score']:
- result_groups[result['context']]['score'] = result['score']
- return list(result_groups.values())
-
-
-def matched_context(start_i, end_i, contexts_string, seperator='---'):
- # find seperators to identify start and end
- doc_starts = [0]
- for match in re.finditer(seperator, contexts_string):
- doc_starts.append(match.end())
-
- for i in range(len(doc_starts)):
- if i == len(doc_starts) - 1:
- if start_i >= doc_starts[i]:
- return contexts_string[doc_starts[i]:len(contexts_string)].replace(seperator, '')
-
- if start_i >= doc_starts[i] and end_i <= doc_starts[i+1]:
- return contexts_string[doc_starts[i]:doc_starts[i+1]].replace(seperator, '')
- return None
-
-
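A minimal usage sketch for `matched_context` above, with made-up passages and offsets (the passage text, answer span, and separator below are illustrative, not from the original app):

```python
# Hypothetical passages joined with the same '---' separator the function expects.
contexts = "Tanning beds emit UV radiation.---Indoor tanning increases melanoma risk."
answer = "melanoma risk"

start = contexts.index(answer)   # character offset where the answer span begins
end = start + len(answer)        # character offset where it ends

# The span lies after the first separator, so the second passage is returned.
print(matched_context(start, end, contexts))
```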
-# def run_query_full(query, progress_bar):
-# # if use_query_exp == 'yes':
-# # query_exp = paraphrase(f"question2question: {query}")
-# # st.markdown(f"""
-# # If you are not getting good results try one of:
-# # * {query_exp}
-# # """)
-
-# # could also try fallback if there are no good answers by score...
-# limit = top_hits_limit or 100
-# context_limit = context_lim or 10
-# contexts_strict, orig_docs_strict = search(query, limit=limit, strict=True, all_mode=support_all == 'yes', abstracts= support_abstracts == 'yes', abstract_only=support_abstracts == 'abstract only')
-# if strict_lenient_mix == 'fallback' and len(contexts_strict) < context_limit:
-# contexts_lenient, orig_docs_lenient = search(query, limit=limit, strict=False, all_mode=support_all == 'yes', abstracts= support_abstracts == 'yes', abstract_only= support_abstracts == 'abstract only')
-# contexts = list(
-# set(contexts_strict + contexts_lenient)
-# )
-# orig_docs = orig_docs_strict + orig_docs_lenient
-# elif strict_lenient_mix == 'mix':
-# contexts_lenient, orig_docs_lenient = search(query, limit=limit, strict=False)
-# contexts = list(
-# set(contexts_strict + contexts_lenient)
-# )
-# orig_docs = orig_docs_strict + orig_docs_lenient
-# else:
-# contexts = list(
-# set(contexts_strict)
-# )
-# orig_docs = orig_docs_strict
-# progress_bar.progress(25)
-
-# if len(contexts) == 0 or not ''.join(contexts).strip():
-# return st.markdown("""
-#
-#
-#
- # Sorry... no results for that question! Try another...
- #
- Sorry... no results for that question! Try another...
-
-
-
- """, unsafe_allow_html=True)
-
- for r in resp['results']:
- ctx = remove_html(r["context"])
- for answer in r['texts']:
- ctx = ctx.replace(answer.strip(), f"{answer.strip()}")
-    if (mean < a - 2 * std) or (mean > b + 2 * std):
- warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
- "The distribution of values may be incorrect.",
- stacklevel=2)
-
- with torch.no_grad():
- # Values are generated by using a truncated uniform distribution and
- # then using the inverse CDF for the normal distribution.
- # Get upper and lower cdf values
- l = norm_cdf((a - mean) / std)
- u = norm_cdf((b - mean) / std)
-
- # Uniformly fill tensor with values from [l, u], then translate to
- # [2l-1, 2u-1].
- tensor.uniform_(2 * l - 1, 2 * u - 1)
-
- # Use inverse cdf transform for normal distribution to get truncated
- # standard normal
- tensor.erfinv_()
-
- # Transform to proper mean, std
- tensor.mul_(std * math.sqrt(2.))
- tensor.add_(mean)
-
- # Clamp to ensure it's in the proper range
- tensor.clamp_(min=a, max=b)
- return tensor
-
-
-def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
- r"""Fills the input Tensor with values drawn from a truncated
- normal distribution. The values are effectively drawn from the
- normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
- with values outside :math:`[a, b]` redrawn until they are within
- the bounds. The method used for generating the random values works
- best when :math:`a \leq \text{mean} \leq b`.
- Args:
- tensor: an n-dimensional `torch.Tensor`
- mean: the mean of the normal distribution
- std: the standard deviation of the normal distribution
- a: the minimum cutoff value
- b: the maximum cutoff value
- Examples:
- >>> w = torch.empty(3, 5)
- >>> nn.init.trunc_normal_(w)
- """
- return _no_grad_trunc_normal_(tensor, mean, std, a, b)
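A minimal sanity-check sketch for the initializer above (illustrative only; it relies on the module's own imports of torch, math, and warnings):

```python
import torch

w = torch.empty(3, 256)
trunc_normal_(w, mean=0.0, std=0.02, a=-2.0, b=2.0)

# Every value is clamped into [a, b]; with bounds this wide the sample mean and
# std stay close to the requested 0.0 and 0.02.
assert w.min().item() >= -2.0 and w.max().item() <= 2.0
print(w.mean().item(), w.std().item())
```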
diff --git a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/sanskrit.py b/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
deleted file mode 100644
index 0223aaac384a2f850f5bc20651fc18eb964607d0..0000000000000000000000000000000000000000
--- a/spaces/eIysia/VITS-Umamusume-voice-synthesizer/text/sanskrit.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import re
-from indic_transliteration import sanscript
-
-
-# List of (iast, ipa) pairs:
-_iast_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
- ('a', 'ə'),
- ('ā', 'aː'),
- ('ī', 'iː'),
- ('ū', 'uː'),
- ('ṛ', 'ɹ`'),
- ('ṝ', 'ɹ`ː'),
- ('ḷ', 'l`'),
- ('ḹ', 'l`ː'),
- ('e', 'eː'),
- ('o', 'oː'),
- ('k', 'k⁼'),
- ('k⁼h', 'kʰ'),
- ('g', 'g⁼'),
- ('g⁼h', 'gʰ'),
- ('ṅ', 'ŋ'),
- ('c', 'ʧ⁼'),
- ('ʧ⁼h', 'ʧʰ'),
- ('j', 'ʥ⁼'),
- ('ʥ⁼h', 'ʥʰ'),
- ('ñ', 'n^'),
- ('ṭ', 't`⁼'),
- ('t`⁼h', 't`ʰ'),
- ('ḍ', 'd`⁼'),
- ('d`⁼h', 'd`ʰ'),
- ('ṇ', 'n`'),
- ('t', 't⁼'),
- ('t⁼h', 'tʰ'),
- ('d', 'd⁼'),
- ('d⁼h', 'dʰ'),
- ('p', 'p⁼'),
- ('p⁼h', 'pʰ'),
- ('b', 'b⁼'),
- ('b⁼h', 'bʰ'),
- ('y', 'j'),
- ('ś', 'ʃ'),
- ('ṣ', 's`'),
- ('r', 'ɾ'),
- ('l̤', 'l`'),
- ('h', 'ɦ'),
- ("'", ''),
- ('~', '^'),
- ('ṃ', '^')
-]]
-
-
-def devanagari_to_ipa(text):
- text = text.replace('ॐ', 'ओम्')
- text = re.sub(r'\s*।\s*$', '.', text)
- text = re.sub(r'\s*।\s*', ', ', text)
- text = re.sub(r'\s*॥', '.', text)
- text = sanscript.transliterate(text, sanscript.DEVANAGARI, sanscript.IAST)
- for regex, replacement in _iast_to_ipa:
- text = re.sub(regex, replacement, text)
- text = re.sub('(.)[`ː]*ḥ', lambda x: x.group(0)
- [:-1]+'h'+x.group(1)+'*', text)
- return text
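A brief usage sketch for `devanagari_to_ipa` above (it assumes the indic_transliteration package is installed; the input word is just an example):

```python
# "नमस्ते" transliterates to IAST "namaste" and is then mapped through the
# substitution table above to an IPA-style string.
print(devanagari_to_ipa("नमस्ते"))
```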
diff --git a/spaces/ealbinu/automatic-speech-recognition/app.py b/spaces/ealbinu/automatic-speech-recognition/app.py
deleted file mode 100644
index 9746c32bf5175cd7a8d5dace31ae5581a24df98b..0000000000000000000000000000000000000000
--- a/spaces/ealbinu/automatic-speech-recognition/app.py
+++ /dev/null
@@ -1,391 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
-#
-# See LICENSE for clarification regarding multiple authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# References:
-# https://gradio.app/docs/#dropdown
-
-import base64
-import logging
-import os
-import tempfile
-import time
-from datetime import datetime
-
-import gradio as gr
-import torch
-import torchaudio
-import urllib.request
-
-
-from examples import examples
-from model import decode, get_pretrained_model, language_to_models, sample_rate
-
-languages = list(language_to_models.keys())
-
-
-def convert_to_wav(in_filename: str) -> str:
- """Convert the input audio file to a wave file"""
- out_filename = in_filename + ".wav"
- logging.info(f"Converting '{in_filename}' to '{out_filename}'")
- _ = os.system(f"ffmpeg -hide_banner -i '{in_filename}' -ar 16000 '{out_filename}'")
- _ = os.system(
- f"ffmpeg -hide_banner -loglevel error -i '{in_filename}' -ar 16000 '{out_filename}.flac'"
- )
-
- with open(out_filename + ".flac", "rb") as f:
- s = "\n" + out_filename + "\n"
- s += base64.b64encode(f.read()).decode()
- logging.info(s)
-
- return out_filename
-
-
-def build_html_output(s: str, style: str = "result_item_success"):
- return f"""
-
-
- {s}
-
-
- """
-
-def process_url(
- language: str,
- repo_id: str,
- decoding_method: str,
- num_active_paths: int,
- url: str,
-):
- logging.info(f"Processing URL: {url}")
- with tempfile.NamedTemporaryFile() as f:
- try:
- urllib.request.urlretrieve(url, f.name)
-
- return process(
- in_filename=f.name,
- language=language,
- repo_id=repo_id,
- decoding_method=decoding_method,
- num_active_paths=num_active_paths,
- )
- except Exception as e:
- logging.info(str(e))
- return "", build_html_output(str(e), "result_item_error")
-
-def process_uploaded_file(
- language: str,
- repo_id: str,
- decoding_method: str,
- num_active_paths: int,
- in_filename: str,
-):
- if in_filename is None or in_filename == "":
- return "", build_html_output(
- "Please first upload a file and then click "
- 'the button "submit for recognition"',
- "result_item_error",
- )
-
- logging.info(f"Processing uploaded file: {in_filename}")
- try:
- return process(
- in_filename=in_filename,
- language=language,
- repo_id=repo_id,
- decoding_method=decoding_method,
- num_active_paths=num_active_paths,
- )
- except Exception as e:
- logging.info(str(e))
- return "", build_html_output(str(e), "result_item_error")
-
-
-def process_microphone(
- language: str,
- repo_id: str,
- decoding_method: str,
- num_active_paths: int,
- in_filename: str,
-):
- if in_filename is None or in_filename == "":
- return "", build_html_output(
- "Please first click 'Record from microphone', speak, "
- "click 'Stop recording', and then "
- "click the button 'submit for recognition'",
- "result_item_error",
- )
-
- logging.info(f"Processing microphone: {in_filename}")
- try:
- return process(
- in_filename=in_filename,
- language=language,
- repo_id=repo_id,
- decoding_method=decoding_method,
- num_active_paths=num_active_paths,
- )
- except Exception as e:
- logging.info(str(e))
- return "", build_html_output(str(e), "result_item_error")
-
-
-@torch.no_grad()
-def process(
- language: str,
- repo_id: str,
- decoding_method: str,
- num_active_paths: int,
- in_filename: str,
-):
- logging.info(f"language: {language}")
- logging.info(f"repo_id: {repo_id}")
- logging.info(f"decoding_method: {decoding_method}")
- logging.info(f"num_active_paths: {num_active_paths}")
- logging.info(f"in_filename: {in_filename}")
-
- filename = convert_to_wav(in_filename)
-
- now = datetime.now()
- date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
- logging.info(f"Started at {date_time}")
-
- start = time.time()
-
- recognizer = get_pretrained_model(
- repo_id,
- decoding_method=decoding_method,
- num_active_paths=num_active_paths,
- )
-
- text = decode(recognizer, filename)
-
-    now = datetime.now()  # refresh the timestamp so the finish log is accurate
-    date_time = now.strftime("%Y-%m-%d %H:%M:%S.%f")
- end = time.time()
-
- metadata = torchaudio.info(filename)
- duration = metadata.num_frames / sample_rate
- rtf = (end - start) / duration
-
- logging.info(f"Finished at {date_time} s. Elapsed: {end - start: .3f} s")
-
- info = f"""
- Wave duration : {duration: .3f} s
- Processing time: {end - start: .3f} s
- RTF: {end - start: .3f}/{duration: .3f} = {rtf:.3f}
- """
- if rtf > 1:
- info += (
- " We are loading the model for the first run. "
- "Please run again to measure the real RTF. "
- )
-
- logging.info(info)
- logging.info(f"\nrepo_id: {repo_id}\nhyp: {text}")
-
- return text, build_html_output(info)
-
-
-title = "# Automatic Speech Recognition with Next-gen Kaldi"
-description = """
-This space shows how to do automatic speech recognition with Next-gen Kaldi.
-
-Please visit
-
-for streaming speech recognition with **Next-gen Kaldi**.
-
-It is running on CPU within a docker container provided by Hugging Face.
-
-See more information by visiting the following links:
-
--
--
--
--
-
-If you want to deploy it locally, please see
-
-"""
-
-# css style is copied from
-# https://huggingface.co/spaces/alphacep/asr/blob/main/app.py#L113
-css = """
-.result {display:flex;flex-direction:column}
-.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%}
-.result_item_success {background-color:mediumaquamarine;color:white;align-self:start}
-.result_item_error {background-color:#ff7070;color:white;align-self:start}
-"""
-
-
-def update_model_dropdown(language: str):
- if language in language_to_models:
- choices = language_to_models[language]
- return gr.Dropdown.update(choices=choices, value=choices[0])
-
- raise ValueError(f"Unsupported language: {language}")
-
-
-demo = gr.Blocks(css=css)
-
-
-with demo:
- gr.Markdown(title)
- language_choices = list(language_to_models.keys())
-
- language_radio = gr.Radio(
- label="Language",
- choices=language_choices,
- value=language_choices[0],
- )
- model_dropdown = gr.Dropdown(
- choices=language_to_models[language_choices[0]],
- label="Select a model",
- value=language_to_models[language_choices[0]][0],
- )
-
- language_radio.change(
- update_model_dropdown,
- inputs=language_radio,
- outputs=model_dropdown,
- )
-
- decoding_method_radio = gr.Radio(
- label="Decoding method",
- choices=["greedy_search", "modified_beam_search"],
- value="greedy_search",
- )
-
- num_active_paths_slider = gr.Slider(
- minimum=1,
- value=4,
- step=1,
- label="Number of active paths for modified_beam_search",
- )
-
- with gr.Tabs():
- with gr.TabItem("Upload from disk"):
- uploaded_file = gr.Audio(
- source="upload", # Choose between "microphone", "upload"
- type="filepath",
- optional=False,
- label="Upload from disk",
- )
- upload_button = gr.Button("Submit for recognition")
- uploaded_output = gr.Textbox(label="Recognized speech from uploaded file")
- uploaded_html_info = gr.HTML(label="Info")
-
- gr.Examples(
- examples=examples,
- inputs=[
- language_radio,
- model_dropdown,
- decoding_method_radio,
- num_active_paths_slider,
- uploaded_file,
- ],
- outputs=[uploaded_output, uploaded_html_info],
- fn=process_uploaded_file,
- )
-
- with gr.TabItem("Record from microphone"):
- microphone = gr.Audio(
- source="microphone", # Choose between "microphone", "upload"
- type="filepath",
- optional=False,
- label="Record from microphone",
- )
-
- record_button = gr.Button("Submit for recognition")
- recorded_output = gr.Textbox(label="Recognized speech from recordings")
- recorded_html_info = gr.HTML(label="Info")
-
- gr.Examples(
- examples=examples,
- inputs=[
- language_radio,
- model_dropdown,
- decoding_method_radio,
- num_active_paths_slider,
- microphone,
- ],
- outputs=[recorded_output, recorded_html_info],
- fn=process_microphone,
- )
-
- with gr.TabItem("From URL"):
- url_textbox = gr.Textbox(
- max_lines=1,
- placeholder="URL to an audio file",
- label="URL",
- interactive=True,
- )
-
- url_button = gr.Button("Submit for recognition")
- url_output = gr.Textbox(label="Recognized speech from URL")
- url_html_info = gr.HTML(label="Info")
-
- upload_button.click(
- process_uploaded_file,
- inputs=[
- language_radio,
- model_dropdown,
- decoding_method_radio,
- num_active_paths_slider,
- uploaded_file,
- ],
- outputs=[uploaded_output, uploaded_html_info],
- )
-
- record_button.click(
- process_microphone,
- inputs=[
- language_radio,
- model_dropdown,
- decoding_method_radio,
- num_active_paths_slider,
- microphone,
- ],
- outputs=[recorded_output, recorded_html_info],
- )
-
- url_button.click(
- process_url,
- inputs=[
- language_radio,
- model_dropdown,
- decoding_method_radio,
- num_active_paths_slider,
- url_textbox,
- ],
- outputs=[url_output, url_html_info],
- )
-
- gr.Markdown(description)
-
-torch.set_num_threads(1)
-torch.set_num_interop_threads(1)
-
-torch._C._jit_set_profiling_executor(False)
-torch._C._jit_set_profiling_mode(False)
-torch._C._set_graph_executor_optimize(False)
-
-if __name__ == "__main__":
- formatter = "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
-
- logging.basicConfig(format=formatter, level=logging.INFO)
-
- demo.launch()
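Stripped of the Gradio UI, the recognition path above reduces to roughly this sketch (it assumes the space's local `model` module is importable and that `test.wav` is a placeholder for a 16 kHz recording):

```python
from model import decode, get_pretrained_model, language_to_models

language = list(language_to_models.keys())[0]   # first supported language
repo_id = language_to_models[language][0]       # first model registered for it

recognizer = get_pretrained_model(
    repo_id,
    decoding_method="greedy_search",
    num_active_paths=4,
)
print(decode(recognizer, "test.wav"))           # placeholder path to a wav file
```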
diff --git a/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/__init__.py b/spaces/emc348/faces-through-time/models/StyleCLIP/models/facial_recognition/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/spaces/epsilonator/euclidean_distance/README.md b/spaces/epsilonator/euclidean_distance/README.md
deleted file mode 100644
index 7766cb06314cd2b18b9d290a0b39d5a7828e9018..0000000000000000000000000000000000000000
--- a/spaces/epsilonator/euclidean_distance/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
----
-title: Euclidean Distance
-emoji: 🏃
-colorFrom: green
-colorTo: red
-sdk: gradio
-sdk_version: 3.7
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/spaces/evaluate-metric/exact_match/exact_match.py b/spaces/evaluate-metric/exact_match/exact_match.py
deleted file mode 100644
index d8c499b3722b0bdbbf3d8a7e3d48899513f27d19..0000000000000000000000000000000000000000
--- a/spaces/evaluate-metric/exact_match/exact_match.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Exact Match metric."""
-import re
-import string
-
-import datasets
-import numpy as np
-
-import evaluate
-
-
-_DESCRIPTION = """
-Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
-"""
-
-_KWARGS_DESCRIPTION = """
-Args:
- predictions: List of predicted texts.
- references: List of reference texts.
- regexes_to_ignore: List, defaults to None. Regex expressions of characters to
- ignore when calculating the exact matches. Note: these regexes are removed
- from the input data before the changes based on the options below (e.g. ignore_case,
- ignore_punctuation, ignore_numbers) are applied.
- ignore_case: Boolean, defaults to False. If true, turns everything
- to lowercase so that capitalization differences are ignored.
- ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
- comparing predictions and references.
-    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
- comparing predictions and references.
-Returns:
- exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 1.0, inclusive.
-Examples:
- >>> exact_match = evaluate.load("exact_match")
- >>> refs = ["the cat", "theater", "YELLING", "agent007"]
- >>> preds = ["cat?", "theater", "yelling", "agent"]
- >>> results = exact_match.compute(references=refs, predictions=preds)
- >>> print(round(results["exact_match"], 2))
- 0.25
-
- >>> exact_match = evaluate.load("exact_match")
- >>> refs = ["the cat", "theater", "YELLING", "agent007"]
- >>> preds = ["cat?", "theater", "yelling", "agent"]
- >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
- >>> print(round(results["exact_match"], 2))
- 0.5
-
-
- >>> exact_match = evaluate.load("exact_match")
- >>> refs = ["the cat", "theater", "YELLING", "agent007"]
- >>> preds = ["cat?", "theater", "yelling", "agent"]
- >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
- >>> print(round(results["exact_match"], 2))
- 0.75
-
- >>> exact_match = evaluate.load("exact_match")
- >>> refs = ["the cat", "theater", "YELLING", "agent007"]
- >>> preds = ["cat?", "theater", "yelling", "agent"]
- >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
- >>> print(round(results["exact_match"], 2))
- 1.0
-
- >>> exact_match = evaluate.load("exact_match")
- >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
- >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
- >>> results = exact_match.compute(references=refs, predictions=preds)
- >>> print(round(results["exact_match"], 2))
- 0.33
-"""
-
-_CITATION = """
-"""
-
-
-@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
-class ExactMatch(evaluate.Metric):
- def _info(self):
- return evaluate.MetricInfo(
- description=_DESCRIPTION,
- citation=_CITATION,
- inputs_description=_KWARGS_DESCRIPTION,
- features=datasets.Features(
- {
- "predictions": datasets.Value("string", id="sequence"),
- "references": datasets.Value("string", id="sequence"),
- }
- ),
- reference_urls=[],
- )
-
- def _compute(
- self,
- predictions,
- references,
- regexes_to_ignore=None,
- ignore_case=False,
- ignore_punctuation=False,
- ignore_numbers=False,
- ):
-
- if regexes_to_ignore is not None:
- for s in regexes_to_ignore:
- predictions = np.array([re.sub(s, "", x) for x in predictions])
- references = np.array([re.sub(s, "", x) for x in references])
- else:
- predictions = np.asarray(predictions)
- references = np.asarray(references)
-
- if ignore_case:
- predictions = np.char.lower(predictions)
- references = np.char.lower(references)
-
- if ignore_punctuation:
- repl_table = string.punctuation.maketrans("", "", string.punctuation)
- predictions = np.char.translate(predictions, table=repl_table)
- references = np.char.translate(references, table=repl_table)
-
- if ignore_numbers:
- repl_table = string.digits.maketrans("", "", string.digits)
- predictions = np.char.translate(predictions, table=repl_table)
- references = np.char.translate(references, table=repl_table)
-
- score_list = predictions == references
-
- return {"exact_match": np.mean(score_list)}
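To see what the normalization flags do, here is a plain-NumPy walk-through of the second docstring example outside the `evaluate` wrapper (an illustrative sketch that mirrors `_compute` above):

```python
import re
import string

import numpy as np

preds = np.array(["cat?", "theater", "yelling", "agent"])
refs = np.array(["the cat", "theater", "YELLING", "agent007"])

# regexes_to_ignore=["the ", "yell"]: stripped before any case folding.
for pattern in ["the ", "yell"]:
    preds = np.array([re.sub(pattern, "", x) for x in preds])
    refs = np.array([re.sub(pattern, "", x) for x in refs])

# ignore_case=True and ignore_punctuation=True.
preds, refs = np.char.lower(preds), np.char.lower(refs)
table = string.punctuation.maketrans("", "", string.punctuation)
preds = np.char.translate(preds, table=table)
refs = np.char.translate(refs, table=table)

print(np.mean(preds == refs))  # 0.5, matching the docstring example
```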
diff --git a/spaces/failfast/2D-GameCreator/src/lib/theme.ts b/spaces/failfast/2D-GameCreator/src/lib/theme.ts
deleted file mode 100644
index e31ce5292b1103b7ef462c7b1a7efabc255508fa..0000000000000000000000000000000000000000
--- a/spaces/failfast/2D-GameCreator/src/lib/theme.ts
+++ /dev/null
@@ -1,79 +0,0 @@
-import { Fira_Code, Poppins } from "next/font/google";
-import { experimental_extendTheme as extendTheme, Theme } from "@mui/material/styles";
-
-export const poppins = Poppins({
- weight: ["300", "400", "500", "700"],
- subsets: ["latin"],
- display: "swap",
- fallback: ["Helvetica", "Arial", "sans-serif"],
-});
-
-const theme = extendTheme({
- colorSchemes: {
- light: {
- palette: {
- primary: {
- main: "#2c90fc",
- },
- secondary: {
- main: "#b827fc",
- },
- },
- },
- dark: {
- palette: {
- primary: {
- main: "#2c90fc",
- },
- secondary: {
- main: "#b827fc",
- },
- text: {
- secondary: "#ffffff",
- },
- },
- },
- },
- typography: {
- ...poppins.style,
- h1: {
- fontSize: "5em",
- },
- },
- components: {
- MuiLink: {
- styleOverrides: {
- root: {
- textDecoration: "none",
- ":hover": {
- textDecoration: "underline",
- },
- },
- },
- },
- MuiListSubheader: {
- styleOverrides: {
- root: {
- fontSize: "1.35rem",
- },
- },
- },
- MuiButton: {
- styleOverrides: {
- startIcon: ({ ownerState }) => ({
- ...(ownerState.children
- ? {}
- : {
- // if no button label, center icon (e.g mobile)
- marginRight: 0,
- }),
- }),
- },
- },
- },
-});
-
-export default theme;
-export const fontMono = Fira_Code({
- subsets: ["latin"],
-});
diff --git a/spaces/falterWliame/Face_Mask_Detection/Crack WORK >> Harry Potter And The Deathly Hallows - Razor 1911 Hack Tool.md b/spaces/falterWliame/Face_Mask_Detection/Crack WORK >> Harry Potter And The Deathly Hallows - Razor 1911 Hack Tool.md
deleted file mode 100644
index caab60704f79e9da02074b910070d03d47622583..0000000000000000000000000000000000000000
--- a/spaces/falterWliame/Face_Mask_Detection/Crack WORK >> Harry Potter And The Deathly Hallows - Razor 1911 Hack Tool.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
CRACK >> Harry Potter And The Deathly Hallows - Razor 1911 hack tool
-
- aaccfb2cb3
-
-
-
diff --git a/spaces/gotiQspiryo/whisper-ui/examples/Constantine 2 full movie in hindi free download Find out what happens next in the saga of the anti-hero.md b/spaces/gotiQspiryo/whisper-ui/examples/Constantine 2 full movie in hindi free download Find out what happens next in the saga of the anti-hero.md
deleted file mode 100644
index 63ecffcdab669e13ca5cb531d8170e03dba873dd..0000000000000000000000000000000000000000
--- a/spaces/gotiQspiryo/whisper-ui/examples/Constantine 2 full movie in hindi free download Find out what happens next in the saga of the anti-hero.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
As usual, getting the most out of the game involves side quests. You will most certainly be picking up a lot of the aforementioned chests and other loot, but you're also likely to be carrying around a prize or two from the various third-party vendors you'll come across.
I must admit, I've been one of those avid players trying to make the most of their time spent wandering the wastes with their hidden blades. Now, I usually end up getting stopped before it dawns on me that I can't do anything but talk to the NPCs. I should probably stop being the bad guy here, but I can't help it. It's all worth it in the end.
-
Besides consumables like health and stamina packs, there is also a variety of health and status items for you to use in-game. The number of different versions of these items will vary depending on which faction you're playing as. Some of the better all-around items include the first aid kit, which basically does two things: it heals you and lets you use your Houdini skills at certain points along the way. The ice poisoning weapon lets you poison your enemies by firing at them, doing major damage (until your stamina runs out) and making them immobile. A third item, the explosive bag, is usually used to immobilize enemies, and after exploding, its seeds will drop on the ground if you move a bit; these can then be picked up and used to plant harvests for you later. Food and water are also provided to the player, but they aren't really necessary, since you can scrounge for them elsewhere.
-
since youll be completing quests in egypt, ancient greece and rome, theres a new universal "enchanter" skill line . this allows you to build divine and physical power attributes with a variety of different items.
- 899543212b
-
-
\ No newline at end of file
diff --git a/spaces/inplisQlawa/anything-midjourney-v4-1/Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet !FULL!.md b/spaces/inplisQlawa/anything-midjourney-v4-1/Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet !FULL!.md
deleted file mode 100644
index 0ddb14fe1c41f76ac36863bcc89ee7cb2a1c441a..0000000000000000000000000000000000000000
--- a/spaces/inplisQlawa/anything-midjourney-v4-1/Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet !FULL!.md
+++ /dev/null
@@ -1,6 +0,0 @@
-
-Breaking Bad Season S05 Complete 480p BluRay HDTV X264EncodeKingBreaking Bad Season S05 Complet
-
-Breaking bad all seasons complete bluray 720p download. ... 95ec0d2f82 breaking bad s05 season 5 complete 720p hdtv x264 publichd picktorrent. ... Breaking bad s0104 s04e01 hdtv 720p download full tv. ... Breaking bad season s05 complete 480p bluray hdtv x264encodekingbreaking bad season s05 complete 48. 1fdad05405
-
-
-
diff --git a/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat Pro Dc 2020 Crack Serial Number Latest.md b/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat Pro Dc 2020 Crack Serial Number Latest.md
deleted file mode 100644
index a179012af100bc71a42005fde68abc5b4eca31bb..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Adobe Acrobat Pro Dc 2020 Crack Serial Number Latest.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-
How to Download and Install Adobe Acrobat Pro DC 2020 Crack Serial Number Latest
-
Adobe Acrobat Pro DC 2020 is a powerful software that allows you to create, edit, convert, sign, and share PDF files. It also lets you access your PDF files from anywhere using the cloud services. If you want to download and install Adobe Acrobat Pro DC 2020 crack serial number for free, follow these steps:
-
-
Download the setup file and the patch file from here [^1^].
-
Extract the files using a file extractor such as WinRAR or 7-Zip.
-
Run the setup file and follow the instructions to install Adobe Acrobat Pro DC 2020 on your computer.
-
After the installation is complete, do not launch the program yet.
-
Copy the patch file and paste it into the installation folder of Adobe Acrobat Pro DC 2020.
-
Run the patch file as administrator and click on the patch button.
-
Wait for the patching process to finish and close the patch file.
-
Launch Adobe Acrobat Pro DC 2020 and enjoy the full version for free.
-
-
Note: You may need to disable your antivirus or firewall before running the patch file. Also, this method is only for educational purposes and we do not recommend using cracked software. You should buy the original software from here.
-
Adobe Acrobat Pro Dc 2020 Crack Serial Number {Latest}
Adobe Acrobat Pro DC 2020 is the latest version of the popular PDF software that offers many new and improved features. Some of the features are:
-
-
PDF editing: You can edit text, images, links, and other elements in your PDF files with ease. You can also compare two versions of a PDF file and highlight the differences.
-
PDF conversion: You can convert PDF files to various formats such as Word, Excel, PowerPoint, HTML, and more. You can also create PDF files from any application that supports printing.
-
PDF signing: You can sign your PDF files digitally using certificates or cloud signatures. You can also request signatures from others and track their status.
-
PDF sharing: You can share your PDF files with anyone using email, cloud services, or online platforms. You can also collaborate with others on PDF files using comments and annotations.
-
PDF accessibility: You can make your PDF files more accessible for people with disabilities using tools such as tags, headings, alt text, and more. You can also check and fix accessibility issues in your PDF files.
-
-
Adobe Acrobat Pro DC 2020 is a comprehensive and versatile software that can help you work with PDF files in various ways. It is compatible with Windows and Mac operating systems and requires a subscription to use. You can download a free trial version from here.
In this article, we have learned how to download and install Adobe Acrobat Pro DC 2020 crack serial number for free. We have also learned about some of the features of Adobe Acrobat Pro DC 2020 and how it can help us work with PDF files. However, we have also noted that using cracked software is illegal and risky. It may expose our computer to viruses, malware, or other threats. It may also violate the terms and conditions of Adobe and result in legal consequences. Therefore, we recommend that you buy the original software from Adobe and enjoy its benefits legally and safely.
-
Adobe Acrobat Pro DC 2020 is a powerful and versatile software that can help you create, edit, convert, sign, share, and access PDF files. It is compatible with Windows and Mac operating systems and requires a subscription to use. You can download a free trial version from here or buy the original software from here. We hope you found this article helpful and informative. Thank you for reading.
d5da3c52bf
-
-
\ No newline at end of file
diff --git a/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download LINK.md b/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download LINK.md
deleted file mode 100644
index 6d2ba129b7b58ed574900b6b7a8c4042320c2293..0000000000000000000000000000000000000000
--- a/spaces/inreVtussa/clothingai/Examples/Crack DWG TrueView 2016 Download LINK.md
+++ /dev/null
@@ -1,6 +0,0 @@
-